diff --git a/.gitignore b/.gitignore index 1f134b58..d8ea74d1 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,9 @@ wgpu/resources/*.so wgpu/resources/*.dylib wgpu/resources/commit-sha examples/screenshots/diffs +docs/gallery/ +docs/_static/*.whl +docs/sg_execution_times.rst # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/codegen/__init__.py b/codegen/__init__.py index d194bc4e..8ba026a7 100644 --- a/codegen/__init__.py +++ b/codegen/__init__.py @@ -1,7 +1,7 @@ import io from .utils import print, PrintToFile -from . import apiwriter, apipatcher, wgpu_native_patcher, idlparser, hparser +from . import apiwriter, apipatcher, wgpu_native_patcher, idlparser, hparser, jswriter from .files import file_cache @@ -15,6 +15,7 @@ def main(): prepare() update_api() update_wgpu_native() + update_js() file_cache.write("resources/codegen_report.md", log.getvalue()) @@ -63,3 +64,19 @@ def update_wgpu_native(): code1 = file_cache.read("backends/wgpu_native/_api.py") code2 = wgpu_native_patcher.patch_wgpu_native_backend(code1) file_cache.write("backends/wgpu_native/_api.py", code2) + + +def update_js(): + """ + Writes? (maybe updates later) the JS webgpu backend API. 
+ """ + + print("## Writing backends/js_webgpu/_api.py") + + code = jswriter.generate_js_webgpu_api() + # TODO: run the code against a patcher that adds hand written API diff methods + + file_cache.write("backends/js_webgpu/_api.py", code) + + + diff --git a/codegen/files.py b/codegen/files.py index ad1a89d1..cb95b3f5 100644 --- a/codegen/files.py +++ b/codegen/files.py @@ -35,6 +35,7 @@ class FileCache: "structs.py", "backends/wgpu_native/_api.py", "backends/wgpu_native/_mappings.py", + "backends/js_webgpu/_api.py", # TODO: maybe this file should be more like _mappings "resources/codegen_report.md", ] diff --git a/codegen/idlparser.py b/codegen/idlparser.py index dfbe6f00..38bce2f0 100644 --- a/codegen/idlparser.py +++ b/codegen/idlparser.py @@ -7,6 +7,7 @@ identify and remove code paths that are no longer used. """ +from typing import Dict from codegen.utils import print from codegen.files import read_file @@ -128,7 +129,7 @@ def peek_line(self): def parse(self, verbose=True): self._interfaces = {} - self.classes = {} + self.classes:Dict[str, Interface] = {} self.structs = {} self.flags = {} self.enums = {} @@ -222,6 +223,7 @@ def resolve_type(self, typename) -> str: "ImageData": "ArrayLike", "VideoFrame": "ArrayLike", "AllowSharedBufferSource": "ArrayLike", + "[AllowShared] Uint32Array": "ArrayLike", "GPUPipelineConstantValue": "float", "GPUExternalTexture": "object", "undefined": "None", diff --git a/codegen/jswriter.py b/codegen/jswriter.py new file mode 100644 index 00000000..ad1a6684 --- /dev/null +++ b/codegen/jswriter.py @@ -0,0 +1,276 @@ +""" +Codegen the JS webgpu backend, based on the parsed idl. + +write to the backends/js_webgpu/_api.py file. 
+""" + +import os +import re +from codegen.idlparser import Attribute, get_idl_parser, Interface +from codegen.apipatcher import IdlPatcherMixin, BaseApiPatcher +from codegen.utils import Patcher +from textwrap import indent, dedent + + +file_preamble =""" +# Auto-generated API for the JS WebGPU backend, based on the IDL and custom implementations. + +from ... import classes, structs, enums, flags +from ...structs import ArrayLike, Sequence # for typing hints +from typing import Union + +from pyodide.ffi import to_js, run_sync, JsProxy +from js import window, Uint8Array + +from ._helpers import simple_js_accessor +from ._implementation import GPUPromise +""" +# maybe we should also generate a __all__ list to just import the defined classes? + +# TODO: the constructor often needs more args, like device hands down self +# maybe label can be done via the property? +create_template = """ +def {py_method_name}(self, **kwargs): + descriptor = structs.{py_descriptor_name}(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.{js_method_name}(js_descriptor) + + label = kwargs.pop("label", "") + return {return_type}(label, js_obj, device=self) +""" + +unary_template = """ +def {py_method_name}(self) -> None: + self._internal.{js_method_name}() +""" + +# TODO: this is a bit more complex but doable. +# return needs to be optional and also resolve the promise? +# TODO: with empty body looks odd :/ +positional_args_template = """ +{header} + {body} + self._internal.{js_method_name}({js_args}) +""" +# TODO: construct a return value if needed? + + +# might require size to be calculated if None? (offset etc) +data_conversion = """ + if {py_data} is not None: + data = memoryview({py_data}).cast("B") + data_size = (data.nbytes + 3) & ~3 # align to 4 bytes + js_data = Uint8Array.new(data_size) + js_data.assign(data) + else: + js_data = None +""" + +# most likely copy and modify the code in apipatcher.py... 
because we hopefully need code that looks really similar to _classes.py +idl = get_idl_parser() +helper_patcher = BaseApiPatcher() # to get access to name2py_names function + +# can't use importlib because pyodide isn't available -.- +# maybe use ast? +root = os.path.abspath(os.path.join(__file__, "..", "..")) +custom_implementations = open(os.path.join(root, "wgpu", "backends", "js_webgpu", "_implementation.py")).read() + +class JsPatcher(Patcher): + # TODO: we can put custom methods here! + pass + +patcher = JsPatcher(custom_implementations) + +def generate_method_code(class_name: str, function_name: str, idl_line: str) -> str: + # TODO: refactor into something like this + pass + +def get_class_def(class_name: str, interface: Interface) -> str: + # TODO: refactor + pass + + +# basically three cases for methods (from idl+apidiff): +# 1. alreayd exists in _classes.py and can be used as is (generate nothing) +# 2. custom implementation in _implementations.py (copy string over) +# 3. auto-generate remaining methods based on idl + + + +def generate_js_webgpu_api() -> str: + """Generate the JS translation API code we can autogenerate.""" + + + # TODO: preamble? + output = file_preamble + "\n\n" + + # classname, start_line, end_line + custom_classes = {c: (s, e) for c, s, e in patcher.iter_classes()} + + # todo import our to_js converter functions from elsewhere? + # we need to have the mixins first! + ordered_classes = sorted(idl.classes.items(), key=lambda c: "Mixin" not in c[0]) # mixins first + for class_name, interface in ordered_classes: + # write idl line, header + # write the to_js block + # get label (where needed?) + # return the constructor call to the base class maybe? + + custom_methods = {} + + if class_name in custom_classes: + class_line = custom_classes[class_name][0] +1 + for method_name, start_line, end_line in patcher.iter_methods(class_line): + # grab the actual contents ? 
+ # maybe include a comment that is in the line prior from _implementation.py? + method_lines = patcher.lines[start_line:end_line+1] + custom_methods[method_name] = method_lines + + # include custom properties too + for prop_name, start_line, end_line in patcher.iter_properties(class_line): + prop_lines = patcher.lines[start_line-1:end_line+1] + custom_methods[prop_name] = prop_lines + + mixins = [c for c in interface.bases if c not in ("DOMException", "EventTarget")] # skip some we skip + class_header = f"class {class_name}(classes.{class_name}, {', '.join(mixins)}):" + + class_lines = ["\n"] + # TODO: can we property some of the webgpu attributes to replace the existing private mappings + + for function_name, idl_line in interface.functions.items(): + return_type = idl_line.split(" ")[0] # on some parts this doesn't exist + py_method_name = helper_patcher.name2py_names(class_name, function_name) + # TODO: resolve async double methods! + py_method_name = py_method_name[0] # TODO: async always special case? + + if py_method_name in custom_methods: + # Case 2: custom implementation exists! + class_lines.append(f"\n# Custom implementation for {function_name} from _implementation.py:\n") + class_lines.append(dedent("\n".join(custom_methods[py_method_name]))) + class_lines.append("\n") # for space I guess + custom_methods.pop(py_method_name) # remove ones we have added. + continue + + if py_method_name == "__init__": + # whacky way, but essentially this mean classes.py implements a useable constructor already. + continue + + # TODO: mixin classes seem to cause double methods? should we skip them? + + # based on apipatcher.IDlCommentINjector.get_method_comment + args = idl_line.split("(")[1].rsplit(")")[0].split(", ") + args = [Attribute(arg) for arg in args if arg.strip()] + + # TODO: the create_x_pipeline_async methods become the sync variant without suffix! 
+ if return_type and return_type.startswith("Promise<") and return_type.endswith(">"): + return_type = return_type.split("<")[-1].rstrip(">?") + + # skip these for now as they are more troublesome -.- + if py_method_name.endswith("_sync"): + class_lines.append(f"\n# TODO: {function_name} sync variant likely taken from _classes.py directly!") + continue + + if function_name.endswith("Async"): + class_lines.append(f"\n# TODO: was was there a redefinition for {function_name} async variant?") + continue + + # case 1: single argument as a descriptor (TODO: could be optional - but that should just work) + if len(args) == 1 and args[0].typename.endswith( + ("Options", "Descriptor", "Configuration") + ): + method_string = create_template.format( + py_method_name=py_method_name, + py_descriptor_name=args[0].typename.removeprefix("GPU"), + js_method_name=function_name, + return_type=return_type if return_type else "None", + ) + class_lines.append(method_string) + + # case 2: no arguments (and nothing to return?) + elif (len(args) == 0 and return_type == "undefined"): + method_string = unary_template.format( + py_method_name=py_method_name, + js_method_name=function_name, + ) + class_lines.append(method_string) + # TODO: return values, could be simple or complex... so might need a constructor or not at all? + + # case 3: positional arguments, some of which might need ._internal lookup or struct->to_js conversion... but not all. 
+ elif (len(args) > 0): + + header = helper_patcher.get_method_def(class_name, py_method_name).partition("):")[0].lstrip() + # put all potentially forward refrenced classes into quotes + header = " ".join(f'"{h}"' if h.startswith("GPU") else h for h in header.split(" ")).replace(':"','":') + # turn all optional type hints into Union with None + # int | None -> Union[int, None] + exp = r":\s([\w\"]+)\s\| None" + header = re.sub(exp, lambda m: f": Union[{m.group(1)}, None]", header) + header = header.replace('Sequence[GPURenderBundle]', 'Sequence["GPURenderBundle"]') # TODO: just a temporary bodge! + + param_list = [] + conversion_lines = [] + js_arg_list = [] + for idx, arg in enumerate(args): + py_name = helper_patcher.name2py_names(class_name, arg.name)[0] + param_list.append(py_name) + # if it's a GPUObject kinda thing we most likely need to call ._internal to get the correct js object + if arg.typename.removesuffix("?") in idl.classes: + # TODO: do we need to check against none for optionals? + # technically the our js_accessor does this lookup too? + conversion_lines.append(f"js_{arg.name} = {py_name}._internal") + js_arg_list.append(f"js_{arg.name}") + # TODO: sequence of complex type? + + elif arg.typename.removeprefix('GPU').removesuffix("?") in idl.structs and arg.typename not in ("GPUExtent3D", "GPUColor"): + conversion_lines.append(f"{py_name}_desc = structs.{arg.typename.removeprefix('GPU').removesuffix('?')}(**{py_name})") + conversion_lines.append(f"js_{arg.name} = to_js({py_name}_desc, eager_converter=simple_js_accessor)") + js_arg_list.append(f"js_{arg.name}") + elif py_name.endswith("data"): # maybe not an exhaustive check? + conversion_lines.append(data_conversion.format(py_data=py_name)) + js_arg_list.append("js_data") #might be a problem if there is two! 
+ else: + py_type = idl.resolve_type(arg.typename) + if py_type not in __builtins__ and not py_type.startswith(("enums.", "flags.")): + conversion_lines.append(f"# TODO: argument {py_name} of JS type {arg.typename}, py type {py_type} might need conversion") + js_arg_list.append(py_name) + + method_string = positional_args_template.format( + header=header, + body=("\n ".join(conversion_lines)), + js_method_name=function_name, + js_args=", ".join(js_arg_list), + return_type=return_type if return_type != "undefined" else "None", + ) + class_lines.append(method_string) + + # TODO: have a return line constructor function? + + else: + class_lines.append(f"\n# TODO: implement codegen for {function_name} with args {args} or return type {return_type}") + + # if there are some methods not part of the idl, we should write them too + if custom_methods: + class_lines.append("\n# Additional custom methods from _implementation.py:\n") + for method_name, method_lines in custom_methods.items(): + class_lines.append(dedent("\n".join(method_lines))) + class_lines.append("\n\n") + + # do we need them in the first place? + if all(line.lstrip().startswith("#") for line in class_lines if line.strip()): + class_lines.append("\npass") + + output += class_header + output += indent("".join(class_lines), " ") + output += "\n\n" # separation between classes + + # TODO: most likely better to return a structure like + # dict(class: dict(method : code_lines)) + + + # TODO: postamble: + output += "\ngpu = GPU()\n" + + return output + + +# TODO: we need to add some of the apidiff functions too... but I am not yet sure if we want to generate them or maybe import them? diff --git a/codegen/utils.py b/codegen/utils.py index 5b3b2a7a..7ee9d2cc 100644 --- a/codegen/utils.py +++ b/codegen/utils.py @@ -349,7 +349,7 @@ def iter_classes(self, start_line=0): def iter_properties(self, start_line=0): """Generator to iterate over the properties. 
- Each iteration yields (classname, linenr_first, linenr_last), + Each iteration yields (propertyname, linenr_first, linenr_last), where linenr_first is the line that startswith `def`, and linenr_last is the last line of code. """ @@ -357,7 +357,7 @@ def iter_properties(self, start_line=0): def iter_methods(self, start_line=0): """Generator to iterate over the methods. - Each iteration yields (classname, linenr_first, linenr_last) + Each iteration yields (methodname, linenr_first, linenr_last) where linenr_first is the line that startswith `def`, and linenr_last is the last line of code. """ diff --git a/docs/_static/_pyodide_iframe.html b/docs/_static/_pyodide_iframe.html new file mode 100644 index 00000000..913b59ff --- /dev/null +++ b/docs/_static/_pyodide_iframe.html @@ -0,0 +1,46 @@ + + + + + + Rendercanvas example.py in Pyodide + + + + + +

Loading...

+
+ + + + + \ No newline at end of file diff --git a/docs/_static/style.css b/docs/_static/style.css index e69de29b..61d945d3 100644 --- a/docs/_static/style.css +++ b/docs/_static/style.css @@ -0,0 +1,6 @@ + +div.document iframe { + width: 100%; + height: 520px; + border: none; +} \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 64bbadeb..faafda0c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -14,6 +14,7 @@ import os import sys import shutil +import subprocess ROOT_DIR = os.path.abspath(os.path.join(__file__, "..", "..")) @@ -124,7 +125,7 @@ def resolve_crossrefs(text): # -- Project information ----------------------------------------------------- project = "wgpu-py" -copyright = "2020-2023, Almar Klein, Korijn van Golen" +copyright = "2020-2025, Almar Klein, Korijn van Golen" author = "Almar Klein, Korijn van Golen" release = wgpu.__version__ @@ -135,9 +136,11 @@ def resolve_crossrefs(text): # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ + "sphinx_rtd_theme", "sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx.ext.autosummary", + "sphinx_gallery.gen_gallery", ] # Add any paths that contain templates here, relative to this directory. @@ -165,3 +168,121 @@ def resolve_crossrefs(text): # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ["_static"] + + +# copied and adapted from the rendercanvas + +# -- Build wheel so Pyodide examples can use exactly this version of wgpu ----------------------------------------------------- + +short_version = ".".join(str(i) for i in wgpu.version_info[:3]) +wheel_name = f"wgpu-{short_version}-py3-none-any.whl" + +# Build the wheel +os.environ["WGPU_BUILD_PLATFORM_INFO"] = " ".join(["pyodide_wasm", "any"]) +subprocess.run([sys.executable, "-m", "build", "-nw"], cwd=ROOT_DIR) +wheel_filename = os.path.join(ROOT_DIR, "dist", wheel_name) +assert os.path.isfile(wheel_filename), f"{wheel_name} does not exist" + +# Copy into static +print("Copy wheel to _static dir") +shutil.copy( + wheel_filename, + os.path.join(ROOT_DIR, "docs", "_static", wheel_name), +) + + +# -- Sphinx Gallery ----------------------------------------------------- + +iframe_placeholder_rst = """ +.. only:: html + + Interactive example + =================== + + This uses Pyodide. If this does not work, your browser may not have sufficient support for wasm/pyodide/wgpu (check your browser dev console). + Stdout (print statements) will also appear in the browser console. + + .. 
raw:: html + + +""" + +python_files = {} + + +def add_pyodide_to_examples(app): + if app.builder.name != "html": + return + + gallery_dir = os.path.join(ROOT_DIR, "docs", "gallery") + + for fname in os.listdir(gallery_dir): + filename = os.path.join(gallery_dir, fname) + if not fname.endswith(".py"): + continue + with open(filename, "rb") as f: + py = f.read().decode() + if fname in ["cube.py", "triangle.py", "imgui_backend_sea.py", "compute_noop.py", "imgui_renderer_sea.py", "imgui_basic_example.py"]: + # todo: later we detect by using a special comment in the py file + print("Adding Pyodide example to", fname) + fname_rst = fname.replace(".py", ".rst") + # Update rst file + rst = iframe_placeholder_rst.replace("example.py", fname) + with open(os.path.join(gallery_dir, fname_rst), "ab") as f: + f.write(rst.encode()) + python_files[fname] = py + + +def add_files_to_run_pyodide_examples(app, exception): + if app.builder.name != "html": + return + + gallery_build_dir = os.path.join(app.outdir, "gallery") + + # Write html file that can load pyodide examples + with open( + os.path.join(ROOT_DIR, "docs", "_static", "_pyodide_iframe.html"), "rb" + ) as f: + html = f.read().decode() + html = html.replace('"wgpu"', f'"../_static/{wheel_name}"') + with open(os.path.join(gallery_build_dir, "pyodide.html"), "wb") as f: + f.write(html.encode()) + + # Write the python files + for fname, py in python_files.items(): + print("Writing", fname) + with open(os.path.join(gallery_build_dir, fname), "wb") as f: + f.write(py.encode()) + + +# Suppress "cannot cache unpickable configuration value" for sphinx_gallery_conf +# See https://github.com/sphinx-doc/sphinx/issues/12300 +suppress_warnings = ["config.cache"] + +# The gallery conf. 
See https://sphinx-gallery.github.io/stable/configuration.html +sphinx_gallery_conf = { + "gallery_dirs": "gallery", + "backreferences_dir": "gallery/backreferences", + "doc_module": ("wgpu",), + # "image_scrapers": (), + "remove_config_comments": True, + "examples_dirs": "../examples/", + "ignore_pattern": r"serve_browser_examples\.py", +} + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. + +html_theme = "sphinx_rtd_theme" + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_css_files = ["style.css"] + + +def setup(app): + app.connect("builder-inited", add_pyodide_to_examples) + app.connect("build-finished", add_files_to_run_pyodide_examples) diff --git a/docs/index.rst b/docs/index.rst index 1e217919..3b720352 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -12,6 +12,7 @@ Welcome to the wgpu-py docs! wgpu backends utils + Gallery diff --git a/examples/README.rst b/examples/README.rst new file mode 100644 index 00000000..67ee9a60 --- /dev/null +++ b/examples/README.rst @@ -0,0 +1,4 @@ +wgpu-py examples +================ + +(this is in progress) \ No newline at end of file diff --git a/examples/browser.html b/examples/browser.html new file mode 100644 index 00000000..1e01ba09 --- /dev/null +++ b/examples/browser.html @@ -0,0 +1,90 @@ + + + + + wgpu-py on the HTML RenderCanvas canvas with Pyodide:
+ + + + + + + + + + + +
+ + + + + + + + +
+
+pixels got drawn! + + + \ No newline at end of file diff --git a/examples/compute_int64.py b/examples/compute_int64.py index 33fea9d6..dabb8e91 100644 --- a/examples/compute_int64.py +++ b/examples/compute_int64.py @@ -1,4 +1,7 @@ """ +Compute INT64 +------------- + simple example of using the int64 shader feature """ diff --git a/examples/compute_matmul.py b/examples/compute_matmul.py index eb1882db..675e235d 100644 --- a/examples/compute_matmul.py +++ b/examples/compute_matmul.py @@ -1,4 +1,7 @@ """ +Compute MatMul +-------------- + Simple compute example that performs basic matrix multiplication. Uses linear arrays in storage buffers to represent matrices of arbitrary size since diff --git a/examples/compute_noop.py b/examples/compute_noop.py index 9be2b906..7a0e9d02 100644 --- a/examples/compute_noop.py +++ b/examples/compute_noop.py @@ -1,4 +1,7 @@ """ +Compute Noop +------------ + Example compute shader that does ... nothing but copy a value from one buffer into another. """ diff --git a/examples/compute_textures.py b/examples/compute_textures.py index f7b3467c..0870605a 100644 --- a/examples/compute_textures.py +++ b/examples/compute_textures.py @@ -1,4 +1,7 @@ """ +Compute Textures +---------------- + Example that shows how to use textures in a compute shader to convert an RGBA image to YCbCr. The shader uses workgroups to processes non-overlapping 8x8 blocks of the input rgba texture. diff --git a/examples/compute_timestamps.py b/examples/compute_timestamps.py index 0b3faf8d..63fecedc 100644 --- a/examples/compute_timestamps.py +++ b/examples/compute_timestamps.py @@ -1,4 +1,7 @@ """ +Compute Timestamps +------------------ + A simple example to profile a compute pass using ComputePassTimestampWrites. """ diff --git a/examples/cube.py b/examples/cube.py index 5fdd4da6..10bf0b39 100644 --- a/examples/cube.py +++ b/examples/cube.py @@ -1,4 +1,6 @@ """ +Cube +---- Example that renders a textured rotating cube. 
This example is a bit more interesting (and larger) than the triangle, @@ -469,7 +471,6 @@ def draw_func(): for a in wgpu.gpu.enumerate_adapters_sync(): print(a.summary) - if __name__ == "__main__": canvas = RenderCanvas( size=(640, 480), diff --git a/examples/extras_debug.py b/examples/extras_debug.py index 31131915..306447a8 100644 --- a/examples/extras_debug.py +++ b/examples/extras_debug.py @@ -1,4 +1,7 @@ """ +Extras Debug +------------ + Basic example of how to use wgpu-native instance extras to enable debug symbols and labels in the shader compiler. As debugger we will use RenderDoc (https://renderdoc.org/) - other tools will require a similar setup. While RenderDoc doesn't fully support WebGPU - it can still be useful for inspecting the render pipeline. diff --git a/examples/extras_dxc.py b/examples/extras_dxc.py index 1d6fc5ab..a8c3f0ca 100644 --- a/examples/extras_dxc.py +++ b/examples/extras_dxc.py @@ -1,4 +1,7 @@ """ +Extras DXC +---------- + Simple example to show how the wgpu-native extras can be used to use dxc compiler for DX12. Since this will only work on Windows it's not meant for the test suite. You can run the download script using `python tools/download_dxc.py` to download the latest Dxc release from GitHub. And extract it to the resource directory. diff --git a/examples/gui_auto.py b/examples/gui_auto.py index f1f728bf..d6d3add1 100644 --- a/examples/gui_auto.py +++ b/examples/gui_auto.py @@ -1,4 +1,8 @@ """ +GUI Auto +-------- + + Run the triangle example in an automatically selected GUI backend. The rendercanvas automatically selects one of its available diff --git a/examples/gui_direct.py b/examples/gui_direct.py index 6a90aa58..74256b93 100644 --- a/examples/gui_direct.py +++ b/examples/gui_direct.py @@ -1,4 +1,7 @@ """ +GUI Direct +---------- + Direct integration of glfw and wgpu-py without using the RenderCanvas library. Demonstration for hardcore users that need total low-level control. 
diff --git a/examples/gui_events.py b/examples/gui_events.py index 34e33b28..c9ca8c1b 100644 --- a/examples/gui_events.py +++ b/examples/gui_events.py @@ -1,4 +1,7 @@ """ +GUI Events +---------- + A simple example to demonstrate events (no rendering). """ diff --git a/examples/gui_qt_embed.py b/examples/gui_qt_embed.py index 30585e80..ce7e2353 100644 --- a/examples/gui_qt_embed.py +++ b/examples/gui_qt_embed.py @@ -1,4 +1,7 @@ """ +GUI Qt embed +------------ + An example demonstrating a Qt app with a wgpu viz inside. If needed, change the PySide6 import to e.g. PyQt6, PyQt5, or PySide2. diff --git a/examples/imgui_backend_sea.py b/examples/imgui_backend_sea.py index 8e4ad68d..f18a8949 100644 --- a/examples/imgui_backend_sea.py +++ b/examples/imgui_backend_sea.py @@ -1,4 +1,7 @@ """ +ImGui Backend Sea +----------------- + An example demonstrating a wgpu app with imgui backend. """ diff --git a/examples/imgui_basic_example.py b/examples/imgui_basic_example.py index f87a0005..82f8bcb8 100644 --- a/examples/imgui_basic_example.py +++ b/examples/imgui_basic_example.py @@ -1,4 +1,7 @@ """ +ImGui Basic Example +------------------- + An example demonstrating a wgpu app with basic imgui usage and events. 
""" diff --git a/examples/imgui_cmap_picker.py b/examples/imgui_cmap_picker.py index 3a15a3e8..04878ccd 100644 --- a/examples/imgui_cmap_picker.py +++ b/examples/imgui_cmap_picker.py @@ -1,4 +1,8 @@ """ +ImGui cmap picker +----------------- + + Imgui example that shows how to create a colormap picker menu Uses the cmap library: https://github.com/tlambert03/cmap diff --git a/examples/imgui_multi_canvas.py b/examples/imgui_multi_canvas.py index 7921b06e..522fb761 100644 --- a/examples/imgui_multi_canvas.py +++ b/examples/imgui_multi_canvas.py @@ -1,4 +1,8 @@ """ +ImGui Multi-Canvas +------------------ + + Example showing how to use multiple imgui contexts to draw to multiple canvases """ diff --git a/examples/imgui_renderer_sea.py b/examples/imgui_renderer_sea.py index f46d37da..3ddedd1b 100644 --- a/examples/imgui_renderer_sea.py +++ b/examples/imgui_renderer_sea.py @@ -1,4 +1,7 @@ """ +ImGui Renderer Sea +------------------ + An example demonstrating a wgpu app with imgui renderer """ diff --git a/examples/serve_browser_examples.py b/examples/serve_browser_examples.py new file mode 100644 index 00000000..238de989 --- /dev/null +++ b/examples/serve_browser_examples.py @@ -0,0 +1,295 @@ +""" +A little script that serves browser-based example, using a wheel from the local wgpu. + +* Examples that run wgpu fully in the browser in Pyodide / PyScript. + +What this script does: + +* runs the codegen for js_webgpu backend +* Build the .whl for wgpu, so Pyodide can install the dev version. +* Start a tiny webserver to host html files for a selection of examples. +* Opens a webpage in the default browser. + +Files are loaded from disk on each request, so you can leave the server running +and just update examples, update wgpu and build the wheel, etc. 
+""" + +# run_example = false + +# this is adapted from the rendercanvas version + +import os +import sys +import webbrowser +from http.server import BaseHTTPRequestHandler, HTTPServer +import subprocess + +import wgpu +from codegen import update_js, file_cache + + +#examples that don't require a canvas, we will capture the output to a div +compute_examples = { + # "compute_int64.py", # this one requires native only features, so won't work in the browser for now + "compute_noop.py": [], # no deps + "compute_matmul.py": ["numpy"], + # "compute_textures.py": ["numpy", "imageio"], #imageio doesn't work in pyodide right now (fetch?) + "compute_timestamps.py": [], # this one still crashes as the descriptor doesn't get converted into an object... +} + +# these need rendercanvas too for now. but might run with just a canvas (no events) in the near future. +graphics_examples = { + "triangle.py":[], # no deps + "cube.py": ["numpy"], + "offscreen_hdr.py": ["numpy", "pypng"], # pyscript says it doesn't work in pyodide. + # "triangle_glsl.py": # we can't use GLSL in the browser... I am looking into maybe using wasm compiled naga manually - at a later date. + "imgui_backend_sea.py": ["numpy", "imgui-bundle"], + "imgui_basic_example.py": ["imgui-bundle"], # might even work without wgpu as imgui already works in pyodide... + "imgui_renderer_sea.py": ["numpy", "imgui-bundle"], +} + + +root = os.path.abspath(os.path.join(__file__, "..", "..")) + +short_version = ".".join(str(i) for i in wgpu.version_info[:3]) +wheel_name = f"wgpu-{short_version}-py3-none-any.whl" + + +def get_html_index(): + """Create a landing page.""" + + compute_examples_list = [f"
  • {name}
  • " for name in compute_examples.keys()] + graphics_examples_list = [f"
  • {name}
  • " for name in graphics_examples.keys()] + + html = """ + + + + wgpu PyScript examples + + + + + Rebuild the wheel

    + """ + + html += "List of compute examples that run in PyScript:\n" + html += f"
    \n\n" + + html += "List of graphics examples that run in PyScript:\n" + html += f"
    \n\n" + + html += "\n\n" + return html + + +html_index = get_html_index() + + +# An html template to show examples using pyscript. +pyscript_graphics_template = """ + + + + + {example_script} via PyScript + + + + + Back to list

    + +

    + {docstring} +

    + +

    Loading...

    +
    + + + + + + + +""" + +# TODO: a pyodide example for the compute examples (so we can capture output?) +# modified from _pyodide_iframe.html from rendercanvas +pyodide_compute_template = """ + + + + + {example_script} via Pyodide + + + + +

    Loading...

    +
    + + Back to list

    +

    + {docstring} +

    +
    +

    Output:

    +
    + + + + +""" + + + + +if not ( + os.path.isfile(os.path.join(root, "wgpu", "__init__.py")) + and os.path.isfile(os.path.join(root, "pyproject.toml")) +): + raise RuntimeError("This script must run in a checkout repo of wgpu-py.") + + +def build_wheel(): + # TODO: run the codegen for js_webgpu backend! + file_cache.reset() + update_js() + file_cache.write_changed_files_to_disk() + # doesn't work right now :/ + # maybe now? not 100% sure + + # TODO: can we use the existing hatch build system? (via build yes, not via flit it seems) + os.environ["WGPU_BUILD_PLATFORM_INFO"] = " ".join(("pyodide_wasm", "any")) + subprocess.run([sys.executable, "-m", "build", "-n", "-w"], cwd=root) + wheel_filename = os.path.join(root, "dist", wheel_name) + assert os.path.isfile(wheel_filename), f"{wheel_name} does not exist" + + +def get_docstring_from_py_file(fname): + filename = os.path.join(root, "examples", fname) + docstate = 0 + doc = "" + with open(filename, "rb") as f: + while True: + line = f.readline().decode() + if docstate == 0: + if line.lstrip().startswith('"""'): + docstate = 1 + else: + if docstate == 1 and line.lstrip().startswith(("---", "===")): + docstate = 2 + doc = "" + elif '"""' in line: + doc += line.partition('"""')[0] + break + else: + doc += line + + return doc.replace("\n\n", "

    ") + + +class MyHandler(BaseHTTPRequestHandler): + def do_GET(self): + if self.path == "/": + self.respond(200, html_index, "text/html") + elif self.path == "/build": + # TODO: add progress instead of blocking before load? + # also seems like this might get called multiple times? + try: + build_wheel() + except Exception as err: + self.respond(500, str(err), "text/plain") + else: + html = f"Wheel build: {wheel_name}

    Back to list" + self.respond(200, html, "text/html") + elif self.path.endswith(".whl"): + filename = os.path.join(root, "dist", self.path.strip("/")) + if os.path.isfile(filename): + with open(filename, "rb") as f: + data = f.read() + self.respond(200, data, "application/octet-stream") + else: + self.respond(404, "wheel not found") + elif self.path.endswith(".html"): + name = self.path.strip("/") + pyname = name.replace(".html", ".py") + if pyname in graphics_examples: + deps = graphics_examples[pyname].copy() # don't modify them multiple times! + deps.append("rendercanvas") + deps.append(f"./{wheel_name}") + # sometimes sniffio is missing, other times it's not? + doc = get_docstring_from_py_file(pyname) + html = pyscript_graphics_template.format(docstring=doc, example_script=pyname, dependencies=", ".join([f'"{d}"' for d in deps])) + self.respond(200, html, "text/html") + elif pyname in compute_examples: + doc = get_docstring_from_py_file(pyname) + deps = compute_examples[pyname].copy() + deps.append(f"./{wheel_name}") + html = pyodide_compute_template.format(docstring=doc, example_script=pyname, dependencies="\n".join([f"await micropip.install({dep!r});" for dep in deps])) + self.respond(200, html, "text/html") + else: + self.respond(404, "example not found") + elif self.path.endswith(".py"): + filename = os.path.join(root, "examples", self.path.strip("/")) + if os.path.isfile(filename): + with open(filename, "rb") as f: + data = f.read() + self.respond(200, data, "text/plain") + else: + self.respond(404, "py file not found") + else: + self.respond(404, "not found") + + def respond(self, code, body, content_type="text/plain"): + self.send_response(code) + self.send_header("Content-type", content_type) + self.end_headers() + if isinstance(body, str): + body = body.encode() + self.wfile.write(body) + + +if __name__ == "__main__": + port = 8000 + if len(sys.argv) > 1: + try: + port = int(sys.argv[-1]) + except ValueError: + pass + + build_wheel() + 
print("Opening page in web browser ...") + webbrowser.open(f"http://localhost:{port}/") + HTTPServer(("", port), MyHandler).serve_forever() diff --git a/examples/triangle.py b/examples/triangle.py index 8bb05d97..7655f64a 100644 --- a/examples/triangle.py +++ b/examples/triangle.py @@ -1,4 +1,6 @@ """ +Triangle +-------- Example use of the wgpu API to draw a triangle. The triangle is a classic example representing the simplest possible @@ -68,19 +70,20 @@ def get_render_pipeline_kwargs( render_texture_format = context.get_preferred_format(device.adapter) context.configure(device=device, format=render_texture_format) - shader = device.create_shader_module(code=shader_source) + vert_shader = device.create_shader_module(code=shader_source) + frag_shader = device.create_shader_module(code=shader_source) pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) return wgpu.RenderPipelineDescriptor( layout=pipeline_layout, vertex=wgpu.VertexState( - module=shader, + module=vert_shader, entry_point="vs_main", ), depth_stencil=None, multisample=None, fragment=wgpu.FragmentState( - module=shader, + module=frag_shader, entry_point="fs_main", targets=[ wgpu.ColorTargetState( diff --git a/examples/triangle_glsl.py b/examples/triangle_glsl.py index 386273cb..1a9fe431 100644 --- a/examples/triangle_glsl.py +++ b/examples/triangle_glsl.py @@ -1,4 +1,8 @@ """ +Triangle GLSL +------------- + + The triangle example, using GLSL shaders. 
""" diff --git a/pyproject.toml b/pyproject.toml index 3c6ec884..92f9993f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ tests = [ "rendercanvas>=2.4.0", ] examples = ["pypng", "rendercanvas", "glfw", "imgui_bundle"] -docs = ["sphinx>7.2", "sphinx_rtd_theme"] +docs = ["wgpu[examples,build]", "flit", "sphinx>7.2", "sphinx_rtd_theme", "sphinx-gallery"] dev = ["wgpu[build,codegen,lint,tests,examples,docs]"] [project.entry-points."pyinstaller40"] diff --git a/tools/hatch_build.py b/tools/hatch_build.py index 2483bccf..afc18f89 100644 --- a/tools/hatch_build.py +++ b/tools/hatch_build.py @@ -43,6 +43,8 @@ def initialize(self, version, build_data): # If this is an sdist build, or a wheel build from an sdist, # we go pure-Python mode, and expect the user to set WGPU_LIB_PATH. # We also allow building an arch-agnostic wheel explicitly, using an env var. + print(build_data) + print(self.build_config) if os.getenv("WGPU_PY_BUILD_NOARCH", "").lower() in ("1", "true"): pass # Explicitly disable including the lib @@ -61,7 +63,17 @@ def initialize(self, version, build_data): wgpu_native_tag, wheel_tag = platform_info.split() opsys, arch = wgpu_native_tag.split("_", 1) build_data["tag"] = "py3-none-" + wheel_tag - download_lib(None, opsys, arch) + if opsys == "pyodide": + # special pure python wheel without the resource folder for browser use + # in the future we might have an actual wasm build, so this might need changes again! + build_data["pure_python"] = True + build_data["exclude"] = ["wgpu/resources", "wgpu/resources/*"] + build_data["artifacts"] = [] + # TODO: find the hatchling api that actually excludes files + # then remove the wgpu-native code, so the wheel is as small as possible + # do we need to redownload the lib for the developer? + else: + download_lib(None, opsys, arch) else: # A build for this platform, e.g. 
``pip install -e .`` build_data["infer_tag"] = True @@ -72,7 +84,8 @@ def initialize(self, version, build_data): def is_git_repo(): - return os.path.isdir(os.path.join(root_dir, ".git")) + # detect repo (.git is a dir) and submodule (.git is a file) + return os.path.exists(os.path.join(root_dir, ".git")) def check_git_status(): diff --git a/wgpu/_classes.py b/wgpu/_classes.py index e1b41259..dbd30428 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -366,7 +366,7 @@ def configure( usage = str_flag_to_int(flags.TextureUsage, usage) color_space # noqa - not really supported, just assume srgb for now - tone_mapping # noqa - not supported yet + tone_mapping = {} if tone_mapping is None else tone_mapping # Allow more than the IDL modes, see https://github.com/pygfx/wgpu-py/pull/719 extra_alpha_modes = ["auto", "unpremultiplied", "inherit"] # from webgpu.h @@ -1812,7 +1812,7 @@ def set_index_buffer( call to `GPUDevice.create_render_pipeline()`, it must match. offset (int): The byte offset in the buffer. Default 0. size (int): The number of bytes to use. If zero, the remaining size - (after offset) of the buffer is used. Default 0. + (after offset) of the buffer is used. """ raise NotImplementedError() diff --git a/wgpu/_version.py b/wgpu/_version.py index c884f1eb..4387d748 100644 --- a/wgpu/_version.py +++ b/wgpu/_version.py @@ -50,28 +50,34 @@ def get_extended_version() -> str: # Sample first 3 parts of __version__ base_release = ".".join(__version__.split(".")[:3]) - # Check release - if not release: - release = base_release - elif release != base_release: - warning( - f"{project_name} version from git ({release})" - f" and __version__ ({base_release}) don't match." - ) - - # Build the total version - version = release + # Start version string (__version__ string is leading) + version = base_release + tag_prefix = "#" + + if release and release != base_release: + # Can happen between bumping and tagging. 
And also when merging a + # version bump into a working branch, because we use --first-parent. + release2, _post, _labels = get_version_info_from_git(first_parent=False) + if release2 != base_release: + warning( + f"{project_name} version from git ({release})" + f" and __version__ ({base_release}) don't match." + ) + version += "+from_tag_" + release.replace(".", "_") + tag_prefix = "." + + # Add git info if post and post != "0": version += f".post{post}" if labels: - version += "+" + ".".join(labels) + version += tag_prefix + ".".join(labels) elif labels and labels[-1] == "dirty": - version += "+" + ".".join(labels) + version += tag_prefix + ".".join(labels) return version -def get_version_info_from_git() -> str: +def get_version_info_from_git(*, first_parent: bool = True) -> str: """ Get (release, post, labels) from Git. @@ -80,15 +86,9 @@ def get_version_info_from_git() -> str: git-hash and optionally a dirty flag. """ # Call out to Git - command = [ - "git", - "describe", - "--long", - "--always", - "--tags", - "--dirty", - "--first-parent", - ] + command = ["git", "describe", "--long", "--always", "--tags", "--dirty"] + if first_parent: + command.append("--first-parent") try: p = subprocess.run(command, check=False, cwd=repo_dir, capture_output=True) except Exception as e: diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index 7317b133..25952b59 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -6,26 +6,8 @@ generated. """ -# NOTE: this is just a stub for now!! - from .. 
import _register_backend - - -class GPU: - def request_adapter_sync(self, **parameters): - raise NotImplementedError("Cannot use sync API functions in JS.") - - async def request_adapter_async(self, **parameters): - gpu = window.navigator.gpu # noqa: F821 - return await gpu.request_adapter(**parameters) - - def get_preferred_canvas_format(self): - raise NotImplementedError() - - @property - def wgsl_language_features(self): - return set() - +from ._api import * # includes gpu from _implementation? gpu = GPU() _register_backend(gpu) diff --git a/wgpu/backends/js_webgpu/_api.py b/wgpu/backends/js_webgpu/_api.py new file mode 100644 index 00000000..27896b3e --- /dev/null +++ b/wgpu/backends/js_webgpu/_api.py @@ -0,0 +1,780 @@ + +# Auto-generated API for the JS WebGPU backend, based on the IDL and custom implementations. + +from ... import classes, structs, enums, flags +from ...structs import ArrayLike, Sequence # for typing hints +from typing import Union + +from pyodide.ffi import to_js, run_sync, JsProxy +from js import window, Uint8Array + +from ._helpers import simple_js_accessor +from ._implementation import GPUPromise + + +class GPUCommandsMixin(classes.GPUCommandsMixin, ): + + pass + +class GPUBindingCommandsMixin(classes.GPUBindingCommandsMixin, ): + + # Custom implementation for setBindGroup from _implementation.py: + def set_bind_group(self, index: int, bind_group: classes.GPUBindGroup, dynamic_offsets_data: list[int] = (), dynamic_offsets_data_start=None, dynamic_offsets_data_length=None) -> None: + self._internal.setBindGroup(index, bind_group._internal, dynamic_offsets_data) + + +class GPUDebugCommandsMixin(classes.GPUDebugCommandsMixin, ): + + def push_debug_group(self, group_label: Union[str, None] = None) -> None: + + self._internal.pushDebugGroup(group_label) + + def pop_debug_group(self) -> None: + self._internal.popDebugGroup() + + def insert_debug_marker(self, marker_label: Union[str, None] = None) -> None: + + 
self._internal.insertDebugMarker(marker_label) + + +class GPURenderCommandsMixin(classes.GPURenderCommandsMixin, ): + + def set_pipeline(self, pipeline: Union["GPURenderPipeline", None] = None) -> None: + js_pipeline = pipeline._internal + self._internal.setPipeline(js_pipeline) + + def set_index_buffer(self, buffer: Union["GPUBuffer", None] = None, index_format: enums.IndexFormatEnum | None = None, offset: int = 0, size: Union[int, None] = None) -> None: + js_buffer = buffer._internal + self._internal.setIndexBuffer(js_buffer, index_format, offset, size) + + def set_vertex_buffer(self, slot: Union[int, None] = None, buffer: Union["GPUBuffer", None] = None, offset: int = 0, size: Union[int, None] = None) -> None: + js_buffer = buffer._internal + self._internal.setVertexBuffer(slot, js_buffer, offset, size) + + def draw(self, vertex_count: Union[int, None] = None, instance_count: int = 1, first_vertex: int = 0, first_instance: int = 0) -> None: + + self._internal.draw(vertex_count, instance_count, first_vertex, first_instance) + + def draw_indexed(self, index_count: Union[int, None] = None, instance_count: int = 1, first_index: int = 0, base_vertex: int = 0, first_instance: int = 0) -> None: + + self._internal.drawIndexed(index_count, instance_count, first_index, base_vertex, first_instance) + + def draw_indirect(self, indirect_buffer: Union["GPUBuffer", None] = None, indirect_offset: Union[int, None] = None) -> None: + js_indirectBuffer = indirect_buffer._internal + self._internal.drawIndirect(js_indirectBuffer, indirect_offset) + + def draw_indexed_indirect(self, indirect_buffer: Union["GPUBuffer", None] = None, indirect_offset: Union[int, None] = None) -> None: + js_indirectBuffer = indirect_buffer._internal + self._internal.drawIndexedIndirect(js_indirectBuffer, indirect_offset) + + +class GPUObjectBase(classes.GPUObjectBase, ): + + pass + +class GPUAdapterInfo(classes.GPUAdapterInfo, ): + + pass + +class GPU(classes.GPU, ): + + # TODO: requestAdapter sync 
variant likely taken from _classes.py directly! + # TODO: implement codegen for getPreferredCanvasFormat with args [] or return type GPUTextureFormat + # Additional custom methods from _implementation.py: + def __init__(self): + self._internal = window.navigator.gpu # noqa: F821 + + def request_adapter_async(self, loop=None, canvas=None, **options) -> GPUPromise["GPUAdapter"]: + options = structs.RequestAdapterOptions(**options) + js_options = to_js(options, eager_converter=simple_js_accessor) + js_adapter_promise = self._internal.requestAdapter(js_options) + + if loop is None: + # can we use this instead? + webloop = js_adapter_promise.get_loop() + loop = webloop + + def adapter_constructor(js_adapter): + return GPUAdapter(js_adapter, loop=loop) + + promise = GPUPromise("request_adapter", adapter_constructor, loop=loop) + + js_adapter_promise.then(promise._set_input) # we chain the js resolution to our promise + return promise + + def enumerate_adapters_async(self, loop=None) -> GPUPromise[list["GPUAdapter"]]: + adapter_hp = self.request_adapter_sync(power_preference="high-performance") + adapter_lp = self.request_adapter_sync(power_preference="low-power") + + promise = GPUPromise("enumerate_adapters", None, loop=loop) + promise._set_input([adapter_hp, adapter_lp]) + return promise + + def get_canvas_context(self, present_info: dict) -> "GPUCanvasContext": + return GPUCanvasContext(present_info) + + @property + def wgsl_language_features(self): + return self._internal.wgslLanguageFeatures + + + +class GPUAdapter(classes.GPUAdapter, ): + + # TODO: requestDevice sync variant likely taken from _classes.py directly! + # Additional custom methods from _implementation.py: + def __init__(self, js_adapter, loop): + internal = js_adapter + # manually turn these into useful python objects + features = set(js_adapter.features) + + # TODO: _get_limits()? 
+ limits = js_adapter.limits + py_limits = {} + for limit in dir(limits): + # we don't have the GPUSupportedLimits as a struct or list any where in the code right now, maybe we un skip it in the codegen? + if isinstance(getattr(limits, limit), int) and "_" not in limit: + py_limits[limit] = getattr(limits, limit) + + infos = ["vendor", "architecture", "device", "description", "subgroupMinSize", "subgroupMaxSize", "isFallbackAdapter"] + adapter_info = js_adapter.info + py_adapter_info = {} + for info in infos: + if hasattr(adapter_info, info): + py_adapter_info[info] = getattr(adapter_info, info) + + # for compatibility, we fill the native-extra infos too: + py_adapter_info["vendor_id"] = 0 + py_adapter_info["device_id"] = 0 + py_adapter_info["adapter_type"] = "browser" + py_adapter_info["backend_type"] = "WebGPU" + + adapter_info = classes.GPUAdapterInfo(**py_adapter_info) + + super().__init__(internal=internal, features=features, limits=py_limits, adapter_info=adapter_info, loop=loop) + + def request_device_async(self, **kwargs) -> GPUPromise["GPUDevice"]: + descriptor = structs.DeviceDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_device_promise = self._internal.requestDevice(js_descriptor) + + label = kwargs.get("label", "") + + def device_constructor(js_device): + # TODO: do we need to hand down a default_queue here? 
+ return GPUDevice(label, js_device, adapter=self) + + promise = GPUPromise("request_device", device_constructor, loop=self._loop) + js_device_promise.then(promise._set_input) + return promise + + + +class GPUDevice(classes.GPUDevice, GPUObjectBase): + + def destroy(self) -> None: + self._internal.destroy() + + def create_buffer(self, **kwargs): + descriptor = structs.BufferDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createBuffer(js_descriptor) + + label = kwargs.pop("label", "") + return GPUBuffer(label, js_obj, device=self) + + def create_texture(self, **kwargs): + descriptor = structs.TextureDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createTexture(js_descriptor) + + label = kwargs.pop("label", "") + return GPUTexture(label, js_obj, device=self) + + def create_sampler(self, **kwargs): + descriptor = structs.SamplerDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createSampler(js_descriptor) + + label = kwargs.pop("label", "") + return GPUSampler(label, js_obj, device=self) + + def import_external_texture(self, **kwargs): + descriptor = structs.ExternalTextureDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.importExternalTexture(js_descriptor) + + label = kwargs.pop("label", "") + return GPUExternalTexture(label, js_obj, device=self) + + def create_bind_group_layout(self, **kwargs): + descriptor = structs.BindGroupLayoutDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createBindGroupLayout(js_descriptor) + + label = kwargs.pop("label", "") + return GPUBindGroupLayout(label, js_obj, device=self) + + def create_pipeline_layout(self, **kwargs): + descriptor = structs.PipelineLayoutDescriptor(**kwargs) + 
js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createPipelineLayout(js_descriptor) + + label = kwargs.pop("label", "") + return GPUPipelineLayout(label, js_obj, device=self) + + def create_bind_group(self, **kwargs): + descriptor = structs.BindGroupDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createBindGroup(js_descriptor) + + label = kwargs.pop("label", "") + return GPUBindGroup(label, js_obj, device=self) + + def create_shader_module(self, **kwargs): + descriptor = structs.ShaderModuleDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createShaderModule(js_descriptor) + + label = kwargs.pop("label", "") + return GPUShaderModule(label, js_obj, device=self) + + def create_compute_pipeline(self, **kwargs): + descriptor = structs.ComputePipelineDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createComputePipeline(js_descriptor) + + label = kwargs.pop("label", "") + return GPUComputePipeline(label, js_obj, device=self) + + def create_render_pipeline(self, **kwargs): + descriptor = structs.RenderPipelineDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createRenderPipeline(js_descriptor) + + label = kwargs.pop("label", "") + return GPURenderPipeline(label, js_obj, device=self) + + # TODO: was was there a redefinition for createComputePipelineAsync async variant? + # TODO: was was there a redefinition for createRenderPipelineAsync async variant? 
+ def create_command_encoder(self, **kwargs): + descriptor = structs.CommandEncoderDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createCommandEncoder(js_descriptor) + + label = kwargs.pop("label", "") + return GPUCommandEncoder(label, js_obj, device=self) + + def create_render_bundle_encoder(self, **kwargs): + descriptor = structs.RenderBundleEncoderDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createRenderBundleEncoder(js_descriptor) + + label = kwargs.pop("label", "") + return GPURenderBundleEncoder(label, js_obj, device=self) + + # Custom implementation for createQuerySet from _implementation.py: + def create_query_set(self, **kwargs): + descriptor = structs.QuerySetDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createQuerySet(js_descriptor) + + label = kwargs.pop("label", "") + type = descriptor.get("type") + count = descriptor.get("count") + return GPUQuerySet(label, js_obj, device=self, type=type, count=count) + + def push_error_scope(self, filter: enums.ErrorFilterEnum | None = None) -> None: + + self._internal.pushErrorScope(filter) + + # TODO: popErrorScope sync variant likely taken from _classes.py directly! 
+ # Additional custom methods from _implementation.py: + def __init__(self, label: str, js_device, adapter: GPUAdapter): + features = set(js_device.features) + + js_limits = js_device.limits + limits = {} + for limit in dir(js_limits): + if isinstance(getattr(js_limits, limit), int) and "_" not in limit: + limits[limit] = getattr(js_limits, limit) + + queue = GPUQueue(label="default queue", internal=js_device.queue, device=self) + super().__init__(label, internal=js_device, adapter=adapter, features=features, limits=limits, queue=queue) + + def create_buffer_with_data_(self, *, label="", data, usage: flags.BufferUsageFlags) -> "GPUBuffer": + data = memoryview(data).cast("B") # unit8 + data_size = (data.nbytes + 3) & ~3 # align to 4 bytes + + # if it's a Descriptor you need the keywords + # do we need to also need to modify the usages? + js_buf = self._internal.createBuffer(label=label, size=data_size, usage=usage, mappedAtCreation=True) + # print("created buffer", js_buf, dir(js_buf), js_buf.size) + array_buf = js_buf.getMappedRange(0, data_size) + Uint8Array.new(array_buf).assign(data) + # print(array_buf.to_py().tolist()) + js_buf.unmap() + # print("created buffer", js_buf, dir(js_buf), js_buf.size) + return GPUBuffer(label, js_buf, self, data_size, usage, enums.BufferMapState.unmapped) + + def create_compute_pipeline_async(self, **kwargs): + descriptor = structs.ComputePipelineDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_promise = self._internal.createComputePipelineAsync(js_descriptor) + + label = kwargs.get("label", "") + + def construct_compute_pipeline(js_cp): + return classes.GPUComputePipeline(label, js_cp, self) + + promise = GPUPromise("create_compute_pipeline", construct_compute_pipeline, loop=self._loop) + js_promise.then(promise._set_input) + + return promise + + def create_render_pipeline_async(self, **kwargs): + descriptor = structs.RenderPipelineDescriptor(**kwargs) + js_descriptor = 
to_js(descriptor, eager_converter=simple_js_accessor) + js_promise = self._internal.createRenderPipelineAsync(js_descriptor) + + label = kwargs.get("label", "") + + def construct_render_pipeline(js_rp): + return classes.GPURenderPipeline(label, js_rp, self) + + promise = GPUPromise("create_render_pipeline", construct_render_pipeline, loop=self._loop) + js_promise.then(promise._set_input) + + return promise + + @property + def adapter(self) -> GPUAdapter: + return self._adapter + + + +class GPUBuffer(classes.GPUBuffer, GPUObjectBase): + + # TODO: mapAsync sync variant likely taken from _classes.py directly! + def get_mapped_range(self, offset: int = 0, size: Union[int, None] = None) -> ArrayLike: + + self._internal.getMappedRange(offset, size) + + def unmap(self) -> None: + self._internal.unmap() + + def destroy(self) -> None: + self._internal.destroy() + + # Additional custom methods from _implementation.py: + def __init__(self, label, internal, device): + # can we just fill the _classes constructor with properties? + super().__init__(internal.label, internal, device, internal.size, internal.usage, internal.mapState) + + def write_mapped(self, data, buffer_offset: int | None = None): + if self.map_state != enums.BufferMapState.mapped: + raise RuntimeError(f"Can only write to a buffer if its mapped: {self.map_state=}") + + # make sure it's in a known datatype??? + data = memoryview(data).cast("B") + size = (data.nbytes + 3) & ~3 + + # None default values become undefined in js, which should still work as the function can be overloaded. + # TODO: try without this line + if buffer_offset is None: + buffer_offset = 0 + + # these can't be passed as keyword arguments I guess... 
+ array_buf = self._internal.getMappedRange(buffer_offset, size) + Uint8Array.new(array_buf).assign(data) + + def map_async(self, mode: flags.MapModeFlags | None, offset: int = 0, size: int | None = None) -> GPUPromise[None]: + map_promise = self._internal.mapAsync(mode, offset, size) + + promise = GPUPromise("buffer.map_async", None, loop=self._device._loop) + map_promise.then(promise._set_input) # presumably this signals via a none callback to nothing? + return promise + + @property + def map_state(self) -> enums.BufferMapState: + return self._internal.mapState + + @property + def size(self) -> int: + js_size = self._internal.size + # print("GPUBuffer.size", js_size, type(js_size)) + return js_size + + @property + def usage(self) -> flags.BufferUsageFlags: + return self._internal.usage + + + +class GPUTexture(classes.GPUTexture, GPUObjectBase): + + # Custom implementation for createView from _implementation.py: + def create_view(self, **kwargs): + descriptor = structs.TextureViewDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createView(js_descriptor) + + label = kwargs.pop("label", "") + return classes.GPUTextureView(label, js_obj, device=self._device, texture=self, size=self._tex_info["size"]) + + def destroy(self) -> None: + self._internal.destroy() + + # Additional custom methods from _implementation.py: + def __init__(self, label: str, internal, device): + # here we create the cached _tex_info dict + + tex_info = { + "size": (internal.width, internal.height, internal.depthOrArrayLayers), + "mip_level_count": internal.mipLevelCount, + "sample_count": internal.sampleCount, + "dimension": internal.dimension, + "format": internal.format, + "usage": internal.usage, + } + super().__init__(internal.label, internal, device, tex_info) + + + +class GPUTextureView(classes.GPUTextureView, GPUObjectBase): + + pass + +class GPUSampler(classes.GPUSampler, GPUObjectBase): + + pass + +class 
GPUBindGroupLayout(classes.GPUBindGroupLayout, GPUObjectBase): + + pass + +class GPUBindGroup(classes.GPUBindGroup, GPUObjectBase): + + pass + +class GPUPipelineLayout(classes.GPUPipelineLayout, GPUObjectBase): + + pass + +class GPUShaderModule(classes.GPUShaderModule, GPUObjectBase): + + # TODO: getCompilationInfo sync variant likely taken from _classes.py directly! + pass + +class GPUCompilationMessage(classes.GPUCompilationMessage, ): + + pass + +class GPUCompilationInfo(classes.GPUCompilationInfo, ): + + pass + +class GPUPipelineError(classes.GPUPipelineError, ): + + pass + +class GPUPipelineBase(classes.GPUPipelineBase, ): + + # Custom implementation for getBindGroupLayout from _implementation.py: + def get_bind_group_layout(self, index: int) -> classes.GPUBindGroupLayout: + res = self._internal.getBindGroupLayout(index) + # returns the js object... so we call the constructor here manually - for now. + label = res.label + return classes.GPUBindGroupLayout(label, res, self._device) + + +class GPUComputePipeline(classes.GPUComputePipeline, GPUObjectBase, GPUPipelineBase): + + pass + +class GPURenderPipeline(classes.GPURenderPipeline, GPUObjectBase, GPUPipelineBase): + + pass + +class GPUCommandBuffer(classes.GPUCommandBuffer, GPUObjectBase): + + pass + +class GPUCommandEncoder(classes.GPUCommandEncoder, GPUObjectBase, GPUCommandsMixin, GPUDebugCommandsMixin): + + def begin_render_pass(self, **kwargs): + descriptor = structs.RenderPassDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.beginRenderPass(js_descriptor) + + label = kwargs.pop("label", "") + return GPURenderPassEncoder(label, js_obj, device=self) + + def begin_compute_pass(self, **kwargs): + descriptor = structs.ComputePassDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.beginComputePass(js_descriptor) + + label = kwargs.pop("label", "") + return 
GPUComputePassEncoder(label, js_obj, device=self) + + def copy_buffer_to_buffer(self, source: Union["GPUBuffer", None] = None, source_offset: Union[int, None] = None, destination: Union["GPUBuffer", None] = None, destination_offset: Union[int, None] = None, size: Union[int, None] = None) -> None: + js_source = source._internal + js_destination = destination._internal + self._internal.copyBufferToBuffer(js_source, source_offset, js_destination, destination_offset, size) + + def copy_buffer_to_texture(self, source: structs.TexelCopyBufferInfoStruct | None = None, destination: structs.TexelCopyTextureInfoStruct | None = None, copy_size: tuple[int, int, int] | structs.Extent3DStruct | None = None) -> None: + source_desc = structs.TexelCopyBufferInfo(**source) + js_source = to_js(source_desc, eager_converter=simple_js_accessor) + destination_desc = structs.TexelCopyTextureInfo(**destination) + js_destination = to_js(destination_desc, eager_converter=simple_js_accessor) + # TODO: argument copy_size of JS type GPUExtent3D, py type tuple[int, int, int] | structs.Extent3DStruct might need conversion + self._internal.copyBufferToTexture(js_source, js_destination, copy_size) + + def copy_texture_to_buffer(self, source: structs.TexelCopyTextureInfoStruct | None = None, destination: structs.TexelCopyBufferInfoStruct | None = None, copy_size: tuple[int, int, int] | structs.Extent3DStruct | None = None) -> None: + source_desc = structs.TexelCopyTextureInfo(**source) + js_source = to_js(source_desc, eager_converter=simple_js_accessor) + destination_desc = structs.TexelCopyBufferInfo(**destination) + js_destination = to_js(destination_desc, eager_converter=simple_js_accessor) + # TODO: argument copy_size of JS type GPUExtent3D, py type tuple[int, int, int] | structs.Extent3DStruct might need conversion + self._internal.copyTextureToBuffer(js_source, js_destination, copy_size) + + def copy_texture_to_texture(self, source: structs.TexelCopyTextureInfoStruct | None = None, 
destination: structs.TexelCopyTextureInfoStruct | None = None, copy_size: tuple[int, int, int] | structs.Extent3DStruct | None = None) -> None: + source_desc = structs.TexelCopyTextureInfo(**source) + js_source = to_js(source_desc, eager_converter=simple_js_accessor) + destination_desc = structs.TexelCopyTextureInfo(**destination) + js_destination = to_js(destination_desc, eager_converter=simple_js_accessor) + # TODO: argument copy_size of JS type GPUExtent3D, py type tuple[int, int, int] | structs.Extent3DStruct might need conversion + self._internal.copyTextureToTexture(js_source, js_destination, copy_size) + + def clear_buffer(self, buffer: Union["GPUBuffer", None] = None, offset: int = 0, size: Union[int, None] = None) -> None: + js_buffer = buffer._internal + self._internal.clearBuffer(js_buffer, offset, size) + + def resolve_query_set(self, query_set: Union["GPUQuerySet", None] = None, first_query: Union[int, None] = None, query_count: Union[int, None] = None, destination: Union["GPUBuffer", None] = None, destination_offset: Union[int, None] = None) -> None: + js_querySet = query_set._internal + js_destination = destination._internal + self._internal.resolveQuerySet(js_querySet, first_query, query_count, js_destination, destination_offset) + + def finish(self, **kwargs): + descriptor = structs.CommandBufferDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.finish(js_descriptor) + + label = kwargs.pop("label", "") + return GPUCommandBuffer(label, js_obj, device=self) + + +class GPUComputePassEncoder(classes.GPUComputePassEncoder, GPUObjectBase, GPUCommandsMixin, GPUDebugCommandsMixin, GPUBindingCommandsMixin): + + def set_pipeline(self, pipeline: Union["GPUComputePipeline", None] = None) -> None: + js_pipeline = pipeline._internal + self._internal.setPipeline(js_pipeline) + + def dispatch_workgroups(self, workgroup_count_x: Union[int, None] = None, workgroup_count_y: int = 1, 
workgroup_count_z: int = 1) -> None: + + self._internal.dispatchWorkgroups(workgroup_count_x, workgroup_count_y, workgroup_count_z) + + def dispatch_workgroups_indirect(self, indirect_buffer: Union["GPUBuffer", None] = None, indirect_offset: Union[int, None] = None) -> None: + js_indirectBuffer = indirect_buffer._internal + self._internal.dispatchWorkgroupsIndirect(js_indirectBuffer, indirect_offset) + + def end(self) -> None: + self._internal.end() + + +class GPURenderPassEncoder(classes.GPURenderPassEncoder, GPUObjectBase, GPUCommandsMixin, GPUDebugCommandsMixin, GPUBindingCommandsMixin, GPURenderCommandsMixin): + + def set_viewport(self, x: Union[float, None] = None, y: Union[float, None] = None, width: Union[float, None] = None, height: Union[float, None] = None, min_depth: Union[float, None] = None, max_depth: Union[float, None] = None) -> None: + + self._internal.setViewport(x, y, width, height, min_depth, max_depth) + + def set_scissor_rect(self, x: Union[int, None] = None, y: Union[int, None] = None, width: Union[int, None] = None, height: Union[int, None] = None) -> None: + + self._internal.setScissorRect(x, y, width, height) + + def set_blend_constant(self, color: tuple[float, float, float, float] | structs.ColorStruct | None = None) -> None: + # TODO: argument color of JS type GPUColor, py type tuple[float, float, float, float] | structs.ColorStruct might need conversion + self._internal.setBlendConstant(color) + + def set_stencil_reference(self, reference: Union[int, None] = None) -> None: + + self._internal.setStencilReference(reference) + + def begin_occlusion_query(self, query_index: Union[int, None] = None) -> None: + + self._internal.beginOcclusionQuery(query_index) + + def end_occlusion_query(self) -> None: + self._internal.endOcclusionQuery() + + def execute_bundles(self, bundles: Sequence["GPURenderBundle"] | None = None) -> None: + # TODO: argument bundles of JS type sequence, py type list[GPURenderBundle] might need conversion + 
self._internal.executeBundles(bundles) + + def end(self) -> None: + self._internal.end() + + +class GPURenderBundle(classes.GPURenderBundle, GPUObjectBase): + + pass + +class GPURenderBundleEncoder(classes.GPURenderBundleEncoder, GPUObjectBase, GPUCommandsMixin, GPUDebugCommandsMixin, GPUBindingCommandsMixin, GPURenderCommandsMixin): + + def finish(self, **kwargs): + descriptor = structs.RenderBundleDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.finish(js_descriptor) + + label = kwargs.pop("label", "") + return GPURenderBundle(label, js_obj, device=self) + + +class GPUQueue(classes.GPUQueue, GPUObjectBase): + + # Custom implementation for submit from _implementation.py: + def submit(self, command_buffers: structs.Sequence["GPUCommandBuffer"]) -> None: + js_command_buffers = [cb._internal for cb in command_buffers] + self._internal.submit(js_command_buffers) + + # TODO: onSubmittedWorkDone sync variant likely taken from _classes.py directly! 
+ def write_buffer(self, buffer: Union["GPUBuffer", None] = None, buffer_offset: Union[int, None] = None, data: Union[ArrayLike, None] = None, data_offset: int = 0, size: Union[int, None] = None) -> None: + js_buffer = buffer._internal + + if data is not None: + data = memoryview(data).cast("B") + data_size = (data.nbytes + 3) & ~3 # align to 4 bytes + js_data = Uint8Array.new(data_size) + js_data.assign(data) + else: + js_data = None + + self._internal.writeBuffer(js_buffer, buffer_offset, js_data, data_offset, size) + + def write_texture(self, destination: structs.TexelCopyTextureInfoStruct | None = None, data: Union[ArrayLike, None] = None, data_layout: structs.TexelCopyBufferLayoutStruct | None = None, size: tuple[int, int, int] | structs.Extent3DStruct | None = None) -> None: + destination_desc = structs.TexelCopyTextureInfo(**destination) + js_destination = to_js(destination_desc, eager_converter=simple_js_accessor) + + if data is not None: + data = memoryview(data).cast("B") + data_size = (data.nbytes + 3) & ~3 # align to 4 bytes + js_data = Uint8Array.new(data_size) + js_data.assign(data) + else: + js_data = None + + data_layout_desc = structs.TexelCopyBufferLayout(**data_layout) + js_dataLayout = to_js(data_layout_desc, eager_converter=simple_js_accessor) + # TODO: argument size of JS type GPUExtent3D, py type tuple[int, int, int] | structs.Extent3DStruct might need conversion + self._internal.writeTexture(js_destination, js_data, js_dataLayout, size) + + def copy_external_image_to_texture(self, source: structs.CopyExternalImageSourceInfoStruct | None = None, destination: structs.CopyExternalImageDestInfoStruct | None = None, copy_size: tuple[int, int, int] | structs.Extent3DStruct | None = None) -> None: + source_desc = structs.CopyExternalImageSourceInfo(**source) + js_source = to_js(source_desc, eager_converter=simple_js_accessor) + destination_desc = structs.CopyExternalImageDestInfo(**destination) + js_destination = to_js(destination_desc, 
eager_converter=simple_js_accessor) + # TODO: argument copy_size of JS type GPUExtent3D, py type tuple[int, int, int] | structs.Extent3DStruct might need conversion + self._internal.copyExternalImageToTexture(js_source, js_destination, copy_size) + + # Additional custom methods from _implementation.py: + def read_buffer(self, buffer: GPUBuffer, buffer_offset: int = 0, size: int | None = None) -> memoryview: + # largely copied from wgpu-native/_api.py + # print(dir(self)) + device = self._device + + if not size: + data_length = buffer.size - buffer_offset + else: + data_length = int(size) + if not (0 <= buffer_offset < buffer.size): # pragma: no cover + raise ValueError("Invalid buffer_offset") + if not (data_length <= buffer.size - buffer_offset): # pragma: no cover + raise ValueError("Invalid data_length") + data_length = (data_length + 3) & ~3 # align to 4 bytes + + js_temp_buffer = device._internal.createBuffer(size=data_length, usage=flags.BufferUsage.COPY_DST | flags.BufferUsage.MAP_READ, mappedAtCreation=False, label="output buffer temp") + + js_encoder = device._internal.createCommandEncoder() + # TODO: somehow test if all the offset math is correct + js_encoder.copyBufferToBuffer(buffer._internal, buffer_offset, js_temp_buffer, buffer_offset, data_length) + self._internal.submit([js_encoder.finish()]) + + # best way to await the promise directly? + # TODO: can we do more steps async before waiting? 
+ run_sync(js_temp_buffer.mapAsync(flags.MapMode.READ, 0, data_length)) + array_buf = js_temp_buffer.getMappedRange() + res = array_buf.slice(0) + js_temp_buffer.unmap() + return res.to_py() + + + +class GPUQuerySet(classes.GPUQuerySet, GPUObjectBase): + + def destroy(self) -> None: + self._internal.destroy() + + +class GPUCanvasContext(classes.GPUCanvasContext, ): + + # Custom implementation for configure from _implementation.py: + def configure(self, **kwargs): + descriptor = structs.CanvasConfiguration(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + + self._internal.configure(js_descriptor) + self._config = { + "device": kwargs.get("device"), + "format": kwargs.get("format"), + "usage": kwargs.get("usage", 0x10), + "view_formats": kwargs.get("view_formats", ()), + "color_space": kwargs.get("color_space", "srgb"), + "tone_mapping": kwargs.get("tone_mapping", None), + "alpha_mode": kwargs.get("alpha_mode", "opaque"), + } + + def unconfigure(self) -> None: + self._internal.unconfigure() + + # TODO: implement codegen for getConfiguration with args [] or return type GPUCanvasConfiguration? + # Custom implementation for getCurrentTexture from _implementation.py: + def get_current_texture(self) -> GPUTexture: + js_texture = self._internal.getCurrentTexture() + + label = "" # always empty? 
+ return GPUTexture(label, js_texture, self._config["device"]) + + # Additional custom methods from _implementation.py: + def __init__(self, present_info: dict): + super().__init__(present_info) + canvas_element = present_info["window"] + self._internal = canvas_element.getContext("webgpu") + + def get_preferred_format(self, adapter: GPUAdapter | None) -> enums.TextureFormat: + return gpu._internal.getPreferredCanvasFormat() + + + +class GPUDeviceLostInfo(classes.GPUDeviceLostInfo, ): + + pass + +class GPUError(classes.GPUError, ): + + pass + +class GPUValidationError(classes.GPUValidationError, GPUError): + + pass + +class GPUOutOfMemoryError(classes.GPUOutOfMemoryError, GPUError): + + pass + +class GPUInternalError(classes.GPUInternalError, GPUError): + + pass + + +gpu = GPU() diff --git a/wgpu/backends/js_webgpu/_helpers.py b/wgpu/backends/js_webgpu/_helpers.py new file mode 100644 index 00000000..e4f7501e --- /dev/null +++ b/wgpu/backends/js_webgpu/_helpers.py @@ -0,0 +1,94 @@ +""" +Helper functions for dealing with pyodide for the js webgpu backend. +""" + +from ... import classes, structs +from pyodide.ffi import to_js + + +def to_camel_case(snake_str): + components = snake_str.split('_') + res = components[0] + ''.join(x.title() for x in components[1:]) + # maybe keywords are a problem? + # https://pyodide.org/en/stable/usage/faq.html#how-can-i-access-javascript-objects-attributes-in-python-if-their-names-are-python-keywords + # if res in ["type", "format"]: + # res += "_" + return res + + +# TODO: clean this up before reading for merge! + +# for use in to_js() https://pyodide.org/en/stable/usage/api/python-api/ffi.html#pyodide.ffi.ToJsConverter +# you have to do the recursion yourself... 
def simple_js_accessor(value, convert, cache=None):
    """Eager converter for pyodide's ``to_js``.

    Converts wgpu-py values into JS-friendly ones:

    - ``GPUObjectBase`` instances collapse to their wrapped JS handle
      (``_internal``, a JsProxy).
    - ``structs.Struct`` instances become plain dicts with camelCase keys;
      nested dicts/lists are re-typed into the struct classes named by the
      field annotations and converted recursively.
    - lists/tuples convert element-wise.
    - anything else falls through to pyodide's default ``convert``.

    ``cache`` is accepted for signature compatibility with the ToJsConverter
    protocol but is not used.

    NOTE(review): re-typing nested values relies on string annotations of the
    form ``"XStruct..."`` / ``"Sequence[XStruct..."``; annotations offering
    more than two alternatives are not handled — confirm against the
    generated structs.
    """
    if isinstance(value, classes.GPUObjectBase):
        return value._internal  # type: JsProxy
    elif isinstance(value, structs.Struct):
        result = {}
        for k, v in value.items():
            camel_key = to_camel_case(k)
            # If there is a dict further down, we still need to fix its keys.
            if isinstance(v, dict):
                if k == "resource":
                    # This one is a more complex (union) type:
                    # https://www.w3.org/TR/webgpu/#typedefdef-gpubindingresource
                    v = structs.BufferBinding(**v)
                    down_convert = to_js(v, eager_converter=simple_js_accessor)
                    down_convert = to_js(down_convert.to_py(depth=1), depth=1) if hasattr(down_convert, "to_py") else down_convert
                    result[camel_key] = down_convert
                    continue
                # Recover the struct type from the (string) annotation.
                # Will not work if there are more than two options.
                v_struct_type_name = value.__annotations__[k].partition("Struct")[0]
                v_struct_type = structs.__dict__[v_struct_type_name]  # because the annotation is just a string... doesn't feel great
                v = v_struct_type(**v)

            # A list of dicts would otherwise hit the default sequence and
            # dict converters, so re-type the elements here too.
            elif isinstance(v, list):  # maybe tuple too?
                if v and isinstance(v[0], dict):  # assume all elements are the same type too and non empty?
                    v_struct_type_name = value.__annotations__[k].removeprefix("Sequence[").partition("Struct")[0]
                    v_struct_type = structs.__dict__[v_struct_type_name]
                    v = [v_struct_type(**item) for item in v]
                # else: could be a list of other objects like GPUBindGroupLayout.

            down_convert = to_js(v, eager_converter=simple_js_accessor)
            down_convert = to_js(down_convert.to_py(depth=1), depth=1) if hasattr(down_convert, "to_py") else down_convert
            result[camel_key] = down_convert
        return result

    elif isinstance(value, (list, tuple)):
        result = [to_js(v, eager_converter=simple_js_accessor) for v in value]
        return to_js(result, depth=1)  # to make sure it's like an ArrayList?

    # Hopefully only reached when the contents are already converted.
    return convert(value)  # or to_js(value)?

# TODO: can we implement our own variant of JsProxy and PyProxy, to_js and to_py? to work with pyodide and not around it?
+# https://pyodide.org/en/stable/usage/type-conversions.html#type-translations diff --git a/wgpu/backends/js_webgpu/_implementation.py b/wgpu/backends/js_webgpu/_implementation.py new file mode 100644 index 00000000..8ad58c24 --- /dev/null +++ b/wgpu/backends/js_webgpu/_implementation.py @@ -0,0 +1,355 @@ +""" +This provides the pyodide implementation for the js_webgpu backend. +Constructors and Methods defined here are picked over auto generated methods for the backend in _api.py +""" + +from ... import classes, structs, flags, enums +from ._helpers import simple_js_accessor + +from pyodide.ffi import to_js, run_sync, JsProxy +from js import window, Uint8Array + +class GPUPromise(classes.GPUPromise): + # TODO: can we resolve the js promises and then call our constructors? + # should loop be globally the webloop? or will rendercanvas give us that in the future? + + def sync_wait(self): + # pyodide way that hopefully works? + # explanation: https://blog.pyodide.org/posts/jspi/ + result = run_sync(self) + return result + + +class GPU(classes.GPU): + def __init__(self): + self._internal = window.navigator.gpu # noqa: F821 + + # TODO: maybe autogenerate async? + def request_adapter_async(self, loop=None, canvas=None, **options) -> GPUPromise["GPUAdapter"]: + options = structs.RequestAdapterOptions(**options) + js_options = to_js(options, eager_converter=simple_js_accessor) + js_adapter_promise = self._internal.requestAdapter(js_options) + + if loop is None: + # can we use this instead? 
+ webloop = js_adapter_promise.get_loop() + loop = webloop + + def adapter_constructor(js_adapter): + return GPUAdapter(js_adapter, loop=loop) + promise = GPUPromise("request_adapter", adapter_constructor, loop=loop) + + js_adapter_promise.then(promise._set_input) # we chain the js resolution to our promise + return promise + + def enumerate_adapters_async(self, loop=None) -> GPUPromise[list["GPUAdapter"]]: + adapter_hp = self.request_adapter_sync(power_preference="high-performance") + adapter_lp = self.request_adapter_sync(power_preference="low-power") + + promise = GPUPromise("enumerate_adapters", None, loop=loop) + promise._set_input([adapter_hp, adapter_lp]) + return promise + + # TODO: autogenerate properties! + @property + def wgsl_language_features(self): + return self._internal.wgslLanguageFeatures + + + # apidiff for low level context access + def get_canvas_context(self, present_info: dict) -> "GPUCanvasContext": + return GPUCanvasContext(present_info) + + +class GPUAdapter(classes.GPUAdapter): + def __init__(self, js_adapter, loop): + internal = js_adapter + # manually turn these into useful python objects + features = set(js_adapter.features) + + # TODO: _get_limits()? + limits = js_adapter.limits + py_limits = {} + for limit in dir(limits): + # we don't have the GPUSupportedLimits as a struct or list any where in the code right now, maybe we un skip it in the codegen? 
+ if isinstance(getattr(limits, limit), int) and "_" not in limit: + py_limits[limit] = getattr(limits, limit) + + infos = ["vendor", "architecture", "device", "description", "subgroupMinSize", "subgroupMaxSize", "isFallbackAdapter"] + adapter_info = js_adapter.info + py_adapter_info = {} + for info in infos: + if hasattr(adapter_info, info): + py_adapter_info[info] = getattr(adapter_info, info) + + #for compatibility, we fill the native-extra infos too: + py_adapter_info["vendor_id"] = 0 + py_adapter_info["device_id"] = 0 + py_adapter_info["adapter_type"] = "browser" + py_adapter_info["backend_type"] = "WebGPU" + + adapter_info = classes.GPUAdapterInfo(**py_adapter_info) + + super().__init__(internal=internal, features=features, limits=py_limits, adapter_info=adapter_info, loop=loop) + + # TODO: we should + def request_device_async(self, **kwargs) -> GPUPromise["GPUDevice"]: + descriptor = structs.DeviceDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_device_promise = self._internal.requestDevice(js_descriptor) + + label = kwargs.get("label", "") + def device_constructor(js_device): + # TODO: do we need to hand down a default_queue here? + return GPUDevice(label, js_device, adapter=self) + + promise = GPUPromise("request_device", device_constructor, loop=self._loop) + js_device_promise.then(promise._set_input) + return promise + + +class GPUDevice(classes.GPUDevice): + def __init__(self, label:str, js_device, adapter:GPUAdapter): + features = set(js_device.features) + + js_limits = js_device.limits + limits = {} + for limit in dir(js_limits): + if isinstance(getattr(js_limits, limit), int) and "_" not in limit: + limits[limit] = getattr(js_limits, limit) + + queue = GPUQueue(label="default queue", internal=js_device.queue, device=self) + super().__init__(label, internal=js_device, adapter=adapter, features=features, limits=limits, queue=queue) + + # API diff: useful to have? 
+ @property + def adapter(self) -> GPUAdapter: + return self._adapter + + # TODO: currently unused, rewrite and test! + # TODO: apidiff rewritten so we avoid the buggy mess in map_write for a bit. + def create_buffer_with_data_(self, *, label="", data, usage: flags.BufferUsageFlags) -> "GPUBuffer": + data = memoryview(data).cast("B") # unit8 + data_size = (data.nbytes + 3) & ~3 # align to 4 bytes + + # if it's a Descriptor you need the keywords + # do we need to also need to modify the usages? + js_buf = self._internal.createBuffer(label=label, size=data_size, usage=usage, mappedAtCreation=True) + # print("created buffer", js_buf, dir(js_buf), js_buf.size) + array_buf = js_buf.getMappedRange(0, data_size) + Uint8Array.new(array_buf).assign(data) + # print(array_buf.to_py().tolist()) + js_buf.unmap() + # print("created buffer", js_buf, dir(js_buf), js_buf.size) + return GPUBuffer(label, js_buf, self, data_size, usage, enums.BufferMapState.unmapped) + + # TODO: no example tests this! + # TODO: this exists fake-sync and async in webgpu already. Needs to be handled in the generation correctly! 
+ def create_compute_pipeline_async(self, **kwargs): + descriptor = structs.ComputePipelineDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_promise = self._internal.createComputePipelineAsync(js_descriptor) + + label = kwargs.get("label", "") + def construct_compute_pipeline(js_cp): + return classes.GPUComputePipeline(label, js_cp, self) + promise = GPUPromise("create_compute_pipeline", construct_compute_pipeline, loop=self._loop) + js_promise.then(promise._set_input) + + return promise + + # TODO: same as above + def create_render_pipeline_async(self, **kwargs): + descriptor = structs.RenderPipelineDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_promise = self._internal.createRenderPipelineAsync(js_descriptor) + + label = kwargs.get("label", "") + def construct_render_pipeline(js_rp): + return classes.GPURenderPipeline(label, js_rp, self) + promise = GPUPromise("create_render_pipeline", construct_render_pipeline, loop=self._loop) + js_promise.then(promise._set_input) + + return promise + + # this one needs additional parameters in the constructor + def create_query_set(self, **kwargs): + descriptor = structs.QuerySetDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createQuerySet(js_descriptor) + + label = kwargs.pop("label", "") + type = descriptor.get("type") + count = descriptor.get("count") + return GPUQuerySet(label, js_obj, device=self, type=type, count=count) + +class GPUBuffer(classes.GPUBuffer): + # TODO: remove label from the constructors! + def __init__(self, label, internal, device): + # can we just fill the _classes constructor with properties? 
+ super().__init__(internal.label, internal, device, internal.size, internal.usage, internal.mapState) + + @property + def map_state(self) -> enums.BufferMapState: + return self._internal.mapState + + @property + def size(self) -> int: + js_size = self._internal.size + # print("GPUBuffer.size", js_size, type(js_size)) + return js_size + + @property + def usage(self) -> flags.BufferUsageFlags: + return self._internal.usage + + # TODO apidiff + def write_mapped(self, data, buffer_offset: int | None = None): + if self.map_state != enums.BufferMapState.mapped: + raise RuntimeError(f"Can only write to a buffer if its mapped: {self.map_state=}") + + # make sure it's in a known datatype??? + data = memoryview(data).cast("B") + size = (data.nbytes + 3) & ~3 + + # None default values become undefined in js, which should still work as the function can be overloaded. + # TODO: try without this line + if buffer_offset is None: + buffer_offset = 0 + + # these can't be passed as keyword arguments I guess... + array_buf = self._internal.getMappedRange(buffer_offset, size) + Uint8Array.new(array_buf).assign(data) + + def map_async(self, mode: flags.MapModeFlags | None, offset: int = 0, size: int | None = None) -> GPUPromise[None]: + map_promise = self._internal.mapAsync(mode, offset, size) + + promise = GPUPromise("buffer.map_async", None, loop=self._device._loop) + map_promise.then(promise._set_input) # presumably this signals via a none callback to nothing? + return promise + +# TODO: we can't overwrite mixins already inhereted from.... +class GPUBindingCommandsMixin(classes.GPUBindingCommandsMixin): + # function has overloads! so this simple one works for now. 
+ def set_bind_group( + self, + index:int, + bind_group: classes.GPUBindGroup, + dynamic_offsets_data: list[int] = (), + dynamic_offsets_data_start = None, + dynamic_offsets_data_length = None + ) -> None: + + self._internal.setBindGroup(index, bind_group._internal, dynamic_offsets_data) + + +class GPUPipelineBase(classes.GPUPipelineBase): + # TODO: can we build some kind of "get_constructor" for the codegen instead? + def get_bind_group_layout(self, index: int) -> classes.GPUBindGroupLayout: + res = self._internal.getBindGroupLayout(index) + # returns the js object... so we call the constructor here manually - for now. + label = res.label + return classes.GPUBindGroupLayout(label, res, self._device) + + +class GPUQueue(classes.GPUQueue): + + # TODO: fix the generation for sequence types! + def submit(self, command_buffers: structs.Sequence["GPUCommandBuffer"]) -> None: + js_command_buffers = [cb._internal for cb in command_buffers] + self._internal.submit(js_command_buffers) + + # API diff + def read_buffer(self, buffer: GPUBuffer, buffer_offset: int=0, size: int | None = None) -> memoryview: + # largely copied from wgpu-native/_api.py + # print(dir(self)) + device = self._device + + if not size: + data_length = buffer.size - buffer_offset + else: + data_length = int(size) + if not (0 <= buffer_offset < buffer.size): # pragma: no cover + raise ValueError("Invalid buffer_offset") + if not (data_length <= buffer.size - buffer_offset): # pragma: no cover + raise ValueError("Invalid data_length") + data_length = (data_length + 3) & ~3 # align to 4 bytes + + js_temp_buffer = device._internal.createBuffer( + size=data_length, + usage=flags.BufferUsage.COPY_DST | flags.BufferUsage.MAP_READ, + mappedAtCreation=False, + label="output buffer temp" + ) + + js_encoder = device._internal.createCommandEncoder() + # TODO: somehow test if all the offset math is correct + js_encoder.copyBufferToBuffer(buffer._internal, buffer_offset, js_temp_buffer, buffer_offset, data_length) + 
self._internal.submit([js_encoder.finish()]) + + # best way to await the promise directly? + # TODO: can we do more steps async before waiting? + run_sync(js_temp_buffer.mapAsync(flags.MapMode.READ, 0, data_length)) + array_buf = js_temp_buffer.getMappedRange() + res = array_buf.slice(0) + js_temp_buffer.unmap() + return res.to_py() + + +class GPUTexture(classes.GPUTexture): + def __init__(self, label: str, internal, device): + # here we create the cached _tex_info dict + + tex_info = { + "size": (internal.width, internal.height, internal.depthOrArrayLayers), + "mip_level_count": internal.mipLevelCount, + "sample_count": internal.sampleCount, + "dimension": internal.dimension, + "format": internal.format, + "usage": internal.usage, + } + super().__init__(internal.label, internal, device, tex_info) + + # has a more complex constructor... + def create_view(self, **kwargs): + descriptor = structs.TextureViewDescriptor(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + js_obj = self._internal.createView(js_descriptor) + + label = kwargs.pop("label", "") + return classes.GPUTextureView(label, js_obj, device=self._device, texture=self, size=self._tex_info["size"]) + +class GPUCanvasContext(classes.GPUCanvasContext): + def __init__(self, present_info: dict): + super().__init__(present_info) + canvas_element = present_info["window"] + self._internal = canvas_element.getContext("webgpu") + + # we can't really replace ._config by getConfiguration() because the device constructor is so complex? 
+ def configure(self, **kwargs): + descriptor = structs.CanvasConfiguration(**kwargs) + js_descriptor = to_js(descriptor, eager_converter=simple_js_accessor) + + self._internal.configure(js_descriptor) + self._config = { + "device": kwargs.get("device"), + "format": kwargs.get("format"), + "usage": kwargs.get("usage", 0x10), + "view_formats": kwargs.get("view_formats", ()), + "color_space": kwargs.get("color_space", "srgb"), + "tone_mapping": kwargs.get("tone_mapping", None), + "alpha_mode": kwargs.get("alpha_mode", "opaque"), + } + + def get_current_texture(self) -> GPUTexture: + js_texture = self._internal.getCurrentTexture() + + label = "" # always empty? + return GPUTexture(label, js_texture, self._config["device"]) + + # undo the api diff + def get_preferred_format(self, adapter: GPUAdapter | None) -> enums.TextureFormat: + return gpu._internal.getPreferredCanvasFormat() + +# needed here for the CanvasContext? +gpu = GPU() diff --git a/wgpu/resources/codegen_report.md b/wgpu/resources/codegen_report.md index f87fb411..68ab981d 100644 --- a/wgpu/resources/codegen_report.md +++ b/wgpu/resources/codegen_report.md @@ -43,3 +43,4 @@ * Validated 153 C function calls * Not using 68 C functions * Validated 96 C structs +## Writing backends/js_webgpu/_api.py diff --git a/wgpu/utils/compute.py b/wgpu/utils/compute.py index 705b1384..b6f9d7c3 100644 --- a/wgpu/utils/compute.py +++ b/wgpu/utils/compute.py @@ -163,7 +163,6 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n= pipeline_layout = device.create_pipeline_layout( bind_group_layouts=[bind_group_layout] ) - bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) compute = { "module": cshader, @@ -178,6 +177,7 @@ def compute_with_buffers(input_arrays, output_arrays, shader, constants=None, n= layout=pipeline_layout, compute=compute, ) + bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) command_encoder = 
device.create_command_encoder() compute_pass = command_encoder.begin_compute_pass() compute_pass.set_pipeline(compute_pipeline)