Skip to content

Commit

Permalink
Add an example of enumerating device adapters
Browse files Browse the repository at this point in the history
  • Loading branch information
chances committed Sep 28, 2023
1 parent d844eb1 commit 6c4b9dd
Show file tree
Hide file tree
Showing 5 changed files with 140 additions and 5 deletions.
1 change: 1 addition & 0 deletions dub.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
"license": "MIT",
"copyright": "Copyright © 2020-2023, Chance Snow",
"subPackages": [
"examples/enumerate",
"examples/headless",
"examples/triangle",
"examples/cube"
Expand Down
15 changes: 15 additions & 0 deletions examples/enumerate/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
.dub
docs.json
__dummy.html
docs/
/enumerate
enumerate.so
enumerate.dylib
enumerate.dll
enumerate.a
enumerate.lib
enumerate-test-*
*.exe
*.o
*.obj
*.lst
24 changes: 24 additions & 0 deletions examples/enumerate/dub.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
{
"name": "enumerate",
"description": "Enumerate GPU adapters example",
"authors": [
"Chance Snow"
],
"license": "MIT",
"copyright": "Copyright © 2023, Chance Snow",
"targetType": "executable",
"targetPath": "../../bin",
"dependencies": {
"wgpu-d": {
"path": "../.."
}
},
"preBuildCommands-osx": [
"echo Removing stale enumerate binary...",
"rm -f `find $PACKAGE_DIR/../../bin -name '*enumerate'`"
],
"postBuildCommands-osx": [
"echo Fixing up libwgpu dylib path...",
"install_name_tool -change /Users/runner/work/wgpu-native/wgpu-native/target/x86_64-apple-darwin/debug/deps/libwgpu_native.dylib @executable_path/../lib/libwgpu_native.dylib `find $PACKAGE_DIR/../../bin -name '*enumerate'`"
]
}
46 changes: 46 additions & 0 deletions examples/enumerate/source/app.d
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
import std.conv : text, to;
import std.stdio;
import std.string : fromStringz;

import wgpu.api;

// Adapted from https://github.com/gfx-rs/wgpu-rs/blob/f6123e4fe89f74754093c07b476099623b76dd08/examples/capture/main.rs
/// Enumerates all available GPU adapters, prints each adapter's properties,
/// then requests a concrete adapter/device pair and prints their limits.
void main() {
  writeln("Enumerating Device Adapters Example");

  auto instance = Instance.create();
  assert(instance.id, "Could not create WebGPU instance.");

  writefln("Preferred backend type? %s", cast(BackendType) instance.report.backendType);
  auto adapters = instance.adapters;
  writefln("Found %s suitable adapters:", adapters.length);
  // NOTE: D's std.format has no `%u` conversion (unlike C printf); using it
  // raises a FormatException at runtime. `%s` formats unsigned integers fine.
  foreach (i, adapter; adapters) {
    auto props = adapter.properties;
    writefln("Adapter %s", i);
    writefln("\tvendorID: %s", props.vendorID);
    writefln("\tvendorName: %s", props.vendorName.fromStringz.to!string);
    writefln("\tarchitecture: %s", props.architecture.fromStringz.to!string);
    writefln("\tdeviceID: %s", props.deviceID);
    writefln("\tname: %s", props.name.fromStringz.to!string);
    writefln("\tdriverDescription: %s", props.driverDescription.fromStringz.to!string);
    writefln("\tadapterType: %s", cast(AdapterType) props.adapterType);
    writefln("\tbackendType: %s", cast(BackendType) props.backendType);
    writeln();
    adapter.destroy();
  }

  // NOTE(review): OR-ing `BackendType` enum members assumes they are distinct
  // bit flags — confirm against wgpu.api; `InstanceBackend` may be intended here.
  auto adapter = instance.requestAdapter(PowerPreference.lowPower, BackendType.d3d12 | BackendType.vulkan);
  assert(adapter.ready, "Adapter instance was not initialized: Adapter status: " ~ adapter.status.text);
  writefln("Adapter limits:\n%s", adapter.limits.toString);
  writeln();

  auto device = adapter.requestDevice(adapter.limits);
  assert(device.ready, "Device is not ready: Device status: " ~ device.status.text);
  writefln("Device limits:\n%s", device.limits.toString);
  writeln();

  device.destroy();
  adapter.destroy();
  instance.destroy();
}
59 changes: 54 additions & 5 deletions source/wgpu/api.d
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,47 @@ alias InstanceDescriptor = WGPUInstanceDescriptor;
/// $(LI <a href="https://docs.rs/wgpu/0.10.2/wgpu/struct.Limits.html">wgpu::Limits</a> )
/// )
alias Limits = WGPULimits;

/// Formats the given `Limits` as a human-readable, newline-separated summary.
///
/// Params:
/// limits = The device/adapter limits to describe.
/// Returns: One line per limit, e.g. `max bind groups: 8`.
string toString(Limits limits) {
  import std.format : format;
  import std.string : join;

  // NOTE: std.format has no `%u` conversion (unlike C printf) — `%u` throws a
  // FormatException at runtime. `%s` renders unsigned integers correctly.
  return [
    "max texture dimension 1D: %s".format(limits.maxTextureDimension1D),
    "max texture dimension 2D: %s".format(limits.maxTextureDimension2D),
    "max texture dimension 3D: %s".format(limits.maxTextureDimension3D),
    "max texture array layers: %s".format(limits.maxTextureArrayLayers),
    "max bind groups: %s".format(limits.maxBindGroups),
    "max bindings per bind group: %s".format(limits.maxBindingsPerBindGroup),
    "max dynamic uniform buffers per pipeline layout: %s".format(limits.maxDynamicUniformBuffersPerPipelineLayout),
    "max dynamic storage buffers per pipeline layout: %s".format(limits.maxDynamicStorageBuffersPerPipelineLayout),
    "max sampled textures per shader stage: %s".format(limits.maxSampledTexturesPerShaderStage),
    "max samplers per shader stage: %s".format(limits.maxSamplersPerShaderStage),
    "max storage buffers per shader stage: %s".format(limits.maxStorageBuffersPerShaderStage),
    "max storage textures per shader stage: %s".format(limits.maxStorageTexturesPerShaderStage),
    "max uniform buffers per shader stage: %s".format(limits.maxUniformBuffersPerShaderStage),
    "max uniform buffer binding size: %s bytes".format(limits.maxUniformBufferBindingSize),
    "max storage buffer binding size: %s bytes".format(limits.maxStorageBufferBindingSize),
    "min uniform buffer offset alignment: %s bytes".format(limits.minUniformBufferOffsetAlignment),
    "min storage buffer offset alignment: %s bytes".format(limits.minStorageBufferOffsetAlignment),
    "max vertex buffers: %s".format(limits.maxVertexBuffers),
    "max buffer size: %s bytes".format(limits.maxBufferSize),
    "max vertex attributes: %s".format(limits.maxVertexAttributes),
    "max vertex buffer array stride: %s bytes".format(limits.maxVertexBufferArrayStride),
    "max inter stage shader components: %s".format(limits.maxInterStageShaderComponents),
    "max inter stage shader variables: %s".format(limits.maxInterStageShaderVariables),
    "max color attachments: %s".format(limits.maxColorAttachments),
    "max color attachment bytes per sample: %s".format(limits.maxColorAttachmentBytesPerSample),
    "max compute workgroup storage size: %s bytes".format(limits.maxComputeWorkgroupStorageSize),
    "max compute invocations per workgroup: %s".format(limits.maxComputeInvocationsPerWorkgroup),
    "max compute workgroup size x: %s".format(limits.maxComputeWorkgroupSizeX),
    "max compute workgroup size y: %s".format(limits.maxComputeWorkgroupSizeY),
    "max compute workgroup size z: %s".format(limits.maxComputeWorkgroupSizeZ),
    "max compute workgroups per dimension: %s".format(limits.maxComputeWorkgroupsPerDimension),
  ].join("\n");
}

/// Origin of a copy to/from a texture.
alias Origin3d = WGPUOrigin3D;
/// Describes a `PipelineLayout`.
Expand Down Expand Up @@ -461,21 +502,29 @@ struct Instance {
return Instance(wgpuCreateInstance(&desc));
}

/// Generates a usage report for this instance via `wgpuGenerateReport`.
/// Returns: The populated `WGPUGlobalReport` structure.
auto @property report() {
WGPUGlobalReport report;
wgpuGenerateReport(this.id, &report);
return report;
}

/// Retrieves all available `Adapter`s that match the given `Backends`.
///
/// Params:
/// backends = Backends from which to enumerate adapters.
/// See_Also: <a href="https://docs.rs/wgpu/latest/wgpu/struct.Instance.html#method.enumerate_adapters">wgpu::Instance.enumerate_adapters</a>
@property Adapter[] adapters(InstanceBackend backends = InstanceBackend.all) {
  import std.algorithm : map;
  import std.array : array;

  auto options = WGPUInstanceEnumerateAdapterOptions(null, backends);
  // First call with a null destination returns only the number of matching
  // adapters; the second call fills the preallocated buffer with their handles.
  size_t count = wgpuInstanceEnumerateAdapters(this.id, &options, null);
  auto adapters = new WGPUAdapter[count];
  wgpuInstanceEnumerateAdapters(this.id, &options, adapters.ptr);
  return adapters.map!(id => Adapter(id)).array;
}

/// Retrieves a new Adapter, asynchronously.
Expand Down

0 comments on commit 6c4b9dd

Please sign in to comment.