From 86c378d499499abcc852928e8c4b448205cf4a55 Mon Sep 17 00:00:00 2001 From: atlas dostal Date: Thu, 12 Dec 2024 00:26:37 -0500 Subject: [PATCH] Image atomics support --- CHANGELOG.md | 2 + naga/src/back/dot/mod.rs | 15 ++ naga/src/back/glsl/mod.rs | 64 +++++++ naga/src/back/hlsl/writer.rs | 26 +++ naga/src/back/msl/writer.rs | 41 ++++- naga/src/back/pipeline_constants.rs | 14 ++ naga/src/back/spv/block.rs | 16 ++ naga/src/back/spv/image.rs | 73 ++++++++ naga/src/back/spv/instructions.rs | 35 ++++ naga/src/back/wgsl/writer.rs | 30 +++- naga/src/compact/statements.rs | 28 +++ naga/src/front/glsl/parser/types.rs | 10 +- naga/src/front/glsl/types.rs | 2 +- naga/src/front/spv/convert.rs | 2 +- naga/src/front/spv/mod.rs | 3 +- naga/src/front/wgsl/lower/mod.rs | 44 +++++ naga/src/front/wgsl/parse/lexer.rs | 3 + naga/src/lib.rs | 48 +++++ naga/src/proc/terminator.rs | 1 + naga/src/valid/analyzer.rs | 17 ++ naga/src/valid/expression.rs | 4 + naga/src/valid/function.rs | 161 +++++++++++++++++ naga/src/valid/handles.rs | 13 ++ naga/src/valid/interface.rs | 7 +- naga/src/valid/mod.rs | 2 + naga/tests/in/atomicTexture.param.ron | 24 +++ naga/tests/in/atomicTexture.wgsl | 22 +++ naga/tests/out/hlsl/atomicTexture.hlsl | 27 +++ naga/tests/out/hlsl/atomicTexture.ron | 12 ++ naga/tests/out/msl/atomicTexture.msl | 28 +++ naga/tests/out/spv/atomicTexture.spvasm | 69 +++++++ naga/tests/out/wgsl/atomicTexture.wgsl | 21 +++ naga/tests/snapshots.rs | 4 + .../tests/image_atomics/image_32_atomics.wgsl | 13 ++ tests/tests/image_atomics/mod.rs | 168 ++++++++++++++++++ tests/tests/root.rs | 1 + wgpu-core/src/binding_model.rs | 4 + wgpu-core/src/conv.rs | 8 + wgpu-core/src/device/mod.rs | 4 + wgpu-core/src/device/resource.rs | 28 +++ wgpu-core/src/instance.rs | 14 +- wgpu-core/src/validation.rs | 15 +- wgpu-hal/src/dx12/adapter.rs | 11 ++ wgpu-hal/src/gles/adapter.rs | 6 +- wgpu-hal/src/gles/conv.rs | 1 + wgpu-hal/src/lib.rs | 8 +- wgpu-hal/src/metal/adapter.rs | 16 +- wgpu-hal/src/metal/conv.rs | 5 + wgpu-hal/src/metal/device.rs | 1 + wgpu-hal/src/vulkan/adapter.rs | 12 +- wgpu-hal/src/vulkan/conv.rs | 22 ++- wgpu-types/src/lib.rs | 46 ++++- wgpu/src/backend/webgpu.rs | 4 + 53 files changed, 1216 insertions(+), 39 deletions(-) create mode 100644 naga/tests/in/atomicTexture.param.ron create mode 100644 naga/tests/in/atomicTexture.wgsl create mode 100644 naga/tests/out/hlsl/atomicTexture.hlsl create mode 100644 naga/tests/out/hlsl/atomicTexture.ron create mode 100644 naga/tests/out/msl/atomicTexture.msl create mode 100644 naga/tests/out/spv/atomicTexture.spvasm create mode 100644 naga/tests/out/wgsl/atomicTexture.wgsl create mode 100644 tests/tests/image_atomics/image_32_atomics.wgsl create mode 100644 tests/tests/image_atomics/mod.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index beae54b7a7..501ee4b713 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -105,6 +105,8 @@ By @ErichDonGubler in [#6456](https://github.com/gfx-rs/wgpu/pull/6456), [#6148] ### New Features +Image atomic support in shaders. By @atlv24 in [#6706](https://github.com/gfx-rs/wgpu/pull/6706) + #### Naga - Clean up tests for atomic operations support in SPIR-V frontend. 
By @schell in [#6692](https://github.com/gfx-rs/wgpu/pull/6692) diff --git a/naga/src/back/dot/mod.rs b/naga/src/back/dot/mod.rs index 2780879657..e44e8d8eae 100644 --- a/naga/src/back/dot/mod.rs +++ b/naga/src/back/dot/mod.rs @@ -254,6 +254,21 @@ impl StatementGraph { } "Atomic" } + S::ImageAtomic { + image, + coordinate, + array_index, + fun: _, + value, + } => { + self.dependencies.push((id, image, "image")); + self.dependencies.push((id, coordinate, "coordinate")); + if let Some(expr) = array_index { + self.dependencies.push((id, expr, "array_index")); + } + self.dependencies.push((id, value, "value")); + "ImageAtomic" + } S::WorkGroupUniformLoad { pointer, result } => { self.emits.push((id, result)); self.dependencies.push((id, pointer, "pointer")); diff --git a/naga/src/back/glsl/mod.rs b/naga/src/back/glsl/mod.rs index 4cd60fc3cc..3698577d82 100644 --- a/naga/src/back/glsl/mod.rs +++ b/naga/src/back/glsl/mod.rs @@ -2472,6 +2472,17 @@ impl<'a, W: Write> Writer<'a, W> { self.write_expr(value, ctx)?; writeln!(self.out, ");")?; } + // Stores a value into an image. + Statement::ImageAtomic { + image, + coordinate, + array_index, + fun, + value, + } => { + write!(self.out, "{level}")?; + self.write_image_atomic(ctx, image, coordinate, array_index, fun, value)? + } Statement::RayQuery { .. } => unreachable!(), Statement::SubgroupBallot { result, predicate } => { write!(self.out, "{level}")?; @@ -4134,6 +4145,56 @@ impl<'a, W: Write> Writer<'a, W> { Ok(()) } + /// Helper method to write the `ImageAtomic` statement + fn write_image_atomic( + &mut self, + ctx: &back::FunctionCtx, + image: Handle, + coordinate: Handle, + array_index: Option>, + fun: crate::AtomicFunction, + value: Handle, + ) -> Result<(), Error> { + use crate::ImageDimension as IDim; + + // NOTE: openGL requires that `imageAtomic`s have no effects when the texel is invalid + // so we don't need to generate bounds checks (OpenGL 4.2 Core ยง3.9.20) + + // This will only panic if the module is invalid + let dim = match *ctx.resolve_type(image, &self.module.types) { + TypeInner::Image { dim, .. } => dim, + _ => unreachable!(), + }; + + // Begin our call to `imageAtomic` + let fun_str = fun.to_glsl(); + write!(self.out, "imageAtomic{fun_str}(")?; + self.write_expr(image, ctx)?; + // Separate the image argument from the coordinates + write!(self.out, ", ")?; + + // openGL es doesn't have 1D images so we need workaround it + let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es(); + // Write the coordinate vector + self.write_texture_coord( + ctx, + // Get the size of the coordinate vector + self.get_coordinate_vector_size(dim, false), + coordinate, + array_index, + tex_1d_hack, + )?; + + // Separate the coordinate from the value to write and write the expression + // of the value to write. + write!(self.out, ", ")?; + self.write_expr(value, ctx)?; + // End the call to `imageAtomic` and the statement. + writeln!(self.out, ");")?; + + Ok(()) + } + /// Helper method for writing an `ImageLoad` expression. 
#[allow(clippy::too_many_arguments)] fn write_image_load( @@ -4530,6 +4591,9 @@ impl<'a, W: Write> Writer<'a, W> { /// they can only be used to query information about the resource which isn't what /// we want here so when storage access is both `LOAD` and `STORE` add no modifiers fn write_storage_access(&mut self, storage_access: crate::StorageAccess) -> BackendResult { + if storage_access.contains(crate::StorageAccess::ATOMIC) { + return Ok(()); + } if !storage_access.contains(crate::StorageAccess::STORE) { write!(self.out, "readonly ")?; } diff --git a/naga/src/back/hlsl/writer.rs b/naga/src/back/hlsl/writer.rs index bc6086d539..bc71a3d4fd 100644 --- a/naga/src/back/hlsl/writer.rs +++ b/naga/src/back/hlsl/writer.rs @@ -2210,6 +2210,32 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { writeln!(self.out, ");")?; } + Statement::ImageAtomic { + image, + coordinate, + array_index, + fun, + value, + } => { + write!(self.out, "{level}")?; + + let fun_str = fun.to_hlsl_suffix(); + write!(self.out, "Interlocked{fun_str}(")?; + self.write_expr(module, image, func_ctx)?; + write!(self.out, "[")?; + self.write_texture_coordinates( + "int", + coordinate, + array_index, + None, + module, + func_ctx, + )?; + write!(self.out, "],")?; + + self.write_expr(module, value, func_ctx)?; + writeln!(self.out, ");")?; + } Statement::WorkGroupUniformLoad { pointer, result } => { self.write_barrier(crate::Barrier::WORK_GROUP, level)?; write!(self.out, "{level}")?; diff --git a/naga/src/back/msl/writer.rs b/naga/src/back/msl/writer.rs index c119823800..94abec7d1b 100644 --- a/naga/src/back/msl/writer.rs +++ b/naga/src/back/msl/writer.rs @@ -343,7 +343,9 @@ impl TypedGlobalVariable<'_> { let (space, access, reference) = match var.space.to_msl_name() { Some(space) if self.reference => { let access = if var.space.needs_access_qualifier() - && !self.usage.contains(valid::GlobalUse::WRITE) + && !self + .usage + .intersects(valid::GlobalUse::WRITE | valid::GlobalUse::ATOMIC) { "const" } else { @@ -1198,6 +1200,28 @@ impl Writer { Ok(()) } + fn put_image_atomic( + &mut self, + level: back::Level, + image: Handle, + address: &TexelAddress, + fun: crate::AtomicFunction, + value: Handle, + context: &StatementContext, + ) -> BackendResult { + write!(self.out, "{level}")?; + self.put_expression(image, &context.expression, false)?; + let op = fun.to_msl(); + write!(self.out, ".atomic_{}(", op)?; + // coordinates in IR are int, but Metal expects uint + self.put_cast_to_uint_scalar_or_vector(address.coordinate, &context.expression)?; + write!(self.out, ", ")?; + self.put_expression(value, &context.expression, true)?; + writeln!(self.out, ");")?; + + Ok(()) + } + fn put_image_store( &mut self, level: back::Level, @@ -3236,6 +3260,21 @@ impl Writer { // Done writeln!(self.out, ";")?; } + crate::Statement::ImageAtomic { + image, + coordinate, + array_index, + fun, + value, + } => { + let address = TexelAddress { + coordinate, + array_index, + sample: None, + level: None, + }; + self.put_image_atomic(level, image, &address, fun, value, context)? 
+ } crate::Statement::WorkGroupUniformLoad { pointer, result } => { self.write_barrier(crate::Barrier::WORK_GROUP, level)?; diff --git a/naga/src/back/pipeline_constants.rs b/naga/src/back/pipeline_constants.rs index eb01dd5feb..7f5504352b 100644 --- a/naga/src/back/pipeline_constants.rs +++ b/naga/src/back/pipeline_constants.rs @@ -736,6 +736,20 @@ fn adjust_stmt(new_pos: &HandleVec>, stmt: &mut S | crate::AtomicFunction::Exchange { compare: None } => {} } } + Statement::ImageAtomic { + ref mut image, + ref mut coordinate, + ref mut array_index, + fun: _, + ref mut value, + } => { + adjust(image); + adjust(coordinate); + if let Some(ref mut array_index) = *array_index { + adjust(array_index); + } + adjust(value); + } Statement::WorkGroupUniformLoad { ref mut pointer, ref mut result, diff --git a/naga/src/back/spv/block.rs b/naga/src/back/spv/block.rs index 0fbba5c737..4158e698d2 100644 --- a/naga/src/back/spv/block.rs +++ b/naga/src/back/spv/block.rs @@ -2873,6 +2873,22 @@ impl BlockContext<'_> { block.body.push(instruction); } + Statement::ImageAtomic { + image, + coordinate, + array_index, + fun, + value, + } => { + self.write_image_atomic( + image, + coordinate, + array_index, + fun, + value, + &mut block, + )?; + } Statement::WorkGroupUniformLoad { pointer, result } => { self.writer .write_barrier(crate::Barrier::WORK_GROUP, &mut block); diff --git a/naga/src/back/spv/image.rs b/naga/src/back/spv/image.rs index 7dedf37502..84ec3018e1 100644 --- a/naga/src/back/spv/image.rs +++ b/naga/src/back/spv/image.rs @@ -1225,4 +1225,77 @@ impl BlockContext<'_> { Ok(()) } + + pub(super) fn write_image_atomic( + &mut self, + image: Handle, + coordinate: Handle, + array_index: Option>, + fun: crate::AtomicFunction, + value: Handle, + block: &mut Block, + ) -> Result<(), Error> { + let image_id = match self.ir_function.originating_global(image) { + Some(handle) => self.writer.global_variables[handle].var_id, + _ => return Err(Error::Validation("Unexpected image type")), + }; + let crate::TypeInner::Image { class, .. } = + *self.fun_info[image].ty.inner_with(&self.ir_module.types) + else { + return Err(Error::Validation("Invalid image type")); + }; + let crate::ImageClass::Storage { format, .. } = class else { + return Err(Error::Validation("Invalid image class")); + }; + let scalar = format.into(); + let pointer_type_id = self.get_type_id(LookupType::Local(LocalType::LocalPointer { + base: NumericType::Scalar(scalar), + class: spirv::StorageClass::Image, + })); + let signed = scalar.kind == crate::ScalarKind::Sint; + let pointer_id = self.gen_id(); + let coordinates = self.write_image_coordinates(coordinate, array_index, block)?; + let sample_id = self.writer.get_constant_scalar(crate::Literal::U32(0)); + block.body.push(Instruction::image_texel_pointer( + pointer_type_id, + pointer_id, + image_id, + coordinates.value_id, + sample_id, + )); + + let op = match fun { + crate::AtomicFunction::Add => spirv::Op::AtomicIAdd, + crate::AtomicFunction::Subtract => spirv::Op::AtomicISub, + crate::AtomicFunction::And => spirv::Op::AtomicAnd, + crate::AtomicFunction::ExclusiveOr => spirv::Op::AtomicXor, + crate::AtomicFunction::InclusiveOr => spirv::Op::AtomicOr, + crate::AtomicFunction::Min if signed => spirv::Op::AtomicSMin, + crate::AtomicFunction::Min => spirv::Op::AtomicUMin, + crate::AtomicFunction::Max if signed => spirv::Op::AtomicSMax, + crate::AtomicFunction::Max => spirv::Op::AtomicUMax, + crate::AtomicFunction::Exchange { .. 
} => { + return Err(Error::Validation("Exchange atomics are not supported yet")) + } + }; + let result_type_id = self.get_expression_type_id(&self.fun_info[value].ty); + let id = self.gen_id(); + let space = crate::AddressSpace::Handle; + let (semantics, scope) = space.to_spirv_semantics_and_scope(); + let scope_constant_id = self.get_scope_constant(scope as u32); + let semantics_id = self.get_index_constant(semantics.bits()); + let value_id = self.cached[value]; + + block.body.push(Instruction::image_atomic( + op, + result_type_id, + id, + pointer_id, + scope_constant_id, + semantics_id, + value_id, + )); + + Ok(()) + } } diff --git a/naga/src/back/spv/instructions.rs b/naga/src/back/spv/instructions.rs index 9bd58508a1..32b8113c69 100644 --- a/naga/src/back/spv/instructions.rs +++ b/naga/src/back/spv/instructions.rs @@ -702,6 +702,41 @@ impl super::Instruction { instruction } + pub(super) fn image_texel_pointer( + result_type_id: Word, + id: Word, + image: Word, + coordinates: Word, + sample: Word, + ) -> Self { + let mut instruction = Self::new(Op::ImageTexelPointer); + instruction.set_type(result_type_id); + instruction.set_result(id); + instruction.add_operand(image); + instruction.add_operand(coordinates); + instruction.add_operand(sample); + instruction + } + + pub(super) fn image_atomic( + op: Op, + result_type_id: Word, + id: Word, + pointer: Word, + scope_id: Word, + semantics_id: Word, + value: Word, + ) -> Self { + let mut instruction = Self::new(op); + instruction.set_type(result_type_id); + instruction.set_result(id); + instruction.add_operand(pointer); + instruction.add_operand(scope_id); + instruction.add_operand(semantics_id); + instruction.add_operand(value); + instruction + } + pub(super) fn image_query(op: Op, result_type_id: Word, id: Word, image: Word) -> Self { let mut instruction = Self::new(op); instruction.set_type(result_type_id); diff --git a/naga/src/back/wgsl/writer.rs b/naga/src/back/wgsl/writer.rs index ed581c59e2..07cffc2e73 100644 --- a/naga/src/back/wgsl/writer.rs +++ b/naga/src/back/wgsl/writer.rs @@ -481,7 +481,10 @@ impl Writer { "storage_", "", storage_format_str(format), - if access.contains(crate::StorageAccess::LOAD | crate::StorageAccess::STORE) + if access.contains(crate::StorageAccess::ATOMIC) { + ",atomic" + } else if access + .contains(crate::StorageAccess::LOAD | crate::StorageAccess::STORE) { ",read_write" } else if access.contains(crate::StorageAccess::LOAD) { @@ -790,6 +793,27 @@ impl Writer { self.write_expr(module, value, func_ctx)?; writeln!(self.out, ");")? } + Statement::ImageAtomic { + image, + coordinate, + array_index, + ref fun, + value, + } => { + write!(self.out, "{level}")?; + let fun_str = fun.to_wgsl(); + write!(self.out, "textureAtomic{fun_str}(")?; + self.write_expr(module, image, func_ctx)?; + write!(self.out, ", ")?; + self.write_expr(module, coordinate, func_ctx)?; + if let Some(array_index_expr) = array_index { + write!(self.out, ", ")?; + self.write_expr(module, array_index_expr, func_ctx)?; + } + write!(self.out, ", ")?; + self.write_expr(module, value, func_ctx)?; + writeln!(self.out, ");")?; + } Statement::WorkGroupUniformLoad { pointer, result } => { write!(self.out, "{level}")?; // TODO: Obey named expressions here. 
@@ -2107,7 +2131,9 @@ const fn address_space_str( As::Private => "private", As::Uniform => "uniform", As::Storage { access } => { - if access.contains(crate::StorageAccess::STORE) { + if access.contains(crate::StorageAccess::ATOMIC) { + return (Some("storage"), Some("atomic")); + } else if access.contains(crate::StorageAccess::STORE) { return (Some("storage"), Some("read_write")); } else { "storage" diff --git a/naga/src/compact/statements.rs b/naga/src/compact/statements.rs index 759dcc2eda..596f9d4067 100644 --- a/naga/src/compact/statements.rs +++ b/naga/src/compact/statements.rs @@ -79,6 +79,20 @@ impl FunctionTracer<'_> { self.expressions_used.insert(result); } } + St::ImageAtomic { + image, + coordinate, + array_index, + fun: _, + value, + } => { + self.expressions_used.insert(image); + self.expressions_used.insert(coordinate); + if let Some(array_index) = array_index { + self.expressions_used.insert(array_index); + } + self.expressions_used.insert(value); + } St::WorkGroupUniformLoad { pointer, result } => { self.expressions_used.insert(pointer); self.expressions_used.insert(result); @@ -261,6 +275,20 @@ impl FunctionMap { adjust(result); } } + St::ImageAtomic { + ref mut image, + ref mut coordinate, + ref mut array_index, + fun: _, + ref mut value, + } => { + adjust(image); + adjust(coordinate); + if let Some(ref mut array_index) = *array_index { + adjust(array_index); + } + adjust(value); + } St::WorkGroupUniformLoad { ref mut pointer, ref mut result, diff --git a/naga/src/front/glsl/parser/types.rs b/naga/src/front/glsl/parser/types.rs index 73eab8b2f7..c7d61222f8 100644 --- a/naga/src/front/glsl/parser/types.rs +++ b/naga/src/front/glsl/parser/types.rs @@ -228,7 +228,7 @@ impl ParsingContext<'_> { } TokenValue::Buffer => { StorageQualifier::AddressSpace(AddressSpace::Storage { - access: crate::StorageAccess::all(), + access: crate::StorageAccess::LOAD | crate::StorageAccess::STORE, }) } _ => unreachable!(), @@ -277,14 +277,6 @@ impl ParsingContext<'_> { let storage_access = qualifiers .storage_access .get_or_insert((crate::StorageAccess::all(), Span::default())); - if !storage_access.0.contains(!access) { - frontend.errors.push(Error { - kind: ErrorKind::SemanticError( - "The same memory qualifier can only be used once".into(), - ), - meta: token.meta, - }) - } storage_access.0 &= access; storage_access.1.subsume(token.meta); diff --git a/naga/src/front/glsl/types.rs b/naga/src/front/glsl/types.rs index f6836169c0..ad5e188fd9 100644 --- a/naga/src/front/glsl/types.rs +++ b/naga/src/front/glsl/types.rs @@ -154,7 +154,7 @@ pub fn parse_type(type_name: &str) -> Option { let class = ImageClass::Storage { format: crate::StorageFormat::R8Uint, - access: crate::StorageAccess::all(), + access: crate::StorageAccess::LOAD | crate::StorageAccess::STORE, }; // TODO: glsl support multisampled storage images, naga doesn't diff --git a/naga/src/front/spv/convert.rs b/naga/src/front/spv/convert.rs index 68b870fb01..33ed4793cf 100644 --- a/naga/src/front/spv/convert.rs +++ b/naga/src/front/spv/convert.rs @@ -174,7 +174,7 @@ pub(super) fn map_storage_class(word: spirv::Word) -> Result Ec::Global(crate::AddressSpace::Handle), Some(Sc::StorageBuffer) => Ec::Global(crate::AddressSpace::Storage { //Note: this is restricted by decorations later - access: crate::StorageAccess::all(), + access: crate::StorageAccess::LOAD | crate::StorageAccess::STORE, }), // we expect the `Storage` case to be filtered out before calling this function. 
Some(Sc::Uniform) => Ec::Global(crate::AddressSpace::Uniform), diff --git a/naga/src/front/spv/mod.rs b/naga/src/front/spv/mod.rs index 8afbee247a..d028c51a25 100644 --- a/naga/src/front/spv/mod.rs +++ b/naga/src/front/spv/mod.rs @@ -174,7 +174,7 @@ bitflags::bitflags! { impl DecorationFlags { fn to_storage_access(self) -> crate::StorageAccess { - let mut access = crate::StorageAccess::all(); + let mut access = crate::StorageAccess::LOAD | crate::StorageAccess::STORE; if self.contains(DecorationFlags::NON_READABLE) { access &= !crate::StorageAccess::LOAD; } @@ -4528,6 +4528,7 @@ impl> Frontend { | S::Store { .. } | S::ImageStore { .. } | S::Atomic { .. } + | S::ImageAtomic { .. } | S::RayQuery { .. } | S::SubgroupBallot { .. } | S::SubgroupCollectiveOperation { .. } diff --git a/naga/src/front/wgsl/lower/mod.rs b/naga/src/front/wgsl/lower/mod.rs index fc31e43ecf..f15bb1d68a 100644 --- a/naga/src/front/wgsl/lower/mod.rs +++ b/naga/src/front/wgsl/lower/mod.rs @@ -2414,6 +2414,50 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { ); return Ok(Some(result)); } + "textureAtomicMin" | "textureAtomicMax" | "textureAtomicAdd" + | "textureAtomicAnd" | "textureAtomicOr" | "textureAtomicXor" => { + let mut args = ctx.prepare_args(arguments, 3, span); + + let image = args.next()?; + let image_span = ctx.ast_expressions.get_span(image); + let image = self.expression(image, ctx)?; + + let coordinate = self.expression(args.next()?, ctx)?; + + let (_, arrayed) = ctx.image_data(image, image_span)?; + let array_index = arrayed + .then(|| { + args.min_args += 1; + self.expression(args.next()?, ctx) + }) + .transpose()?; + + let value = self.expression(args.next()?, ctx)?; + + args.finish()?; + + let rctx = ctx.runtime_expression_ctx(span)?; + rctx.block + .extend(rctx.emitter.finish(&rctx.function.expressions)); + rctx.emitter.start(&rctx.function.expressions); + let stmt = crate::Statement::ImageAtomic { + image, + coordinate, + array_index, + fun: match function.name { + "textureAtomicMin" => crate::AtomicFunction::Min, + "textureAtomicMax" => crate::AtomicFunction::Max, + "textureAtomicAdd" => crate::AtomicFunction::Add, + "textureAtomicAnd" => crate::AtomicFunction::And, + "textureAtomicOr" => crate::AtomicFunction::InclusiveOr, + "textureAtomicXor" => crate::AtomicFunction::ExclusiveOr, + _ => unreachable!(), + }, + value, + }; + rctx.block.push(stmt, span); + return Ok(None); + } "storageBarrier" => { ctx.prepare_args(arguments, 0, span).finish()?; diff --git a/naga/src/front/wgsl/parse/lexer.rs b/naga/src/front/wgsl/parse/lexer.rs index df527c1dc4..f42a033531 100644 --- a/naga/src/front/wgsl/parse/lexer.rs +++ b/naga/src/front/wgsl/parse/lexer.rs @@ -430,6 +430,9 @@ impl<'a> Lexer<'a> { "read" => Ok(crate::StorageAccess::LOAD), "write" => Ok(crate::StorageAccess::STORE), "read_write" => Ok(crate::StorageAccess::LOAD | crate::StorageAccess::STORE), + "atomic" => Ok(crate::StorageAccess::ATOMIC + | crate::StorageAccess::LOAD + | crate::StorageAccess::STORE), _ => Err(Error::UnknownAccess(span)), } } diff --git a/naga/src/lib.rs b/naga/src/lib.rs index 687dc5b441..b9d6966b1d 100644 --- a/naga/src/lib.rs +++ b/naga/src/lib.rs @@ -597,6 +597,8 @@ bitflags::bitflags! { const LOAD = 0x1; /// Storage can be used as a target for store ops. const STORE = 0x2; + /// Storage can be used as a target for atomic ops. 
+ const ATOMIC = 0x4; } } @@ -2003,6 +2005,52 @@ pub enum Statement { /// [`SHADER_INT64_ATOMIC_ALL_OPS`]: crate::valid::Capabilities::SHADER_INT64_ATOMIC_ALL_OPS result: Option>, }, + /// Performs an atomic operation on a texel value of an image. + /// + /// Doing atomics on images with mipmaps is not supported, so there is no + /// `level` operand. + /// + /// This statement is a barrier for any operations on the corresponding + /// [`Expression::GlobalVariable`] for this image. + ImageAtomic { + /// The image to perform an atomic operation on. This must have type + /// [`Image`]. (This will necessarily be a [`GlobalVariable`] or + /// [`FunctionArgument`] expression, since no other expressions are + /// allowed to have that type.) + /// + /// [`Image`]: TypeInner::Image + /// [`GlobalVariable`]: Expression::GlobalVariable + /// [`FunctionArgument`]: Expression::FunctionArgument + image: Handle, + + /// The coordinate of the texel we wish to load. This must be a scalar + /// for [`D1`] images, a [`Bi`] vector for [`D2`] images, and a [`Tri`] + /// vector for [`D3`] images. (Array indices, sample indices, and + /// explicit level-of-detail values are supplied separately.) Its + /// component type must be [`Sint`]. + /// + /// [`D1`]: ImageDimension::D1 + /// [`D2`]: ImageDimension::D2 + /// [`D3`]: ImageDimension::D3 + /// [`Bi`]: VectorSize::Bi + /// [`Tri`]: VectorSize::Tri + /// [`Sint`]: ScalarKind::Sint + coordinate: Handle, + + /// The index into an arrayed image. If the [`arrayed`] flag in + /// `image`'s type is `true`, then this must be `Some(expr)`, where + /// `expr` is a [`Sint`] scalar. Otherwise, it must be `None`. + /// + /// [`arrayed`]: TypeInner::Image::arrayed + /// [`Sint`]: ScalarKind::Sint + array_index: Option>, + + /// The kind of atomic operation to perform on the texel. + fun: AtomicFunction, + + /// The value with which to perform the atomic operation. + value: Handle, + }, /// Load uniformly from a uniform pointer in the workgroup address space. /// /// Corresponds to the [`workgroupUniformLoad`](https://www.w3.org/TR/WGSL/#workgroupUniformLoad-builtin) diff --git a/naga/src/proc/terminator.rs b/naga/src/proc/terminator.rs index 5edf55cb73..19c37294ec 100644 --- a/naga/src/proc/terminator.rs +++ b/naga/src/proc/terminator.rs @@ -36,6 +36,7 @@ pub fn ensure_block_returns(block: &mut crate::Block) { | S::Call { .. } | S::RayQuery { .. } | S::Atomic { .. } + | S::ImageAtomic { .. } | S::WorkGroupUniformLoad { .. } | S::SubgroupBallot { .. } | S::SubgroupCollectiveOperation { .. } diff --git a/naga/src/valid/analyzer.rs b/naga/src/valid/analyzer.rs index 4b207d0274..8417bf77be 100644 --- a/naga/src/valid/analyzer.rs +++ b/naga/src/valid/analyzer.rs @@ -131,6 +131,8 @@ bitflags::bitflags! { const WRITE = 0x2; /// The information about the data is queried. const QUERY = 0x4; + /// Atomic operations will be performed on the variable. 
+ const ATOMIC = 0x8; } } @@ -1061,6 +1063,21 @@ impl FunctionInfo { } FunctionUniformity::new() } + S::ImageAtomic { + image, + coordinate, + array_index, + fun: _, + value, + } => { + let _ = self.add_ref_impl(image, GlobalUse::ATOMIC); + let _ = self.add_ref(coordinate); + if let Some(expr) = array_index { + let _ = self.add_ref(expr); + } + let _ = self.add_ref(value); + FunctionUniformity::new() + } S::RayQuery { query, ref fun } => { let _ = self.add_ref(query); if let crate::RayQueryFunction::Initialize { diff --git a/naga/src/valid/expression.rs b/naga/src/valid/expression.rs index 28b9321cce..a86f4e15dd 100644 --- a/naga/src/valid/expression.rs +++ b/naga/src/valid/expression.rs @@ -74,6 +74,10 @@ pub enum ExpressionError { ExpectedSamplerType(Handle), #[error("Unable to operate on image class {0:?}")] InvalidImageClass(crate::ImageClass), + #[error("Image atomics are not supported for storage format {0:?}")] + InvalidImageFormat(crate::StorageFormat), + #[error("Image atomics require atomic storage access, {0:?} is insufficient")] + InvalidImageStorageAccess(crate::StorageAccess), #[error("Derivatives can only be taken from scalar and vector floats")] InvalidDerivative, #[error("Image array index parameter is misplaced")] diff --git a/naga/src/valid/function.rs b/naga/src/valid/function.rs index c695d65144..4f233a75a9 100644 --- a/naga/src/valid/function.rs +++ b/naga/src/valid/function.rs @@ -146,6 +146,12 @@ pub enum FunctionError { }, #[error("Image store parameters are invalid")] InvalidImageStore(#[source] ExpressionError), + #[error("Image atomic parameters are invalid")] + InvalidImageAtomic(#[source] ExpressionError), + #[error("Image atomic function is invalid")] + InvalidImageAtomicFunction(crate::AtomicFunction), + #[error("Image atomic value is invalid")] + InvalidImageAtomicValue(Handle), #[error("Call to {function:?} is invalid")] InvalidCall { function: Handle, @@ -1136,6 +1142,161 @@ impl super::Validator { } => { self.validate_atomic(pointer, fun, value, result, span, context)?; } + S::ImageAtomic { + image, + coordinate, + array_index, + fun, + value, + } => { + let var = match *context.get_expression(image) { + crate::Expression::GlobalVariable(var_handle) => { + &context.global_vars[var_handle] + } + // We're looking at a binding index situation, so punch through the index and look at the global behind it. + crate::Expression::Access { base, .. } + | crate::Expression::AccessIndex { base, .. } => { + match *context.get_expression(base) { + crate::Expression::GlobalVariable(var_handle) => { + &context.global_vars[var_handle] + } + _ => { + return Err(FunctionError::InvalidImageAtomic( + ExpressionError::ExpectedGlobalVariable, + ) + .with_span_handle(image, context.expressions)) + } + } + } + _ => { + return Err(FunctionError::InvalidImageAtomic( + ExpressionError::ExpectedGlobalVariable, + ) + .with_span_handle(image, context.expressions)) + } + }; + + // Punch through a binding array to get the underlying type + let global_ty = match context.types[var.ty].inner { + Ti::BindingArray { base, .. } => &context.types[base].inner, + ref inner => inner, + }; + + let value_ty = match *global_ty { + Ti::Image { + class, + arrayed, + dim, + } => { + match context + .resolve_type(coordinate, &self.valid_expression_set)? 
+ .image_storage_coordinates() + { + Some(coord_dim) if coord_dim == dim => {} + _ => { + return Err(FunctionError::InvalidImageAtomic( + ExpressionError::InvalidImageCoordinateType( + dim, coordinate, + ), + ) + .with_span_handle(coordinate, context.expressions)); + } + }; + if arrayed != array_index.is_some() { + return Err(FunctionError::InvalidImageAtomic( + ExpressionError::InvalidImageArrayIndex, + ) + .with_span_handle(coordinate, context.expressions)); + } + if let Some(expr) = array_index { + match *context.resolve_type(expr, &self.valid_expression_set)? { + Ti::Scalar(crate::Scalar { + kind: crate::ScalarKind::Sint | crate::ScalarKind::Uint, + width: _, + }) => {} + _ => { + return Err(FunctionError::InvalidImageAtomic( + ExpressionError::InvalidImageArrayIndexType(expr), + ) + .with_span_handle(expr, context.expressions)); + } + } + } + match class { + crate::ImageClass::Storage { format, access } => { + if !access.contains(crate::StorageAccess::ATOMIC) { + return Err(FunctionError::InvalidImageAtomic( + ExpressionError::InvalidImageStorageAccess(access), + ) + .with_span_handle(image, context.expressions)); + } + match format { + crate::StorageFormat::R32Sint + | crate::StorageFormat::R32Uint => { + if !self + .capabilities + .intersects(super::Capabilities::TEXTURE_ATOMIC) + { + return Err(FunctionError::MissingCapability( + super::Capabilities::TEXTURE_ATOMIC, + ) + .with_span_static( + span, + "missing capability for this operation", + )); + } + match fun { + crate::AtomicFunction::Add + | crate::AtomicFunction::And + | crate::AtomicFunction::ExclusiveOr + | crate::AtomicFunction::InclusiveOr + | crate::AtomicFunction::Min + | crate::AtomicFunction::Max => {} + _ => { + return Err( + FunctionError::InvalidImageAtomicFunction( + fun, + ) + .with_span_handle( + image, + context.expressions, + ), + ); + } + } + } + _ => { + return Err(FunctionError::InvalidImageAtomic( + ExpressionError::InvalidImageFormat(format), + ) + .with_span_handle(image, context.expressions)); + } + } + crate::TypeInner::Scalar(format.into()) + } + _ => { + return Err(FunctionError::InvalidImageAtomic( + ExpressionError::InvalidImageClass(class), + ) + .with_span_handle(image, context.expressions)); + } + } + } + _ => { + return Err(FunctionError::InvalidImageAtomic( + ExpressionError::ExpectedImageType(var.ty), + ) + .with_span() + .with_handle(var.ty, context.types) + .with_handle(image, context.expressions)) + } + }; + + if *context.resolve_type(value, &self.valid_expression_set)? 
!= value_ty { + return Err(FunctionError::InvalidImageAtomicValue(value) + .with_span_handle(value, context.expressions)); + } + } S::WorkGroupUniformLoad { pointer, result } => { stages &= super::ShaderStages::COMPUTE; let pointer_inner = diff --git a/naga/src/valid/handles.rs b/naga/src/valid/handles.rs index be4eb3dbac..9a2b444bea 100644 --- a/naga/src/valid/handles.rs +++ b/naga/src/valid/handles.rs @@ -557,6 +557,19 @@ impl super::Validator { } Ok(()) } + crate::Statement::ImageAtomic { + image, + coordinate, + array_index, + fun: _, + value, + } => { + validate_expr(image)?; + validate_expr(coordinate)?; + validate_expr_opt(array_index)?; + validate_expr(value)?; + Ok(()) + } crate::Statement::WorkGroupUniformLoad { pointer, result } => { validate_expr(pointer)?; validate_expr(result)?; diff --git a/naga/src/valid/interface.rs b/naga/src/valid/interface.rs index 335826d12c..08bdda0329 100644 --- a/naga/src/valid/interface.rs +++ b/naga/src/valid/interface.rs @@ -129,6 +129,9 @@ fn storage_usage(access: crate::StorageAccess) -> GlobalUse { if access.contains(crate::StorageAccess::STORE) { storage_usage |= GlobalUse::WRITE; } + if access.contains(crate::StorageAccess::ATOMIC) { + storage_usage |= GlobalUse::ATOMIC; + } storage_usage } @@ -758,7 +761,9 @@ impl super::Validator { } => storage_usage(access), _ => GlobalUse::READ | GlobalUse::QUERY, }, - crate::AddressSpace::Private | crate::AddressSpace::WorkGroup => GlobalUse::all(), + crate::AddressSpace::Private | crate::AddressSpace::WorkGroup => { + GlobalUse::READ | GlobalUse::WRITE | GlobalUse::QUERY + } crate::AddressSpace::PushConstant => GlobalUse::READ, }; if !allowed_usage.contains(usage) { diff --git a/naga/src/valid/mod.rs b/naga/src/valid/mod.rs index 6a81bd7c2d..1de5eb9872 100644 --- a/naga/src/valid/mod.rs +++ b/naga/src/valid/mod.rs @@ -143,6 +143,8 @@ bitflags::bitflags! { const SHADER_INT64_ATOMIC_MIN_MAX = 0x80000; /// Support for all atomic operations on 64-bit integers. const SHADER_INT64_ATOMIC_ALL_OPS = 0x100000; + /// Support for atomic operations on images. 
+ const TEXTURE_ATOMIC = 0x200000; } } diff --git a/naga/tests/in/atomicTexture.param.ron b/naga/tests/in/atomicTexture.param.ron new file mode 100644 index 0000000000..8ed7a842c1 --- /dev/null +++ b/naga/tests/in/atomicTexture.param.ron @@ -0,0 +1,24 @@ +( + god_mode: true, + spv: ( + version: (1, 0), + capabilities: [], + ), + hlsl: ( + shader_model: V6_6, + binding_map: {}, + fake_missing_bindings: true, + special_constants_binding: Some((space: 1, register: 0)), + push_constants_target: Some((space: 0, register: 0)), + zero_initialize_workgroup_memory: true, + restrict_indexing: true + ), + msl: ( + lang_version: (3, 1), + per_entry_point_map: {}, + inline_samplers: [], + spirv_cross_compatibility: false, + fake_missing_bindings: true, + zero_initialize_workgroup_memory: true, + ), +) diff --git a/naga/tests/in/atomicTexture.wgsl b/naga/tests/in/atomicTexture.wgsl new file mode 100644 index 0000000000..40859ff04c --- /dev/null +++ b/naga/tests/in/atomicTexture.wgsl @@ -0,0 +1,22 @@ +@group(0) @binding(0) +var image_u: texture_storage_2d; +@group(0) @binding(1) +var image_s: texture_storage_2d; + +@compute +@workgroup_size(2) +fn cs_main(@builtin(local_invocation_id) id: vec3) { + textureAtomicMax(image_u, vec2(0, 0), 1u); + textureAtomicMin(image_u, vec2(0, 0), 1u); + textureAtomicAdd(image_u, vec2(0, 0), 1u); + textureAtomicAnd(image_u, vec2(0, 0), 1u); + textureAtomicOr(image_u, vec2(0, 0), 1u); + textureAtomicXor(image_u, vec2(0, 0), 1u); + + textureAtomicMax(image_s, vec2(0, 0), 1i); + textureAtomicMin(image_s, vec2(0, 0), 1i); + textureAtomicAdd(image_s, vec2(0, 0), 1i); + textureAtomicAnd(image_s, vec2(0, 0), 1i); + textureAtomicOr(image_s, vec2(0, 0), 1i); + textureAtomicXor(image_s, vec2(0, 0), 1i); +} diff --git a/naga/tests/out/hlsl/atomicTexture.hlsl b/naga/tests/out/hlsl/atomicTexture.hlsl new file mode 100644 index 0000000000..241cdab678 --- /dev/null +++ b/naga/tests/out/hlsl/atomicTexture.hlsl @@ -0,0 +1,27 @@ +struct NagaConstants { + int first_vertex; + int first_instance; + uint other; +}; +ConstantBuffer _NagaConstants: register(b0, space1); + +RWTexture2D image_u : register(u0); +RWTexture2D image_s : register(u1); + +[numthreads(2, 1, 1)] +void cs_main(uint3 id : SV_GroupThreadID) +{ + InterlockedMax(image_u[int2(0, 0)],1u); + InterlockedMin(image_u[int2(0, 0)],1u); + InterlockedAdd(image_u[int2(0, 0)],1u); + InterlockedAnd(image_u[int2(0, 0)],1u); + InterlockedOr(image_u[int2(0, 0)],1u); + InterlockedXor(image_u[int2(0, 0)],1u); + InterlockedMax(image_s[int2(0, 0)],1); + InterlockedMin(image_s[int2(0, 0)],1); + InterlockedAdd(image_s[int2(0, 0)],1); + InterlockedAnd(image_s[int2(0, 0)],1); + InterlockedOr(image_s[int2(0, 0)],1); + InterlockedXor(image_s[int2(0, 0)],1); + return; +} diff --git a/naga/tests/out/hlsl/atomicTexture.ron b/naga/tests/out/hlsl/atomicTexture.ron new file mode 100644 index 0000000000..67a9035512 --- /dev/null +++ b/naga/tests/out/hlsl/atomicTexture.ron @@ -0,0 +1,12 @@ +( + vertex:[ + ], + fragment:[ + ], + compute:[ + ( + entry_point:"cs_main", + target_profile:"cs_6_6", + ), + ], +) diff --git a/naga/tests/out/msl/atomicTexture.msl b/naga/tests/out/msl/atomicTexture.msl new file mode 100644 index 0000000000..2cb17a081e --- /dev/null +++ b/naga/tests/out/msl/atomicTexture.msl @@ -0,0 +1,28 @@ +// language: metal3.1 +#include +#include + +using metal::uint; + + +struct cs_mainInput { +}; +kernel void cs_main( + metal::uint3 id [[thread_position_in_threadgroup]] +, metal::texture2d image_u [[user(fake0)]] +, metal::texture2d image_s 
[[user(fake0)]] +) { + image_u.atomic_fetch_max(metal::uint2(metal::int2(0, 0)), 1u); + image_u.atomic_fetch_min(metal::uint2(metal::int2(0, 0)), 1u); + image_u.atomic_fetch_add(metal::uint2(metal::int2(0, 0)), 1u); + image_u.atomic_fetch_and(metal::uint2(metal::int2(0, 0)), 1u); + image_u.atomic_fetch_or(metal::uint2(metal::int2(0, 0)), 1u); + image_u.atomic_fetch_xor(metal::uint2(metal::int2(0, 0)), 1u); + image_s.atomic_fetch_max(metal::uint2(metal::int2(0, 0)), 1); + image_s.atomic_fetch_min(metal::uint2(metal::int2(0, 0)), 1); + image_s.atomic_fetch_add(metal::uint2(metal::int2(0, 0)), 1); + image_s.atomic_fetch_and(metal::uint2(metal::int2(0, 0)), 1); + image_s.atomic_fetch_or(metal::uint2(metal::int2(0, 0)), 1); + image_s.atomic_fetch_xor(metal::uint2(metal::int2(0, 0)), 1); + return; +} diff --git a/naga/tests/out/spv/atomicTexture.spvasm b/naga/tests/out/spv/atomicTexture.spvasm new file mode 100644 index 0000000000..42eaa3d33f --- /dev/null +++ b/naga/tests/out/spv/atomicTexture.spvasm @@ -0,0 +1,69 @@ +; SPIR-V +; Version: 1.0 +; Generator: rspirv +; Bound: 54 +OpCapability Shader +%1 = OpExtInstImport "GLSL.std.450" +OpMemoryModel Logical GLSL450 +OpEntryPoint GLCompute %17 "cs_main" %14 +OpExecutionMode %17 LocalSize 2 1 1 +OpDecorate %9 DescriptorSet 0 +OpDecorate %9 Binding 0 +OpDecorate %11 DescriptorSet 0 +OpDecorate %11 Binding 1 +OpDecorate %14 BuiltIn LocalInvocationId +%2 = OpTypeVoid +%4 = OpTypeInt 32 0 +%3 = OpTypeImage %4 2D 0 0 0 2 R32ui +%6 = OpTypeInt 32 1 +%5 = OpTypeImage %6 2D 0 0 0 2 R32i +%7 = OpTypeVector %4 3 +%8 = OpTypeVector %6 2 +%10 = OpTypePointer UniformConstant %3 +%9 = OpVariable %10 UniformConstant +%12 = OpTypePointer UniformConstant %5 +%11 = OpVariable %12 UniformConstant +%15 = OpTypePointer Input %7 +%14 = OpVariable %15 Input +%18 = OpTypeFunction %2 +%21 = OpConstant %6 0 +%22 = OpConstantComposite %8 %21 %21 +%23 = OpConstant %4 1 +%24 = OpConstant %6 1 +%26 = OpTypePointer Image %4 +%28 = OpConstant %4 0 +%30 = OpConstant %6 4 +%41 = OpTypePointer Image %6 +%17 = OpFunction %2 None %18 +%13 = OpLabel +%16 = OpLoad %7 %14 +%19 = OpLoad %3 %9 +%20 = OpLoad %5 %11 +OpBranch %25 +%25 = OpLabel +%27 = OpImageTexelPointer %26 %9 %22 %28 +%29 = OpAtomicUMax %4 %27 %30 %28 %23 +%31 = OpImageTexelPointer %26 %9 %22 %28 +%32 = OpAtomicUMin %4 %31 %30 %28 %23 +%33 = OpImageTexelPointer %26 %9 %22 %28 +%34 = OpAtomicIAdd %4 %33 %30 %28 %23 +%35 = OpImageTexelPointer %26 %9 %22 %28 +%36 = OpAtomicAnd %4 %35 %30 %28 %23 +%37 = OpImageTexelPointer %26 %9 %22 %28 +%38 = OpAtomicOr %4 %37 %30 %28 %23 +%39 = OpImageTexelPointer %26 %9 %22 %28 +%40 = OpAtomicXor %4 %39 %30 %28 %23 +%42 = OpImageTexelPointer %41 %11 %22 %28 +%43 = OpAtomicSMax %6 %42 %30 %28 %24 +%44 = OpImageTexelPointer %41 %11 %22 %28 +%45 = OpAtomicSMin %6 %44 %30 %28 %24 +%46 = OpImageTexelPointer %41 %11 %22 %28 +%47 = OpAtomicIAdd %6 %46 %30 %28 %24 +%48 = OpImageTexelPointer %41 %11 %22 %28 +%49 = OpAtomicAnd %6 %48 %30 %28 %24 +%50 = OpImageTexelPointer %41 %11 %22 %28 +%51 = OpAtomicOr %6 %50 %30 %28 %24 +%52 = OpImageTexelPointer %41 %11 %22 %28 +%53 = OpAtomicXor %6 %52 %30 %28 %24 +OpReturn +OpFunctionEnd \ No newline at end of file diff --git a/naga/tests/out/wgsl/atomicTexture.wgsl b/naga/tests/out/wgsl/atomicTexture.wgsl new file mode 100644 index 0000000000..f5bd7db002 --- /dev/null +++ b/naga/tests/out/wgsl/atomicTexture.wgsl @@ -0,0 +1,21 @@ +@group(0) @binding(0) +var image_u: texture_storage_2d; +@group(0) @binding(1) +var image_s: texture_storage_2d; + +@compute 
@workgroup_size(2, 1, 1) +fn cs_main(@builtin(local_invocation_id) id: vec3) { + textureAtomicMax(image_u, vec2(0i, 0i), 1u); + textureAtomicMin(image_u, vec2(0i, 0i), 1u); + textureAtomicAdd(image_u, vec2(0i, 0i), 1u); + textureAtomicAnd(image_u, vec2(0i, 0i), 1u); + textureAtomicOr(image_u, vec2(0i, 0i), 1u); + textureAtomicXor(image_u, vec2(0i, 0i), 1u); + textureAtomicMax(image_s, vec2(0i, 0i), 1i); + textureAtomicMin(image_s, vec2(0i, 0i), 1i); + textureAtomicAdd(image_s, vec2(0i, 0i), 1i); + textureAtomicAnd(image_s, vec2(0i, 0i), 1i); + textureAtomicOr(image_s, vec2(0i, 0i), 1i); + textureAtomicXor(image_s, vec2(0i, 0i), 1i); + return; +} diff --git a/naga/tests/snapshots.rs b/naga/tests/snapshots.rs index 72ce323585..93d131b739 100644 --- a/naga/tests/snapshots.rs +++ b/naga/tests/snapshots.rs @@ -788,6 +788,10 @@ fn convert_wgsl() { "atomicOps-int64-min-max", Targets::SPIRV | Targets::METAL | Targets::HLSL | Targets::WGSL, ), + ( + "atomicTexture", + Targets::SPIRV | Targets::METAL | Targets::HLSL | Targets::WGSL, + ), ( "atomicCompareExchange-int64", Targets::SPIRV | Targets::WGSL, diff --git a/tests/tests/image_atomics/image_32_atomics.wgsl b/tests/tests/image_atomics/image_32_atomics.wgsl new file mode 100644 index 0000000000..32ae1bb4ee --- /dev/null +++ b/tests/tests/image_atomics/image_32_atomics.wgsl @@ -0,0 +1,13 @@ +@group(0) @binding(0) +var image: texture_storage_2d; + +@compute +@workgroup_size(4, 4, 1) +fn cs_main(@builtin(local_invocation_id) id: vec3, @builtin(workgroup_id) group_id: vec3) { + let pixel = id + group_id * 4; + textureAtomicMax(image, pixel.xy, u32(pixel.x)); + + storageBarrier(); + + textureAtomicMin(image, pixel.xy, u32(pixel.y)); +} \ No newline at end of file diff --git a/tests/tests/image_atomics/mod.rs b/tests/tests/image_atomics/mod.rs new file mode 100644 index 0000000000..5a8cd1a076 --- /dev/null +++ b/tests/tests/image_atomics/mod.rs @@ -0,0 +1,168 @@ +//! Tests for image atomics. 
+ +use wgpu::ShaderModuleDescriptor; +use wgpu_test::{gpu_test, GpuTestConfiguration, TestParameters, TestingContext}; + +#[gpu_test] +static IMAGE_32_ATOMICS: GpuTestConfiguration = GpuTestConfiguration::new() + .parameters( + TestParameters::default() + .limits(wgt::Limits { + max_storage_textures_per_shader_stage: 1, + max_compute_invocations_per_workgroup: 64, + max_compute_workgroup_size_x: 4, + max_compute_workgroup_size_y: 4, + max_compute_workgroup_size_z: 4, + max_compute_workgroups_per_dimension: wgt::COPY_BYTES_PER_ROW_ALIGNMENT, + ..wgt::Limits::downlevel_webgl2_defaults() + }) + .features( + wgpu::Features::TEXTURE_ATOMIC + | wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES, + ), + ) + .run_async(|ctx| async move { + test_format( + ctx, + wgpu::TextureFormat::R32Uint, + wgpu::include_wgsl!("image_32_atomics.wgsl"), + ) + .await; + }); + +async fn test_format( + ctx: TestingContext, + format: wgpu::TextureFormat, + desc: ShaderModuleDescriptor<'_>, +) { + let pixel_bytes = format.target_pixel_byte_cost().unwrap(); + let size = wgpu::Extent3d { + width: wgt::COPY_BYTES_PER_ROW_ALIGNMENT, + height: wgt::COPY_BYTES_PER_ROW_ALIGNMENT, + depth_or_array_layers: 1, + }; + let bind_group_layout_entries = vec![wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::COMPUTE, + ty: wgpu::BindingType::StorageTexture { + access: wgpu::StorageTextureAccess::Atomic, + format, + view_dimension: wgpu::TextureViewDimension::D2, + }, + count: None, + }]; + + let bind_group_layout = ctx + .device + .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + label: None, + entries: &bind_group_layout_entries, + }); + + let pipeline_layout = ctx + .device + .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: None, + bind_group_layouts: &[&bind_group_layout], + push_constant_ranges: &[], + }); + let shader = ctx.device.create_shader_module(desc); + let pipeline = ctx + .device + .create_compute_pipeline(&wgpu::ComputePipelineDescriptor { + label: Some("image atomics pipeline"), + layout: Some(&pipeline_layout), + module: &shader, + entry_point: Some("cs_main"), + compilation_options: wgpu::PipelineCompilationOptions::default(), + cache: None, + }); + + let tex = ctx.device.create_texture(&wgpu::TextureDescriptor { + label: None, + dimension: wgpu::TextureDimension::D2, + size, + format, + usage: wgpu::TextureUsages::STORAGE_BINDING + | wgpu::TextureUsages::STORAGE_ATOMIC + | wgpu::TextureUsages::COPY_SRC, + mip_level_count: 1, + sample_count: 1, + view_formats: &[], + }); + let view = tex.create_view(&wgpu::TextureViewDescriptor { + format: Some(format), + aspect: wgpu::TextureAspect::All, + ..wgpu::TextureViewDescriptor::default() + }); + let bind_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + label: None, + layout: &pipeline.get_bind_group_layout(0), + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView(&view), + }], + }); + + let mut encoder = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + let mut rpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { + label: None, + timestamp_writes: None, + }); + rpass.set_pipeline(&pipeline); + rpass.set_bind_group(0, Some(&bind_group), &[]); + rpass.dispatch_workgroups(size.width, size.height, 1); + drop(rpass); + ctx.queue.submit(Some(encoder.finish())); + + let read_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor { + label: None, + size: (size.height * size.width * 
size.depth_or_array_layers * pixel_bytes) as u64, + usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST, + mapped_at_creation: false, + }); + + let mut encoder = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None }); + + encoder.copy_texture_to_buffer( + wgpu::TexelCopyTextureInfo { + texture: &tex, + mip_level: 0, + origin: wgpu::Origin3d::ZERO, + aspect: wgpu::TextureAspect::All, + }, + wgpu::TexelCopyBufferInfo { + buffer: &read_buffer, + layout: wgpu::TexelCopyBufferLayout { + offset: 0, + bytes_per_row: Some(size.width * pixel_bytes), + rows_per_image: Some(size.height), + }, + }, + size, + ); + + ctx.queue.submit(Some(encoder.finish())); + + let slice = read_buffer.slice(..); + slice.map_async(wgpu::MapMode::Read, |_| ()); + ctx.async_poll(wgpu::Maintain::wait()) + .await + .panic_on_timeout(); + let data: Vec = slice.get_mapped_range().to_vec(); + + assert_eq!(data.len() as u32, size.width * size.height * pixel_bytes); + for (i, long) in data.chunks(pixel_bytes as usize).enumerate() { + let x = (i as u32 % size.width) as u8; + let y = (i as u32 / size.width) as u8; + assert_eq!(long[0], u8::min(x, y), "{i}"); + assert_eq!( + long[1..pixel_bytes as usize], + [0].repeat(pixel_bytes as usize - 1) + ); + } +} diff --git a/tests/tests/root.rs b/tests/tests/root.rs index dac56a9db0..84f1e48f9f 100644 --- a/tests/tests/root.rs +++ b/tests/tests/root.rs @@ -24,6 +24,7 @@ mod dispatch_workgroups_indirect; mod encoder; mod external_texture; mod float32_filterable; +mod image_atomics; mod instance; mod life_cycle; mod mem_leaks; diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index c7867ab210..a74cf51857 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -39,6 +39,8 @@ pub enum BindGroupLayoutEntryError { StorageTextureCube, #[error("Read-write and read-only storage textures are not allowed by webgpu, they require the native only feature TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES")] StorageTextureReadWrite, + #[error("Atomic storage textures are not allowed by webgpu, they require the native only feature TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES")] + StorageTextureAtomic, #[error("Arrays of bindings unsupported for this type of binding")] ArrayUnsupported, #[error("Multisampled binding with sample type `TextureSampleType::Float` must have filterable set to false.")] @@ -185,6 +187,8 @@ pub enum CreateBindGroupError { DepthStencilAspect, #[error("The adapter does not support read access for storage textures of format {0:?}")] StorageReadNotSupported(wgt::TextureFormat), + #[error("The adapter does not support atomics for storage textures of format {0:?}")] + StorageAtomicNotSupported(wgt::TextureFormat), #[error("The adapter does not support write access for storage textures of format {0:?}")] StorageWriteNotSupported(wgt::TextureFormat), #[error("The adapter does not support read-write access for storage textures of format {0:?}")] diff --git a/wgpu-core/src/conv.rs b/wgpu-core/src/conv.rs index a4f967c4c5..27eaff6039 100644 --- a/wgpu-core/src/conv.rs +++ b/wgpu-core/src/conv.rs @@ -145,6 +145,10 @@ pub fn map_texture_usage( hal::TextureUses::DEPTH_STENCIL_READ | hal::TextureUses::DEPTH_STENCIL_WRITE, usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) && !is_color, ); + u.set( + hal::TextureUses::STORAGE_ATOMIC, + usage.contains(wgt::TextureUsages::STORAGE_ATOMIC), + ); u } @@ -200,6 +204,10 @@ pub fn map_texture_usage_from_hal(uses: hal::TextureUses) -> wgt::TextureUsages 
wgt::TextureUsages::RENDER_ATTACHMENT, uses.contains(hal::TextureUses::COLOR_TARGET), ); + u.set( + wgt::TextureUsages::STORAGE_ATOMIC, + uses.contains(hal::TextureUses::STORAGE_ATOMIC), + ); u } diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 8b4da3b2ec..bf202702e9 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -426,6 +426,10 @@ pub fn create_validator( Caps::SHADER_INT64_ATOMIC_ALL_OPS, features.contains(wgt::Features::SHADER_INT64_ATOMIC_ALL_OPS), ); + caps.set( + Caps::TEXTURE_ATOMIC, + features.contains(wgt::Features::TEXTURE_ATOMIC), + ); caps.set( Caps::MULTISAMPLED_SHADING, downlevel.contains(wgt::DownlevelFlags::MULTISAMPLED_SHADING), diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index d41156f0b9..4372f00f7a 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -1760,6 +1760,17 @@ impl Device { _ => (), } match access { + wgt::StorageTextureAccess::Atomic + if !self.features.contains( + wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES + | wgt::Features::TEXTURE_ATOMIC, + ) => + { + return Err(binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error: BindGroupLayoutEntryError::StorageTextureAtomic, + }); + } wgt::StorageTextureAccess::ReadOnly | wgt::StorageTextureAccess::ReadWrite if !self.features.contains( @@ -1790,6 +1801,12 @@ impl Device { wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES; WritableStorage::Yes } + wgt::StorageTextureAccess::Atomic => { + required_features |= + wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES + | wgt::Features::TEXTURE_ATOMIC; + WritableStorage::Yes + } }, ) } @@ -2523,6 +2540,17 @@ impl Device { hal::TextureUses::STORAGE_READ_WRITE } + wgt::StorageTextureAccess::Atomic => { + if !view + .format_features + .flags + .contains(wgt::TextureFormatFeatureFlags::STORAGE_ATOMIC) + { + return Err(Error::StorageAtomicNotSupported(view.desc.format)); + } + + hal::TextureUses::STORAGE_ATOMIC + } }; Ok((wgt::TextureUsages::STORAGE_BINDING, internal_use)) } diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 07f9d158e6..81af91e7b8 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -538,13 +538,20 @@ impl Adapter { allowed_usages.set( wgt::TextureUsages::STORAGE_BINDING, caps.intersects( - Tfc::STORAGE_WRITE_ONLY | Tfc::STORAGE_READ_ONLY | Tfc::STORAGE_READ_WRITE, + Tfc::STORAGE_WRITE_ONLY + | Tfc::STORAGE_READ_ONLY + | Tfc::STORAGE_READ_WRITE + | Tfc::STORAGE_ATOMIC, ), ); allowed_usages.set( wgt::TextureUsages::RENDER_ATTACHMENT, caps.intersects(Tfc::COLOR_ATTACHMENT | Tfc::DEPTH_STENCIL_ATTACHMENT), ); + allowed_usages.set( + wgt::TextureUsages::STORAGE_ATOMIC, + caps.contains(Tfc::STORAGE_ATOMIC), + ); let mut flags = wgt::TextureFormatFeatureFlags::empty(); flags.set( @@ -560,6 +567,11 @@ impl Adapter { caps.contains(Tfc::STORAGE_READ_WRITE), ); + flags.set( + wgt::TextureFormatFeatureFlags::STORAGE_ATOMIC, + caps.contains(Tfc::STORAGE_ATOMIC), + ); + flags.set( wgt::TextureFormatFeatureFlags::FILTERABLE, caps.contains(Tfc::SAMPLED_LINEAR), diff --git a/wgpu-core/src/validation.rs b/wgpu-core/src/validation.rs index b1c0051902..833188aa5a 100644 --- a/wgpu-core/src/validation.rs +++ b/wgpu-core/src/validation.rs @@ -519,7 +519,14 @@ impl Resource { let naga_access = match access { wgt::StorageTextureAccess::ReadOnly => naga::StorageAccess::LOAD, wgt::StorageTextureAccess::WriteOnly => naga::StorageAccess::STORE, - 
wgt::StorageTextureAccess::ReadWrite => naga::StorageAccess::all(), + wgt::StorageTextureAccess::ReadWrite => { + naga::StorageAccess::LOAD | naga::StorageAccess::STORE + } + wgt::StorageTextureAccess::Atomic => { + naga::StorageAccess::ATOMIC + | naga::StorageAccess::LOAD + | naga::StorageAccess::STORE + } }; naga::ImageClass::Storage { format: naga_format, @@ -610,11 +617,15 @@ impl Resource { }, naga::ImageClass::Storage { format, access } => BindingType::StorageTexture { access: { - const LOAD_STORE: naga::StorageAccess = naga::StorageAccess::all(); + const LOAD_STORE: naga::StorageAccess = + naga::StorageAccess::LOAD.union(naga::StorageAccess::STORE); match access { naga::StorageAccess::LOAD => wgt::StorageTextureAccess::ReadOnly, naga::StorageAccess::STORE => wgt::StorageTextureAccess::WriteOnly, LOAD_STORE => wgt::StorageTextureAccess::ReadWrite, + _ if access.contains(naga::StorageAccess::ATOMIC) => { + wgt::StorageTextureAccess::Atomic + } _ => unreachable!(), } }, diff --git a/wgpu-hal/src/dx12/adapter.rs b/wgpu-hal/src/dx12/adapter.rs index f081febfdb..e39bf5450d 100644 --- a/wgpu-hal/src/dx12/adapter.rs +++ b/wgpu-hal/src/dx12/adapter.rs @@ -382,6 +382,11 @@ impl super::Adapter { && features1.Int64ShaderOps.as_bool(), ); + features.set( + wgt::Features::TEXTURE_ATOMIC, + shader_model >= naga::back::hlsl::ShaderModel::V5_0, + ); + features.set( wgt::Features::SUBGROUP, shader_model >= naga::back::hlsl::ShaderModel::V6_0 @@ -677,6 +682,12 @@ impl crate::Adapter for super::Adapter { .Support2 .contains(Direct3D12::D3D12_FORMAT_SUPPORT2_UAV_TYPED_LOAD), ); + caps.set( + Tfc::STORAGE_ATOMIC, + data_srv_uav + .Support2 + .contains(Direct3D12::D3D12_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX), + ); caps.set( Tfc::STORAGE_WRITE_ONLY, data_srv_uav diff --git a/wgpu-hal/src/gles/adapter.rs b/wgpu-hal/src/gles/adapter.rs index a95511908e..60e3d0fb35 100644 --- a/wgpu-hal/src/gles/adapter.rs +++ b/wgpu-hal/src/gles/adapter.rs @@ -1080,6 +1080,8 @@ impl crate::Adapter for super::Adapter { let texture_float_linear = feature_fn(wgt::Features::FLOAT32_FILTERABLE, filterable); + let image_atomic = feature_fn(wgt::Features::TEXTURE_ATOMIC, Tfc::STORAGE_ATOMIC); + match format { Tf::R8Unorm => filterable_renderable, Tf::R8Snorm => filterable, @@ -1094,8 +1096,8 @@ impl crate::Adapter for super::Adapter { Tf::Rg8Snorm => filterable, Tf::Rg8Uint => renderable, Tf::Rg8Sint => renderable, - Tf::R32Uint => renderable | storage, - Tf::R32Sint => renderable | storage, + Tf::R32Uint => renderable | storage | image_atomic, + Tf::R32Sint => renderable | storage | image_atomic, Tf::R32Float => unfilterable | storage | float_renderable | texture_float_linear, Tf::Rg16Uint => renderable, Tf::Rg16Sint => renderable, diff --git a/wgpu-hal/src/gles/conv.rs b/wgpu-hal/src/gles/conv.rs index 3a6d5ebb2e..59bcf43b83 100644 --- a/wgpu-hal/src/gles/conv.rs +++ b/wgpu-hal/src/gles/conv.rs @@ -404,6 +404,7 @@ pub(super) fn map_storage_access(access: wgt::StorageTextureAccess) -> u32 { wgt::StorageTextureAccess::ReadOnly => glow::READ_ONLY, wgt::StorageTextureAccess::WriteOnly => glow::WRITE_ONLY, wgt::StorageTextureAccess::ReadWrite => glow::READ_WRITE, + wgt::StorageTextureAccess::Atomic => glow::READ_WRITE, } } diff --git a/wgpu-hal/src/lib.rs b/wgpu-hal/src/lib.rs index dd86a2f621..9924d1f5de 100644 --- a/wgpu-hal/src/lib.rs +++ b/wgpu-hal/src/lib.rs @@ -1723,20 +1723,22 @@ bitflags::bitflags! { const STORAGE_WRITE_ONLY = 1 << 9; /// Read-write storage texture usage. 
const STORAGE_READ_WRITE = 1 << 10; + /// Image atomic enabled storage + const STORAGE_ATOMIC = 1 << 11; /// The combination of states that a texture may be in _at the same time_. const INCLUSIVE = Self::COPY_SRC.bits() | Self::RESOURCE.bits() | Self::DEPTH_STENCIL_READ.bits(); /// The combination of states that a texture must exclusively be in. - const EXCLUSIVE = Self::COPY_DST.bits() | Self::COLOR_TARGET.bits() | Self::DEPTH_STENCIL_WRITE.bits() | Self::STORAGE_READ_ONLY.bits() | Self::STORAGE_WRITE_ONLY.bits() | Self::STORAGE_READ_WRITE.bits() | Self::PRESENT.bits(); + const EXCLUSIVE = Self::COPY_DST.bits() | Self::COLOR_TARGET.bits() | Self::DEPTH_STENCIL_WRITE.bits() | Self::STORAGE_READ_ONLY.bits() | Self::STORAGE_WRITE_ONLY.bits() | Self::STORAGE_READ_WRITE.bits() | Self::STORAGE_ATOMIC.bits() | Self::PRESENT.bits(); /// The combination of all usages that the are guaranteed to be be ordered by the hardware. /// If a usage is ordered, then if the texture state doesn't change between draw calls, there /// are no barriers needed for synchronization. const ORDERED = Self::INCLUSIVE.bits() | Self::COLOR_TARGET.bits() | Self::DEPTH_STENCIL_WRITE.bits() | Self::STORAGE_READ_ONLY.bits(); /// Flag used by the wgpu-core texture tracker to say a texture is in different states for every sub-resource - const COMPLEX = 1 << 11; + const COMPLEX = 1 << 12; /// Flag used by the wgpu-core texture tracker to say that the tracker does not know the state of the sub-resource. /// This is different from UNINITIALIZED as that says the tracker does know, but the texture has not been initialized. - const UNKNOWN = 1 << 12; + const UNKNOWN = 1 << 13; } } diff --git a/wgpu-hal/src/metal/adapter.rs b/wgpu-hal/src/metal/adapter.rs index d343d8881a..1315d98182 100644 --- a/wgpu-hal/src/metal/adapter.rs +++ b/wgpu-hal/src/metal/adapter.rs @@ -109,6 +109,12 @@ impl crate::Adapter for super::Adapter { ], ); + let image_atomic_if = if pc.msl_version >= MTLLanguageVersion::V3_1 { + Tfc::STORAGE_ATOMIC + } else { + Tfc::empty() + }; + // Metal defined pixel format capabilities let all_caps = Tfc::SAMPLED_LINEAR | Tfc::STORAGE_WRITE_ONLY @@ -154,7 +160,11 @@ impl crate::Adapter for super::Adapter { Tfc::STORAGE_WRITE_ONLY | Tfc::COLOR_ATTACHMENT | msaa_count } Tf::R32Uint | Tf::R32Sint => { - read_write_tier1_if | Tfc::STORAGE_WRITE_ONLY | Tfc::COLOR_ATTACHMENT | msaa_count + read_write_tier1_if + | Tfc::STORAGE_WRITE_ONLY + | Tfc::COLOR_ATTACHMENT + | msaa_count + | image_atomic_if } Tf::R32Float => { let flags = if pc.format_r32float_all { @@ -925,6 +935,10 @@ impl super::PrivateCapabilities { F::SHADER_INT64_ATOMIC_MIN_MAX, self.int64_atomics && self.msl_version >= MTLLanguageVersion::V2_4, ); + features.set( + F::TEXTURE_ATOMIC, + self.msl_version >= MTLLanguageVersion::V3_1, + ); features.set( F::ADDRESS_MODE_CLAMP_TO_BORDER, diff --git a/wgpu-hal/src/metal/conv.rs b/wgpu-hal/src/metal/conv.rs index f56141d5a7..0b39d3016d 100644 --- a/wgpu-hal/src/metal/conv.rs +++ b/wgpu-hal/src/metal/conv.rs @@ -27,6 +27,11 @@ pub fn map_texture_usage( format.is_combined_depth_stencil_format(), ); + mtl_usage.set( + metal::MTLTextureUsage::ShaderAtomic, + usage.intersects(Tu::STORAGE_ATOMIC), + ); + mtl_usage } diff --git a/wgpu-hal/src/metal/device.rs b/wgpu-hal/src/metal/device.rs index c281317099..577f5a7fcc 100644 --- a/wgpu-hal/src/metal/device.rs +++ b/wgpu-hal/src/metal/device.rs @@ -708,6 +708,7 @@ impl crate::Device for super::Device { wgt::StorageTextureAccess::ReadOnly => false, wgt::StorageTextureAccess::WriteOnly 
=> true, wgt::StorageTextureAccess::ReadWrite => true, + wgt::StorageTextureAccess::Atomic => true, }; } wgt::BindingType::AccelerationStructure => unimplemented!(), diff --git a/wgpu-hal/src/vulkan/adapter.rs b/wgpu-hal/src/vulkan/adapter.rs index 0b6bd1e4a4..2bb68e1012 100644 --- a/wgpu-hal/src/vulkan/adapter.rs +++ b/wgpu-hal/src/vulkan/adapter.rs @@ -478,7 +478,8 @@ impl PhysicalDeviceFeatures { | F::TIMESTAMP_QUERY_INSIDE_PASSES | F::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES | F::CLEAR_TEXTURE - | F::PIPELINE_CACHE; + | F::PIPELINE_CACHE + | F::TEXTURE_ATOMIC; let mut dl_flags = Df::COMPUTE_SHADERS | Df::BASE_VERTEX @@ -2115,7 +2116,10 @@ impl crate::Adapter for super::Adapter { // features.contains(vk::FormatFeatureFlags::SAMPLED_IMAGE_FILTER_MINMAX), // ); flags.set( - Tfc::STORAGE_READ_WRITE | Tfc::STORAGE_WRITE_ONLY | Tfc::STORAGE_READ_ONLY, + Tfc::STORAGE_READ_WRITE + | Tfc::STORAGE_WRITE_ONLY + | Tfc::STORAGE_READ_ONLY + | Tfc::STORAGE_ATOMIC, features.contains(vk::FormatFeatureFlags::STORAGE_IMAGE), ); flags.set( @@ -2142,6 +2146,10 @@ impl crate::Adapter for super::Adapter { Tfc::COPY_DST, features.intersects(vk::FormatFeatureFlags::TRANSFER_DST), ); + flags.set( + Tfc::STORAGE_ATOMIC, + features.intersects(vk::FormatFeatureFlags::STORAGE_IMAGE_ATOMIC), + ); // Vulkan is very permissive about MSAA flags.set(Tfc::MULTISAMPLE_RESOLVE, !format.is_compressed()); diff --git a/wgpu-hal/src/vulkan/conv.rs b/wgpu-hal/src/vulkan/conv.rs index 21ebd6c7b5..75dd6905df 100644 --- a/wgpu-hal/src/vulkan/conv.rs +++ b/wgpu-hal/src/vulkan/conv.rs @@ -266,7 +266,8 @@ pub fn map_texture_usage(usage: crate::TextureUses) -> vk::ImageUsageFlags { if usage.intersects( crate::TextureUses::STORAGE_READ_ONLY | crate::TextureUses::STORAGE_WRITE_ONLY - | crate::TextureUses::STORAGE_READ_WRITE, + | crate::TextureUses::STORAGE_READ_WRITE + | crate::TextureUses::STORAGE_ATOMIC, ) { flags |= vk::ImageUsageFlags::STORAGE; } @@ -309,15 +310,19 @@ pub fn map_texture_usage_to_barrier( access |= vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ | vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE; } - if usage - .intersects(crate::TextureUses::STORAGE_READ_ONLY | crate::TextureUses::STORAGE_READ_WRITE) - { + if usage.intersects( + crate::TextureUses::STORAGE_READ_ONLY + | crate::TextureUses::STORAGE_READ_WRITE + | crate::TextureUses::STORAGE_ATOMIC, + ) { stages |= shader_stages; access |= vk::AccessFlags::SHADER_READ; } - if usage - .intersects(crate::TextureUses::STORAGE_WRITE_ONLY | crate::TextureUses::STORAGE_READ_WRITE) - { + if usage.intersects( + crate::TextureUses::STORAGE_WRITE_ONLY + | crate::TextureUses::STORAGE_READ_WRITE + | crate::TextureUses::STORAGE_ATOMIC, + ) { stages |= shader_stages; access |= vk::AccessFlags::SHADER_WRITE; } @@ -352,7 +357,8 @@ pub fn map_vk_image_usage(usage: vk::ImageUsageFlags) -> crate::TextureUses { if usage.contains(vk::ImageUsageFlags::STORAGE) { bits |= crate::TextureUses::STORAGE_READ_ONLY | crate::TextureUses::STORAGE_WRITE_ONLY - | crate::TextureUses::STORAGE_READ_WRITE; + | crate::TextureUses::STORAGE_READ_WRITE + | crate::TextureUses::STORAGE_ATOMIC; } bits } diff --git a/wgpu-types/src/lib.rs b/wgpu-types/src/lib.rs index a366681ddf..0b44462189 100644 --- a/wgpu-types/src/lib.rs +++ b/wgpu-types/src/lib.rs @@ -781,6 +781,15 @@ bitflags::bitflags! { /// /// This is a native only feature. const VERTEX_ATTRIBUTE_64BIT = 1 << 45; + /// Enables image atomic fetch add, and, xor, or, min, and max for R32Uint and R32Sint textures. 
+ ///
+ /// Supported platforms:
+ /// - Vulkan
+ /// - DX12 (with SM 5+)
+ /// - Metal (with MSL 3.1+)
+ ///
+ /// This is a native only feature.
+ const TEXTURE_ATOMIC = 1 << 46;
/// Allows for creation of textures of format [`TextureFormat::NV12`]
///
/// Supported platforms:
@@ -2376,8 +2385,11 @@ bitflags::bitflags! {
/// When used as a STORAGE texture, then a texture with this format can be bound with
/// [`StorageTextureAccess::ReadWrite`].
const STORAGE_READ_WRITE = 1 << 8;
+ /// When used as a STORAGE texture, then a texture with this format can be bound with
+ /// [`StorageTextureAccess::Atomic`].
+ const STORAGE_ATOMIC = 1 << 9;
/// If not present, the texture can't be blended into the render target.
- const BLENDABLE = 1 << 9;
+ const BLENDABLE = 1 << 10;
}
}
@@ -3410,7 +3422,8 @@ impl TextureFormat {
let attachment = basic | TextureUsages::RENDER_ATTACHMENT;
let storage = basic | TextureUsages::STORAGE_BINDING;
let binding = TextureUsages::TEXTURE_BINDING;
- let all_flags = TextureUsages::all();
+ let all_flags = attachment | storage | binding;
+ let atomic = all_flags | TextureUsages::STORAGE_ATOMIC;
let rg11b10f = if device_features.contains(Features::RG11B10UFLOAT_RENDERABLE) {
attachment
} else {
@@ -3441,8 +3454,8 @@ impl TextureFormat {
Self::Rg8Snorm => ( none, basic),
Self::Rg8Uint => ( msaa, attachment),
Self::Rg8Sint => ( msaa, attachment),
- Self::R32Uint => ( s_all, all_flags),
- Self::R32Sint => ( s_all, all_flags),
+ Self::R32Uint => ( s_all, atomic),
+ Self::R32Sint => ( s_all, atomic),
Self::R32Float => (msaa | s_all, all_flags),
Self::Rg16Uint => ( msaa, attachment),
Self::Rg16Sint => ( msaa, attachment),
@@ -5508,6 +5521,11 @@ bitflags::bitflags! {
#[cfg_attr(feature = "serde", serde(transparent))]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct TextureUsages: u32 {
+ //
+ // ---- Start numbering at 1 << 0 ----
+ //
+ // WebGPU features:
+ //
/// Allows a texture to be the source in a [`CommandEncoder::copy_texture_to_buffer`] or
/// [`CommandEncoder::copy_texture_to_texture`] operation.
const COPY_SRC = 1 << 0;
@@ -5520,6 +5538,14 @@ bitflags::bitflags! {
const STORAGE_BINDING = 1 << 3;
/// Allows a texture to be an output attachment of a render pass.
const RENDER_ATTACHMENT = 1 << 4;
+
+ //
+ // ---- Restart Numbering for Native Features ----
+ //
+ // Native Features:
+ //
+ /// Allows a texture to be used with image atomics. Requires [`Features::TEXTURE_ATOMIC`].
+ const STORAGE_ATOMIC = 1 << 16;
}
}
@@ -6701,6 +6727,18 @@ pub enum StorageTextureAccess {
/// layout(set=0, binding=0, r32f) uniform image2D myStorageImage;
/// ```
ReadWrite,
+ /// The texture can be both read and written in the shader via atomics and must be annotated
+ /// with `atomic` in WGSL.
+ ///
+ /// [`Features::TEXTURE_ATOMIC`] must be enabled to use this access
+ /// mode. This is a nonstandard, native-only extension.
+ ///
+ /// Example WGSL syntax:
+ /// ```rust,ignore
+ /// @group(0) @binding(0)
+ /// var my_storage_image: texture_storage_2d<r32uint, atomic>;
+ /// ```
+ Atomic,
}

/// Specific type of a sampler binding.
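To help reviewers see how the wgpu-types additions above fit together, here is a minimal host-side sketch of the intended usage through the public `wgpu` crate. It is illustrative only: the function name, labels, and sizes are placeholders, and it assumes the device was requested with `Features::TEXTURE_ATOMIC`.

```rust
// Sketch: create an R32Uint texture usable with image atomics and a matching
// bind group layout entry. Assumes `Features::TEXTURE_ATOMIC` was requested
// when the device was created; names and sizes here are placeholders.
fn atomic_texture_bindings(device: &wgpu::Device) -> (wgpu::Texture, wgpu::BindGroupLayout) {
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("atomic image"),
        size: wgpu::Extent3d {
            width: 128,
            height: 128,
            depth_or_array_layers: 1,
        },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        // R32Uint and R32Sint are the only formats granted STORAGE_ATOMIC above.
        format: wgpu::TextureFormat::R32Uint,
        // STORAGE_ATOMIC is the new native-only usage flag (1 << 16).
        usage: wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::STORAGE_ATOMIC,
        view_formats: &[],
    });

    let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        label: Some("atomic image layout"),
        entries: &[wgpu::BindGroupLayoutEntry {
            binding: 0,
            visibility: wgpu::ShaderStages::COMPUTE,
            ty: wgpu::BindingType::StorageTexture {
                // The new access mode introduced by this patch.
                access: wgpu::StorageTextureAccess::Atomic,
                format: wgpu::TextureFormat::R32Uint,
                view_dimension: wgpu::TextureViewDimension::D2,
            },
            count: None,
        }],
    });

    (texture, layout)
}
```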
diff --git a/wgpu/src/backend/webgpu.rs b/wgpu/src/backend/webgpu.rs index 789d2f22cd..9797ad66b5 100644 --- a/wgpu/src/backend/webgpu.rs +++ b/wgpu/src/backend/webgpu.rs @@ -1885,6 +1885,10 @@ impl dispatch::DeviceInterface for WebDevice { wgt::StorageTextureAccess::ReadWrite => { webgpu_sys::GpuStorageTextureAccess::ReadWrite } + wgt::StorageTextureAccess::Atomic => { + // Validated out by `BindGroupLayoutEntryError::StorageTextureAtomic` + unreachable!() + } }; let storage_texture = webgpu_sys::GpuStorageTextureBindingLayout::new( map_texture_format(format),
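The WebGPU backend above rejects `StorageTextureAccess::Atomic` at bind group layout creation (`BindGroupLayoutEntryError::StorageTextureAtomic`), so the shader-side usage below applies to native backends only. It is a hedged sketch: the `atomic` access qualifier and the `textureAtomicMax` built-in are assumptions based on the WGSL front-end changes and the `atomicTexture.wgsl` test added elsewhere in this PR, and the entry point and workgroup size are arbitrary.

```rust
// Sketch: compute shader using the new atomic storage texture binding.
// The `atomic` access mode and the `textureAtomicMax` spelling are assumptions
// of this sketch; check naga/tests/in/atomicTexture.wgsl for the actual syntax.
fn atomic_image_shader(device: &wgpu::Device) -> wgpu::ShaderModule {
    device.create_shader_module(wgpu::ShaderModuleDescriptor {
        label: Some("image atomics example"),
        source: wgpu::ShaderSource::Wgsl(
            r#"
            @group(0) @binding(0)
            var image: texture_storage_2d<r32uint, atomic>;

            @compute @workgroup_size(8, 8)
            fn cs_main(@builtin(global_invocation_id) id: vec3<u32>) {
                // One atomic read-modify-write per invocation.
                textureAtomicMax(image, id.xy, id.x);
            }
            "#
            .into(),
        ),
    })
}
```

Only `R32Uint` and `R32Sint` receive `STORAGE_ATOMIC` in the guaranteed format features above, so the storage format in such a declaration is effectively limited to those two.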