diff --git a/source/slang/glsl.meta.slang b/source/slang/glsl.meta.slang index 7f2ca0aa20..b3df78c7af 100644 --- a/source/slang/glsl.meta.slang +++ b/source/slang/glsl.meta.slang @@ -4578,13 +4578,13 @@ __spirv_version(1.3) [require(spirv)] //// GLSL atomic -// this is actaully just a fancy storage buffer, and will be -// converted into one as such +// The following type internally is a Shader Storage Buffer +// as per GL_EXT_vulkan_glsl_relaxed __magic_type(GLSLAtomicUintType) __intrinsic_type($(kIROp_GLSLAtomicUintType)) public struct atomic_uint{}; -// teir of float refers to atomic extension support of float1 or float2. +// tier of float refers to atomic extension support of float1 or float2. // if we are inside a atomic_float function we will run the check for float1 teir // types and operations to enable the according ext needed for these operations @@ -4767,7 +4767,8 @@ struct TypeClassSubclass { const char* classType; const char *subclassType; const char *suffix; -} atomics[] = +}; +const TypeClassSubclass atomics[6] = { { "uint", "I", "U", "", @@ -4968,7 +4969,7 @@ __glsl_version(430) [require(glsl)] } __glsl_version(430) [require(glsl)] -[ForceInline] public uint atomicCounterDecrement_GLSL_wrapper(atomic_uint c) +[ForceInline] public uint atomicCounterDecrement_GLSL_helper(atomic_uint c) { __target_switch { @@ -5007,7 +5008,7 @@ __glsl_version(430) [require(glsl)] { case glsl: { - atomicCounterDecrement_GLSL_wrapper(c); + atomicCounterDecrement_GLSL_helper(c); return atomicCounter(c); } case spirv: @@ -5177,4 +5178,4 @@ __glsl_version(430) [require(glsl)] }; } } -} \ No newline at end of file +} diff --git a/source/slang/slang-parser.cpp b/source/slang/slang-parser.cpp index f979857a78..6644fb27f6 100644 --- a/source/slang/slang-parser.cpp +++ b/source/slang/slang-parser.cpp @@ -237,10 +237,15 @@ namespace Slang } const int64_t getNextBindingOffset(int binding) { - const int64_t currentOffset = bindingToByteOffset.getOrAddValue(binding, 0); + int64_t 
currentOffset; + if (bindingToByteOffset.addIfNotExists(binding, 0)) + currentOffset = 0; + else + currentOffset = bindingToByteOffset.getValue(binding) + sizeof(uint32_t); + bindingToByteOffset.set( binding, - currentOffset+4 + currentOffset + sizeof(uint32_t) ); return currentOffset; } @@ -4389,35 +4394,33 @@ namespace Slang { // we intentionally have findModifier twice since GLSLOffsetLayoutAttribute is rare // better to early leave - if (auto bindingMod = modifiers->findModifier()) - { - if (auto varDeclBase = as(decl)) - { - if (auto declRefExpr = as(varDeclBase->type.exp)) + auto bindingMod = modifiers->findModifier(); + if (!bindingMod) return; + auto varDeclBase = as(decl); + if (!varDeclBase) return; + auto declRefExpr = as(varDeclBase->type.exp); + if (!declRefExpr) return; + + // here is a problem; we link types into a literal in IR stage post parse + // but, order (top down) matter when parsing atomic_uint offset + // moreover, we can have patterns like: offset = 20, no offset [+4], offset = 16. + // Therefore we must parse all in order. The issue then is we will struggle to + // substitute atomic_uint for storage buffers... + if (auto name = declRefExpr->name) + { + if (name->text.equals("atomic_uint")) + { + if (!modifiers->findModifier()) { - // here is a problem; we link types into a literal in IR stage post parse - // but, order (top down) mattter when parsing atomic_uint offset - // more over, we can have patterns like: offset = 20, no offset [+4], offset = 16. - // Therefore we must parse all in order. The issue then is will struggle to - // subsitute atomic_uint for storage buffers... 
- if (auto name = declRefExpr->name) - { - if (name->text.equals("atomic_uint")) - { - if (!modifiers->findModifier()) - { - const int64_t nextOffset = parser->getNextBindingOffset(bindingMod->binding); - GLSLOffsetLayoutAttribute* modifier = parser->astBuilder->create(); - modifier->keywordName = NULL; //no keyword name given - modifier->loc = bindingMod->loc; //has no location in file, set to parent binding - modifier->offset = nextOffset; - - Modifiers newModifier; - newModifier.first = modifier; - _addModifiers(decl, newModifier); - } - } - } + const int64_t nextOffset = parser->getNextBindingOffset(bindingMod->binding); + GLSLOffsetLayoutAttribute* modifier = parser->astBuilder->create(); + modifier->keywordName = NULL; //no keyword name given + modifier->loc = bindingMod->loc; //has no location in file, set to parent binding + modifier->offset = nextOffset; + + Modifiers newModifier; + newModifier.first = modifier; + _addModifiers(decl, newModifier); } } } diff --git a/tests/glsl-intrinsic/atomic/atomicCounterTestMultiple.slang b/tests/glsl-intrinsic/atomic/atomicCounterTestMultiple.slang new file mode 100644 index 0000000000..71b2579a23 --- /dev/null +++ b/tests/glsl-intrinsic/atomic/atomicCounterTestMultiple.slang @@ -0,0 +1,33 @@ +//DIAGNOSTIC_TEST:SIMPLE(filecheck=CHECK_GLSL): -allow-glsl -stage compute -entry computeMain -target glsl -DTARGET_GLSL +#version 430 + +//TEST_INPUT:ubuffer(data=[0], stride=4):out,name=outputBuffer +buffer MyBlockName +{ + uint data[1]; +} outputBuffer; + +layout(binding = 1, offset = 12) uniform atomic_uint one; +layout(binding = 1) uniform atomic_uint two; +layout(binding = 1, offset = 4) uniform atomic_uint three; +layout(binding = 1) uniform atomic_uint four; +layout(binding = 2) uniform atomic_uint five; + +void computeMain() +{ + + outputBuffer.data[0] = true +// CHECK_GLSL-DAG: one_0._data_0[3] + && atomicCounter(one) == 0 +// CHECK_GLSL-DAG: one_0._data_0[4] + && atomicCounter(two) == 0 +// CHECK_GLSL-DAG: 
one_0._data_0[1] + && atomicCounter(three) == 0 +// CHECK_GLSL-DAG: one_0._data_0[2] + && atomicCounter(four) == 0 +// CHECK_GLSL-DAG: five_0._data_1[0] + && atomicCounter(five) == 0 + + ; + +} diff --git a/tests/glsl-intrinsic/atomic/atomicStorageBuffer.slang b/tests/glsl-intrinsic/atomic/atomicStorageBuffer.slang index 146e32f55f..5e00e1ec8a 100644 --- a/tests/glsl-intrinsic/atomic/atomicStorageBuffer.slang +++ b/tests/glsl-intrinsic/atomic/atomicStorageBuffer.slang @@ -57,234 +57,306 @@ buffer MyBlockName7 double data[1]; } float64Buffer; +// added to tests `out TYPE data` due to Slang bug +bool i32_init(int val, out int data) +{ + data = val; + return true; +} +bool i32_expect(int val) +{ + return int32Buffer.data[0] == val; +} bool testAtomicInt32() { return true - && (int32Buffer.data[0] = 5) == 5 + + && i32_init(5, int32Buffer.data[0]) && atomicAdd(int32Buffer.data[0], 1) == 5 - && int32Buffer.data[0] == 6 + && i32_expect(6) - && (int32Buffer.data[0] = 5) == 5 + && i32_init(5, int32Buffer.data[0]) && atomicMin(int32Buffer.data[0], 1) == 5 - && int32Buffer.data[0] == 1 + && i32_expect(1) - && (int32Buffer.data[0] = 5) == 5 + && i32_init(5, int32Buffer.data[0]) && atomicMax(int32Buffer.data[0], 1) == 5 - && int32Buffer.data[0] == 5 + && i32_expect(5) - && (int32Buffer.data[0] = 5) == 5 + && i32_init(5, int32Buffer.data[0]) && atomicExchange(int32Buffer.data[0], 2) == 5 - && int32Buffer.data[0] == 2 + && i32_expect(2) - && (int32Buffer.data[0] = 5) == 5 + && i32_init(5, int32Buffer.data[0]) && atomicAnd(int32Buffer.data[0], 1) == 5 - && int32Buffer.data[0] == 1 + && i32_expect(1) - && (int32Buffer.data[0] = 5) == 5 + && i32_init(5, int32Buffer.data[0]) && atomicOr(int32Buffer.data[0], 2) == 5 - && int32Buffer.data[0] == 7 + && i32_expect(7) - && (int32Buffer.data[0] = 5) == 5 + && i32_init(5, int32Buffer.data[0]) && atomicXor(int32Buffer.data[0], 3) == 5 - && int32Buffer.data[0] == 6 + && i32_expect(6) - && (int32Buffer.data[0] = 5) == 5 + && i32_init(5, 
int32Buffer.data[0]) && atomicCompSwap(int32Buffer.data[0], 5, 2) == 5 - && int32Buffer.data[0] == 2 + && i32_expect(2) - && (int32Buffer.data[0] = 5) == 5 + && i32_init(5, int32Buffer.data[0]) && atomicCompSwap(int32Buffer.data[0], 4, 2) == 5 - && int32Buffer.data[0] == 5 + && i32_expect(5) ; } + +bool i64_init(int64_t val, out int64_t data) +{ + data = val; + return true; +} +bool i64_expect(int64_t val) +{ + return int64Buffer.data[0] == val; +} bool testAtomicInt64() { return true - && (int64Buffer.data[0] = 5) == 5 + + && i64_init(5, int64Buffer.data[0]) && atomicAdd(int64Buffer.data[0], 1) == 5 - && int64Buffer.data[0] == 6 + && i64_expect(6) - && (int64Buffer.data[0] = 5) == 5 + && i64_init(5, int64Buffer.data[0]) && atomicMin(int64Buffer.data[0], 1) == 5 - && int64Buffer.data[0] == 1 + && i64_expect(1) - && (int64Buffer.data[0] = 5) == 5 + && i64_init(5, int64Buffer.data[0]) && atomicMax(int64Buffer.data[0], 1) == 5 - && int64Buffer.data[0] == 5 + && i64_expect(5) - && (int64Buffer.data[0] = 5) == 5 + && i64_init(5, int64Buffer.data[0]) && atomicExchange(int64Buffer.data[0], 2) == 5 - && int64Buffer.data[0] == 2 + && i64_expect(2) - && (int64Buffer.data[0] = 5) == 5 + && i64_init(5, int64Buffer.data[0]) && atomicAnd(int64Buffer.data[0], 1) == 5 - && int64Buffer.data[0] == 1 + && i64_expect(1) - && (int64Buffer.data[0] = 5) == 5 + && i64_init(5, int64Buffer.data[0]) && atomicOr(int64Buffer.data[0], 2) == 5 - && int64Buffer.data[0] == 7 + && i64_expect(7) - && (int64Buffer.data[0] = 5) == 5 + && i64_init(5, int64Buffer.data[0]) && atomicXor(int64Buffer.data[0], 3) == 5 - && int64Buffer.data[0] == 6 + && i64_expect(6) - && (int64Buffer.data[0] = 5) == 5 + && i64_init(5, int64Buffer.data[0]) && atomicCompSwap(int64Buffer.data[0], 5, 2) == 5 - && int64Buffer.data[0] == 2 + && i64_expect(2) - && (int64Buffer.data[0] = 5) == 5 + && i64_init(5, int64Buffer.data[0]) && atomicCompSwap(int64Buffer.data[0], 4, 2) == 5 - && int64Buffer.data[0] == 5 + && i64_expect(5) ; 
} + +bool u32_init(uint val, out uint data) +{ + data = val; + return true; +} +bool u32_expect(uint val) +{ + return uint32Buffer.data[0] == val; +} bool testAtomicUint32() { return true - && (uint32Buffer.data[0] = 5) == 5 + + && u32_init(5, uint32Buffer.data[0]) && atomicAdd(uint32Buffer.data[0], 1) == 5 - && uint32Buffer.data[0] == 6 + && u32_expect(6) - && (uint32Buffer.data[0] = 5) == 5 + && u32_init(5, uint32Buffer.data[0]) && atomicMin(uint32Buffer.data[0], 1) == 5 - && uint32Buffer.data[0] == 1 + && u32_expect(1) - && (uint32Buffer.data[0] = 5) == 5 + && u32_init(5, uint32Buffer.data[0]) && atomicMax(uint32Buffer.data[0], 1) == 5 - && uint32Buffer.data[0] == 5 + && u32_expect(5) - && (uint32Buffer.data[0] = 5) == 5 + && u32_init(5, uint32Buffer.data[0]) && atomicExchange(uint32Buffer.data[0], 2) == 5 - && uint32Buffer.data[0] == 2 + && u32_expect(2) - && (uint32Buffer.data[0] = 5) == 5 + && u32_init(5, uint32Buffer.data[0]) && atomicAnd(uint32Buffer.data[0], 1) == 5 - && uint32Buffer.data[0] == 1 + && u32_expect(1) - && (uint32Buffer.data[0] = 5) == 5 + && u32_init(5, uint32Buffer.data[0]) && atomicOr(uint32Buffer.data[0], 2) == 5 - && uint32Buffer.data[0] == 7 + && u32_expect(7) - && (uint32Buffer.data[0] = 5) == 5 + && u32_init(5, uint32Buffer.data[0]) && atomicXor(uint32Buffer.data[0], 3) == 5 - && uint32Buffer.data[0] == 6 + && u32_expect(6) - && (uint32Buffer.data[0] = 5) == 5 + && u32_init(5, uint32Buffer.data[0]) && atomicCompSwap(uint32Buffer.data[0], 5, 2) == 5 - && uint32Buffer.data[0] == 2 + && u32_expect(2) - && (uint32Buffer.data[0] = 5) == 5 + && u32_init(5, uint32Buffer.data[0]) && atomicCompSwap(uint32Buffer.data[0], 4, 2) == 5 - && uint32Buffer.data[0] == 5 + && u32_expect(5) ; } + +bool u64_init(uint64_t val, out uint64_t data) +{ + data = val; + return true; +} +bool u64_expect(uint64_t val) +{ + return uint64Buffer.data[0] == val; +} bool testAtomicUint64() { return true - && (uint64Buffer.data[0] = 5) == 5 + + && u64_init(5, 
uint64Buffer.data[0]) && atomicAdd(uint64Buffer.data[0], 1) == 5 - && uint64Buffer.data[0] == 6 + && u64_expect(6) - && (uint64Buffer.data[0] = 5) == 5 + && u64_init(5, uint64Buffer.data[0]) && atomicMin(uint64Buffer.data[0], 1) == 5 - && uint64Buffer.data[0] == 1 + && u64_expect(1) - && (uint64Buffer.data[0] = 5) == 5 + && u64_init(5, uint64Buffer.data[0]) && atomicMax(uint64Buffer.data[0], 1) == 5 - && uint64Buffer.data[0] == 5 + && u64_expect(5) - && (uint64Buffer.data[0] = 5) == 5 + && u64_init(5, uint64Buffer.data[0]) && atomicExchange(uint64Buffer.data[0], 2) == 5 - && uint64Buffer.data[0] == 2 + && u64_expect(2) - && (uint64Buffer.data[0] = 5) == 5 + && u64_init(5, uint64Buffer.data[0]) && atomicAnd(uint64Buffer.data[0], 1) == 5 - && uint64Buffer.data[0] == 1 + && u64_expect(1) - && (uint64Buffer.data[0] = 5) == 5 + && u64_init(5, uint64Buffer.data[0]) && atomicOr(uint64Buffer.data[0], 2) == 5 - && uint64Buffer.data[0] == 7 + && u64_expect(7) - && (uint64Buffer.data[0] = 5) == 5 + && u64_init(5, uint64Buffer.data[0]) && atomicXor(uint64Buffer.data[0], 3) == 5 - && uint64Buffer.data[0] == 6 + && u64_expect(6) - && (uint64Buffer.data[0] = 5) == 5 + && u64_init(5, uint64Buffer.data[0]) && atomicCompSwap(uint64Buffer.data[0], 5, 2) == 5 - && uint64Buffer.data[0] == 2 + && u64_expect(2) - && (uint64Buffer.data[0] = 5) == 5 + && u64_init(5, uint64Buffer.data[0]) && atomicCompSwap(uint64Buffer.data[0], 4, 2) == 5 - && uint64Buffer.data[0] == 5 + && u64_expect(5) ; } +bool f16_init(half val, out half data) +{ + data = val; + return true; +} +bool f16_expect(half val) +{ + return float16Buffer.data[0] == val; +} bool testAtomicFloat16() { return true + #ifdef TEST_when_shader_atomic_float2_is_available - && (float16Buffer.data[0] = half(5)) == half(5) + && f16_init(5, float16Buffer.data[0]) && atomicAdd(float16Buffer.data[0], half(1)) == half(5) - && float16Buffer.data[0] == half(6) + && f16_expect(6) - && (float16Buffer.data[0] = half(5)) == half(5) + && f16_init(5, 
float16Buffer.data[0]) && atomicMin(float16Buffer.data[0], half(1)) == half(5) - && float16Buffer.data[0] == half(1) + && f16_expect(1) - && (float16Buffer.data[0] = half(5)) == half(5) + && f16_init(5, float16Buffer.data[0]) && atomicMax(float16Buffer.data[0], half(1)) == half(5) - && float16Buffer.data[0] == half(5) + && f16_expect(5) - && (float16Buffer.data[0] = half(5)) == half(5) + && f16_init(5, float16Buffer.data[0]) && atomicExchange(float16Buffer.data[0], half(2)) == half(5) - && float16Buffer.data[0] == half(2) + && f16_expect(2) #endif // TEST_when_shader_atomic_float2_is_available ; } +bool f32_init(float val, out float data) +{ + data = val; + return true; +} +bool f32_expect(float val) +{ + return float32Buffer.data[0] == val; +} bool testAtomicFloat32() { return true - && (float32Buffer.data[0] = float(5)) == float(5) + && f32_init(5, float32Buffer.data[0]) && atomicAdd(float32Buffer.data[0], float(1)) == float(5) - && float32Buffer.data[0] == float(6) + && f32_expect(6) #ifdef TEST_when_shader_atomic_float2_is_available - && (float32Buffer.data[0] = float(5)) == float(5) + && f32_init(5, float32Buffer.data[0]) && atomicMin(float32Buffer.data[0], float(1)) == float(5) - && float32Buffer.data[0] == float(1) + && f32_expect(1) - && (float32Buffer.data[0] = float(5)) == float(5) + && f32_init(5, float32Buffer.data[0]) && atomicMax(float32Buffer.data[0], float(1)) == float(5) - && float32Buffer.data[0] == float(5) + && f32_expect(5) - && (float32Buffer.data[0] = float(5)) == float(5) + && f32_init(5, float32Buffer.data[0]) && atomicExchange(float32Buffer.data[0], float(2)) == float(5) - && float32Buffer.data[0] == float(2) + && f32_expect(2) #endif // TEST_when_shader_atomic_float2_is_available ; } +bool f64_init(double val, out double data) +{ + data = val; + return true; +} +bool f64_expect(double val) +{ + return float64Buffer.data[0] == val; +} bool testAtomicFloat64() { return true - && (float64Buffer.data[0] = double(5)) == double(5) + && 
f64_init(5, float64Buffer.data[0]) && atomicAdd(float64Buffer.data[0], double(1)) == double(5) - && float64Buffer.data[0] == double(6) + && f64_expect(6) #ifdef TEST_when_shader_atomic_float2_is_available - && (float64Buffer.data[0] = double(5)) == double(5) + && f64_init(5, float64Buffer.data[0]) && atomicMin(float64Buffer.data[0], double(1)) == double(5) - && float64Buffer.data[0] == double(1) + && f64_expect(1) - && (float64Buffer.data[0] = double(5)) == double(5) + && f64_init(5, float64Buffer.data[0]) && atomicMax(float64Buffer.data[0], double(1)) == double(5) - && float64Buffer.data[0] == double(5) + && f64_expect(5) - && (float64Buffer.data[0] = double(5)) == double(5) + && f64_init(5, float64Buffer.data[0]) && atomicExchange(float64Buffer.data[0], double(2)) == double(5) - && float64Buffer.data[0] == double(2) + && f64_expect(2) #endif // TEST_when_shader_atomic_float2_is_available ; }