From ffcb1030f72af757fc03aa395e19b976d6405126 Mon Sep 17 00:00:00 2001
From: Yong He
Date: Tue, 3 Dec 2024 15:52:02 -0800
Subject: [PATCH] Add intrinsics for aligned load/store. (#5736)

* Add intrinsics for aligned load/store.

* Fix.

* Update comment.

* Implement aligned load/store as intrinsic_op.

* Fix.

* Add proposal doc.

* fix typo.
---
 docs/proposals/013-aligned-load-store.md | 58 ++++++++++++++++++++++++
 source/slang/core.meta.slang             | 41 +++++++++++++++++
 source/slang/slang-emit-spirv.cpp        | 30 ++++++++----
 source/slang/slang-ir-inst-defs.h        |  2 +
 source/slang/slang-ir-insts.h            |  6 +++
 tests/spirv/aligned-load-store.slang     | 13 ++++++
 6 files changed, 142 insertions(+), 8 deletions(-)
 create mode 100644 docs/proposals/013-aligned-load-store.md
 create mode 100644 tests/spirv/aligned-load-store.slang

diff --git a/docs/proposals/013-aligned-load-store.md b/docs/proposals/013-aligned-load-store.md
new file mode 100644
index 0000000000..ea6f495628
--- /dev/null
+++ b/docs/proposals/013-aligned-load-store.md
@@ -0,0 +1,58 @@
+SP #013: Aligned load store
+=========================================
+
+Status: Experimental
+
+Implementation: [PR 5736](https://github.com/shader-slang/slang/pull/5736)
+
+Author: Yong He (yhe@nvidia.com)
+
+Reviewer:
+
+Introduction
+----------
+
+On many architectures, aligned vector loads (e.g. loading a `float4` with 16-byte alignment) are often more efficient than ordinary unaligned loads. Slang's pointer type does not encode any additional alignment info, and all pointer reads/writes by default assume the natural alignment of the pointee type, which is 4 bytes for `float4` vectors. This means that loading from a `float4*` will result in unaligned load instructions.
+
+This proposal provides a way for performance-sensitive code to specify aligned loads/stores through Slang pointers.
+
+
+Proposed Approach
+------------
+
+We propose to add intrinsic functions to perform aligned load/store through a pointer:
+
+```
+T loadAligned<int alignment, T>(T* ptr);
+void storeAligned<int alignment, T>(T* ptr, T value);
+```
+
+Example:
+
+```
+uniform float4* data;
+
+[numthreads(1,1,1)]
+void computeMain()
+{
+    var v = loadAligned<8>(data);
+    storeAligned<16>(data+1, v);
+}
+```
+
+Related Work
+------------
+
+### GLSL ###
+
+GLSL supports the `align` layout on a `buffer_reference` block to specify the alignment of the buffer pointer.
+
+### SPIRV ###
+
+In SPIRV, the alignment can be encoded either as a decoration on the pointer type or as a memory operand on the `OpLoad` and `OpStore` operations.
+
+### Other Languages ###
+
+Most C-like languages allow users to attach attributes to a type to specify its alignment. All loads/stores through pointers to that type then use the specified alignment.
+
+Instead of introducing type modifiers on data or pointer types, Slang should explicitly provide `loadAligned` and `storeAligned` intrinsic functions that lower to `OpLoad` and `OpStore` with the `Aligned` memory operand when generating SPIRV. This way we avoid the complexity of type-coercion rules between modified and unmodified types, and of recalculating alignment for pointers that represent an access chain. Developers writing performance-sensitive code can be assured that the alignment specified on each critical load or store will be honored, without having to work backwards through type modifications and reason about the typing rules associated with such modifiers.
\ No newline at end of file
diff --git a/source/slang/core.meta.slang b/source/slang/core.meta.slang
index a9d53162cf..3a7df8e7a5 100644
--- a/source/slang/core.meta.slang
+++ b/source/slang/core.meta.slang
@@ -1038,6 +1038,47 @@ struct Ptr
     }
 };
 
+//@hidden:
+__intrinsic_op($(kIROp_AlignedAttr))
+void __align_attr(int alignment);
+
+__intrinsic_op($(kIROp_Load))
+T __load_aligned<T, U>(T* ptr, U alignmentAttr);
+
+__intrinsic_op($(kIROp_Store))
+void __store_aligned<T, U>(T* ptr, T value, U alignmentAttr);
+
+//@public:
+
+/// Load a value from a pointer with a known alignment.
+/// Aligned loads are more efficient than unaligned loads on some platforms.
+/// @param alignment The alignment of the load operation.
+/// @param ptr The pointer to load from.
+/// @return The value loaded from the pointer.
+/// @remarks When targeting SPIRV, this function maps to an `OpLoad` instruction with the `Aligned` memory operand.
+/// The function maps to a normal load operation on other targets.
+///
+[__NoSideEffect]
+[ForceInline]
+T loadAligned<int alignment, T>(T* ptr)
+{
+    return __load_aligned(ptr, __align_attr(alignment));
+}
+
+/// Store a value to a pointer with a known alignment.
+/// Aligned stores are more efficient than unaligned stores on some platforms.
+/// @param alignment The alignment of the store operation.
+/// @param ptr The pointer to store the value to.
+/// @param value The value to store.
+/// @remarks When targeting SPIRV, this function maps to an `OpStore` instruction with the `Aligned` memory operand.
+/// The function maps to a normal store operation on other targets.
+///
+[ForceInline]
+void storeAligned<int alignment, T>(T* ptr, T value)
+{
+    __store_aligned(ptr, value, __align_attr(alignment));
+}
+
 //@hidden:
 __intrinsic_op($(kIROp_Load))
 T __load(Ptr<T> ptr);
diff --git a/source/slang/slang-emit-spirv.cpp b/source/slang/slang-emit-spirv.cpp
index 90d0db5d74..b0da7e4a65 100644
--- a/source/slang/slang-emit-spirv.cpp
+++ b/source/slang/slang-emit-spirv.cpp
@@ -5975,10 +5975,17 @@ struct SPIRVEmitContext : public SourceEmitterBase, public SPIRVEmitSharedContex
                     SpvStorageClassPhysicalStorageBuffer)
             {
                 IRSizeAndAlignment sizeAndAlignment;
-                getNaturalSizeAndAlignment(
-                    m_targetProgram->getOptionSet(),
-                    ptrType->getValueType(),
-                    &sizeAndAlignment);
+                if (auto alignedAttr = inst->findAttr<IRAlignedAttr>())
+                {
+                    sizeAndAlignment.alignment = (int)getIntVal(alignedAttr->getAlignment());
+                }
+                else
+                {
+                    getNaturalSizeAndAlignment(
+                        m_targetProgram->getOptionSet(),
+                        ptrType->getValueType(),
+                        &sizeAndAlignment);
+                }
                 return emitOpLoadAligned(
                     parent,
                     inst,
@@ -5999,10 +6006,17 @@ struct SPIRVEmitContext : public SourceEmitterBase, public SPIRVEmitSharedContex
                     SpvStorageClassPhysicalStorageBuffer)
             {
                 IRSizeAndAlignment sizeAndAlignment;
-                getNaturalSizeAndAlignment(
-                    m_targetProgram->getOptionSet(),
-                    ptrType->getValueType(),
-                    &sizeAndAlignment);
+                if (auto alignedAttr = inst->findAttr<IRAlignedAttr>())
+                {
+                    sizeAndAlignment.alignment = (int)getIntVal(alignedAttr->getAlignment());
+                }
+                else
+                {
+                    getNaturalSizeAndAlignment(
+                        m_targetProgram->getOptionSet(),
+                        ptrType->getValueType(),
+                        &sizeAndAlignment);
+                }
                 return emitOpStoreAligned(
                     parent,
                     inst,
diff --git a/source/slang/slang-ir-inst-defs.h b/source/slang/slang-ir-inst-defs.h
index 33baefa518..6d4f7a2ca5 100644
--- a/source/slang/slang-ir-inst-defs.h
+++ b/source/slang/slang-ir-inst-defs.h
@@ -1250,6 +1250,7 @@ INST_RANGE(Layout, VarLayout, EntryPointLayout)
     INST(SNormAttr, snorm, 0, HOISTABLE)
     INST(NoDiffAttr, no_diff, 0, HOISTABLE)
     INST(NonUniformAttr, nonuniform, 0, HOISTABLE)
+    INST(AlignedAttr, Aligned, 1, HOISTABLE)
 
     /* SemanticAttr */
     INST(UserSemanticAttr, userSemantic, 2, HOISTABLE)
@@ -1260,6 +1261,7 @@ INST_RANGE(Layout, VarLayout, EntryPointLayout)
     INST(VarOffsetAttr, offset, 2, HOISTABLE)
     INST_RANGE(LayoutResourceInfoAttr, TypeSizeAttr, VarOffsetAttr)
     INST(FuncThrowTypeAttr, FuncThrowType, 1, HOISTABLE)
+INST_RANGE(Attr, PendingLayoutAttr, FuncThrowTypeAttr)
 
 /* Liveness */
diff --git a/source/slang/slang-ir-insts.h b/source/slang/slang-ir-insts.h
index fb06863d4a..4c72727554 100644
--- a/source/slang/slang-ir-insts.h
+++ b/source/slang/slang-ir-insts.h
@@ -2389,6 +2389,12 @@ struct IRCall : IRInst
     void setArg(UInt index, IRInst* arg) { setOperand(index + 1, arg); }
 };
 
+struct IRAlignedAttr : IRAttr
+{
+    IR_LEAF_ISA(AlignedAttr)
+    IRInst* getAlignment() { return getOperand(0); }
+};
+
 struct IRLoad : IRInst
 {
     IRUse ptr;
diff --git a/tests/spirv/aligned-load-store.slang b/tests/spirv/aligned-load-store.slang
new file mode 100644
index 0000000000..c6b958dc28
--- /dev/null
+++ b/tests/spirv/aligned-load-store.slang
@@ -0,0 +1,13 @@
+//TEST:SIMPLE(filecheck=CHECK): -target spirv
+
+// CHECK: OpLoad {{.*}} Aligned 8
+// CHECK: OpStore {{.*}} Aligned 16
+
+uniform float4* data;
+
+[numthreads(1,1,1)]
+void computeMain()
+{
+    var v = loadAligned<8>((float2x4*)data);
+    storeAligned<16>((float2x4*)data+1, v);
+}
\ No newline at end of file
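
For illustration, here is a minimal usage sketch of the new intrinsics beyond the unit test above. It assumes the intrinsics behave as described in the proposal; the buffer names (`src`, `dst`) and the kernel shape are hypothetical and not part of this patch. With alignment 16, the SPIRV backend is expected to emit `OpLoad`/`OpStore` with the `Aligned 16` memory operand, while other targets fall back to ordinary loads and stores.

```
// Illustrative only: copies float4 elements using 16-byte aligned accesses.
// Assumes the host allocates `src` and `dst` with at least 16-byte alignment.
uniform float4* src;
uniform float4* dst;

[numthreads(64, 1, 1)]
void copyMain(uint3 tid : SV_DispatchThreadID)
{
    // Lowers to OpLoad ... Aligned 16 when targeting SPIRV.
    float4 value = loadAligned<16>(src + tid.x);

    // Lowers to OpStore ... Aligned 16 when targeting SPIRV.
    storeAligned<16>(dst + tid.x, value);
}
```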