diff --git a/onnxruntime/core/providers/xnnpack/math/matmul_int.cc b/onnxruntime/core/providers/xnnpack/math/matmul_int.cc
index 4f05b9c7a743a..334b7194f7682 100644
--- a/onnxruntime/core/providers/xnnpack/math/matmul_int.cc
+++ b/onnxruntime/core/providers/xnnpack/math/matmul_int.cc
@@ -70,6 +70,7 @@ bool MatMulIntegerCommon::IsOnnxNodeSupported(const NodeUnit& node_unit, const G
   return supported;
 }

+template<>
 Status MatMulInteger::PrePack(const Tensor& tensor, int input_idx, AllocatorPtr alloc,
                               /*out*/ bool& is_packed,
                               /*out*/ PrePackedWeights* /*Not used*/) {
@@ -136,6 +137,7 @@ Status MatMulInteger::PrePack(const Tensor& tensor, int input_idx, Alloc
   return Status::OK();
 }

+template<>
 Status MatMulInteger::PrePack(const Tensor& tensor, int input_idx, AllocatorPtr alloc,
                               /*out*/ bool& is_packed,
                               /*out*/ PrePackedWeights* /*Not used*/) {
@@ -204,6 +206,7 @@ Status MatMulInteger::PrePack(const Tensor& tensor, int input_idx, Allo
   return Status::OK();
 }

+template<>
 Status MatMulInteger::Compute(OpKernelContext* ctx) const {
   const Tensor* a = ctx->Input<Tensor>(0);
   pthreadpool_t threadpool = GetThreadPool();
@@ -249,6 +252,7 @@ Status MatMulInteger::Compute(OpKernelContext* ctx) const {
   return Status::OK();
 }

+template<>
 Status MatMulInteger::Compute(OpKernelContext* ctx) const {
   const Tensor* a = ctx->Input<Tensor>(0);
   pthreadpool_t threadpool = GetThreadPool();
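Context for the change: each of these PrePack/Compute definitions is an explicit specialization of a member of a class template, and C++ requires such out-of-class definitions to be introduced with `template<>`; MSVC has historically accepted the omission as an extension, while GCC and Clang reject it, which is the likely motivation for this diff. The template arguments on `MatMulInteger` are truncated out of the hunk headers above, so the sketch below is illustrative only: the class name `MatMulIntegerSketch` and the `uint8_t`/`int8_t` specializations (matching the u8/s8 weight paths a MatMulInteger kernel typically has) are assumptions, not the real kernel's signatures.

// Minimal self-contained sketch of the rule this diff enforces, not the
// real onnxruntime kernel: an out-of-class definition of an explicitly
// specialized member of a class template must begin with `template<>`.
// The class and the uint8_t/int8_t pairing are illustrative assumptions.
#include <cstdint>
#include <cstdio>

template <typename WeightT>
struct MatMulIntegerSketch {
  void PrePack();  // declared once, specialized per weight type below
};

// Without the leading `template<>`, GCC and Clang reject these two
// definitions; MSVC historically compiled them anyway as an extension.
template <>
void MatMulIntegerSketch<uint8_t>::PrePack() {
  std::printf("packing u8 weights\n");
}

template <>
void MatMulIntegerSketch<int8_t>::PrePack() {
  std::printf("packing s8 weights\n");
}

int main() {
  MatMulIntegerSketch<uint8_t>{}.PrePack();  // prints "packing u8 weights"
  MatMulIntegerSketch<int8_t>{}.PrePack();   // prints "packing s8 weights"
  return 0;
}

Note that this marker is only needed because the member functions are specialized while the class itself is not; if the whole class were explicitly specialized, its out-of-class member definitions would take no `template<>` prefix.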