diff --git a/src/kernels/cpu/optimized/riscv64/instancenorm.cpp b/src/kernels/cpu/optimized/riscv64/instancenorm.cpp
index cd9c6a97a2..d19dc3a894 100644
--- a/src/kernels/cpu/optimized/riscv64/instancenorm.cpp
+++ b/src/kernels/cpu/optimized/riscv64/instancenorm.cpp
@@ -155,7 +155,7 @@ using namespace nncase::kernels::cpu::optimized;
 // }
 // #endif
 
-// template <>
+template <>
 result<void> optimized::instancenorm(const float *input, float *output, float *scale, float *bias, const runtime_shape_t &in_shape, float epsilon) noexcept
 {
 // #if __riscv_vector
diff --git a/src/kernels/cpu/reference/instancenorm.cpp b/src/kernels/cpu/reference/instancenorm.cpp
index 46aa33a93a..d688aa77a1 100644
--- a/src/kernels/cpu/reference/instancenorm.cpp
+++ b/src/kernels/cpu/reference/instancenorm.cpp
@@ -29,13 +29,13 @@ template result<void> reference::instancenorm<float>(const float *input, float *
 template <typename T>
 result<void> reference::instancenorm(const T *input, T *output, T *scale, T *bias, const runtime_shape_t &in_shape, float epsilon) noexcept
 {
-    auto outer_size = in_shape[0];
+    auto outer_size = static_cast<int>(in_shape[0]);
     auto inner_size = 1;
-    for (auto i = 2; i < static_cast<int>(in_shape.size()); i++)
-        inner_size *= in_shape[i];
-    for (int32_t batch = 0; batch < outer_size; batch++)
+    for (size_t i = 2; i < in_shape.size(); i++)
+        inner_size *= static_cast<int>(in_shape[i]);
+    for (auto batch = 0; batch < outer_size; batch++)
     {
-        for (int32_t c = 0; c < in_shape[1]; c++)
+        for (size_t c = 0; c < in_shape[1]; c++)
         {
             auto src = input + batch * inner_size * in_shape[1] + c * inner_size;
             auto dest = output + batch * inner_size * in_shape[1] + c * inner_size;