CMSIS-NN: clang format include files
clang-format files in the NN/Include folder
Change-Id: I69cf711c00c65fb907e049ab1852290dd28f8800
diff --git a/CMSIS/NN/Include/arm_nn_tables.h b/CMSIS/NN/Include/arm_nn_tables.h
index f4bd7f9..35dfc3b 100644
--- a/CMSIS/NN/Include/arm_nn_tables.h
+++ b/CMSIS/NN/Include/arm_nn_tables.h
@@ -42,15 +42,15 @@
extern const q7_t tanhTable_q7[256];
extern const q15_t tanhTable_q15[256];
- /**
- * @brief 2-way tables for various activation functions
- *
- * 2-way table, H table for value larger than 1/4
- * L table for value smaller than 1/4, H table for remaining
- * We have this only for the q15_t version. It does not make
- * sense to have it for q7_t type
- */
+/**
+ * @brief 2-way tables for various activation functions
+ *
+ * 2-way table, H table for value larger than 1/4
+ * L table for value smaller than 1/4, H table for remaining
+ * We have this only for the q15_t version. It does not make
+ * sense to have it for q7_t type
+ */
extern const q15_t sigmoidHTable_q15[192];
extern const q15_t sigmoidLTable_q15[128];
-#endif /* ARM_NN_TABLES_H */
+#endif /* ARM_NN_TABLES_H */
diff --git a/CMSIS/NN/Include/arm_nn_types.h b/CMSIS/NN/Include/arm_nn_types.h
index e90b400..206af07 100644
--- a/CMSIS/NN/Include/arm_nn_types.h
+++ b/CMSIS/NN/Include/arm_nn_types.h
@@ -28,7 +28,6 @@
* Target Processor: Cortex-M cores
* -------------------------------------------------------------------- */
-
#ifndef _ARM_NN_TYPES_H
#define _ARM_NN_TYPES_H
@@ -37,21 +36,22 @@
/** CMSIS-NN object to contain the width and height of a tile */
typedef struct
{
- int32_t w; /**< Width */
- int32_t h; /**< Height */
+ int32_t w; /**< Width */
+ int32_t h; /**< Height */
} cmsis_nn_tile;
/** CMSIS-NN object used for the function context. */
typedef struct
{
- void *buf; /**< Pointer to a buffer needed for the optimization */
- int32_t size; /**< Buffer size */
+ void *buf; /**< Pointer to a buffer needed for the optimization */
+ int32_t size; /**< Buffer size */
} cmsis_nn_context;
/** CMSIS-NN object to contain the dimensions of the tensors */
typedef struct
{
- int32_t n; /**< Generic dimension to contain either the batch size or output channels. Please refer to the function documentation for more information */
+ int32_t n; /**< Generic dimension to contain either the batch size or output channels.
+ Please refer to the function documentation for more information */
int32_t h; /**< Height */
int32_t w; /**< Width */
int32_t c; /**< Input channels */
@@ -81,39 +81,39 @@
/** CMSIS-NN object for the convolution layer parameters */
typedef struct
{
- int32_t input_offset; /**< Zero value for the input tensor */
- int32_t output_offset; /**< Zero value for the output tensor */
- cmsis_nn_tile stride;
- cmsis_nn_tile padding;
- cmsis_nn_tile dilation;
+ int32_t input_offset; /**< Zero value for the input tensor */
+ int32_t output_offset; /**< Zero value for the output tensor */
+ cmsis_nn_tile stride;
+ cmsis_nn_tile padding;
+ cmsis_nn_tile dilation;
cmsis_nn_activation activation;
} cmsis_nn_conv_params;
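As a reading aid (not part of the patch): these parameter structs compose the tile type above and are typically filled with designated initializers. A minimal sketch, assuming cmsis_nn_activation carries the min/max clamping range (its definition sits outside this hunk) and with all values illustrative:

```c
#include "arm_nn_types.h"

/* Illustrative values only; offsets follow the ranges documented in
 * arm_nnfunctions.h ([-127, 128] for input, [-128, 127] for output). */
static const cmsis_nn_conv_params conv_params = {
    .input_offset = 128,   /* zero point of the int8 input */
    .output_offset = -128, /* zero point of the int8 output */
    .stride = {.w = 1, .h = 1},
    .padding = {.w = 0, .h = 0},
    .dilation = {.w = 1, .h = 1},
    .activation = {.min = -128, .max = 127}, /* assumed field names */
};
```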
/** CMSIS-NN object for Depthwise convolution layer parameters */
typedef struct
{
- int32_t input_offset; /**< Zero value for the input tensor */
- int32_t output_offset; /**< Zero value for the output tensor */
- int32_t ch_mult; /**< Channel Multiplier. ch_mult * in_ch = out_ch */
- cmsis_nn_tile stride;
- cmsis_nn_tile padding;
- cmsis_nn_tile dilation;
+ int32_t input_offset; /**< Zero value for the input tensor */
+ int32_t output_offset; /**< Zero value for the output tensor */
+ int32_t ch_mult; /**< Channel Multiplier. ch_mult * in_ch = out_ch */
+ cmsis_nn_tile stride;
+ cmsis_nn_tile padding;
+ cmsis_nn_tile dilation;
cmsis_nn_activation activation;
} cmsis_nn_dw_conv_params;
/** CMSIS-NN object for pooling layer parameters */
typedef struct
{
- cmsis_nn_tile stride;
- cmsis_nn_tile padding;
+ cmsis_nn_tile stride;
+ cmsis_nn_tile padding;
cmsis_nn_activation activation;
} cmsis_nn_pool_params;
/** CMSIS-NN object for Fully Connected layer parameters */
typedef struct
{
- int32_t input_offset; /**< Zero value for the input tensor */
- int32_t filter_offset; /**< Zero value for the filter tensor */
- int32_t output_offset; /**< Zero value for the output tensor */
+ int32_t input_offset; /**< Zero value for the input tensor */
+ int32_t filter_offset; /**< Zero value for the filter tensor */
+ int32_t output_offset; /**< Zero value for the output tensor */
cmsis_nn_activation activation;
} cmsis_nn_fc_params;
@@ -121,12 +121,10 @@
typedef struct
{
int32_t rank;
- int32_t input_offset; /**< Zero value for the input tensor */
+ int32_t input_offset; /**< Zero value for the input tensor */
int32_t output_offset; /**< Zero value for the output tensor */
cmsis_nn_activation input_activation;
cmsis_nn_activation output_activation;
} cmsis_nn_svdf_params;
#endif // _ARM_NN_TYPES_H
-
-
diff --git a/CMSIS/NN/Include/arm_nnfunctions.h b/CMSIS/NN/Include/arm_nnfunctions.h
index 7680c9d..7f77e9a 100644
--- a/CMSIS/NN/Include/arm_nnfunctions.h
+++ b/CMSIS/NN/Include/arm_nnfunctions.h
@@ -57,11 +57,12 @@
 * - Legacy functions supporting ARM's internal symmetric quantization (8 bits).
 * - Functions that support the TensorFlow Lite framework with symmetric quantization (8 bits).
*
- * The legacy functions can be identified with their suffix of _q7 or _q15 and are no new development is done there. The article in [2] describes in detail
- * how to run a network using the legacy functions.
+ * The legacy functions can be identified by their suffix of _q7 or _q15; no new development is done there.
+ * The article in [2] describes in detail how to run a network using the legacy functions.
*
- * The functions supporting TensorFlow Lite framework is identified by the _s8 suffix and can be invoked from TFL micro. The functions are bit exact to
- * TensorFlow Lite. Refer to the TensorFlow's documentation in [3] on how to run a TensorFlow Lite model using optimized CMSIS-NN kernels.
+ * The functions supporting the TensorFlow Lite framework are identified by the _s8 suffix and can be invoked from TFL
+ * micro. The functions are bit-exact to TensorFlow Lite. Refer to TensorFlow's documentation in [3] on how to run
+ * a TensorFlow Lite model using optimized CMSIS-NN kernels.
*
* Block Diagram
* --------
@@ -86,12 +87,13 @@
 * Define macro ARM_MATH_MVEI if the silicon supports M-Profile Vector Extension.
* - ARM_MATH_AUTOVECTORIZE
- * Used in conjucture with ARM_MATH_MVEI to let the compiler auto vectorize for the functions that uses inline assembly.
- * It does not affect functions that use C or intrinsics.
+ * Used in conjunction with ARM_MATH_MVEI to let the compiler auto-vectorize the functions that use inline
+ * assembly. It does not affect functions that use C or intrinsics.
* - ARM_MATH_BIG_ENDIAN:
*
- * Define macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. This is supported only for the legacy functions i.e, functions targetted at
- * TensorFlow Lite do not support big endianness. By default library builds for little endian targets.
+ * Define macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. This is supported only for the legacy
+ * functions, i.e., functions targeted at TensorFlow Lite do not support big endianness. By default the library builds
+ * for little endian targets.
*
* - ARM_NN_TRUNCATE:
*
@@ -107,7 +109,8 @@
* -# improve validation
* -# improve code readability
*
- * The upcoming API interface change will be based on "struct" and only affect the TensorFlowLite micro compliant APIs [4] (functions with _s8 suffix)
+ * The upcoming API interface change will be based on "struct" and will only affect the TensorFlow Lite Micro
+ * compliant APIs [4] (functions with the _s8 suffix).
*
 * Below you can find a snapshot of what the new API interface will look like (names can change)
*
@@ -146,7 +149,8 @@
* [1] CMSIS-NN: Efficient Neural Network Kernels for Arm Cortex-M CPUs https://arxiv.org/abs/1801.06601
*
* [2] Converting a Neural Network for Arm Cortex-M with CMSIS-NN
- * https://developer.arm.com/solutions/machine-learning-on-arm/developer-material/how-to-guides/converting-a-neural-network-for-arm-cortex-m-with-cmsis-nn/single-page
+ *
+ https://developer.arm.com/solutions/machine-learning-on-arm/developer-material/how-to-guides/converting-a-neural-network-for-arm-cortex-m-with-cmsis-nn/single-page
* [3] https://www.tensorflow.org/lite/microcontrollers/library
*
* [4] https://github.com/ARM-software/CMSIS_5/tree/develop/CMSIS/NN#legacy-vs-tfl-micro-compliant-apis
@@ -161,31 +165,28 @@
#ifndef _ARM_NNFUNCTIONS_H
#define _ARM_NNFUNCTIONS_H
-#include "arm_nn_types.h"
#include "arm_math_types.h"
+#include "arm_nn_types.h"
#define USE_INTRINSIC
//#define ARM_NN_TRUNCATE /* This configures the rounding model to floor or round to the nearest int */
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
/**
* @brief Struct for specifying activation function types
*
*/
-typedef enum
-{
+typedef enum {
ARM_SIGMOID = 0,
- /**< Sigmoid activation function */
+ /**< Sigmoid activation function */
ARM_TANH = 1,
- /**< Tanh activation function */
+ /**< Tanh activation function */
} arm_nn_activation_type;
-
/**
* @defgroup NNConv Convolution Functions
*
@@ -203,174 +204,363 @@
*
*/
- /**
- * @brief s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in cmsis-nn to perform the convolution.
- *
- * @param[in, out] ctx Function context that contains the additional buffer if required by the implementation.
- arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial filter dimensions
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> if argument constraints fail. or,
- * <code>ARM_MATH_SUCCESS</code> on successful completion.
- *
- */
- arm_status arm_convolve_wrapper_s8(const cmsis_nn_context* ctx,
- const cmsis_nn_conv_params* conv_params,
- const cmsis_nn_per_channel_quant_params* quant_params,
- const cmsis_nn_dims* input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims* filter_dims,
- const q7_t *filter_data,
- const cmsis_nn_dims* bias_dims,
- const int32_t *bias_data,
- const cmsis_nn_dims* output_dims,
- q7_t *output_data);
+/**
+ * @brief s8 convolution layer wrapper function whose main purpose is to call the optimal kernel available in
+ * cmsis-nn to perform the convolution.
+ *
+ * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
+ arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required
+ * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
+ * Range of conv_params->input_offset : [-127, 128]
+ * Range of conv_params->output_offset : [-128, 127]
+ * @param[in] quant_params Per-channel quantization info.
+ * It contains the multiplier and shift values to be applied to each output channel
+ * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in] input_data Input (activation) data pointer. Data type: int8
+ * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
+ * spatial filter dimensions
+ * @param[in] filter_data Filter data pointer. Data type: int8
+ * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
+ * @param[in] bias_data Bias data pointer. Data type: int32
+ * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
+ * @param[out] output_data Output data pointer. Data type: int8
+ *
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> if argument constraints fail, or
+ * <code>ARM_MATH_SUCCESS</code> on successful completion.
+ *
+ */
+arm_status arm_convolve_wrapper_s8(const cmsis_nn_context *ctx,
+ const cmsis_nn_conv_params *conv_params,
+ const cmsis_nn_per_channel_quant_params *quant_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const q7_t *filter_data,
+ const cmsis_nn_dims *bias_dims,
+ const int32_t *bias_data,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
- /**
- * @brief Get the required buffer size for arm_convolve_wrapper_s8
- *
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN]
- * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial filter dimensions
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- *
- * @return The function returns required buffer size(bytes)
- *
- */
- int32_t arm_convolve_wrapper_s8_get_buffer_size(const cmsis_nn_conv_params* conv_params,
- const cmsis_nn_dims* input_dims,
- const cmsis_nn_dims* filter_dims,
- const cmsis_nn_dims* output_dims);
+/**
+ * @brief Get the required buffer size for arm_convolve_wrapper_s8
+ *
+ * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
+ * Range of conv_params->input_offset : [-127, 128]
+ * Range of conv_params->output_offset : [-128, 127]
+ * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN]
+ * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial
+ * filter dimensions
+ * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
+ *
+ * @return The function returns the required buffer size (bytes)
+ *
+ */
+int32_t arm_convolve_wrapper_s8_get_buffer_size(const cmsis_nn_conv_params *conv_params,
+ const cmsis_nn_dims *input_dims,
+ const cmsis_nn_dims *filter_dims,
+ const cmsis_nn_dims *output_dims);
- /**
- * @brief Basic s8 convolution function
- * @param[in, out] ctx Function context that contains the additional buffer if required by the implementation.
- arm_convolve_s8_get_buffer_size will return the buffer_size if required
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial filter dimensions
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Optional bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int8
+/**
+ * @brief Basic s8 convolution function
+ * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
+ arm_convolve_s8_get_buffer_size will return the buffer_size if required
+ * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
+ * Range of conv_params->input_offset : [-127, 128]
+ * Range of conv_params->output_offset : [-128, 127]
+ * @param[in] quant_params Per-channel quantization info.
+ * It contains the multiplier and shift values to be applied to each output channel
+ * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in] input_data Input (activation) data pointer. Data type: int8
+ * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the
+ * spatial filter dimensions
+ * @param[in] filter_data Filter data pointer. Data type: int8
+ * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
+ * @param[in] bias_data Optional bias data pointer. Data type: int32
+ * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
+ * @param[out] output_data Output data pointer. Data type: int8
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite micro
- * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs.
- * 3. Additional memory is required for optimization. Refer to argument 'ctx' for details.
- *
- */
- arm_status arm_convolve_s8(const cmsis_nn_context* ctx,
- const cmsis_nn_conv_params* conv_params,
- const cmsis_nn_per_channel_quant_params* quant_params,
- const cmsis_nn_dims* input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims* filter_dims,
- const q7_t *filter_data,
- const cmsis_nn_dims* bias_dims,
- const int32_t *bias_data,
- const cmsis_nn_dims* output_dims,
- q7_t *output_data);
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ * @details
+ * 1. Supported framework: TensorFlow Lite micro
+ * 2. q7 is used as the data type even though it is s8 data. This is done to be consistent with existing APIs.
+ * 3. Additional memory is required for optimization. Refer to argument 'ctx' for details.
+ *
+ */
+arm_status arm_convolve_s8(const cmsis_nn_context *ctx,
+ const cmsis_nn_conv_params *conv_params,
+ const cmsis_nn_per_channel_quant_params *quant_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const q7_t *filter_data,
+ const cmsis_nn_dims *bias_dims,
+ const int32_t *bias_data,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
- /**
- * @brief Get the required buffer size for s8 convolution function
- *
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial filter dimensions
- * @return The function returns required buffer size(bytes)
- *
- */
- int32_t arm_convolve_s8_get_buffer_size(const cmsis_nn_dims* input_dims,
- const cmsis_nn_dims* filter_dims);
+/**
+ * @brief Get the required buffer size for s8 convolution function
+ *
+ * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are
+ * the spatial filter dimensions
+ * @return The function returns the required buffer size (bytes)
+ *
+ */
+int32_t arm_convolve_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims);
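The direct kernel pairs with its buffer-size query in the same way; the returned bytes back the im2col-style optimization referred to under 'ctx'. A sketch (heap allocation for brevity; a static arena is more usual on bare-metal targets):

```c
#include <stdlib.h>
#include "arm_nnfunctions.h"

/* Sketch: build the context that arm_convolve_s8 expects. */
static cmsis_nn_context make_conv_s8_ctx(const cmsis_nn_dims *input_dims,
                                         const cmsis_nn_dims *filter_dims)
{
    cmsis_nn_context ctx;
    ctx.size = arm_convolve_s8_get_buffer_size(input_dims, filter_dims);
    ctx.buf = (ctx.size > 0) ? malloc(ctx.size) : NULL;
    return ctx;
}
```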
- /**
- * @brief Basic Q7 convolution function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- */
- arm_status arm_convolve_HWC_q7_basic(const q7_t * Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const q7_t * wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const q7_t * bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q7_t * Im_out,
- const uint16_t dim_im_out,
- q15_t * bufferA,
- q7_t * bufferB);
+/**
+ * @brief Basic Q7 convolution function
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in input tensor dimension
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel filter kernel size
+ * @param[in] padding padding sizes
+ * @param[in] stride convolution stride
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out output tensor dimension
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ */
+arm_status arm_convolve_HWC_q7_basic(const q7_t *Im_in,
+ const uint16_t dim_im_in,
+ const uint16_t ch_im_in,
+ const q7_t *wt,
+ const uint16_t ch_im_out,
+ const uint16_t dim_kernel,
+ const uint16_t padding,
+ const uint16_t stride,
+ const q7_t *bias,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ q7_t *Im_out,
+ const uint16_t dim_im_out,
+ q15_t *bufferA,
+ q7_t *bufferB);
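For the legacy q7 path the caller sizes bufferA itself. A sketch with placeholder shapes; the 2*ch_im_in*dim_kernel*dim_kernel working-set formula is quoted later in this header for the q15 variant and is assumed to apply here as well:

```c
#include <stddef.h>
#include "arm_nnfunctions.h"

#define CH_IM_IN 4 /* placeholder shapes, not from the patch */
#define CH_IM_OUT 8
#define DIM_KERNEL 3
#define DIM_IM 28 /* square in/out for stride 1, padding 1 */

/* Working buffer; size formula assumed from the q15 variant's notes. */
static q15_t bufferA[2 * CH_IM_IN * DIM_KERNEL * DIM_KERNEL];

arm_status legacy_q7_conv(const q7_t *im_in, const q7_t *wt,
                          const q7_t *bias, q7_t *im_out)
{
    return arm_convolve_HWC_q7_basic(im_in, DIM_IM, CH_IM_IN, wt, CH_IM_OUT,
                                     DIM_KERNEL, 1 /* padding */, 1 /* stride */,
                                     bias, 0 /* bias_shift */, 7 /* out_shift */,
                                     im_out, DIM_IM, bufferA,
                                     NULL /* bufferB: unused here */);
}
```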
- /**
- * @brief Basic Q7 convolution function (non-square shape)
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in_x input tensor dimension x
- * @param[in] dim_im_in_y input tensor dimension y
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel_x filter kernel size x
- * @param[in] dim_kernel_y filter kernel size y
- * @param[in] padding_x padding size x
- * @param[in] padding_y padding size y
- * @param[in] stride_x convolution stride x
- * @param[in] stride_y convolution stride y
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out_x output tensor dimension x
- * @param[in] dim_im_out_y output tensor dimension y
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- */
- arm_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t * Im_in,
+/**
+ * @brief Basic Q7 convolution function (non-square shape)
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in_x input tensor dimension x
+ * @param[in] dim_im_in_y input tensor dimension y
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel_x filter kernel size x
+ * @param[in] dim_kernel_y filter kernel size y
+ * @param[in] padding_x padding size x
+ * @param[in] padding_y padding size y
+ * @param[in] stride_x convolution stride x
+ * @param[in] stride_y convolution stride y
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out_x output tensor dimension x
+ * @param[in] dim_im_out_y output tensor dimension y
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ */
+arm_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t *Im_in,
+ const uint16_t dim_im_in_x,
+ const uint16_t dim_im_in_y,
+ const uint16_t ch_im_in,
+ const q7_t *wt,
+ const uint16_t ch_im_out,
+ const uint16_t dim_kernel_x,
+ const uint16_t dim_kernel_y,
+ const uint16_t padding_x,
+ const uint16_t padding_y,
+ const uint16_t stride_x,
+ const uint16_t stride_y,
+ const q7_t *bias,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ q7_t *Im_out,
+ const uint16_t dim_im_out_x,
+ const uint16_t dim_im_out_y,
+ q15_t *bufferA,
+ q7_t *bufferB);
+
+/**
+ * @brief Basic Q15 convolution function
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in input tensor dimension
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel filter kernel size
+ * @param[in] padding padding sizes
+ * @param[in] stride convolution stride
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out output tensor dimension
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ */
+arm_status arm_convolve_HWC_q15_basic(const q15_t *Im_in,
+ const uint16_t dim_im_in,
+ const uint16_t ch_im_in,
+ const q15_t *wt,
+ const uint16_t ch_im_out,
+ const uint16_t dim_kernel,
+ const uint16_t padding,
+ const uint16_t stride,
+ const q15_t *bias,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ q15_t *Im_out,
+ const uint16_t dim_im_out,
+ q15_t *bufferA,
+ q7_t *bufferB);
+
+/**
+ * @brief Fast Q7 convolution function
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in input tensor dimension
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel filter kernel size
+ * @param[in] padding padding sizes
+ * @param[in] stride convolution stride
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out output tensor dimension
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
+ *
+ * This function is the version with the full list of optimization tricks, but with
+ * some constraints:
+ * ch_im_in is multiple of 4
+ * ch_im_out is multiple of 2
+ */
+arm_status arm_convolve_HWC_q7_fast(const q7_t *Im_in,
+ const uint16_t dim_im_in,
+ const uint16_t ch_im_in,
+ const q7_t *wt,
+ const uint16_t ch_im_out,
+ const uint16_t dim_kernel,
+ const uint16_t padding,
+ const uint16_t stride,
+ const q7_t *bias,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ q7_t *Im_out,
+ const uint16_t dim_im_out,
+ q15_t *bufferA,
+ q7_t *bufferB);
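Since the fast kernel hard-requires the channel multiples listed above, a dispatcher would typically gate on them and fall back to arm_convolve_HWC_q7_basic() otherwise. A sketch of that guard:

```c
#include <stdint.h>

/* Nonzero when arm_convolve_HWC_q7_fast's documented constraints hold. */
static int q7_fast_constraints_ok(uint16_t ch_im_in, uint16_t ch_im_out)
{
    return (ch_im_in % 4u == 0u) && (ch_im_out % 2u == 0u);
}
```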
+
+/**
+ * @brief Fast Q7 convolution function (non-square shape)
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in_x input tensor dimension x
+ * @param[in] dim_im_in_y input tensor dimension y
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel_x filter kernel size x
+ * @param[in] dim_kernel_y filter kernel size y
+ * @param[in] padding_x padding size x
+ * @param[in] padding_y padding size y
+ * @param[in] stride_x convolution stride x
+ * @param[in] stride_y convolution stride y
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out_x output tensor dimension x
+ * @param[in] dim_im_out_y output tensor dimension y
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
+ *
+ * This function is the version with the full list of optimization tricks, but with
+ * some constraints:
+ * ch_im_in is multiple of 4
+ * ch_im_out is multiple of 2
+ */
+
+arm_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t *Im_in,
+ const uint16_t dim_im_in_x,
+ const uint16_t dim_im_in_y,
+ const uint16_t ch_im_in,
+ const q7_t *wt,
+ const uint16_t ch_im_out,
+ const uint16_t dim_kernel_x,
+ const uint16_t dim_kernel_y,
+ const uint16_t padding_x,
+ const uint16_t padding_y,
+ const uint16_t stride_x,
+ const uint16_t stride_y,
+ const q7_t *bias,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ q7_t *Im_out,
+ const uint16_t dim_im_out_x,
+ const uint16_t dim_im_out_y,
+ q15_t *bufferA,
+ q7_t *bufferB);
+
+/**
+ * @brief Fast Q7 version of 1x1 convolution (non-square shape)
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in_x input tensor dimension x
+ * @param[in] dim_im_in_y input tensor dimension y
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel_x filter kernel size x
+ * @param[in] dim_kernel_y filter kernel size y
+ * @param[in] padding_x padding size x
+ * @param[in] padding_y padding size y
+ * @param[in] stride_x convolution stride x
+ * @param[in] stride_y convolution stride y
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out_x output tensor dimension x
+ * @param[in] dim_im_out_y output tensor dimension y
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> if argument constraints fail, or
+ * <code>ARM_MATH_SUCCESS</code> on successful completion.
+ *
+ * This function implements convolution with a 1x1 kernel size (i.e., dim_kernel_x=1
+ * and dim_kernel_y=1). It can be used for the
+ * second half of MobileNets after depthwise separable convolution.
+ *
+ * This function is the version with the full list of optimization tricks, but with
+ * some constraints:
+ * ch_im_in is multiple of 4
+ * ch_im_out is multiple of 2
+ */
+arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t *Im_in,
const uint16_t dim_im_in_x,
const uint16_t dim_im_in_y,
const uint16_t ch_im_in,
- const q7_t * wt,
+ const q7_t *wt,
const uint16_t ch_im_out,
const uint16_t dim_kernel_x,
const uint16_t dim_kernel_y,
@@ -378,647 +568,48 @@
const uint16_t padding_y,
const uint16_t stride_x,
const uint16_t stride_y,
- const q7_t * bias,
+ const q7_t *bias,
const uint16_t bias_shift,
const uint16_t out_shift,
- q7_t * Im_out,
+ q7_t *Im_out,
const uint16_t dim_im_out_x,
const uint16_t dim_im_out_y,
- q15_t * bufferA,
- q7_t * bufferB);
+ q15_t *bufferA,
+ q7_t *bufferB);
- /**
- * @brief Basic Q15 convolution function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- */
- arm_status arm_convolve_HWC_q15_basic(const q15_t * Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const q15_t * wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const q15_t * bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q15_t * Im_out,
- const uint16_t dim_im_out,
- q15_t * bufferA,
- q7_t * bufferB);
-
- /**
- * @brief Fast Q7 convolution function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
- *
- * This function is the version with full list of optimization tricks, but with
- * some contraints:
- * ch_im_in is multiple of 4
- * ch_im_out is multiple of 2
- */
- arm_status arm_convolve_HWC_q7_fast(const q7_t * Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const q7_t * wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const q7_t * bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q7_t * Im_out,
- const uint16_t dim_im_out,
- q15_t * bufferA,
- q7_t * bufferB);
-
- /**
- * @brief Fast Q7 convolution function (non-sqaure shape)
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in_x input tensor dimension x
- * @param[in] dim_im_in_y input tensor dimension y
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel_x filter kernel size x
- * @param[in] dim_kernel_y filter kernel size y
- * @param[in] padding_x padding size x
- * @param[in] padding_y padding size y
- * @param[in] stride_x convolution stride x
- * @param[in] stride_y convolution stride y
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out_x output tensor dimension x
- * @param[in] dim_im_out_y output tensor dimension y
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
- *
- * This function is the version with full list of optimization tricks, but with
- * some contraints:
- * ch_im_in is multiple of 4
- * ch_im_out is multiple of 2
- */
-
- arm_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t * Im_in,
- const uint16_t dim_im_in_x,
- const uint16_t dim_im_in_y,
- const uint16_t ch_im_in,
- const q7_t * wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel_x,
- const uint16_t dim_kernel_y,
- const uint16_t padding_x,
- const uint16_t padding_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q7_t * bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q7_t * Im_out,
- const uint16_t dim_im_out_x,
- const uint16_t dim_im_out_y,
- q15_t * bufferA,
- q7_t * bufferB);
-
- /**
- * @brief Fast Q7 version of 1x1 convolution (non-sqaure shape)
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in_x input tensor dimension x
- * @param[in] dim_im_in_y input tensor dimension y
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel_x filter kernel size x
- * @param[in] dim_kernel_y filter kernel size y
- * @param[in] padding_x padding size x
- * @param[in] padding_y padding size y
- * @param[in] stride_x convolution stride x
- * @param[in] stride_y convolution stride y
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out_x output tensor dimension x
- * @param[in] dim_im_out_y output tensor dimension y
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> if argument constraints fail. or,
- * <code>ARM_MATH_SUCCESS</code> on successful completion.
- *
- * This function implement convolution with 1x1 kernel size (i.e., dim_kernel_x=1
- * and dim_kernel_y=1). It can be used for
- * second half of MobileNets after depthwise separable convolution.
- *
- * This function is the version with full list of optimization tricks, but with
- * some contraints:
- * ch_im_in is multiple of 4
- * ch_im_out is multiple of 2
- */
- arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t * Im_in,
- const uint16_t dim_im_in_x,
- const uint16_t dim_im_in_y,
- const uint16_t ch_im_in,
- const q7_t * wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel_x,
- const uint16_t dim_kernel_y,
- const uint16_t padding_x,
- const uint16_t padding_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q7_t * bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q7_t * Im_out,
- const uint16_t dim_im_out_x,
- const uint16_t dim_im_out_y,
- q15_t * bufferA,
- q7_t * bufferB);
-
- /**
- * @brief Fast s8 version for 1x1 convolution (non-square shape)
- *
- * @param[in, out] ctx Function context that contains the additional buffer if required by the implementation.
- arm_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Optional bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> if argument constraints fail. or,
- * <code>ARM_MATH_SUCCESS</code> on successful completion.
- *
- * @details
- * - Supported framework : TensorFlow Lite Micro
- * - The following constrains on the arguments apply
- * -# input_dims->c is a multiple of 4
- * -# conv_params->padding.w = conv_params->padding.h = 0
- * -# conv_params->stride.w = conv_params->stride.h = 1
- *
- */
- arm_status arm_convolve_1x1_s8_fast(const cmsis_nn_context* ctx,
- const cmsis_nn_conv_params* conv_params,
- const cmsis_nn_per_channel_quant_params* quant_params,
- const cmsis_nn_dims* input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims* filter_dims,
- const q7_t *filter_data,
- const cmsis_nn_dims* bias_dims,
- const int32_t *bias_data,
- const cmsis_nn_dims* output_dims,
- q7_t *output_data);
-
- /**
- * @brief Get the required buffer size for arm_convolve_1x1_s8_fast
- *
- * @param[in] input_dims Input (activation) dimensions
- * @return The function returns the required buffer size in bytes
- *
- */
- int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const cmsis_nn_dims* input_dims);
-
- /**
- * @brief 1xn convolution
- *
- * @param[in, out] ctx Function context that contains the additional buffer if required by the implementation.
- arm_convolve_1_x_n_s8_get_buffer_size will return the buffer_size if required
- * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
- * Range of conv_params->input_offset : [-127, 128]
- * Range of conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal spatial filter dimension
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Optional bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out] output_data Output data pointer. Data type: int8
- *
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> if argument constraints fail. or,
- * <code>ARM_MATH_SUCCESS</code> on successful completion.
- *
- * @details
- * - Supported framework : TensorFlow Lite Micro
- * - The following constrains on the arguments apply
- * -# input_dims->n equals 1
- * -# ouput_dims->w is a multiple of 4
- * -# Explicit constraints(since it is for 1xN convolution)
- * -## input_dims->h equals 1
- * -## output_dims->h equals 1
- * -## filter_dims->h equals 1
- *@todo Remove constraint on output_dims->w to make the function generic.
- *
- */
- arm_status arm_convolve_1_x_n_s8(const cmsis_nn_context* ctx,
- const cmsis_nn_conv_params* conv_params,
- const cmsis_nn_per_channel_quant_params* quant_params,
- const cmsis_nn_dims* input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims* filter_dims,
- const q7_t *filter_data,
- const cmsis_nn_dims* bias_dims,
- const int32_t *bias_data,
- const cmsis_nn_dims* output_dims,
- q7_t *output_data);
-
- /**
- * @brief Get the required additional buffer size for 1xn convolution
- *
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal spatial filter dimension
- * @return The function returns required buffer size(bytes)
- *
- */
- int32_t arm_convolve_1_x_n_s8_get_buffer_size(const cmsis_nn_dims* input_dims,
- const cmsis_nn_dims* filter_dims);
-
- /**
- * @brief Q7 version of convolution for RGB image
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
- *
- * This kernel is written exclusively for convolution with ch_im_in
- * equals 3. This applies on the first layer of CNNs which has input
- * image with RGB format.
- */
-
- arm_status arm_convolve_HWC_q7_RGB(const q7_t * Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const q7_t * wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const q7_t * bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q7_t * Im_out,
- const uint16_t dim_im_out,
- q15_t * bufferA,
- q7_t * bufferB);
-
- /**
- * @brief Fast Q15 convolution function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
- *
- * This function is the version with full list of optimization tricks, but with
- * some contraints:
- * ch_im_in is multiple of 2
- * ch_im_out is multiple of 2
- */
-
- arm_status arm_convolve_HWC_q15_fast(const q15_t * Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const q15_t * wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const q15_t * bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q15_t * Im_out,
- const uint16_t dim_im_out,
- q15_t * bufferA,
- q7_t * bufferB);
-
- /**
- * @brief Fast Q15 convolution function (non-sqaure shape)
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in_x input tensor dimension x
- * @param[in] dim_im_in_y input tensor dimension y
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel_x filter kernel size x
- * @param[in] dim_kernel_y filter kernel size y
- * @param[in] padding_x padding size x
- * @param[in] padding_y padding size y
- * @param[in] stride_x convolution stride x
- * @param[in] stride_y convolution stride y
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out_x output tensor dimension x
- * @param[in] dim_im_out_y output tensor dimension y
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
- *
- * @details
- *
- * <b>Buffer size:</b>
- *
- * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel
- *
- * bufferB size: 0
- *
- * <b>Input dimension constraints:</b>
- *
- * ch_im_in is multiple of 2
- *
- * ch_im_out is multipe of 2
- *
- */
-
- arm_status
- arm_convolve_HWC_q15_fast_nonsquare(const q15_t * Im_in,
- const uint16_t dim_im_in_x,
- const uint16_t dim_im_in_y,
- const uint16_t ch_im_in,
- const q15_t * wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel_x,
- const uint16_t dim_kernel_y,
- const uint16_t padding_x,
- const uint16_t padding_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q15_t * bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q15_t * Im_out,
- const uint16_t dim_im_out_x,
- const uint16_t dim_im_out_y,
- q15_t * bufferA,
- q7_t * bufferB);
-
- /**
- * @brief Q7 depthwise separable convolution function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
- *
- * This function is the version with full list of optimization tricks, but with
- * some contraints:
- * ch_im_in is multiple of 2
- * ch_im_out is multiple of 2
- */
-
- arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t * Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const q7_t * wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const q7_t * bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q7_t * Im_out,
- const uint16_t dim_im_out,
- q15_t * bufferA,
- q7_t * bufferB);
-
- /**
- * @brief Q7 depthwise separable convolution function (non-square shape)
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in_x input tensor dimension x
- * @param[in] dim_im_in_y input tensor dimension y
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] wt pointer to kernel weights
- * @param[in] ch_im_out number of filters, i.e., output tensor channels
- * @param[in] dim_kernel_x filter kernel size x
- * @param[in] dim_kernel_y filter kernel size y
- * @param[in] padding_x padding sizes x
- * @param[in] padding_y padding sizes y
- * @param[in] stride_x convolution stride x
- * @param[in] stride_y convolution stride y
- * @param[in] bias pointer to bias
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in,out] Im_out pointer to output tensor
- * @param[in] dim_im_out_x output tensor dimension x
- * @param[in] dim_im_out_y output tensor dimension y
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] bufferB pointer to buffer space for output
- * @return The function returns either
- * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
- *
- * This function is the version with full list of optimization tricks, but with
- * some contraints:
- * ch_im_in is multiple of 2
- * ch_im_out is multiple of 2
- */
- arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t * Im_in,
- const uint16_t dim_im_in_x,
- const uint16_t dim_im_in_y,
- const uint16_t ch_im_in,
- const q7_t * wt,
- const uint16_t ch_im_out,
- const uint16_t dim_kernel_x,
- const uint16_t dim_kernel_y,
- const uint16_t padding_x,
- const uint16_t padding_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q7_t * bias,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- q7_t * Im_out,
- const uint16_t dim_im_out_x,
- const uint16_t dim_im_out_y,
- q15_t * bufferA,
- q7_t * bufferB);
-
- /**
- * @brief Wrapper function to pick the right optimized s8 depthwise convolution function
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if required.
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * dw_conv_params->dilation is not used.
- * Range of dw_conv_params->input_offset : [-127, 128]
- * Range of dw_conv_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each
- * output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * Batch argument N is not used and assumed to be 1.
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in, out] output_data Output data pointer. Data type: int8
- * @return The function returns
- * <code>ARM_MATH_SUCCESS</code> - Successful completion.
- *
- * @details
- * - Supported framework: TensorFlow Lite
- * - Picks one of the the following functions
- * -# arm_depthwise_conv_s8()
- * -# arm_depthwise_conv_3x3_s8() - Cortex-M CPUs with DSP extension only
- * -# arm_depthwise_conv_s8_opt()
- * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs.
- * - Check details of arm_depthwise_conv_s8_opt() for potential data that can be accessed outside of the boundary.
- */
- arm_status arm_depthwise_conv_wrapper_s8(const cmsis_nn_context *ctx,
- const cmsis_nn_dw_conv_params *dw_conv_params,
- const cmsis_nn_per_channel_quant_params *quant_params,
- const cmsis_nn_dims *input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims *filter_dims,
- const q7_t *filter_data,
- const cmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const cmsis_nn_dims *output_dims,
- q7_t *output_data);
-
- /**
- * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s8()
- *
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * dw_conv_params->dilation is not used.
- * Range of dw_conv_params->input_offset : [-127, 128]
- * Range of dw_conv_params->input_offset : [-128, 127]
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * Batch argument N is not used and assumed to be 1.
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @return Size of additional memory required for optimizations in bytes.
- *
- */
- int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params,
- const cmsis_nn_dims *input_dims,
- const cmsis_nn_dims *filter_dims,
- const cmsis_nn_dims *output_dims);
-
- /**
- * @brief Basic s8 depthwise convolution function that doesn't have any constraints on the input dimensions.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * exists if additional memory is.
- * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- * dw_conv_params->dilation is not used.
- * Range of dw_conv_params->input_offset : [-127, 128]
- * Range of dw_conv_params->input_offset : [-128, 127]
- * @param[in] quant_params Per-channel quantization info.
- * It contains the multiplier and shift values to be applied to each
- * output channel
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
- * Batch argument N is not used.
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in, out] output_data Output data pointer. Data type: int8
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- * @details
- * - Supported framework: TensorFlow Lite
- * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs.
- */
- arm_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx,
- const cmsis_nn_dw_conv_params *dw_conv_params,
+/**
+ * @brief Fast s8 version for 1x1 convolution (non-square shape)
+ *
+ * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
+ *                              arm_convolve_1x1_s8_fast_get_buffer_size will return the buffer size if required
+ * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
+ * Range of conv_params->input_offset : [-127, 128]
+ * Range of conv_params->output_offset : [-128, 127]
+ * @param[in] quant_params Per-channel quantization info.
+ * It contains the multiplier and shift values to be applied to each output channel
+ * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in] input_data Input (activation) data pointer. Data type: int8
+ * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
+ * @param[in] filter_data Filter data pointer. Data type: int8
+ * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
+ * @param[in] bias_data Optional bias data pointer. Data type: int32
+ * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
+ * @param[out] output_data Output data pointer. Data type: int8
+ *
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> if argument constraints fail, or
+ * <code>ARM_MATH_SUCCESS</code> on successful completion.
+ *
+ * @details
+ * - Supported framework : TensorFlow Lite Micro
+ * - The following constraints on the arguments apply
+ * -# input_dims->c is a multiple of 4
+ * -# conv_params->padding.w = conv_params->padding.h = 0
+ * -# conv_params->stride.w = conv_params->stride.h = 1
+ *
+ */
+arm_status arm_convolve_1x1_s8_fast(const cmsis_nn_context *ctx,
+ const cmsis_nn_conv_params *conv_params,
const cmsis_nn_per_channel_quant_params *quant_params,
const cmsis_nn_dims *input_dims,
const q7_t *input_data,
@@ -1029,305 +620,717 @@
const cmsis_nn_dims *output_dims,
q7_t *output_data);
- /**
- * @brief Optimized s8 depthwise convolution function for 3x3 kernel size with some constraints on
- * the input arguments(documented below). Refer arm_depthwise_conv_s8() for function
- * argument details.
- *
- * @return The function returns one of the following
- * <code>ARM_MATH_SIZE_MISMATCH</code> - Unsupported dimension of tensors
- * <code>ARM_MATH_ARGUMENT_ERROR</code> - Unsupported pad size along the x axis
- * <code>ARM_MATH_SUCCESS</code> - Successful operation
- *
- * @details
- * - Supported framework : TensorFlow Lite Micro
- * - The following constrains on the arguments apply
- * -# Number of input channel equals number of output channels
- * -# Filter height and width equals 3
- * -# Padding along x is either 0 or 1.
- *
- */
- arm_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx,
- const cmsis_nn_dw_conv_params *dw_conv_params,
- const cmsis_nn_per_channel_quant_params *quant_params,
- const cmsis_nn_dims *input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims *filter_dims,
- const q7_t *filter_data,
- const cmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const cmsis_nn_dims *output_dims,
- q7_t *output_data);
-
- /**
- * @brief Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel.
- * Refer arm_depthwise_conv_s8() for function argument details.
- *
- * @return The function returns one of the following
- * <code>ARM_MATH_SIZE_MISMATCH</code> - input channel != output channel or
- * ch_mult != 1
- * <code>ARM_MATH_SUCCESS</code> - Successful operation
- *
- * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read out
- * for the following if MVE optimizations(Arm Helium Technology) are used.
- * - Output shift
- * - Output multiplier
- * - Output bias
- * - kernel
- * @details
- * - Supported framework: TensorFlow Lite
- * - The following constrains on the arguments apply
- * -# Number of input channel equals number of output channels or ch_mult equals 1
- * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs.
- * - Reccomended when number of channels is 4 or greater.
- *
- */
- arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx,
- const cmsis_nn_dw_conv_params *dw_conv_params,
- const cmsis_nn_per_channel_quant_params *quant_params,
- const cmsis_nn_dims *input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims *filter_dims,
- const q7_t *filter_data,
- const cmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const cmsis_nn_dims *output_dims,
- q7_t *output_data);
-
- /**
- * @brief Get the required buffer size for optimized s8 depthwise convolution
- * function with constraint that in_channel equals out_channel.
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
- * Batch argument N is not used.
- * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const cmsis_nn_dims* input_dims,
- const cmsis_nn_dims* filter_dims);
-
- /**
- * @defgroup FC Fully-connected Layer Functions
+/**
+ * @brief Get the required buffer size for arm_convolve_1x1_s8_fast
*
- * Collection of fully-connected and matrix multiplication functions.
+ * @param[in] input_dims Input (activation) dimensions
+ * @return The function returns the required buffer size in bytes
*
- * Fully-connected layer is basically a matrix-vector multiplication
- * with bias. The matrix is the weights and the input/output vectors
- * are the activation values. Supported {weight, activation} precisions
- * include {8-bit, 8-bit}, {16-bit, 16-bit}, and {8-bit, 16-bit}.
+ */
+int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const cmsis_nn_dims *input_dims);
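To show how the context object and the size helper are meant to be used together, here is a minimal calling sketch. All tensor shapes, buffer names, and quantization values below are hypothetical placeholders, not definitions from this header:

```c
#include "arm_nnfunctions.h"

/* Hypothetical shapes: 4x4 spatial, 8 input channels (a multiple of 4), 16 filters. */
static q7_t in_data[4 * 4 * 8];
static q7_t wt_data[16 * 1 * 1 * 8];
static int32_t bias_data[16];
static int32_t out_mult[16];    /* per-channel requantization multipliers */
static int32_t out_shift_v[16]; /* per-channel requantization shifts */
static q7_t out_data[4 * 4 * 16];
static int8_t scratch[2048];    /* must be at least the size reported by the helper */

arm_status conv_1x1_example(void)
{
    const cmsis_nn_dims input_dims = {1, 4, 4, 8};   /* [N, H, W, C_IN] */
    const cmsis_nn_dims filter_dims = {16, 1, 1, 8}; /* [C_OUT, 1, 1, C_IN] */
    const cmsis_nn_dims bias_dims = {1, 1, 1, 16};
    const cmsis_nn_dims output_dims = {1, 4, 4, 16}; /* [N, H, W, C_OUT] */

    cmsis_nn_conv_params conv_params;
    conv_params.input_offset = 128; /* within [-127, 128] */
    conv_params.output_offset = 0;  /* within [-128, 127] */
    conv_params.stride.w = conv_params.stride.h = 1;   /* constraint of this kernel */
    conv_params.padding.w = conv_params.padding.h = 0; /* constraint of this kernel */
    conv_params.dilation.w = conv_params.dilation.h = 1;
    conv_params.activation.min = -128;
    conv_params.activation.max = 127;

    const cmsis_nn_per_channel_quant_params quant_params = {out_mult, out_shift_v};

    /* Query the scratch requirement and pass the memory in via the context. */
    cmsis_nn_context ctx;
    ctx.size = arm_convolve_1x1_s8_fast_get_buffer_size(&input_dims);
    ctx.buf = scratch;

    return arm_convolve_1x1_s8_fast(&ctx, &conv_params, &quant_params, &input_dims, in_data,
                                    &filter_dims, wt_data, &bias_dims, bias_data,
                                    &output_dims, out_data);
}
```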
+
+/**
+ * @brief 1xn convolution
*
- * Here we have two types of kernel functions. The basic function
- * implements the function using regular GEMV approach. The opt functions
- * operates with weights in interleaved formats.
+ * @param[in, out] ctx Function context that contains the additional buffer if required by the function.
+ *                              arm_convolve_1_x_n_s8_get_buffer_size will return the buffer size if required
+ * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...).
+ * Range of conv_params->input_offset : [-127, 128]
+ * Range of conv_params->output_offset : [-128, 127]
+ * @param[in] quant_params Per-channel quantization info.
+ * It contains the multiplier and shift values to be applied to each output channel
+ * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in] input_data Input (activation) data pointer. Data type: int8
+ * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal
+ * spatial filter dimension
+ * @param[in] filter_data Filter data pointer. Data type: int8
+ * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
+ * @param[in] bias_data Optional bias data pointer. Data type: int32
+ * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT]
+ * @param[out] output_data Output data pointer. Data type: int8
+ *
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> if argument constraints fail, or
+ * <code>ARM_MATH_SUCCESS</code> on successful completion.
+ *
+ * @details
+ * - Supported framework : TensorFlow Lite Micro
+ * - The following constraints on the arguments apply
+ * -# input_dims->n equals 1
+ * -# output_dims->w is a multiple of 4
+ * -# Explicit constraints (since it is a 1xN convolution)
+ * -## input_dims->h equals 1
+ * -## output_dims->h equals 1
+ * -## filter_dims->h equals 1
+ * @todo Remove the constraint on output_dims->w to make the function generic.
+ *
+ */
+arm_status arm_convolve_1_x_n_s8(const cmsis_nn_context *ctx,
+ const cmsis_nn_conv_params *conv_params,
+ const cmsis_nn_per_channel_quant_params *quant_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const q7_t *filter_data,
+ const cmsis_nn_dims *bias_dims,
+ const int32_t *bias_data,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
+
+/**
+ * @brief Get the required additional buffer size for 1xn convolution
+ *
+ * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the
+ * horizontal spatial filter dimension
+ * @return The function returns the required buffer size (bytes)
+ *
+ */
+int32_t arm_convolve_1_x_n_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims);
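A corresponding sketch for the 1xN variant, assuming hypothetical shapes that satisfy the constraints above (H equal to 1, output width a multiple of 4). conv_params is assumed to be prepared as in the 1x1 sketch, except with padding.w = 1:

```c
static q7_t line_in[1 * 8 * 4];
static q7_t line_wt[8 * 1 * 3 * 4];
static int32_t line_bias[8], line_mult[8], line_shift[8];
static q7_t line_out[1 * 8 * 8];
static int8_t line_scratch[1024]; /* must be at least the queried size */

arm_status conv_1xn_example(const cmsis_nn_conv_params *conv_params)
{
    const cmsis_nn_dims input_dims = {1, 1, 8, 4};  /* [N=1, H=1, W, C_IN] */
    const cmsis_nn_dims filter_dims = {8, 1, 3, 4}; /* [C_OUT, 1, WK, C_IN] */
    const cmsis_nn_dims bias_dims = {1, 1, 1, 8};
    const cmsis_nn_dims output_dims = {1, 1, 8, 8}; /* output width 8 is a multiple of 4 */
    const cmsis_nn_per_channel_quant_params quant_params = {line_mult, line_shift};

    cmsis_nn_context ctx;
    ctx.size = arm_convolve_1_x_n_s8_get_buffer_size(&input_dims, &filter_dims);
    ctx.buf = line_scratch;

    return arm_convolve_1_x_n_s8(&ctx, conv_params, &quant_params, &input_dims, line_in,
                                 &filter_dims, line_wt, &bias_dims, line_bias,
                                 &output_dims, line_out);
}
```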
+
+/**
+ * @brief Q7 version of convolution for RGB image
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in input tensor dimension
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel filter kernel size
+ * @param[in] padding padding sizes
+ * @param[in] stride convolution stride
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out output tensor dimension
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
+ *
+ * This kernel is written exclusively for convolutions with ch_im_in
+ * equal to 3. This applies to the first layer of CNNs, which takes an
+ * RGB image as input.
+ */
+
+arm_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in,
+ const uint16_t dim_im_in,
+ const uint16_t ch_im_in,
+ const q7_t *wt,
+ const uint16_t ch_im_out,
+ const uint16_t dim_kernel,
+ const uint16_t padding,
+ const uint16_t stride,
+ const q7_t *bias,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ q7_t *Im_out,
+ const uint16_t dim_im_out,
+ q15_t *bufferA,
+ q7_t *bufferB);
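A sketch of a typical first-layer call, assuming a hypothetical 32x32 RGB input, 5x5 kernels, eight filters, and placeholder shift values; bufferB is passed as NULL on the assumption that this kernel does not use it:

```c
static q7_t image[32 * 32 * 3];
static q7_t rgb_wt[8 * 5 * 5 * 3];
static q7_t rgb_bias[8];
static q7_t rgb_out[28 * 28 * 8];     /* (32 - 5)/1 + 1 = 28 */
static q15_t buffer_a[2 * 3 * 5 * 5]; /* im2col scratch: 2 * ch_im_in * dim_kernel^2 */

arm_status rgb_conv_example(void)
{
    return arm_convolve_HWC_q7_RGB(image, 32, 3, rgb_wt, 8, 5,
                                   0 /* padding */, 1 /* stride */,
                                   rgb_bias, 0 /* bias_shift */, 9 /* out_shift */,
                                   rgb_out, 28, buffer_a, NULL /* bufferB */);
}
```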
+
+/**
+ * @brief Fast Q15 convolution function
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in input tensor dimension
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel filter kernel size
+ * @param[in] padding padding sizes
+ * @param[in] stride convolution stride
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out output tensor dimension
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
+ *
+ * This function is the version with the full list of optimization tricks, but with
+ * some constraints:
+ *   ch_im_in is multiple of 2
+ *   ch_im_out is multiple of 2
+ */
+
+arm_status arm_convolve_HWC_q15_fast(const q15_t *Im_in,
+ const uint16_t dim_im_in,
+ const uint16_t ch_im_in,
+ const q15_t *wt,
+ const uint16_t ch_im_out,
+ const uint16_t dim_kernel,
+ const uint16_t padding,
+ const uint16_t stride,
+ const q15_t *bias,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ q15_t *Im_out,
+ const uint16_t dim_im_out,
+ q15_t *bufferA,
+ q7_t *bufferB);
+
+/**
+ * @brief Fast Q15 convolution function (non-square shape)
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in_x input tensor dimension x
+ * @param[in] dim_im_in_y input tensor dimension y
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel_x filter kernel size x
+ * @param[in] dim_kernel_y filter kernel size y
+ * @param[in] padding_x padding size x
+ * @param[in] padding_y padding size y
+ * @param[in] stride_x convolution stride x
+ * @param[in] stride_y convolution stride y
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out_x output tensor dimension x
+ * @param[in] dim_im_out_y output tensor dimension y
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
+ *
+ * @details
+ *
+ * <b>Buffer size:</b>
+ *
+ * bufferA size: 2*ch_im_in*dim_kernel_x*dim_kernel_y
+ *
+ * bufferB size: 0
+ *
+ * <b>Input dimension constraints:</b>
+ *
+ * ch_im_in is multiple of 2
+ *
+ * ch_im_out is multiple of 2
*
*/
- /**
- * @brief Q7 basic fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- */
+arm_status arm_convolve_HWC_q15_fast_nonsquare(const q15_t *Im_in,
+ const uint16_t dim_im_in_x,
+ const uint16_t dim_im_in_y,
+ const uint16_t ch_im_in,
+ const q15_t *wt,
+ const uint16_t ch_im_out,
+ const uint16_t dim_kernel_x,
+ const uint16_t dim_kernel_y,
+ const uint16_t padding_x,
+ const uint16_t padding_y,
+ const uint16_t stride_x,
+ const uint16_t stride_y,
+ const q15_t *bias,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ q15_t *Im_out,
+ const uint16_t dim_im_out_x,
+ const uint16_t dim_im_out_y,
+ q15_t *bufferA,
+ q7_t *bufferB);
- arm_status arm_fully_connected_q7(const q7_t * pV,
- const q7_t * pM,
+/**
+ * @brief Q7 depthwise separable convolution function
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in input tensor dimension
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel filter kernel size
+ * @param[in] padding padding sizes
+ * @param[in] stride convolution stride
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out output tensor dimension
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
+ *
+ * This function is the version with the full list of optimization tricks, but with
+ * some constraints:
+ *   ch_im_in is multiple of 2
+ *   ch_im_out is multiple of 2
+ */
+
+arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t *Im_in,
+ const uint16_t dim_im_in,
+ const uint16_t ch_im_in,
+ const q7_t *wt,
+ const uint16_t ch_im_out,
+ const uint16_t dim_kernel,
+ const uint16_t padding,
+ const uint16_t stride,
+ const q7_t *bias,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ q7_t *Im_out,
+ const uint16_t dim_im_out,
+ q15_t *bufferA,
+ q7_t *bufferB);
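A sketch with hypothetical shapes that satisfy the stated constraints (channel counts that are multiples of 2, equal in and out); the shift values are placeholders:

```c
static q7_t dw_in[16 * 16 * 8];
static q7_t dw_wt[8 * 3 * 3];
static q7_t dw_bias[8];
static q7_t dw_out[16 * 16 * 8];      /* (16 + 2*1 - 3)/1 + 1 = 16 */
static q15_t dw_buf_a[2 * 8 * 3 * 3]; /* im2col scratch */

arm_status dw_sep_example(void)
{
    return arm_depthwise_separable_conv_HWC_q7(dw_in, 16, 8, dw_wt, 8, 3,
                                               1 /* padding */, 1 /* stride */,
                                               dw_bias, 0 /* bias_shift */, 7 /* out_shift */,
                                               dw_out, 16, dw_buf_a, NULL /* bufferB */);
}
```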
+
+/**
+ * @brief Q7 depthwise separable convolution function (non-square shape)
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in_x input tensor dimension x
+ * @param[in] dim_im_in_y input tensor dimension y
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] wt pointer to kernel weights
+ * @param[in] ch_im_out number of filters, i.e., output tensor channels
+ * @param[in] dim_kernel_x filter kernel size x
+ * @param[in] dim_kernel_y filter kernel size y
+ * @param[in] padding_x padding sizes x
+ * @param[in] padding_y padding sizes y
+ * @param[in] stride_x convolution stride x
+ * @param[in] stride_y convolution stride y
+ * @param[in] bias pointer to bias
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in,out] Im_out pointer to output tensor
+ * @param[in] dim_im_out_x output tensor dimension x
+ * @param[in] dim_im_out_y output tensor dimension y
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] bufferB pointer to buffer space for output
+ * @return The function returns either
+ * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
+ *
+ * This function is the version with the full list of optimization tricks, but with
+ * some constraints:
+ *   ch_im_in is multiple of 2
+ *   ch_im_out is multiple of 2
+ */
+arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in,
+ const uint16_t dim_im_in_x,
+ const uint16_t dim_im_in_y,
+ const uint16_t ch_im_in,
+ const q7_t *wt,
+ const uint16_t ch_im_out,
+ const uint16_t dim_kernel_x,
+ const uint16_t dim_kernel_y,
+ const uint16_t padding_x,
+ const uint16_t padding_y,
+ const uint16_t stride_x,
+ const uint16_t stride_y,
+ const q7_t *bias,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ q7_t *Im_out,
+ const uint16_t dim_im_out_x,
+ const uint16_t dim_im_out_y,
+ q15_t *bufferA,
+ q7_t *bufferB);
+
+/**
+ * @brief Wrapper function to pick the right optimized s8 depthwise convolution function
+ *
+ * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
+ *                                definition file to see if an additional buffer is required.
+ *                                Optional function {API}_get_buffer_size() provides the buffer
+ *                                size if required.
+ * @param[in]      dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
+ *                                dw_conv_params->dilation is not used.
+ *                                Range of dw_conv_params->input_offset : [-127, 128]
+ *                                Range of dw_conv_params->output_offset : [-128, 127]
+ * @param[in]      quant_params   Per-channel quantization info.
+ *                                It contains the multiplier and shift values to be applied to each
+ *                                output channel
+ * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                                Batch argument N is not used and assumed to be 1.
+ * @param[in]      input_data     Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in]      filter_data    Filter data pointer. Data type: int8
+ * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
+ * @param[in]      bias_data      Bias data pointer. Data type: int32
+ * @param[in]      output_dims    Output tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in, out] output_data    Output data pointer. Data type: int8
+ * @return The function returns
+ *         <code>ARM_MATH_SUCCESS</code> - Successful completion.
+ *
+ * @details
+ *    - Supported framework: TensorFlow Lite
+ *    - Picks one of the following functions
+ *        -# arm_depthwise_conv_s8()
+ *        -# arm_depthwise_conv_3x3_s8() - Cortex-M CPUs with DSP extension only
+ *        -# arm_depthwise_conv_s8_opt()
+ *    - q7 is used as the data type even though it is s8 data. This is done to be consistent with existing APIs.
+ *    - Check details of arm_depthwise_conv_s8_opt() for potential data that can be accessed outside of the boundary.
+ */
+arm_status arm_depthwise_conv_wrapper_s8(const cmsis_nn_context *ctx,
+ const cmsis_nn_dw_conv_params *dw_conv_params,
+ const cmsis_nn_per_channel_quant_params *quant_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const q7_t *filter_data,
+ const cmsis_nn_dims *bias_dims,
+ const int32_t *bias_data,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
+
+/**
+ * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s8()
+ *
+ * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
+ *                           dw_conv_params->dilation is not used.
+ *                           Range of dw_conv_params->input_offset : [-127, 128]
+ *                           Range of dw_conv_params->output_offset : [-128, 127]
+ * @param[in] input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                           Batch argument N is not used and assumed to be 1.
+ * @param[in] filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in] output_dims    Output tensor dimensions. Format: [1, H, W, C_OUT]
+ * @return Size of additional memory required for optimizations in bytes.
+ *
+ */
+int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params,
+ const cmsis_nn_dims *input_dims,
+ const cmsis_nn_dims *filter_dims,
+ const cmsis_nn_dims *output_dims);
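Putting the wrapper and its size helper together, a minimal sketch with hypothetical shapes (a 3x3 'same'-padded depthwise layer over an 8x8x16 map) and placeholder quantization arrays:

```c
#include "arm_nnfunctions.h"

static q7_t dwc_in[8 * 8 * 16];
static q7_t dwc_wt[3 * 3 * 16];
static int32_t dwc_bias[16];
static int32_t dwc_mult[16], dwc_shift[16]; /* per-channel requantization parameters */
static q7_t dwc_out[8 * 8 * 16];
static int8_t dwc_scratch[2048]; /* must cover the queried buffer size */

arm_status dw_wrapper_example(void)
{
    const cmsis_nn_dims input_dims = {1, 8, 8, 16};
    const cmsis_nn_dims filter_dims = {1, 3, 3, 16};
    const cmsis_nn_dims bias_dims = {1, 1, 1, 16};
    const cmsis_nn_dims output_dims = {1, 8, 8, 16};

    cmsis_nn_dw_conv_params dw_params;
    dw_params.input_offset = 128;
    dw_params.output_offset = 0;
    dw_params.ch_mult = 1; /* in_ch == out_ch */
    dw_params.stride.w = dw_params.stride.h = 1;
    dw_params.padding.w = dw_params.padding.h = 1;
    dw_params.dilation.w = dw_params.dilation.h = 1; /* not used by the wrapper */
    dw_params.activation.min = -128;
    dw_params.activation.max = 127;

    const cmsis_nn_per_channel_quant_params quant_params = {dwc_mult, dwc_shift};

    cmsis_nn_context ctx;
    ctx.size = arm_depthwise_conv_wrapper_s8_get_buffer_size(&dw_params, &input_dims,
                                                             &filter_dims, &output_dims);
    ctx.buf = dwc_scratch;

    return arm_depthwise_conv_wrapper_s8(&ctx, &dw_params, &quant_params, &input_dims, dwc_in,
                                         &filter_dims, dwc_wt, &bias_dims, dwc_bias,
                                         &output_dims, dwc_out);
}
```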
+
+/**
+ * @brief Basic s8 depthwise convolution function that doesn't have any constraints on the input dimensions.
+ *
+ * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
+ *                                definition file to see if an additional buffer is required.
+ *                                Optional function {API}_get_buffer_size() provides the buffer
+ *                                size if an additional buffer is required.
+ * @param[in]      dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
+ *                                dw_conv_params->dilation is not used.
+ *                                Range of dw_conv_params->input_offset : [-127, 128]
+ *                                Range of dw_conv_params->output_offset : [-128, 127]
+ * @param[in]      quant_params   Per-channel quantization info.
+ *                                It contains the multiplier and shift values to be applied to each
+ *                                output channel
+ * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
+ *                                Batch argument N is not used.
+ * @param[in]      input_data     Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in]      filter_data    Filter data pointer. Data type: int8
+ * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
+ * @param[in]      bias_data      Bias data pointer. Data type: int32
+ * @param[in]      output_dims    Output tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in, out] output_data    Output data pointer. Data type: int8
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ * @details
+ *    - Supported framework: TensorFlow Lite
+ *    - q7 is used as the data type even though it is s8 data. This is done to be consistent with existing APIs.
+ */
+arm_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx,
+ const cmsis_nn_dw_conv_params *dw_conv_params,
+ const cmsis_nn_per_channel_quant_params *quant_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const q7_t *filter_data,
+ const cmsis_nn_dims *bias_dims,
+ const int32_t *bias_data,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
+
+/**
+ * @brief Optimized s8 depthwise convolution function for 3x3 kernel size with some constraints on
+ *        the input arguments (documented below). Refer to arm_depthwise_conv_s8() for function
+ *        argument details.
+ *
+ * @return The function returns one of the following
+ *         <code>ARM_MATH_SIZE_MISMATCH</code> - Unsupported dimension of tensors
+ *         <code>ARM_MATH_ARGUMENT_ERROR</code> - Unsupported pad size along the x axis
+ *         <code>ARM_MATH_SUCCESS</code> - Successful operation
+ *
+ * @details
+ *    - Supported framework : TensorFlow Lite Micro
+ *    - The following constraints on the arguments apply
+ *        -# Number of input channels equals number of output channels
+ *        -# Filter height and width equals 3
+ *        -# Padding along x is either 0 or 1.
+ *
+ */
+arm_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx,
+ const cmsis_nn_dw_conv_params *dw_conv_params,
+ const cmsis_nn_per_channel_quant_params *quant_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const q7_t *filter_data,
+ const cmsis_nn_dims *bias_dims,
+ const int32_t *bias_data,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
+
+/**
+ * @brief Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel.
+ *        Refer to arm_depthwise_conv_s8() for function argument details.
+ *
+ * @return The function returns one of the following
+ *         <code>ARM_MATH_SIZE_MISMATCH</code> - input channel != output channel or
+ *                                               ch_mult != 1
+ *         <code>ARM_MATH_SUCCESS</code> - Successful operation
+ *
+ * @note If the number of channels is not a multiple of 4, up to 3 elements outside the boundary will be read
+ *       for the following if MVE optimizations (Arm Helium Technology) are used.
+ *       - Output shift
+ *       - Output multiplier
+ *       - Output bias
+ *       - kernel
+ * @details
+ *    - Supported framework: TensorFlow Lite
+ *    - The following constraints on the arguments apply
+ *        -# Number of input channels equals number of output channels or ch_mult equals 1
+ *    - q7 is used as the data type even though it is s8 data. This is done to be consistent with existing APIs.
+ *    - Recommended when the number of channels is 4 or greater.
+ *
+ */
+arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx,
+ const cmsis_nn_dw_conv_params *dw_conv_params,
+ const cmsis_nn_per_channel_quant_params *quant_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const q7_t *filter_data,
+ const cmsis_nn_dims *bias_dims,
+ const int32_t *bias_data,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
+
+/**
+ * @brief Get the required buffer size for optimized s8 depthwise convolution
+ *        function with constraint that in_channel equals out_channel.
+ * @param[in] input_dims  Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
+ *                        Batch argument N is not used.
+ * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @return The function returns required buffer size in bytes
+ *
+ */
+int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims);
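A short sketch of the intended pattern, with hypothetical dimensions: query the size first, then hand the memory to arm_depthwise_conv_s8_opt() through the context:

```c
static int8_t dw_opt_scratch[1024]; /* must be at least the queried size */

void dw_opt_ctx_setup(cmsis_nn_context *ctx)
{
    const cmsis_nn_dims input_dims = {1, 8, 8, 16}; /* [1, H, W, C_IN] */
    const cmsis_nn_dims filter_dims = {1, 3, 3, 16};

    ctx->size = arm_depthwise_conv_s8_opt_get_buffer_size(&input_dims, &filter_dims);
    ctx->buf = dw_opt_scratch;
}
```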
+
+/**
+ * @defgroup FC Fully-connected Layer Functions
+ *
+ * Collection of fully-connected and matrix multiplication functions.
+ *
+ * A fully-connected layer is basically a matrix-vector multiplication
+ * with bias. The matrix is the weights and the input/output vectors
+ * are the activation values. Supported {weight, activation} precisions
+ * include {8-bit, 8-bit}, {16-bit, 16-bit}, and {8-bit, 16-bit}.
+ *
+ * Here we have two types of kernel functions. The basic function
+ * implements the function using a regular GEMV approach. The opt functions
+ * operate on weights in interleaved formats.
+ *
+ */
+
+/**
+ * @brief Q7 basic fully-connected layer function
+ * @param[in]     pV          pointer to input vector
+ * @param[in]     pM          pointer to matrix weights
+ * @param[in]     dim_vec     length of the vector
+ * @param[in]     num_of_rows number of rows in weight matrix
+ * @param[in]     bias_shift  amount of left-shift for bias
+ * @param[in]     out_shift   amount of right-shift for output
+ * @param[in]     bias        pointer to bias
+ * @param[in,out] pOut        pointer to output vector
+ * @param[in,out] vec_buffer  pointer to buffer space for input
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ */
+
+arm_status arm_fully_connected_q7(const q7_t *pV,
+ const q7_t *pM,
+ const uint16_t dim_vec,
+ const uint16_t num_of_rows,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ const q7_t *bias,
+ q7_t *pOut,
+ q15_t *vec_buffer);
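A minimal sketch of the q7 fully-connected call, assuming a hypothetical 64-input, 10-output layer and placeholder shift values; vec_buffer is sized to dim_vec:

```c
static q7_t fc_in[64];
static q7_t fc_wt[10 * 64];
static q7_t fc_bias[10];
static q7_t fc_out[10];
static q15_t fc_vec_buf[64]; /* scratch for the expanded input vector */

arm_status fc_q7_example(void)
{
    return arm_fully_connected_q7(fc_in, fc_wt, 64, 10,
                                  0 /* bias_shift */, 7 /* out_shift */,
                                  fc_bias, fc_out, fc_vec_buf);
}
```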
+
+/**
+ * @brief Basic s8 Fully Connected function.
+ *
+ * @param[in, out] ctx          Function context (e.g. temporary buffer). Check the function
+ *                              definition file to see if an additional buffer is required.
+ *                              Optional function {API}_get_buffer_size() provides the buffer
+ *                              size if an additional buffer is required.
+ * @param[in]      fc_params    Fully Connected layer parameters.
+ *                              Range of fc_params->input_offset  : [-127, 128]
+ *                              Range of fc_params->filter_offset : [-127, 128]
+ *                              Range of fc_params->output_offset : [-128, 127]
+ * @param[in]      quant_params Per-tensor quantization info.
+ *                              It contains the multiplier and shift values to be applied to the output tensor.
+ * @param[in]      input_dims   Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ *                              Input dimension is taken as Nx(H * W * C_IN)
+ * @param[in]      input_data   Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims  Two dimensional filter dimensions. Format: [N, C]
+ *                              N : accumulation depth and equals (H * W * C_IN) from input_dims
+ *                              C : output depth and equals C_OUT in output_dims
+ *                              H & W : Not used
+ * @param[in]      filter_data  Filter data pointer. Data type: int8
+ * @param[in]      bias_dims    Bias tensor dimensions. Format: [C_OUT]
+ *                              N, H, W : Not used
+ * @param[in]      bias_data    Bias data pointer. Data type: int32
+ * @param[in]      output_dims  Output tensor dimensions. Format: [N, C_OUT]
+ *                              N : Batches
+ *                              C_OUT : Output depth
+ *                              H & W : Not used.
+ * @param[in, out] output_data  Output data pointer. Data type: int8
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ * @details
+ *    - Supported framework: TensorFlow Lite
+ *    - q7 is used as the data type even though it is s8 data. This is done to be consistent with existing APIs.
+ */
+arm_status arm_fully_connected_s8(const cmsis_nn_context *ctx,
+ const cmsis_nn_fc_params *fc_params,
+ const cmsis_nn_per_tensor_quant_params *quant_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const q7_t *filter_data,
+ const cmsis_nn_dims *bias_dims,
+ const int32_t *bias_data,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
+
+/**
+ * @brief Get the required buffer size for S8 basic fully-connected and
+ * matrix multiplication layer function for TF Lite
+ * @param[in] filter_dims dimension of filter
+ * @return The function returns required buffer size in bytes
+ *
+ */
+int32_t arm_fully_connected_s8_get_buffer_size(const cmsis_nn_dims *filter_dims);
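A calling sketch for the s8 variant, assuming a hypothetical one-batch 64-input, 10-output layer; the per-tensor multiplier/shift values are placeholders for what a framework would compute from the tensor scales:

```c
#include "arm_nnfunctions.h"

static q7_t fc_input[64];
static q7_t fc_weights[10 * 64];
static int32_t fc_bias32[10];
static q7_t fc_output[10];
static int8_t fc_scratch[256]; /* must cover the queried buffer size */

arm_status fc_s8_example(void)
{
    const cmsis_nn_dims input_dims = {1, 1, 1, 64};   /* N x (H * W * C_IN) = 1 x 64 */
    const cmsis_nn_dims filter_dims = {64, 1, 1, 10}; /* N: accumulation depth, C: output depth */
    const cmsis_nn_dims bias_dims = {1, 1, 1, 10};
    const cmsis_nn_dims output_dims = {1, 1, 1, 10};

    cmsis_nn_fc_params fc_params;
    fc_params.input_offset = 128;
    fc_params.filter_offset = 0;
    fc_params.output_offset = 0;
    fc_params.activation.min = -128;
    fc_params.activation.max = 127;

    /* Placeholder per-tensor requantization values. */
    const cmsis_nn_per_tensor_quant_params quant_params = {1073741824 /* multiplier */, -1 /* shift */};

    cmsis_nn_context ctx;
    ctx.size = arm_fully_connected_s8_get_buffer_size(&filter_dims);
    ctx.buf = fc_scratch;

    return arm_fully_connected_s8(&ctx, &fc_params, &quant_params, &input_dims, fc_input,
                                  &filter_dims, fc_weights, &bias_dims, fc_bias32,
                                  &output_dims, fc_output);
}
```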
+
+/**
+ * @brief Q7 opt fully-connected layer function
+ * @param[in] pV pointer to input vector
+ * @param[in] pM pointer to matrix weights
+ * @param[in] dim_vec length of the vector
+ * @param[in] num_of_rows number of rows in weight matrix
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in] bias pointer to bias
+ * @param[in,out] pOut pointer to output vector
+ * @param[in,out] vec_buffer pointer to buffer space for input
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ */
+
+arm_status arm_fully_connected_q7_opt(const q7_t *pV,
+ const q7_t *pM,
const uint16_t dim_vec,
const uint16_t num_of_rows,
const uint16_t bias_shift,
const uint16_t out_shift,
- const q7_t * bias,
- q7_t * pOut,
- q15_t * vec_buffer);
+ const q7_t *bias,
+ q7_t *pOut,
+ q15_t *vec_buffer);
- /**
- * @brief Basic s8 Fully Connected function.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * @param[in] fc_params Fully Connected layer parameters (e.g. strides, dilations, pads,...)
- * Range of fc_params->input_offset : [-127, 128]
- * Range of fc_params->filter_offset : [-127, 128]
- * Range of fc_params->output_offset : [-128, 127]
- * @param[in] quant_params Per-tensor quantization info.
- * It contains the multiplier and shift values to be applied to the output tensor.
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * Input dimension is taken as Nx(H * W * C_IN)
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C]
- * N : accumulation depth and equals (H * W * C_IN) from input_dims
- * C : output depth and equals C_OUT in output_dims
- * H & W : Not used
- * @param[in] filter_data Filter data pointer. Data type: int8
- * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT]
- * N, H, W : Not used
- * @param[in] bias_data Bias data pointer. Data type: int32
- * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT]
- * N : Batches
- * C_OUT : Output depth
- * H & W : Not used.
- * @param[in, out] output_data Output data pointer. Data type: int8
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- * @details
- * - Supported framework: TensorFlow Lite
- * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs.
- */
- arm_status
- arm_fully_connected_s8(const cmsis_nn_context *ctx,
- const cmsis_nn_fc_params *fc_params,
- const cmsis_nn_per_tensor_quant_params *quant_params,
- const cmsis_nn_dims *input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims *filter_dims,
- const q7_t *filter_data,
- const cmsis_nn_dims *bias_dims,
- const int32_t *bias_data,
- const cmsis_nn_dims *output_dims,
- q7_t *output_data);
+/**
+ * @brief Q15 basic fully-connected layer function
+ * @param[in] pV pointer to input vector
+ * @param[in] pM pointer to matrix weights
+ * @param[in] dim_vec length of the vector
+ * @param[in] num_of_rows number of rows in weight matrix
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in] bias pointer to bias
+ * @param[in,out] pOut pointer to output vector
+ * @param[in,out] vec_buffer pointer to buffer space for input
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ */
- /**
- * @brief Get the required buffer size for S8 basic fully-connected and
- * matrix multiplication layer function for TF Lite
- * @param[in] filter_dims dimension of filter
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t arm_fully_connected_s8_get_buffer_size(const cmsis_nn_dims *filter_dims);
+arm_status arm_fully_connected_q15(const q15_t *pV,
+ const q15_t *pM,
+ const uint16_t dim_vec,
+ const uint16_t num_of_rows,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ const q15_t *bias,
+ q15_t *pOut,
+ q15_t *vec_buffer);
- /**
- * @brief Q7 opt fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- */
+/**
+ * @brief Q15 opt fully-connected layer function
+ * @param[in] pV pointer to input vector
+ * @param[in] pM pointer to matrix weights
+ * @param[in] dim_vec length of the vector
+ * @param[in] num_of_rows number of rows in weight matrix
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in] bias pointer to bias
+ * @param[in,out] pOut pointer to output vector
+ * @param[in,out] vec_buffer pointer to buffer space for input
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ */
- arm_status arm_fully_connected_q7_opt(const q7_t * pV,
- const q7_t * pM,
- const uint16_t dim_vec,
- const uint16_t num_of_rows,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q7_t * bias,
- q7_t * pOut,
- q15_t * vec_buffer);
-
- /**
- * @brief Q15 basic fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- */
-
- arm_status arm_fully_connected_q15(const q15_t * pV,
- const q15_t * pM,
+arm_status arm_fully_connected_q15_opt(const q15_t *pV,
+ const q15_t *pM,
const uint16_t dim_vec,
const uint16_t num_of_rows,
const uint16_t bias_shift,
const uint16_t out_shift,
- const q15_t * bias,
- q15_t * pOut,
- q15_t * vec_buffer);
+ const q15_t *bias,
+ q15_t *pOut,
+ q15_t *vec_buffer);
- /**
- * @brief Q15 opt fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- */
+/**
+ * @brief Mixed Q15-Q7 fully-connected layer function
+ * @param[in] pV pointer to input vector
+ * @param[in] pM pointer to matrix weights
+ * @param[in] dim_vec length of the vector
+ * @param[in] num_of_rows number of rows in weight matrix
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in] bias pointer to bias
+ * @param[in,out] pOut pointer to output vector
+ * @param[in,out] vec_buffer pointer to buffer space for input
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ */
- arm_status arm_fully_connected_q15_opt(const q15_t * pV,
- const q15_t * pM,
- const uint16_t dim_vec,
- const uint16_t num_of_rows,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q15_t * bias,
- q15_t * pOut,
- q15_t * vec_buffer);
+arm_status arm_fully_connected_mat_q7_vec_q15(const q15_t *pV,
+ const q7_t *pM,
+ const uint16_t dim_vec,
+ const uint16_t num_of_rows,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ const q7_t *bias,
+ q15_t *pOut,
+ q15_t *vec_buffer);
- /**
- * @brief Mixed Q15-Q7 fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- */
+/**
+ * @brief Mixed Q15-Q7 opt fully-connected layer function
+ * @param[in] pV pointer to input vector
+ * @param[in] pM pointer to matrix weights
+ * @param[in] dim_vec length of the vector
+ * @param[in] num_of_rows number of rows in weight matrix
+ * @param[in] bias_shift amount of left-shift for bias
+ * @param[in] out_shift amount of right-shift for output
+ * @param[in] bias pointer to bias
+ * @param[in,out] pOut pointer to output vector
+ * @param[in,out] vec_buffer pointer to buffer space for input
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ */
- arm_status arm_fully_connected_mat_q7_vec_q15(const q15_t * pV,
- const q7_t * pM,
+arm_status arm_fully_connected_mat_q7_vec_q15_opt(const q15_t *pV,
+ const q7_t *pM,
const uint16_t dim_vec,
const uint16_t num_of_rows,
const uint16_t bias_shift,
const uint16_t out_shift,
- const q7_t * bias,
- q15_t * pOut,
- q15_t * vec_buffer);
-
- /**
- * @brief Mixed Q15-Q7 opt fully-connected layer function
- * @param[in] pV pointer to input vector
- * @param[in] pM pointer to matrix weights
- * @param[in] dim_vec length of the vector
- * @param[in] num_of_rows number of rows in weight matrix
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias pointer to bias
- * @param[in,out] pOut pointer to output vector
- * @param[in,out] vec_buffer pointer to buffer space for input
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- */
-
- arm_status arm_fully_connected_mat_q7_vec_q15_opt(const q15_t * pV,
- const q7_t * pM,
- const uint16_t dim_vec,
- const uint16_t num_of_rows,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q7_t * bias,
- q15_t * pOut,
- q15_t * vec_buffer);
+ const q7_t *bias,
+ q15_t *pOut,
+ q15_t *vec_buffer);
/**
* @brief Matrix-Multiplication Kernels for Convolution
@@ -1341,108 +1344,108 @@
*
*/
- /**
- * @brief Matrix-multiplication function for convolution
- * @param[in] pA pointer to operand A
- * @param[in] pInBuffer pointer to operand B, always conssists of 2 vectors
- * @param[in] ch_im_out numRow of A
- * @param[in] numCol_A numCol of A
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias the bias
- * @param[in,out] pOut pointer to output
- * @return The function returns the incremented output pointer
- */
+/**
+ * @brief Matrix-multiplication function for convolution
+ * @param[in]     pA         pointer to operand A
+ * @param[in]     pInBuffer  pointer to operand B, always consists of 2 vectors
+ * @param[in]     ch_im_out  numRow of A
+ * @param[in]     numCol_A   numCol of A
+ * @param[in]     bias_shift amount of left-shift for bias
+ * @param[in]     out_shift  amount of right-shift for output
+ * @param[in]     bias       the bias
+ * @param[in,out] pOut       pointer to output
+ * @return The function returns the incremented output pointer
+ */
- q7_t *arm_nn_mat_mult_kernel_q7_q15(const q7_t * pA,
- const q15_t * pInBuffer,
- const uint16_t ch_im_out,
- const uint16_t numCol_A,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q7_t * bias,
- q7_t * pOut);
- /**
- * @brief Matrix-multiplication function for convolution with per-channel requantization.
- * @param[in] input_a pointer to operand A
- * @param[in] input_b pointer to operand B, always consists of 2 vectors.
- * @param[in] output_ch number of rows of A
- * @param[in] out_shift pointer to per output channel requantization shift parameter.
- * @param[in] out_mult pointer to per output channel requantization multiplier parameter.
- * @param[in] out_offset output tensor offset.
- * @param[in] activation_min minimum value to clamp the output to. Range : int8
- * @param[in] activation_max maximum value to clamp the output to. Range : int8
- * @param[in] num_col_a number of columns of A
- * @param[in] output_bias per output channel bias. Range : int32
- * @param[in,out] out_0 pointer to output
- * @return The function returns one of the two
- * 1. The incremented output pointer for a successful operation or
- * 2. NULL if implementation is not available.
- *
- * @details This function does the matrix multiplication of weight matrix for all output channels
- * with 2 columns from im2col and produces two elements/output_channel. The outputs are
- * clamped in the range provided by activation min and max.
- * Supported framework: TensorFlow Lite micro.
- */
- q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a,
- const q15_t *input_b,
- const uint16_t output_ch,
- const int32_t *out_shift,
- const int32_t *out_mult,
- const int32_t out_offset,
- const int16_t activation_min,
- const int16_t activation_max,
- const uint16_t num_col_a,
- const int32_t *const output_bias,
- q7_t *out_0);
+q7_t *arm_nn_mat_mult_kernel_q7_q15(const q7_t *pA,
+ const q15_t *pInBuffer,
+ const uint16_t ch_im_out,
+ const uint16_t numCol_A,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ const q7_t *bias,
+ q7_t *pOut);
+/**
+ * @brief Matrix-multiplication function for convolution with per-channel requantization.
+ * @param[in]     input_a        pointer to operand A
+ * @param[in]     input_b        pointer to operand B, always consists of 2 vectors.
+ * @param[in]     output_ch      number of rows of A
+ * @param[in]     out_shift      pointer to per output channel requantization shift parameter.
+ * @param[in]     out_mult       pointer to per output channel requantization multiplier parameter.
+ * @param[in]     out_offset     output tensor offset.
+ * @param[in]     activation_min minimum value to clamp the output to. Range : int8
+ * @param[in]     activation_max maximum value to clamp the output to. Range : int8
+ * @param[in]     num_col_a      number of columns of A
+ * @param[in]     output_bias    per output channel bias. Range : int32
+ * @param[in,out] out_0          pointer to output
+ * @return The function returns one of the two
+ *         1. The incremented output pointer for a successful operation or
+ *         2. NULL if implementation is not available.
+ *
+ * @details This function does the matrix multiplication of the weight matrix for all output channels
+ *          with 2 columns from im2col and produces two elements/output_channel. The outputs are
+ *          clamped in the range provided by activation min and max.
+ *          Supported framework: TensorFlow Lite micro.
+ */
+q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a,
+ const q15_t *input_b,
+ const uint16_t output_ch,
+ const int32_t *out_shift,
+ const int32_t *out_mult,
+ const int32_t out_offset,
+ const int16_t activation_min,
+ const int16_t activation_max,
+ const uint16_t num_col_a,
+ const int32_t *const output_bias,
+ q7_t *out_0);
- /**
- * @brief Matrix-multiplication of re-ordered input B with A.
- *
- * @details For arguments, refer arm_nn_mat_mult_kernel_s8_s16. The re-ordering is a consequence
- * of sign extension done by the SXTB16 command on input_b. The outputs are clamped in the range
- * provided by activation min and max.
- * * @details
- * - Supported framework : TensorFlow Lite Micro
- * - The following constrains on the arguments apply
- * -# num_col_a is a multiple of 4
- * -# output_ch is a multiple of 2
- *
- */
- q7_t *arm_nn_mat_mult_kernel_s8_s16_reordered(const q7_t *input_a,
- const q15_t *input_b,
- const uint16_t output_ch,
- const int32_t *out_shift,
- const int32_t *out_mult,
- const int32_t out_offset,
- const int16_t activation_min,
- const int16_t activation_max,
- const uint16_t num_col_a,
- const int32_t *const output_bias,
- q7_t *out_0);
+/**
+ * @brief Matrix-multiplication of re-ordered input B with A.
+ *
+ * @details For arguments, refer to arm_nn_mat_mult_kernel_s8_s16. The re-ordering is a consequence
+ *          of the sign extension done by the SXTB16 instruction on input_b. The outputs are clamped
+ *          in the range provided by activation min and max.
+ *    - Supported framework : TensorFlow Lite Micro
+ *    - The following constraints on the arguments apply
+ *        -# num_col_a is a multiple of 4
+ *        -# output_ch is a multiple of 2
+ *
+ */
+q7_t *arm_nn_mat_mult_kernel_s8_s16_reordered(const q7_t *input_a,
+ const q15_t *input_b,
+ const uint16_t output_ch,
+ const int32_t *out_shift,
+ const int32_t *out_mult,
+ const int32_t out_offset,
+ const int16_t activation_min,
+ const int16_t activation_max,
+ const uint16_t num_col_a,
+ const int32_t *const output_bias,
+ q7_t *out_0);
- /**
- * @brief Matrix-multiplication function for convolution with reordered columns
- * @param[in] pA pointer to operand A
- * @param[in] pInBuffer pointer to operand B, always conssists of 2 vectors
- * @param[in] ch_im_out numRow of A
- * @param[in] numCol_A numCol of A
- * @param[in] bias_shift amount of left-shift for bias
- * @param[in] out_shift amount of right-shift for output
- * @param[in] bias the bias
- * @param[in,out] pOut pointer to output
- * @return The function returns the incremented output pointer
- *
- * @details This function assumes that data in pInBuffer are reordered
- */
- q7_t *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t * pA,
- const q15_t * pInBuffer,
- const uint16_t ch_im_out,
- const uint16_t numCol_A,
- const uint16_t bias_shift,
- const uint16_t out_shift,
- const q7_t * bias,
- q7_t * pOut);
+/**
+ * @brief Matrix-multiplication function for convolution with reordered columns
+ * @param[in]     pA         pointer to operand A
+ * @param[in]     pInBuffer  pointer to operand B, always consists of 2 vectors
+ * @param[in]     ch_im_out  numRow of A
+ * @param[in]     numCol_A   numCol of A
+ * @param[in]     bias_shift amount of left-shift for bias
+ * @param[in]     out_shift  amount of right-shift for output
+ * @param[in]     bias       the bias
+ * @param[in,out] pOut       pointer to output
+ * @return The function returns the incremented output pointer
+ *
+ * @details This function assumes that data in pInBuffer are reordered
+ */
+q7_t *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t *pA,
+ const q15_t *pInBuffer,
+ const uint16_t ch_im_out,
+ const uint16_t numCol_A,
+ const uint16_t bias_shift,
+ const uint16_t out_shift,
+ const q7_t *bias,
+ q7_t *pOut);
#ifdef __cplusplus
}
@@ -1455,8 +1458,7 @@
*/
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
/**
@@ -1486,22 +1488,22 @@
* @param[in] block_size number of samples
* @return The function returns ARM_MATH_SUCCESS
*/
- arm_status arm_elementwise_add_s8(const int8_t *input_1_vect,
- const int8_t *input_2_vect,
- const int32_t input_1_offset,
- const int32_t input_1_mult,
- const int32_t input_1_shift,
- const int32_t input_2_offset,
- const int32_t input_2_mult,
- const int32_t input_2_shift,
- const int32_t left_shift,
- int8_t *output,
- const int32_t out_offset,
- const int32_t out_mult,
- const int32_t out_shift,
- const int32_t out_activation_min,
- const int32_t out_activation_max,
- const uint32_t block_size);
+arm_status arm_elementwise_add_s8(const int8_t *input_1_vect,
+ const int8_t *input_2_vect,
+ const int32_t input_1_offset,
+ const int32_t input_1_mult,
+ const int32_t input_1_shift,
+ const int32_t input_2_offset,
+ const int32_t input_2_mult,
+ const int32_t input_2_shift,
+ const int32_t left_shift,
+ int8_t *output,
+ const int32_t out_offset,
+ const int32_t out_mult,
+ const int32_t out_shift,
+ const int32_t out_activation_min,
+ const int32_t out_activation_max,
+ const uint32_t block_size);
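A sketch of a requantized add over 64 elements; every offset, multiplier, and shift below is a placeholder for values a framework would precompute from the tensors' scales:

```c
static int8_t in1[64], in2[64], sum_out[64];

arm_status add_example(void)
{
    return arm_elementwise_add_s8(in1, in2,
                                  128,             /* input_1_offset */
                                  1073741824, 0,   /* input_1_mult, input_1_shift */
                                  128,             /* input_2_offset */
                                  1073741824, 0,   /* input_2_mult, input_2_shift */
                                  20,              /* left_shift */
                                  sum_out,
                                  -128,            /* out_offset */
                                  1073741824, -19, /* out_mult, out_shift */
                                  -128, 127,       /* out_activation_min/max */
                                  64);             /* block_size */
}
```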
/**
* @brief s8 element wise multiplication
@@ -1520,17 +1522,17 @@
*
* @details Supported framework: TensorFlow Lite micro
*/
- arm_status arm_elementwise_mul_s8(const int8_t *input_1_vect,
- const int8_t *input_2_vect,
- const int32_t input_1_offset,
- const int32_t input_2_offset,
- int8_t *output,
- const int32_t out_offset,
- const int32_t out_mult,
- const int32_t out_shift,
- const int32_t out_activation_min,
- const int32_t out_activation_max,
- const uint32_t block_size);
+arm_status arm_elementwise_mul_s8(const int8_t *input_1_vect,
+ const int8_t *input_2_vect,
+ const int32_t input_1_offset,
+ const int32_t input_2_offset,
+ int8_t *output,
+ const int32_t out_offset,
+ const int32_t out_mult,
+ const int32_t out_shift,
+ const int32_t out_activation_min,
+ const int32_t out_activation_max,
+ const uint32_t block_size);
/**
* @defgroup Acti Activation Functions
*
@@ -1539,63 +1541,61 @@
*
*/
- /**
- * @brief Q7 RELU function
- * @param[in,out] data pointer to input
- * @param[in] size number of elements
- * @return none.
- */
+/**
+ * @brief Q7 RELU function
+ * @param[in,out] data pointer to input
+ * @param[in] size number of elements
+ * @return none.
+ */
- void arm_relu_q7(q7_t *data, uint16_t size);
+void arm_relu_q7(q7_t *data, uint16_t size);
- /**
- * @brief s8 ReLU6 function
- * @param[in,out] data pointer to input
- * @param[in] size number of elements
- */
+/**
+ * @brief s8 ReLU6 function
+ * @param[in,out] data pointer to input
+ * @param[in] size number of elements
+ */
- void arm_relu6_s8(q7_t *data, uint16_t size);
+void arm_relu6_s8(q7_t *data, uint16_t size);
- /**
- * @brief Q15 RELU function
- * @param[in,out] data pointer to input
- * @param[in] size number of elements
- * @return none.
- */
+/**
+ * @brief Q15 RELU function
+ * @param[in,out] data pointer to input
+ * @param[in] size number of elements
+ * @return none.
+ */
- void arm_relu_q15(q15_t *data, uint16_t size);
+void arm_relu_q15(q15_t *data, uint16_t size);
- /**
- * @brief Q7 neural network activation function using direct table look-up
- * @param[in,out] data pointer to input
- * @param[in] size number of elements
- * @param[in] int_width bit-width of the integer part, assume to be smaller than 3
- * @param[in] type type of activation functions
- * @return none.
- */
+/**
+ * @brief Q7 neural network activation function using direct table look-up
+ * @param[in,out] data pointer to input
+ * @param[in] size number of elements
+ * @param[in] int_width bit-width of the integer part, assumed to be smaller than 3
+ * @param[in] type type of activation functions
+ * @return none.
+ */
- void arm_nn_activations_direct_q7(q7_t * data, uint16_t size, uint16_t int_width,
- arm_nn_activation_type type);
+void arm_nn_activations_direct_q7(q7_t *data, uint16_t size, uint16_t int_width, arm_nn_activation_type type);
- /**
- * @brief Q15 neural network activation function using direct table look-up
- * @param[in,out] data pointer to input
- * @param[in] size number of elements
- * @param[in] int_width bit-width of the integer part, assume to be smaller than 3
- * @param[in] type type of activation functions
- * @return none.
- *
- * @details
- *
- * This is the direct table look-up approach.
- *
- * Assume here the integer part of the fixed-point is <= 3.
- * More than 3 just not making much sense, makes no difference with
- * saturation followed by any of these activation functions.
- */
+/**
+ * @brief Q15 neural network activation function using direct table look-up
+ * @param[in,out] data pointer to input
+ * @param[in] size number of elements
+ * @param[in] int_width bit-width of the integer part, assumed to be smaller than 3
+ * @param[in] type type of activation functions
+ * @return none.
+ *
+ * @details
+ *
+ * This is the direct table look-up approach.
+ *
+ * Assume here that the integer part of the fixed-point number is <= 3.
+ * More than 3 makes little sense, as the result is no different from
+ * saturation followed by any of these activation functions.
+ */
- void arm_nn_activations_direct_q15(q15_t * data, uint16_t size, uint16_t int_width,
- arm_nn_activation_type type);
+void arm_nn_activations_direct_q15(q15_t *data, uint16_t size, uint16_t int_width, arm_nn_activation_type type);
/**
* @defgroup Pooling Pooling Functions
@@ -1604,129 +1604,128 @@
*
*/
- /**
- * @brief Q7 max pooling function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] Im_out pointer to output tensor
- * @return none.
- *
- */
+/**
+ * @brief Q7 max pooling function
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in input tensor dimension
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] dim_kernel filter kernel size
+ * @param[in] padding padding sizes
+ * @param[in] stride convolution stride
+ * @param[in] dim_im_out output tensor dimension
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] Im_out pointer to output tensor
+ * @return none.
+ *
+ */
- void arm_maxpool_q7_HWC(q7_t * Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const uint16_t dim_im_out,
- q7_t * bufferA,
- q7_t * Im_out);
+void arm_maxpool_q7_HWC(q7_t *Im_in,
+ const uint16_t dim_im_in,
+ const uint16_t ch_im_in,
+ const uint16_t dim_kernel,
+ const uint16_t padding,
+ const uint16_t stride,
+ const uint16_t dim_im_out,
+ q7_t *bufferA,
+ q7_t *Im_out);
- /**
- * @brief Q7 average pooling function
- * @param[in] Im_in pointer to input tensor
- * @param[in] dim_im_in input tensor dimension
- * @param[in] ch_im_in number of input tensor channels
- * @param[in] dim_kernel filter kernel size
- * @param[in] padding padding sizes
- * @param[in] stride convolution stride
- * @param[in] dim_im_out output tensor dimension
- * @param[in,out] bufferA pointer to buffer space for input
- * @param[in,out] Im_out pointer to output tensor
- * @return none.
- *
- */
+/**
+ * @brief Q7 average pooling function
+ * @param[in] Im_in pointer to input tensor
+ * @param[in] dim_im_in input tensor dimension
+ * @param[in] ch_im_in number of input tensor channels
+ * @param[in] dim_kernel filter kernel size
+ * @param[in] padding padding sizes
+ * @param[in] stride convolution stride
+ * @param[in] dim_im_out output tensor dimension
+ * @param[in,out] bufferA pointer to buffer space for input
+ * @param[in,out] Im_out pointer to output tensor
+ * @return none.
+ *
+ */
- void arm_avepool_q7_HWC(q7_t * Im_in,
- const uint16_t dim_im_in,
- const uint16_t ch_im_in,
- const uint16_t dim_kernel,
- const uint16_t padding,
- const uint16_t stride,
- const uint16_t dim_im_out,
- q7_t * bufferA,
- q7_t * Im_out);
+void arm_avepool_q7_HWC(q7_t *Im_in,
+ const uint16_t dim_im_in,
+ const uint16_t ch_im_in,
+ const uint16_t dim_kernel,
+ const uint16_t padding,
+ const uint16_t stride,
+ const uint16_t dim_im_out,
+ q7_t *bufferA,
+ q7_t *Im_out);
- /**
- * @brief s8 average pooling function.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * @param[in] pool_params Pooling parameters
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * Argument 'N' is not used.
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [H, W]
- * Argument N and C are not used.
- * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT]
- * Argument N is not used.
- * C_OUT equals C_IN.
- * @param[in, out] output_data Output data pointer. Data type: int8
- * @return The function returns
- * <code>ARM_MATH_SUCCESS</code> - Successful operation
- *
- * @details
- * - Supported Framework: TensorFlow Lite
- *
- */
- arm_status arm_avgpool_s8(const cmsis_nn_context *ctx,
- const cmsis_nn_pool_params *pool_params,
- const cmsis_nn_dims *input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims *filter_dims,
- const cmsis_nn_dims *output_dims,
- q7_t *output_data);
+/**
+ * @brief s8 average pooling function.
+ *
+ * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
+ *                                definition file to see if an additional buffer is required.
+ *                                Optional function {API}_get_buffer_size() provides the buffer
+ *                                size if an additional buffer is required.
+ * @param[in]      pool_params    Pooling parameters
+ * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                                Argument 'N' is not used.
+ * @param[in]      input_data     Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims    Filter tensor dimensions. Format: [H, W]
+ *                                Argument N and C are not used.
+ * @param[in]      output_dims    Output tensor dimensions. Format: [H, W, C_OUT]
+ *                                Argument N is not used.
+ *                                C_OUT equals C_IN.
+ * @param[in, out] output_data    Output data pointer. Data type: int8
+ * @return                        The function returns
+ *                                <code>ARM_MATH_SUCCESS</code> - Successful operation
+ *
+ * @details
+ *    - Supported Framework: TensorFlow Lite
+ *
+ */
+arm_status arm_avgpool_s8(const cmsis_nn_context *ctx,
+ const cmsis_nn_pool_params *pool_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
- /**
- * @brief Get the required buffer size for S8 average pooling function
- * @param[in] dim_dst_width output tensor dimension
- * @param[in] ch_src number of input tensor channels
- * @return The function returns required buffer size in bytes
- *
- */
- int32_t arm_avgpool_s8_get_buffer_size(const int dim_dst_width,
- const int ch_src);
+/**
+ * @brief Get the required buffer size for S8 average pooling function
+ * @param[in] dim_dst_width output tensor dimension
+ * @param[in] ch_src number of input tensor channels
+ * @return The function returns required buffer size in bytes
+ *
+ */
+int32_t arm_avgpool_s8_get_buffer_size(const int dim_dst_width, const int ch_src);
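Putting the two declarations above together, a hedged calling sketch (the tensor shapes, the static scratch area, and the hypothetical input_data/output_data pointers are assumptions of the example, not requirements of the API):

    cmsis_nn_dims in_dims = {1, 16, 16, 8}; /* {n, h, w, c}, per arm_nn_types.h */
    cmsis_nn_dims filt_dims = {0, 2, 2, 0}; /* only h and w are read */
    cmsis_nn_dims out_dims = {1, 8, 8, 8};
    cmsis_nn_pool_params pool_params;
    pool_params.stride.w = 2;
    pool_params.stride.h = 2;
    pool_params.padding.w = 0;
    pool_params.padding.h = 0;
    pool_params.activation.min = -128;
    pool_params.activation.max = 127;

    static int8_t scratch[512]; /* must be at least the size reported below */
    cmsis_nn_context ctx;
    ctx.size = arm_avgpool_s8_get_buffer_size(out_dims.w, in_dims.c);
    ctx.buf = scratch;
    arm_status status = arm_avgpool_s8(&ctx, &pool_params, &in_dims, input_data,
                                       &filt_dims, &out_dims, output_data);
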
- /**
- * @brief s8 max pooling function.
- *
- * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function
- * definition file to see if an additional buffer is required.
- * Optional function {API}_get_buffer_size() provides the buffer
- * size if an additional buffer is required.
- * @param[in] pool_params Pooling parameters
- * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN]
- * Argument 'N' is not used.
- * @param[in] input_data Input (activation) data pointer. Data type: int8
- * @param[in] filter_dims Filter tensor dimensions. Format: [H, W]
- * Argument N and C are not used.
- * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT]
- * Argument N is not used.
- * C_OUT equals C_IN.
- * @param[in, out] output_data Output data pointer. Data type: int8
- * @return The function returns
- * <code>ARM_MATH_SUCCESS</code> - Successful operation
- *
- * @details
- * - Supported Framework: TensorFlow Lite
- *
- */
- arm_status arm_max_pool_s8(const cmsis_nn_context *ctx,
- const cmsis_nn_pool_params *pool_params,
- const cmsis_nn_dims *input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims *filter_dims,
- const cmsis_nn_dims *output_dims,
- q7_t *output_data);
+/**
+ * @brief s8 max pooling function.
+ *
+ * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
+ *                                definition file to see if an additional buffer is required.
+ *                                Optional function {API}_get_buffer_size() provides the buffer
+ *                                size if an additional buffer is required.
+ * @param[in]      pool_params    Pooling parameters
+ * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                                Argument 'N' is not used.
+ * @param[in]      input_data     Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims    Filter tensor dimensions. Format: [H, W]
+ *                                Argument N and C are not used.
+ * @param[in]      output_dims    Output tensor dimensions. Format: [H, W, C_OUT]
+ *                                Argument N is not used.
+ *                                C_OUT equals C_IN.
+ * @param[in, out] output_data    Output data pointer. Data type: int8
+ * @return                        The function returns
+ *                                <code>ARM_MATH_SUCCESS</code> - Successful operation
+ *
+ * @details
+ *    - Supported Framework: TensorFlow Lite
+ *
+ */
+arm_status arm_max_pool_s8(const cmsis_nn_context *ctx,
+ const cmsis_nn_pool_params *pool_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *filter_dims,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
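The call pattern mirrors arm_avgpool_s8, reusing the names from the sketch above. Whether ctx must carry a scratch buffer depends on the build, as the comment notes, so the empty context below is an assumption to be verified against the function definition file:

    cmsis_nn_context mp_ctx = {NULL, 0}; /* assumed: no extra buffer required */
    arm_status mp_status = arm_max_pool_s8(&mp_ctx, &pool_params, &in_dims, input_data,
                                           &filt_dims, &out_dims, output_data);
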
/**
* @defgroup Softmax Softmax Functions
*
@@ -1734,61 +1733,61 @@
*
*/
- /**
- * @brief Q7 softmax function
- * @param[in] vec_in pointer to input vector
- * @param[in] dim_vec input vector dimension
- * @param[out] p_out pointer to output vector
- *
- * @note This function is an optimized version which is not bit-accurate with
- * TensorFlow Lite's kernel
- *
- */
+/**
+ * @brief Q7 softmax function
+ * @param[in] vec_in pointer to input vector
+ * @param[in] dim_vec input vector dimension
+ * @param[out] p_out pointer to output vector
+ *
+ * @note This function is an optimized version which is not bit-accurate with
+ * TensorFlow Lite's kernel
+ *
+ */
-void arm_softmax_q7(const q7_t * vec_in, const uint16_t dim_vec, q7_t * p_out);
+void arm_softmax_q7(const q7_t *vec_in, const uint16_t dim_vec, q7_t *p_out);
- /**
- * @brief Q7 softmax function with batch parameter
- * @param[in] vec_in pointer to input vector
- * @param[in] nb_batches number of batches
- * @param[in] dim_vec input vector dimension
- * @param[out] p_out pointer to output vector
- * @return none.
- *
- * @note This function is an optimized version which is not bit-accurate with
- * TensorFlow Lite's kernel
- *
- */
+/**
+ * @brief Q7 softmax function with batch parameter
+ * @param[in] vec_in pointer to input vector
+ * @param[in] nb_batches number of batches
+ * @param[in] dim_vec input vector dimension
+ * @param[out] p_out pointer to output vector
+ * @return none.
+ *
+ * @note This function is an optimized version which is not bit-accurate with
+ * TensorFlow Lite's kernel
+ *
+ */
-void arm_softmax_with_batch_q7(const q7_t * vec_in, const uint16_t nb_batches,const uint16_t dim_vec, q7_t * p_out );
- /**
- * @brief Q15 softmax function
- * @param[in] vec_in pointer to input vector
- * @param[in] dim_vec input vector dimension
- * @param[out] p_out pointer to output vector
- * @return none.
- *
- * @note This function is an optimized version which is not bit-accurate with
- * TensorFlow Lite's kernel
- *
- */
+void arm_softmax_with_batch_q7(const q7_t *vec_in, const uint16_t nb_batches, const uint16_t dim_vec, q7_t *p_out);
+/**
+ * @brief Q15 softmax function
+ * @param[in] vec_in pointer to input vector
+ * @param[in] dim_vec input vector dimension
+ * @param[out] p_out pointer to output vector
+ * @return none.
+ *
+ * @note This function is an optimized version which is not bit-accurate with
+ * TensorFlow Lite's kernel
+ *
+ */
-void arm_softmax_q15(const q15_t * vec_in, const uint16_t dim_vec, q15_t * p_out);
+void arm_softmax_q15(const q15_t *vec_in, const uint16_t dim_vec, q15_t *p_out);
- /**
- * @brief S8 softmax function
- * @param[in] input Pointer to the input tensor
- * @param[in] num_rows Number of rows in the input tensor
- * @param[in] row_size Number of elements in each input row
- * @param[in] mult Input quantization multiplier
- * @param[in] shift Input quantization shift within the range [0, 31]
- * @param[in] diff_min Minimum difference with max in row. Used to check if
- * the quantized exponential operation can be performed
- * @param[out] output Pointer to the output tensor
- *
- * @note Supported framework: TensorFlow Lite micro (bit-accurate)
- *
- */
+/**
+ * @brief S8 softmax function
+ * @param[in] input Pointer to the input tensor
+ * @param[in] num_rows Number of rows in the input tensor
+ * @param[in] row_size Number of elements in each input row
+ * @param[in] mult Input quantization multiplier
+ * @param[in] shift Input quantization shift within the range [0, 31]
+ * @param[in] diff_min Minimum difference with max in row. Used to check if
+ * the quantized exponential operation can be performed
+ * @param[out] output Pointer to the output tensor
+ *
+ * @note Supported framework: TensorFlow Lite micro (bit-accurate)
+ *
+ */
void arm_softmax_s8(const int8_t *input,
const int32_t num_rows,
@@ -1798,20 +1797,20 @@
const int32_t diff_min,
int8_t *output);
- /**
- * @brief U8 softmax function
- * @param[in] input Pointer to the input tensor
- * @param[in] num_rows Number of rows in the input tensor
- * @param[in] row_size Number of elements in each input row
- * @param[in] mult Input quantization multiplier
- * @param[in] shift Input quantization shift within the range [0, 31]
- * @param[in] diff_min Minimum difference with max in row. Used to check if
- * the quantized exponential operation can be performed
- * @param[out] output Pointer to the output tensor
- *
- * @note Supported framework: TensorFlow Lite micro (bit-accurate)
- *
- */
+/**
+ * @brief U8 softmax function
+ * @param[in] input Pointer to the input tensor
+ * @param[in] num_rows Number of rows in the input tensor
+ * @param[in] row_size Number of elements in each input row
+ * @param[in] mult Input quantization multiplier
+ * @param[in] shift Input quantization shift within the range [0, 31]
+ * @param[in] diff_min Minimum difference with max in row. Used to check if
+ * the quantized exponential operation can be performed
+ * @param[out] output Pointer to the output tensor
+ *
+ * @note Supported framework: TensorFlow Lite micro (bit-accurate)
+ *
+ */
void arm_softmax_u8(const uint8_t *input,
const int32_t num_rows,
@@ -1821,313 +1820,313 @@
const int32_t diff_min,
uint8_t *output);
- /**
- * @brief uint8 depthwise convolution function with asymmetric quantization
- * Unless specified otherwise, arguments are mandatory.
- *
- * @param[in] input Pointer to input tensor
- * @param[in] input_x Width of input tensor
- * @param[in] input_y Height of input tensor
- * @param[in] input_ch Channels in input tensor
- * @param[in] kernel Pointer to kernel weights
- * @param[in] kernel_x Width of kernel
- * @param[in] kernel_y Height of kernel
- * @param[in] ch_mult Number of channel multiplier
- * @param[in] pad_x Padding sizes x
- * @param[in] pad_y Padding sizes y
- * @param[in] stride_x stride along the width
- * @param[in] stride_y stride along the height
- * @param[in] dilation_x Dilation along width. Not used and intended for future enhancement.
- * @param[in] dilation_y Dilation along height. Not used and intended for future enhancement.
- * @param[in] bias Pointer to optional bias values. If no bias is
- * availble, NULL is expected
- * @param[in] input_offset Input tensor zero offset
- * @param[in] filter_offset Kernel tensor zero offset
- * @param[in] output_offset Output tensor zero offset
- * @param[in,out] output Pointer to output tensor
- * @param[in] output_x Width of output tensor
- * @param[in] output_y Height of output tensor
- * @param[in] output_activation_min Minimum value to clamp the output to. Range : {0, 255}
- * @param[in] output_activation_max Minimum value to clamp the output to. Range : {0, 255}
- * @param[in] out_shift Amount of right-shift for output
- * @param[in] out_mult Output multiplier for requantization
- * @return The function returns the following
- * <code>ARM_MATH_SUCCESS</code> - Successful operation
- *
- */
- arm_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input,
- const uint16_t input_x,
- const uint16_t input_y,
- const uint16_t input_ch,
- const uint8_t *kernel,
- const uint16_t kernel_x,
- const uint16_t kernel_y,
- const int16_t ch_mult,
- const int16_t pad_x,
- const int16_t pad_y,
- const int16_t stride_x,
- const int16_t stride_y,
- const int16_t dilation_x,
- const int16_t dilation_y,
- const int32_t *bias,
- const int32_t input_offset,
- const int32_t filter_offset,
- const int32_t output_offset,
- uint8_t *output,
- const uint16_t output_x,
- const uint16_t output_y,
- const int32_t output_activation_min,
- const int32_t output_activation_max,
- const int32_t out_shift,
- const int32_t out_mult);
+/**
+ * @brief uint8 depthwise convolution function with asymmetric quantization
+ * Unless specified otherwise, arguments are mandatory.
+ *
+ * @param[in] input Pointer to input tensor
+ * @param[in] input_x Width of input tensor
+ * @param[in] input_y Height of input tensor
+ * @param[in] input_ch Channels in input tensor
+ * @param[in] kernel Pointer to kernel weights
+ * @param[in] kernel_x Width of kernel
+ * @param[in] kernel_y Height of kernel
+ * @param[in]     ch_mult        Channel multiplier (out_ch = ch_mult * in_ch)
+ * @param[in] pad_x Padding sizes x
+ * @param[in] pad_y Padding sizes y
+ * @param[in] stride_x stride along the width
+ * @param[in] stride_y stride along the height
+ * @param[in] dilation_x Dilation along width. Not used and intended for future enhancement.
+ * @param[in] dilation_y Dilation along height. Not used and intended for future enhancement.
+ * @param[in] bias Pointer to optional bias values. If no bias is
+ *                               available, NULL is expected
+ * @param[in] input_offset Input tensor zero offset
+ * @param[in] filter_offset Kernel tensor zero offset
+ * @param[in] output_offset Output tensor zero offset
+ * @param[in,out] output Pointer to output tensor
+ * @param[in] output_x Width of output tensor
+ * @param[in] output_y Height of output tensor
+ * @param[in] output_activation_min Minimum value to clamp the output to. Range : {0, 255}
+ * @param[in]     output_activation_max   Maximum value to clamp the output to. Range : {0, 255}
+ * @param[in] out_shift Amount of right-shift for output
+ * @param[in] out_mult Output multiplier for requantization
+ * @return The function returns the following
+ * <code>ARM_MATH_SUCCESS</code> - Successful operation
+ *
+ */
+arm_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input,
+ const uint16_t input_x,
+ const uint16_t input_y,
+ const uint16_t input_ch,
+ const uint8_t *kernel,
+ const uint16_t kernel_x,
+ const uint16_t kernel_y,
+ const int16_t ch_mult,
+ const int16_t pad_x,
+ const int16_t pad_y,
+ const int16_t stride_x,
+ const int16_t stride_y,
+ const int16_t dilation_x,
+ const int16_t dilation_y,
+ const int32_t *bias,
+ const int32_t input_offset,
+ const int32_t filter_offset,
+ const int32_t output_offset,
+ uint8_t *output,
+ const uint16_t output_x,
+ const uint16_t output_y,
+ const int32_t output_activation_min,
+ const int32_t output_activation_max,
+ const int32_t out_shift,
+ const int32_t out_mult);
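Since the dilation arguments are currently unused (dilation is effectively 1), the output extents passed by the caller are expected to satisfy the usual convolution relation, assuming symmetric padding:

    /* Standard output-size relation for stride and padding with dilation = 1. */
    output_x = (input_x + 2 * pad_x - kernel_x) / stride_x + 1;
    output_y = (input_y + 2 * pad_y - kernel_y) / stride_y + 1;
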
/**
* @defgroup Reshape Reshape Functions
*
*/
- /**
- * @brief Reshape a s8 vector into another with different shape
- * @param[in] input points to the s8 input vector
- * @param[out] output points to the s8 output vector
- * @param[in] total_size total size of the input and output vectors in bytes
- *
- * @note The output is expected to be in a memory area that does not overlap with the input's
- *
- */
- void arm_reshape_s8(const int8_t *input,
- int8_t *output,
- const uint32_t total_size);
+/**
+ * @brief Reshape a s8 vector into another with different shape
+ * @param[in] input points to the s8 input vector
+ * @param[out] output points to the s8 output vector
+ * @param[in] total_size total size of the input and output vectors in bytes
+ *
+ * @note The output is expected to be in a memory area that does not overlap with the input's
+ *
+ */
+void arm_reshape_s8(const int8_t *input, int8_t *output, const uint32_t total_size);
/**
* @defgroup Concatenation Concatenation Functions
*
*/
- /**
- * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the X axis
- * This function should be called for each input tensor to concatenate. The argument offset_x
- * will be used to store the input tensor in the correct position in the output tensor
- *
- * i.e. offset_x = 0
- * for(i = 0 i < num_input_tensors; ++i)
- * {
- * arm_concatenation_s8_x(&input[i], ..., &output, ..., ..., offset_x)
- * offset_x += input_x[i]
- * }
- *
- * This function assumes that the output tensor has:
- * -# The same height of the input tensor
- * -# The same number of channels of the input tensor
- * -# The same batch size of the input tensor
- *
- * Unless specified otherwise, arguments are mandatory.
- *
- * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation
- *
- * @param[in] input Pointer to input tensor
- * @param[in] input_x Width of input tensor
- * @param[in] input_y Height of input tensor
- * @param[in] input_z Channels in input tensor
- * @param[in] input_w Batch size in input tensor
- * @param[out] output Pointer to output tensor
- * @param[in] output_x Width of output tensor
- * @param[in] offset_x The offset (in number of elements) on the X axis to start concatenating the input tensor
- * It is user responsibility to provide the correct value
- *
- * <b> Input constraints</b>
- * offset_x is less than output_x
- *
- */
- void arm_concatenation_s8_x(const int8_t *input,
- const uint16_t input_x,
- const uint16_t input_y,
- const uint16_t input_z,
- const uint16_t input_w,
- int8_t *output,
- const uint16_t output_x,
- const uint32_t offset_x);
+/**
+ * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the X axis
+ * This function should be called for each input tensor to concatenate. The argument offset_x
+ * will be used to store the input tensor in the correct position in the output tensor
+ *
+ *   e.g.    offset_x = 0;
+ *           for (i = 0; i < num_input_tensors; ++i)
+ *           {
+ *               arm_concatenation_s8_x(&input[i], ..., &output, ..., ..., offset_x);
+ *               offset_x += input_x[i];
+ *           }
+ *
+ * This function assumes that the output tensor has:
+ *   -# The same height as the input tensor
+ *   -# The same number of channels as the input tensor
+ *   -# The same batch size as the input tensor
+ *
+ * Unless specified otherwise, arguments are mandatory.
+ *
+ * @note This function is independent of the data layout and can be used to concatenate either int8 or uint8
+ *       tensors, since it does not involve any arithmetic operation
+ *
+ * @param[in] input Pointer to input tensor
+ * @param[in] input_x Width of input tensor
+ * @param[in] input_y Height of input tensor
+ * @param[in] input_z Channels in input tensor
+ * @param[in] input_w Batch size in input tensor
+ * @param[out] output Pointer to output tensor
+ * @param[in] output_x Width of output tensor
+ * @param[in] offset_x The offset (in number of elements) on the X axis to start concatenating the input tensor
+ *                        It is the user's responsibility to provide the correct value
+ *
+ * <b> Input constraints</b>
+ * offset_x is less than output_x
+ *
+ */
+void arm_concatenation_s8_x(const int8_t *input,
+ const uint16_t input_x,
+ const uint16_t input_y,
+ const uint16_t input_z,
+ const uint16_t input_w,
+ int8_t *output,
+ const uint16_t output_x,
+ const uint32_t offset_x);
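Rendering the loop sketched in the comment above as compilable C (the two source tensors and their shapes are hypothetical):

    const int8_t tensor_a[4 * 2 * 3 * 1] = {0}; /* x=4, y=2, z=3, w=1 */
    const int8_t tensor_b[6 * 2 * 3 * 1] = {0}; /* x=6, same y, z, w */
    const int8_t *inputs[2] = {tensor_a, tensor_b};
    const uint16_t widths[2] = {4, 6};
    int8_t out[10 * 2 * 3 * 1]; /* output width 10 = 4 + 6 */
    uint32_t offset_x = 0;
    for (int i = 0; i < 2; ++i)
    {
        arm_concatenation_s8_x(inputs[i], widths[i], 2, 3, 1, out, 10, offset_x);
        offset_x += widths[i];
    }
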
- /**
- * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Y axis
- * This function should be called for each input tensor to concatenate. The argument offset_y
- * will be used to store the input tensor in the correct position in the output tensor
- *
- * i.e. offset_y = 0
- * for(i = 0 i < num_input_tensors; ++i)
- * {
- * arm_concatenation_s8_y(&input[i], ..., &output, ..., ..., offset_y)
- * offset_y += input_y[i]
- * }
- *
- * This function assumes that the output tensor has:
- * -# The same width of the input tensor
- * -# The same number of channels of the input tensor
- * -# The same batch size of the input tensor
- *
- * Unless specified otherwise, arguments are mandatory.
- *
- * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation
- *
- * @param[in] input Pointer to input tensor
- * @param[in] input_x Width of input tensor
- * @param[in] input_y Height of input tensor
- * @param[in] input_z Channels in input tensor
- * @param[in] input_w Batch size in input tensor
- * @param[out] output Pointer to output tensor
- * @param[in] output_y Height of output tensor
- * @param[in] offset_y The offset on the Y axis to start concatenating the input tensor
- * It is user responsibility to provide the correct value
- *
- * <b> Input constraints</b>
- * offset_y is less than output_y
- *
- */
- void arm_concatenation_s8_y(const int8_t *input,
- const uint16_t input_x,
- const uint16_t input_y,
- const uint16_t input_z,
- const uint16_t input_w,
- int8_t *output,
- const uint16_t output_y,
- const uint32_t offset_y);
+/**
+ * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Y axis
+ * This function should be called for each input tensor to concatenate. The argument offset_y
+ * will be used to store the input tensor in the correct position in the output tensor
+ *
+ *   e.g.    offset_y = 0;
+ *           for (i = 0; i < num_input_tensors; ++i)
+ *           {
+ *               arm_concatenation_s8_y(&input[i], ..., &output, ..., ..., offset_y);
+ *               offset_y += input_y[i];
+ *           }
+ *
+ * This function assumes that the output tensor has:
+ *   -# The same width as the input tensor
+ *   -# The same number of channels as the input tensor
+ *   -# The same batch size as the input tensor
+ *
+ * Unless specified otherwise, arguments are mandatory.
+ *
+ * @note This function is independent of the data layout and can be used to concatenate either int8 or uint8
+ *       tensors, since it does not involve any arithmetic operation
+ *
+ * @param[in] input Pointer to input tensor
+ * @param[in] input_x Width of input tensor
+ * @param[in] input_y Height of input tensor
+ * @param[in] input_z Channels in input tensor
+ * @param[in] input_w Batch size in input tensor
+ * @param[out] output Pointer to output tensor
+ * @param[in] output_y Height of output tensor
+ * @param[in] offset_y The offset on the Y axis to start concatenating the input tensor
+ *                        It is the user's responsibility to provide the correct value
+ *
+ * <b> Input constraints</b>
+ * offset_y is less than output_y
+ *
+ */
+void arm_concatenation_s8_y(const int8_t *input,
+ const uint16_t input_x,
+ const uint16_t input_y,
+ const uint16_t input_z,
+ const uint16_t input_w,
+ int8_t *output,
+ const uint16_t output_y,
+ const uint32_t offset_y);
- /**
- * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Z axis
- * This function should be called for each input tensor to concatenate. The argument offset_z
- * will be used to store the input tensor in the correct position in the output tensor
- *
- * i.e. offset_z = 0
- * for(i = 0 i < num_input_tensors; ++i)
- * {
- * arm_concatenation_s8_z(&input[i], ..., &output, ..., ..., offset_z)
- * offset_z += input_z[i]
- * }
- *
- * This function assumes that the output tensor has:
- * -# The same width of the input tensor
- * -# The same height of the input tensor
- * -# The same batch size of the input tensor
- *
- * Unless specified otherwise, arguments are mandatory.
- *
- * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation
- *
- * @param[in] input Pointer to input tensor
- * @param[in] input_x Width of input tensor
- * @param[in] input_y Height of input tensor
- * @param[in] input_z Channels in input tensor
- * @param[in] input_w Batch size in input tensor
- * @param[out] output Pointer to output tensor
- * @param[in] output_z Channels in output tensor
- * @param[in] offset_z The offset on the Z axis to start concatenating the input tensor
- * It is user responsibility to provide the correct value
- *
- * <b> Input constraints</b>
- * offset_z is less than output_z
- *
- */
- void arm_concatenation_s8_z(const int8_t *input,
- const uint16_t input_x,
- const uint16_t input_y,
- const uint16_t input_z,
- const uint16_t input_w,
- int8_t *output,
- const uint16_t output_z,
- const uint32_t offset_z);
+/**
+ * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Z axis
+ * This function should be called for each input tensor to concatenate. The argument offset_z
+ * will be used to store the input tensor in the correct position in the output tensor
+ *
+ *   e.g.    offset_z = 0;
+ *           for (i = 0; i < num_input_tensors; ++i)
+ *           {
+ *               arm_concatenation_s8_z(&input[i], ..., &output, ..., ..., offset_z);
+ *               offset_z += input_z[i];
+ *           }
+ *
+ * This function assumes that the output tensor has:
+ *   -# The same width as the input tensor
+ *   -# The same height as the input tensor
+ *   -# The same batch size as the input tensor
+ *
+ * Unless specified otherwise, arguments are mandatory.
+ *
+ * @note This function is independent of the data layout and can be used to concatenate either int8 or uint8
+ *       tensors, since it does not involve any arithmetic operation
+ *
+ * @param[in] input Pointer to input tensor
+ * @param[in] input_x Width of input tensor
+ * @param[in] input_y Height of input tensor
+ * @param[in] input_z Channels in input tensor
+ * @param[in] input_w Batch size in input tensor
+ * @param[out] output Pointer to output tensor
+ * @param[in] output_z Channels in output tensor
+ * @param[in] offset_z The offset on the Z axis to start concatenating the input tensor
+ *                        It is the user's responsibility to provide the correct value
+ *
+ * <b> Input constraints</b>
+ * offset_z is less than output_z
+ *
+ */
+void arm_concatenation_s8_z(const int8_t *input,
+ const uint16_t input_x,
+ const uint16_t input_y,
+ const uint16_t input_z,
+ const uint16_t input_w,
+ int8_t *output,
+ const uint16_t output_z,
+ const uint32_t offset_z);
- /**
- * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the W axis (Batch size)
- * This function should be called for each input tensor to concatenate. The argument offset_w
- * will be used to store the input tensor in the correct position in the output tensor
- *
- * i.e. offset_w = 0
- * for(i = 0 i < num_input_tensors; ++i)
- * {
- * arm_concatenation_s8_w(&input[i], ..., &output, ..., ..., offset_w)
- * offset_w += input_w[i]
- * }
- *
- * This function assumes that the output tensor has:
- * -# The same width of the input tensor
- * -# The same height of the input tensor
- * -# The same number o channels of the input tensor
- *
- * Unless specified otherwise, arguments are mandatory.
- *
- * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation
- *
- * @param[in] input Pointer to input tensor
- * @param[in] input_x Width of input tensor
- * @param[in] input_y Height of input tensor
- * @param[in] input_z Channels in input tensor
- * @param[in] input_w Batch size in input tensor
- * @param[out] output Pointer to output tensor
- * @param[in] offset_w The offset on the W axis to start concatenating the input tensor
- * It is user responsibility to provide the correct value
- *
- */
- void arm_concatenation_s8_w(const int8_t *input,
- const uint16_t input_x,
- const uint16_t input_y,
- const uint16_t input_z,
- const uint16_t input_w,
- int8_t *output,
- const uint32_t offset_w);
+/**
+ * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the W axis (Batch size)
+ * This function should be called for each input tensor to concatenate. The argument offset_w
+ * will be used to store the input tensor in the correct position in the output tensor
+ *
+ *   e.g.    offset_w = 0;
+ *           for (i = 0; i < num_input_tensors; ++i)
+ *           {
+ *               arm_concatenation_s8_w(&input[i], ..., &output, ..., ..., offset_w);
+ *               offset_w += input_w[i];
+ *           }
+ *
+ * This function assumes that the output tensor has:
+ *   -# The same width as the input tensor
+ *   -# The same height as the input tensor
+ *   -# The same number of channels as the input tensor
+ *
+ * Unless specified otherwise, arguments are mandatory.
+ *
+ * @note This function is independent of the data layout and can be used to concatenate either int8 or uint8
+ *       tensors, since it does not involve any arithmetic operation
+ *
+ * @param[in] input Pointer to input tensor
+ * @param[in] input_x Width of input tensor
+ * @param[in] input_y Height of input tensor
+ * @param[in] input_z Channels in input tensor
+ * @param[in] input_w Batch size in input tensor
+ * @param[out] output Pointer to output tensor
+ * @param[in] offset_w The offset on the W axis to start concatenating the input tensor
+ *                        It is the user's responsibility to provide the correct value
+ *
+ */
+void arm_concatenation_s8_w(const int8_t *input,
+ const uint16_t input_x,
+ const uint16_t input_y,
+ const uint16_t input_z,
+ const uint16_t input_w,
+ int8_t *output,
+ const uint32_t offset_w);
/**
* @defgroup SVDF SVDF Layer Functions
*
*/
- /**
- * @brief s8 SVDF function
- *
- * @param[in] input_ctx Temporary scratch buffer
- * @param[in] output_ctx Temporary output scratch buffer
- * @param[in] svdf_params SVDF Parameters
- * Range of svdf_params->input_offset : [-128, 127]
- * Range of svdf_params->output_offset : [-128, 127]
- * @param[in] input_quant_params Input quantization parameters
- * @param[in] output_quant_params Output quantization parameters
- * @param[in] input_dims Input tensor dimensions
- * @param[in] input_data Pointer to input tensor
- * @param[in] state_dims State tensor dimensions
- * @param[in] state_data Pointer to state tensor
- * @param[in] weights_feature_dims Weights (feature) tensor dimensions
- * @param[in] weights_feature_data Pointer to the weights (feature) tensor
- * @param[in] weights_time_dims Weights (time) tensor dimensions
- * @param[in] weights_time_data Pointer to the weights (time) tensor
- * @param[in] bias_dims Bias tensor dimensions
- * @param[in] bias_data Pointer to bias tensor
- * @param[in] output_dims Output tensor dimensions
- * @param[out] output_data Pointer to the output tensor
- *
- * @return The function returns <code>ARM_MATH_SUCCESS</code>
- *
- * @details
- * 1. Supported framework: TensorFlow Lite micro
- * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs.
- *
- */
- arm_status
- arm_svdf_s8(const cmsis_nn_context *input_ctx,
- const cmsis_nn_context *output_ctx,
- const cmsis_nn_svdf_params *svdf_params,
- const cmsis_nn_per_tensor_quant_params *input_quant_params,
- const cmsis_nn_per_tensor_quant_params *output_quant_params,
- const cmsis_nn_dims *input_dims,
- const q7_t *input_data,
- const cmsis_nn_dims *state_dims,
- q15_t *state_data,
- const cmsis_nn_dims *weights_feature_dims,
- const q7_t *weights_feature_data,
- const cmsis_nn_dims *weights_time_dims,
- const q15_t *weights_time_data,
- const cmsis_nn_dims *bias_dims,
- const q31_t *bias_data,
- const cmsis_nn_dims *output_dims,
- q7_t *output_data);
-
+/**
+ * @brief s8 SVDF function
+ *
+ * @param[in] input_ctx Temporary scratch buffer
+ * @param[in] output_ctx Temporary output scratch buffer
+ * @param[in] svdf_params SVDF Parameters
+ * Range of svdf_params->input_offset : [-128, 127]
+ * Range of svdf_params->output_offset : [-128, 127]
+ * @param[in] input_quant_params Input quantization parameters
+ * @param[in] output_quant_params Output quantization parameters
+ * @param[in] input_dims Input tensor dimensions
+ * @param[in] input_data Pointer to input tensor
+ * @param[in] state_dims State tensor dimensions
+ * @param[in] state_data Pointer to state tensor
+ * @param[in] weights_feature_dims Weights (feature) tensor dimensions
+ * @param[in] weights_feature_data Pointer to the weights (feature) tensor
+ * @param[in] weights_time_dims Weights (time) tensor dimensions
+ * @param[in] weights_time_data Pointer to the weights (time) tensor
+ * @param[in] bias_dims Bias tensor dimensions
+ * @param[in] bias_data Pointer to bias tensor
+ * @param[in] output_dims Output tensor dimensions
+ * @param[out] output_data Pointer to the output tensor
+ *
+ * @return The function returns <code>ARM_MATH_SUCCESS</code>
+ *
+ * @details
+ * 1. Supported framework: TensorFlow Lite micro
+ *    2. q7 is used as the data type even though it is s8 data, to be consistent with existing APIs.
+ *
+ */
+arm_status arm_svdf_s8(const cmsis_nn_context *input_ctx,
+ const cmsis_nn_context *output_ctx,
+ const cmsis_nn_svdf_params *svdf_params,
+ const cmsis_nn_per_tensor_quant_params *input_quant_params,
+ const cmsis_nn_per_tensor_quant_params *output_quant_params,
+ const cmsis_nn_dims *input_dims,
+ const q7_t *input_data,
+ const cmsis_nn_dims *state_dims,
+ q15_t *state_data,
+ const cmsis_nn_dims *weights_feature_dims,
+ const q7_t *weights_feature_data,
+ const cmsis_nn_dims *weights_time_dims,
+ const q15_t *weights_time_data,
+ const cmsis_nn_dims *bias_dims,
+ const q31_t *bias_data,
+ const cmsis_nn_dims *output_dims,
+ q7_t *output_data);
#ifdef __cplusplus
}
diff --git a/CMSIS/NN/Include/arm_nnsupportfunctions.h b/CMSIS/NN/Include/arm_nnsupportfunctions.h
index 44a677b..0e8fbd3 100644
--- a/CMSIS/NN/Include/arm_nnsupportfunctions.h
+++ b/CMSIS/NN/Include/arm_nnsupportfunctions.h
@@ -30,35 +30,33 @@
#ifndef _ARM_NNSUPPORTFUNCTIONS_H_
#define _ARM_NNSUPPORTFUNCTIONS_H_
-#include "arm_math_types.h"
#include "arm_common_tables.h"
+#include "arm_math_types.h"
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
-#define LEFT_SHIFT(_shift) (_shift > 0 ? _shift : 0)
+#define LEFT_SHIFT(_shift) (_shift > 0 ? _shift : 0)
#define RIGHT_SHIFT(_shift) (_shift > 0 ? 0 : -_shift)
-#define MASK_IF_ZERO(x) (x) == 0 ? ~0 : 0
+#define MASK_IF_ZERO(x) (x) == 0 ? ~0 : 0
#define MASK_IF_NON_ZERO(x) (x) != 0 ? ~0 : 0
#define SELECT_USING_MASK(mask, a, b) ((mask) & (a)) ^ (~(mask) & (b))
-#define MAX(A,B) ((A) > (B) ? (A) : (B))
-#define MIN(A,B) ((A) < (B) ? (A) : (B))
+#define MAX(A, B) ((A) > (B) ? (A) : (B))
+#define MIN(A, B) ((A) < (B) ? (A) : (B))
#define CLAMP(x, h, l) MAX(MIN((x), (h)), (l))
/**
* @brief Union for SIMD access of q31/q15/q7 types
*/
-union arm_nnword
-{
- q31_t word;
- /**< q31 type */
- q15_t half_words[2];
- /**< q15 type */
- q7_t bytes[4];
- /**< q7 type */
+union arm_nnword {
+    q31_t word;          /**< q31 type */
+    q15_t half_words[2]; /**< q15 type */
+    q7_t bytes[4];       /**< q7 type */
};
/**
@@ -66,14 +64,13 @@
*/
struct arm_nn_double
{
- uint32_t low;
- int32_t high;
+ uint32_t low;
+ int32_t high;
};
-union arm_nn_long_long
-{
- int64_t long_long;
- struct arm_nn_double word;
+union arm_nn_long_long {
+ int64_t long_long;
+ struct arm_nn_double word;
};
/**
@@ -118,7 +115,7 @@
* @return none.
*
*/
-void arm_q7_to_q15_reordered_no_shift(const q7_t * pSrc, q15_t * pDst, uint32_t blockSize);
+void arm_q7_to_q15_reordered_no_shift(const q7_t *pSrc, q15_t *pDst, uint32_t blockSize);
/**
* @brief Converts the elements from a q7 vector to a q15 vector with an added offset
@@ -300,11 +297,13 @@
*
* @param[in] lhs Pointer to the LHS input matrix
* @param[in] rhs Pointer to the RHS input matrix
-* @param[in] bias Pointer to the bias vector. The length of this vector is equal to the number of output columns (or RHS input rows)
+* @param[in] bias Pointer to the bias vector. The length of this vector is equal to the number of output
+* columns (or RHS input rows)
* @param[out] dst Pointer to the output matrix with "m" rows and "n" columns
-* @param[in] dst_multipliers Pointer to the multipliers vector needed for the per-channel requantization. The length of this vector is equal to
-* the number of output columns (or RHS input rows)
-* @param[in] dst_shifts Pointer to the shifts vector needed for the per-channel requantization. The length of this vector is equal to
+* @param[in] dst_multipliers Pointer to the multipliers vector needed for the per-channel requantization.
+* The length of this vector is equal to the number of output columns (or RHS input rows)
+* @param[in] dst_shifts Pointer to the shifts vector needed for the per-channel requantization. The length of
+* this vector is equal to
* the number of output columns (or RHS input rows)
* @param[in] lhs_rows Number of LHS input rows
* @param[in] rhs_rows Number of RHS input rows
@@ -338,8 +337,10 @@
* @param[in] rhs Input right-hand side matrix (transposed)
* @param[in] bias Input bias
* @param[out] dst Output vector
- * @param[in] lhs_offset Offset to be added to the input values of the left-hand side vector. Range: -127 to 128
- * @param[in] rhs_offset Offset to be added to the input values of the right-hand side matrix. Range: -127 to 128
+ * @param[in] lhs_offset Offset to be added to the input values of the left-hand side vector.
+ * Range: -127 to 128
+ * @param[in] rhs_offset Offset to be added to the input values of the right-hand side matrix.
+ * Range: -127 to 128
* @param[in] dst_offset Offset to be added to the output values. Range: -127 to 128
* @param[in] dst_multiplier Output multiplier
* @param[in] dst_shift Output shift
@@ -454,12 +455,12 @@
*/
__STATIC_FORCEINLINE q31_t arm_nn_read_q15x2_ia(const q15_t **in_q15)
{
- q31_t val;
+ q31_t val;
- memcpy(&val, *in_q15, 4);
- *in_q15 += 2;
+ memcpy(&val, *in_q15, 4);
+ *in_q15 += 2;
- return (val);
+ return (val);
}
/**
@@ -469,11 +470,11 @@
*/
__STATIC_FORCEINLINE q31_t arm_nn_read_q7x4_ia(const q7_t **in_q7)
{
- q31_t val;
- memcpy(&val, *in_q7, 4);
- *in_q7 += 4;
+ q31_t val;
+ memcpy(&val, *in_q7, 4);
+ *in_q7 += 4;
- return (val);
+ return (val);
}
/**
@@ -483,10 +484,10 @@
*/
__STATIC_FORCEINLINE q31_t arm_nn_read_q15x2(const q15_t *in_q15)
{
- q31_t val;
- memcpy(&val, in_q15, 4);
+ q31_t val;
+ memcpy(&val, in_q15, 4);
- return (val);
+ return (val);
}
/**
@@ -496,10 +497,10 @@
*/
__STATIC_FORCEINLINE q31_t arm_nn_read_q7x4(const q7_t *in_q7)
{
- q31_t val;
- memcpy(&val, in_q7, 4);
+ q31_t val;
+ memcpy(&val, in_q7, 4);
- return (val);
+ return (val);
}
/**
@@ -509,91 +510,87 @@
* @param[in] block_size Number of bytes to copy.
*
*/
-__STATIC_FORCEINLINE void arm_memset_q7(q7_t *dst,
- const q7_t val,
- uint32_t block_size)
+__STATIC_FORCEINLINE void arm_memset_q7(q7_t *dst, const q7_t val, uint32_t block_size)
{
#if defined(ARM_MATH_MVEI)
- __asm volatile (
- " vdup.8 q0, %[set_val] \n"
- " wlstp.8 lr, %[cnt], 1f \n"
- "2: \n"
- " vstrb.8 q0, [%[in]], 16 \n"
- " letp lr, 2b \n"
- "1: \n"
- :[in] "+r"(dst)
- :[cnt] "r"(block_size), [set_val] "r"(val)
- :"q0", "memory", "r14");
+ __asm volatile(" vdup.8 q0, %[set_val] \n"
+ " wlstp.8 lr, %[cnt], 1f \n"
+ "2: \n"
+ " vstrb.8 q0, [%[in]], 16 \n"
+ " letp lr, 2b \n"
+ "1: \n"
+ : [in] "+r"(dst)
+ : [cnt] "r"(block_size), [set_val] "r"(val)
+ : "q0", "memory", "r14");
#else
memset(dst, val, block_size);
#endif
}
-#if defined (ARM_MATH_DSP)
+#if defined(ARM_MATH_DSP)
/**
* @brief read and expand one q7 word into two q15 words
*/
-__STATIC_FORCEINLINE const q7_t *read_and_pad(const q7_t *source, q31_t * out1, q31_t * out2)
+__STATIC_FORCEINLINE const q7_t *read_and_pad(const q7_t *source, q31_t *out1, q31_t *out2)
{
- q31_t inA = arm_nn_read_q7x4_ia(&source);
- q31_t inAbuf1 = __SXTB16(__ROR((uint32_t)inA, 8));
- q31_t inAbuf2 = __SXTB16(inA);
+ q31_t inA = arm_nn_read_q7x4_ia(&source);
+ q31_t inAbuf1 = __SXTB16(__ROR((uint32_t)inA, 8));
+ q31_t inAbuf2 = __SXTB16(inA);
#ifndef ARM_MATH_BIG_ENDIAN
- *out2 = (int32_t) (__PKHTB (inAbuf1, inAbuf2, 16));
- *out1 = (int32_t) (__PKHBT (inAbuf2, inAbuf1, 16));
+ *out2 = (int32_t)(__PKHTB(inAbuf1, inAbuf2, 16));
+ *out1 = (int32_t)(__PKHBT(inAbuf2, inAbuf1, 16));
#else
- *out1 = (int32_t) (__PKHTB(inAbuf1, inAbuf2, 16));
- *out2 = (int32_t) (__PKHBT(inAbuf2, inAbuf1, 16));
+ *out1 = (int32_t)(__PKHTB(inAbuf1, inAbuf2, 16));
+ *out2 = (int32_t)(__PKHBT(inAbuf2, inAbuf1, 16));
#endif
- return source;
+ return source;
}
/**
* @brief read and expand one q7 word into two q15 words with reordering
*/
-__STATIC_FORCEINLINE const q7_t *read_and_pad_reordered(const q7_t *source, q31_t * out1, q31_t * out2)
+__STATIC_FORCEINLINE const q7_t *read_and_pad_reordered(const q7_t *source, q31_t *out1, q31_t *out2)
{
- q31_t inA = arm_nn_read_q7x4_ia(&source);
+ q31_t inA = arm_nn_read_q7x4_ia(&source);
#ifndef ARM_MATH_BIG_ENDIAN
- *out2 = __SXTB16(__ROR((uint32_t)inA, 8));
- *out1 = __SXTB16(inA);
+ *out2 = __SXTB16(__ROR((uint32_t)inA, 8));
+ *out1 = __SXTB16(inA);
#else
- *out1 = __SXTB16(__ROR((uint32_t)inA, 8));
- *out2 = __SXTB16(inA);
+ *out1 = __SXTB16(__ROR((uint32_t)inA, 8));
+ *out2 = __SXTB16(inA);
#endif
- return source;
+ return source;
}
/**
* @brief read and expand one q7 word into two q15 words with reordering and add an offset
*/
-__STATIC_FORCEINLINE const q7_t *read_and_pad_reordered_with_offset(const q7_t *source, q31_t * out1, q31_t * out2, q31_t offset)
+__STATIC_FORCEINLINE const q7_t *
+read_and_pad_reordered_with_offset(const q7_t *source, q31_t *out1, q31_t *out2, q31_t offset)
{
- q31_t inA = arm_nn_read_q7x4_ia(&source);
+ q31_t inA = arm_nn_read_q7x4_ia(&source);
#ifndef ARM_MATH_BIG_ENDIAN
- *out2 = __SXTB16(__ROR((uint32_t)inA, 8));
- *out1 = __SXTB16(inA);
+ *out2 = __SXTB16(__ROR((uint32_t)inA, 8));
+ *out1 = __SXTB16(inA);
#else
- *out1 = __SXTB16(__ROR((uint32_t)inA, 8));
- *out2 = __SXTB16(inA);
+ *out1 = __SXTB16(__ROR((uint32_t)inA, 8));
+ *out2 = __SXTB16(inA);
#endif
- *out1 = __QADD16(*out1,offset);
- *out2 = __QADD16(*out2,offset);
+ *out1 = __QADD16(*out1, offset);
+ *out2 = __QADD16(*out2, offset);
- return source;
+ return source;
}
#endif
-
-
/**
* @defgroup NNBasicMath Basic Math Functions for Neural Network Computation
*
@@ -616,12 +613,7 @@
* Results outside of the allowable q15 range [0x8000 0x7FFF] will be saturated.
*/
-void arm_nn_mult_q15(
- q15_t * pSrcA,
- q15_t * pSrcB,
- q15_t * pDst,
- const uint16_t out_shift,
- uint32_t blockSize);
+void arm_nn_mult_q15(q15_t *pSrcA, q15_t *pSrcB, q15_t *pDst, const uint16_t out_shift, uint32_t blockSize);
/**
* @brief q7 vector multiplication with variable output shifts
@@ -638,34 +630,27 @@
* Results outside of the allowable q7 range [0x80 0x7F] will be saturated.
*/
-void arm_nn_mult_q7(
- q7_t * pSrcA,
- q7_t * pSrcB,
- q7_t * pDst,
- const uint16_t out_shift,
- uint32_t blockSize);
+void arm_nn_mult_q7(q7_t *pSrcA, q7_t *pSrcB, q7_t *pDst, const uint16_t out_shift, uint32_t blockSize);
/**
* @brief macro for adding rounding offset
*/
#ifndef ARM_NN_TRUNCATE
- #define NN_ROUND(out_shift) ( (0x1u << out_shift) >> 1 )
+#define NN_ROUND(out_shift) ((0x1u << out_shift) >> 1)
#else
- #define NN_ROUND(out_shift) 0
+#define NN_ROUND(out_shift) 0
#endif
// Macros for shortening quantization functions' names and avoid long lines
-#define MUL_SAT(a, b) arm_nn_doubling_high_mult((a), (b))
+#define MUL_SAT(a, b) arm_nn_doubling_high_mult((a), (b))
#define MUL_SAT_MVE(a, b) arm_doubling_high_mult_mve_32x4((a), (b))
#define MUL_POW2(a, b) arm_nn_mult_by_power_of_two((a), (b))
-
#define DIV_POW2(a, b) arm_nn_divide_by_power_of_two((a), (b))
#define DIV_POW2_MVE(a, b) arm_divide_by_power_of_two_mve((a), (b))
-
-#define EXP_ON_NEG(x) arm_nn_exp_on_negative_values((x))
-#define ONE_OVER1(x) arm_nn_one_over_one_plus_x_for_x_in_0_1((x))
+#define EXP_ON_NEG(x) arm_nn_exp_on_negative_values((x))
+#define ONE_OVER1(x) arm_nn_one_over_one_plus_x_for_x_in_0_1((x))
/**
* @brief Saturating doubling high multiply. Result matches
@@ -690,7 +675,7 @@
// Utilize all of the upper 32 bits. This is the doubling step
// as well.
- result = (int32_t) (mult / (1ll << 31));
+ result = (int32_t)(mult / (1ll << 31));
if ((m1 == m2) && (m1 == (int32_t)Q31_MIN))
{
@@ -774,9 +759,8 @@
*/
__STATIC_FORCEINLINE q31_t arm_nn_requantize(const q31_t val, const q31_t multiplier, const q31_t shift)
{
- return arm_nn_divide_by_power_of_two(
- arm_nn_doubling_high_mult_no_sat(val * (1 << LEFT_SHIFT(shift)), multiplier),
- RIGHT_SHIFT(shift));
+ return arm_nn_divide_by_power_of_two(arm_nn_doubling_high_mult_no_sat(val * (1 << LEFT_SHIFT(shift)), multiplier),
+ RIGHT_SHIFT(shift));
}
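As a worked example of the arithmetic (values chosen only for illustration): with multiplier = 1073741824 (0.5 in Q0.31) and shift = -1, the effective scale is 0.5 * 2^-1 = 0.25, applied with round-to-nearest:

    const q31_t acc = 1000;
    const q31_t out = arm_nn_requantize(acc, 1073741824, -1); /* ~= 1000 * 0.25 = 250 */
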
/**
@@ -786,22 +770,18 @@
* @param[in] block_size Number of bytes to copy.
*
*/
-__STATIC_FORCEINLINE void arm_memcpy_q7(q7_t *__RESTRICT dst,
- const q7_t *__RESTRICT src,
- uint32_t block_size)
+__STATIC_FORCEINLINE void arm_memcpy_q7(q7_t *__RESTRICT dst, const q7_t *__RESTRICT src, uint32_t block_size)
{
#if defined(ARM_MATH_MVEI)
- __asm volatile (
- " wlstp.8 lr, %[cnt], 1f \n"
- "2: \n"
- " vldrb.8 q0, [%[in]], 16 \n"
- " vstrb.8 q0, [%[out]], 16 \n"
- " letp lr, 2b \n"
- "1: \n"
- :[in] "+r"(src)
- ,[out] "+r"(dst)
- :[cnt] "r"(block_size)
- :"q0", "memory", "r14");
+ __asm volatile(" wlstp.8 lr, %[cnt], 1f \n"
+ "2: \n"
+ " vldrb.8 q0, [%[in]], 16 \n"
+ " vstrb.8 q0, [%[out]], 16 \n"
+ " letp lr, 2b \n"
+ "1: \n"
+ : [in] "+r"(src), [out] "+r"(dst)
+ : [cnt] "r"(block_size)
+ : "q0", "memory", "r14");
#else
memcpy(dst, src, block_size);
#endif
@@ -830,10 +810,10 @@
*/
__STATIC_FORCEINLINE int32x4_t arm_divide_by_power_of_two_mve(const int32x4_t dividend, const q31_t exponent)
{
- const int32x4_t shift = vdupq_n_s32(-exponent);
- const int32x4_t fixup = vshrq_n_s32(vandq_s32(dividend, shift), 31);
- const int32x4_t fixed_up_dividend = vqaddq_s32(dividend, fixup);
- return vrshlq_s32(fixed_up_dividend, shift);
+ const int32x4_t shift = vdupq_n_s32(-exponent);
+ const int32x4_t fixup = vshrq_n_s32(vandq_s32(dividend, shift), 31);
+ const int32x4_t fixed_up_dividend = vqaddq_s32(dividend, fixup);
+ return vrshlq_s32(fixed_up_dividend, shift);
}
/**
@@ -847,33 +827,35 @@
*/
__STATIC_FORCEINLINE int32x4_t arm_requantize_mve(const int32x4_t val, const q31_t multiplier, const q31_t shift)
{
- return arm_divide_by_power_of_two_mve(
- arm_doubling_high_mult_mve(vshlq_s32(val, vdupq_n_s32(LEFT_SHIFT(shift))), multiplier),
- RIGHT_SHIFT(shift));
+ return arm_divide_by_power_of_two_mve(
+ arm_doubling_high_mult_mve(vshlq_s32(val, vdupq_n_s32(LEFT_SHIFT(shift))), multiplier), RIGHT_SHIFT(shift));
}
__STATIC_FORCEINLINE int32x4_t arm_doubling_high_mult_mve_32x4(const int32x4_t m1, const int32x4_t m2)
{
- return vqrdmulhq_s32(m1, m2);
+ return vqrdmulhq_s32(m1, m2);
}
__STATIC_FORCEINLINE int32x4_t arm_divide_by_power_of_two_mve_32x4(const int32x4_t dividend, const int32x4_t exponent)
{
- const int32x4_t shift = -exponent;
- const int32x4_t fixup = vshrq_n_s32(vandq_s32(dividend, shift), 31);
- const int32x4_t fixed_up_dividend = vqaddq_s32(dividend, fixup);
- return vrshlq_s32(fixed_up_dividend, shift);
+ const int32x4_t shift = -exponent;
+ const int32x4_t fixup = vshrq_n_s32(vandq_s32(dividend, shift), 31);
+ const int32x4_t fixed_up_dividend = vqaddq_s32(dividend, fixup);
+ return vrshlq_s32(fixed_up_dividend, shift);
}
-__STATIC_FORCEINLINE int32x4_t arm_requantize_mve_32x4(const int32x4_t val, const int32x4_t multiplier, const int32x4_t shift)
+__STATIC_FORCEINLINE int32x4_t arm_requantize_mve_32x4(const int32x4_t val,
+ const int32x4_t multiplier,
+ const int32x4_t shift)
{
- const int32x4_t zz = vdupq_n_s32(0);
- const mve_pred16_t p = vcmpgtq_n_s32(shift, 0);
+ const int32x4_t zz = vdupq_n_s32(0);
+ const mve_pred16_t p = vcmpgtq_n_s32(shift, 0);
- const int32x4_t left_shift = vpselq_s32(shift, zz, p);
- const int32x4_t right_shift = -vpselq_s32(zz, shift, p);
+ const int32x4_t left_shift = vpselq_s32(shift, zz, p);
+ const int32x4_t right_shift = -vpselq_s32(zz, shift, p);
- return arm_divide_by_power_of_two_mve_32x4(arm_doubling_high_mult_mve_32x4(vshlq_s32(val, left_shift), multiplier), right_shift);
+ return arm_divide_by_power_of_two_mve_32x4(arm_doubling_high_mult_mve_32x4(vshlq_s32(val, left_shift), multiplier),
+ right_shift);
}
#endif
@@ -881,22 +863,22 @@
__STATIC_FORCEINLINE int32_t arm_nn_exp_on_negative_values(int32_t val)
{
- int32_t mask = 0;
+ int32_t mask = 0;
int32_t shift = 24;
const int32_t val_mod_minus_quarter = (val & ((1 << shift) - 1)) - (1 << shift);
- const int32_t remainder = val_mod_minus_quarter - val;
- const int32_t x = (val_mod_minus_quarter << 5) + (1 << 28);
- const int32_t x2 = MUL_SAT(x, x);
+ const int32_t remainder = val_mod_minus_quarter - val;
+ const int32_t x = (val_mod_minus_quarter << 5) + (1 << 28);
+ const int32_t x2 = MUL_SAT(x, x);
- int32_t result = 1895147668 + MUL_SAT(1895147668, x +
- DIV_POW2(MUL_SAT(DIV_POW2(MUL_SAT(x2, x2), 2) + MUL_SAT(x2, x), 715827883) + x2, 1));
+ int32_t result = 1895147668 +
+ MUL_SAT(1895147668, x + DIV_POW2(MUL_SAT(DIV_POW2(MUL_SAT(x2, x2), 2) + MUL_SAT(x2, x), 715827883) + x2, 1));
-#define SELECT_IF_NON_ZERO(x) \
-{ \
- mask = MASK_IF_NON_ZERO(remainder & (1 << shift++)); \
- result = SELECT_USING_MASK(mask, MUL_SAT(result, x), result); \
-}
+#define SELECT_IF_NON_ZERO(x) \
+ { \
+ mask = MASK_IF_NON_ZERO(remainder & (1 << shift++)); \
+ result = SELECT_USING_MASK(mask, MUL_SAT(result, x), result); \
+ }
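For reference, the constants fed to SELECT_IF_NON_ZERO below appear to be Q0.31 encodings of exp(-2^-k), e.g. 1672461947 / 2^31 ~= 0.77880 ~= exp(-1/4) and 1302514674 / 2^31 ~= 0.60653 ~= exp(-1/2); each step multiplies in the factor for one bit of the remainder.
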
SELECT_IF_NON_ZERO(1672461947)
SELECT_IF_NON_ZERO(1302514674)