CoreFlow 1.0.0
A modern orchestration and execution runtime
Convolutional Network Nodes. More...
Classes | |
struct | _vx_nn_convolution_params_t |
Input parameters for a convolution operation. More... | |
struct | _vx_nn_deconvolution_params_t |
Input parameters for a deconvolution operation. More... | |
struct | _vx_nn_roi_pool_params_t |
Input parameters for ROI pooling operation. More... | |
Macros | |
#define | VX_LIBRARY_KHR_NN_EXTENSION (0x1) |
The Neural Network Extension Library Set. | |
Typedefs | |
typedef struct _vx_nn_convolution_params_t | vx_nn_convolution_params_t |
Input parameters for a convolution operation. | |
typedef struct _vx_nn_deconvolution_params_t | vx_nn_deconvolution_params_t |
Input parameters for a deconvolution operation. | |
typedef struct _vx_nn_roi_pool_params_t | vx_nn_roi_pool_params_t |
Input parameters for ROI pooling operation. | |
Enumerations | |
enum | vx_kernel_nn_ext_e { VX_KERNEL_CONVOLUTION_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x0 , VX_KERNEL_FULLY_CONNECTED_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x1 , VX_KERNEL_POOLING_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x2 , VX_KERNEL_SOFTMAX_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x3 , VX_KERNEL_ACTIVATION_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x5 , VX_KERNEL_ROI_POOLING_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x6 , VX_KERNEL_DECONVOLUTION_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x7 , VX_KERNEL_LOCAL_RESPONSE_NORMALIZATION_LAYER = VX_KERNEL_BASE(VX_ID_KHRONOS, VX_LIBRARY_KHR_NN_EXTENSION) + 0x8 } |
The list of Neural Network Extension Kernels. More... | |
enum | vx_nn_enum_e { VX_ENUM_NN_ROUNDING_TYPE = 0x1A , VX_ENUM_NN_POOLING_TYPE = 0x1B , VX_ENUM_NN_NORMALIZATION_TYPE = 0x1C , VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE = 0x1D } |
NN extension type enums. More... | |
enum | vx_nn_rounding_type_e { VX_NN_DS_SIZE_ROUNDING_FLOOR = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ROUNDING_TYPE) + 0x0 , VX_NN_DS_SIZE_ROUNDING_CEILING = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ROUNDING_TYPE) + 0x1 } |
down scale rounding. More... | |
enum | vx_nn_pooling_type_e { VX_NN_POOLING_MAX = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_POOLING_TYPE) + 0x0 , VX_NN_POOLING_AVG = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_POOLING_TYPE) + 0x1 } |
The Neural Network pooling type list. More... | |
enum | vx_nn_norm_type_e { VX_NN_NORMALIZATION_SAME_MAP = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_NORMALIZATION_TYPE) + 0x0 , VX_NN_NORMALIZATION_ACROSS_MAPS = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_NORMALIZATION_TYPE) + 0x1 } |
The Neural Network normalization type list. More... | |
enum | vx_nn_activation_function_e { VX_NN_ACTIVATION_LOGISTIC = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x0 , VX_NN_ACTIVATION_HYPERBOLIC_TAN = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x1 , VX_NN_ACTIVATION_RELU = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x2 , VX_NN_ACTIVATION_BRELU = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x3 , VX_NN_ACTIVATION_SOFTRELU = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x4 , VX_NN_ACTIVATION_ABS = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x5 , VX_NN_ACTIVATION_SQUARE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x6 , VX_NN_ACTIVATION_SQRT = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x7 , VX_NN_ACTIVATION_LINEAR = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x8 } |
The Neural Network activation functions list. More... | |
enum | vx_nn_type_e { VX_TYPE_NN_CONVOLUTION_PARAMS = 0x025 , VX_TYPE_NN_DECONVOLUTION_PARAMS = 0x026 , VX_TYPE_NN_ROI_POOL_PARAMS = 0x027 } |
The type enumeration lists all NN extension types. More... | |
Functions | |
VX_API_ENTRY vx_node VX_API_CALL | vxConvolutionLayer (vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, const vx_nn_convolution_params_t *convolution_params, vx_size size_of_convolution_params, vx_tensor outputs) |
[Graph] Creates a Convolutional Network Convolution Layer Node. | |
VX_API_ENTRY vx_node VX_API_CALL | vxFullyConnectedLayer (vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, vx_enum overflow_policy, vx_enum rounding_policy, vx_tensor outputs) |
[Graph] Creates a Fully connected Convolutional Network Layer Node. | |
VX_API_ENTRY vx_node VX_API_CALL | vxPoolingLayer (vx_graph graph, vx_tensor inputs, vx_enum pooling_type, vx_size pooling_size_x, vx_size pooling_size_y, vx_size pooling_padding_x, vx_size pooling_padding_y, vx_enum rounding, vx_tensor outputs) |
[Graph] Creates a Convolutional Network Pooling Layer Node. | |
VX_API_ENTRY vx_node VX_API_CALL | vxSoftmaxLayer (vx_graph graph, vx_tensor inputs, vx_tensor outputs) |
[Graph] Creates a Convolutional Network Softmax Layer Node. | |
VX_API_ENTRY vx_node VX_API_CALL | vxLocalResponseNormalizationLayer (vx_graph graph, vx_tensor inputs, vx_enum type, vx_size normalization_size, vx_float32 alpha, vx_float32 beta, vx_float32 bias, vx_tensor outputs) |
[Graph] Creates a Convolutional Network Local Response Normalization Layer Node. This function is optional for 8-bit extension with the extension string 'KHR_NN_8'. | |
VX_API_ENTRY vx_node VX_API_CALL | vxActivationLayer (vx_graph graph, vx_tensor inputs, vx_enum function, vx_float32 a, vx_float32 b, vx_tensor outputs) |
[Graph] Creates a Convolutional Network Activation Layer Node. The node applies the function specified in vx_nn_activation_function_e to the input data; the equation for the layer is \( outputs(i,j,k,l) = function(inputs(i,j,k,l), a, b) \) for all i,j,k,l. | |
VX_API_ENTRY vx_node VX_API_CALL | vxROIPoolingLayer (vx_graph graph, vx_tensor input_data, vx_tensor input_rois, const vx_nn_roi_pool_params_t *roi_pool_params, vx_size size_of_roi_params, vx_tensor output_arr) |
[Graph] Creates a Convolutional Network ROI pooling node. | |
VX_API_ENTRY vx_node VX_API_CALL | vxDeconvolutionLayer (vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, const vx_nn_deconvolution_params_t *deconvolution_params, vx_size size_of_deconv_params, vx_tensor outputs) |
[Graph] Creates a Convolutional Network Deconvolution Layer Node. | |
Convolutional Network Nodes.
#define VX_LIBRARY_KHR_NN_EXTENSION (0x1)
#include <vx_khr_nn.h>
The Neural Network Extension Library Set.
typedef struct _vx_nn_convolution_params_t vx_nn_convolution_params_t
#include <vx_khr_nn.h>
Input parameters for a convolution operation.
typedef struct _vx_nn_deconvolution_params_t vx_nn_deconvolution_params_t
#include <vx_khr_nn.h>
Input parameters for a deconvolution operation.
typedef struct _vx_nn_roi_pool_params_t vx_nn_roi_pool_params_t
#include <vx_khr_nn.h>
Input parameters for ROI pooling operation.
enum vx_kernel_nn_ext_e
#include <vx_khr_nn.h>
The list of Neural Network Extension Kernels.
enum vx_nn_activation_function_e
#include <vx_khr_nn.h>
The Neural Network activation functions list.
| Function name | Mathematical definition | Parameters | Parameters type |
|---|---|---|---|
| logistic | \( f(x)=1/(1+e^{-x}) \) | | |
| hyperbolic tangent | \( f(x)=a\cdot \tanh(b\cdot x) \) | a, b | VX_FLOAT32 |
| relu | \( f(x)=\max(0,x) \) | | |
| bounded relu | \( f(x)=\min(a,\max(0,x)) \) | a | VX_FLOAT32 |
| soft relu | \( f(x)=\log(1+e^{x}) \) | | |
| abs | \( f(x)=\mid x\mid \) | | |
| square | \( f(x)=x^2 \) | | |
| square root | \( f(x)=\sqrt{x} \) | | |
| linear | \( f(x)=ax+b \) | a, b | VX_FLOAT32 |
enum vx_nn_enum_e
#include <vx_khr_nn.h>
NN extension type enums.

| Enumerator | |
|---|---|
| VX_ENUM_NN_ROUNDING_TYPE | |
| VX_ENUM_NN_POOLING_TYPE | |
| VX_ENUM_NN_NORMALIZATION_TYPE | |
| VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE | |
enum vx_nn_norm_type_e
#include <vx_khr_nn.h>
The Neural Network normalization type list.

| Enumerator | |
|---|---|
| VX_NN_NORMALIZATION_SAME_MAP | Normalization is done on the same IFM. |
| VX_NN_NORMALIZATION_ACROSS_MAPS | Normalization is done across different IFMs. |
enum vx_nn_pooling_type_e
#include <vx_khr_nn.h>
The Neural Network pooling type list.
The kind of pooling performed by the pooling function.

| Enumerator | |
|---|---|
| VX_NN_POOLING_MAX | Max pooling. |
| VX_NN_POOLING_AVG | Average pooling. |
enum vx_nn_rounding_type_e
#include <vx_khr_nn.h>
Down-scale rounding.
Training frameworks use different schemes to calculate the down-scaled output size, so implementations must support two rounding methods for the down-scale calculation in the convolution and pooling functions: floor and ceiling. This is relevant when the input size is even.

| Enumerator | |
|---|---|
| VX_NN_DS_SIZE_ROUNDING_FLOOR | Floor rounding. |
| VX_NN_DS_SIZE_ROUNDING_CEILING | Ceiling rounding. |
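As an illustration with hypothetical sizes: pooling a 7-wide input with a 2-wide window, no padding and a skip of 2 gives \( (7 - 2)/2 + 1 = 3.5 \) output positions, which VX_NN_DS_SIZE_ROUNDING_FLOOR rounds down to a 3-wide output and VX_NN_DS_SIZE_ROUNDING_CEILING rounds up to a 4-wide output.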
enum vx_nn_type_e
#include <vx_khr_nn.h>
The type enumeration lists all NN extension types.

| Enumerator | |
|---|---|
| VX_TYPE_NN_CONVOLUTION_PARAMS | |
| VX_TYPE_NN_DECONVOLUTION_PARAMS | |
| VX_TYPE_NN_ROI_POOL_PARAMS | |
VX_API_ENTRY vx_node VX_API_CALL vxActivationLayer (vx_graph graph, vx_tensor inputs, vx_enum function, vx_float32 a, vx_float32 b, vx_tensor outputs)
#include <vx_khr_nn.h>
[Graph] Creates a Convolutional Network Activation Layer Node. The node applies the function specified in vx_nn_activation_function_e to the input data; the equation for the layer is \( outputs(i,j,k,l) = function(inputs(i,j,k,l), a, b) \) for all i,j,k,l.
[in] | graph | The handle to the graph. |
[in] | inputs | The input tensor data. Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. |
[in] | function | [static] Non-linear function (see vx_nn_activation_function_e). Implementations must support VX_NN_ACTIVATION_LOGISTIC, VX_NN_ACTIVATION_HYPERBOLIC_TAN and VX_NN_ACTIVATION_RELU. |
[in] | a | [static] Function parameter a. Must be positive. |
[in] | b | [static] Function parameter b. Must be positive. |
[out] | outputs | The output tensor data. Output will have the same number of dimensions as input. |
Returns: A node reference vx_node. Any possible errors preventing a successful creation should be checked using vxGetStatus.
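A minimal usage sketch for this node follows. The tensor shape, the Q7.8 data type (VX_TYPE_INT16 with fixed_point_position 8), the header paths and the omitted error handling are illustrative assumptions; only the vxActivationLayer call itself comes from this page.

```c
#include <VX/vx.h>
#include <VX/vx_khr_nn.h>   /* assumed install path for the NN extension header */

/* Sketch: ReLU activation over a [32, 32, 16] tensor in Q7.8 fixed point. */
vx_node add_relu(vx_context context, vx_graph graph)
{
    vx_size dims[3] = {32, 32, 16};                 /* [width, height, IFM], illustrative */
    vx_tensor in  = vxCreateTensor(context, 3, dims, VX_TYPE_INT16, 8);
    vx_tensor out = vxCreateTensor(context, 3, dims, VX_TYPE_INT16, 8);

    /* a and b are not used by the plain relu definition in the table above,
       but the parameter description requires positive values, so positive
       placeholders are passed. */
    vx_node node = vxActivationLayer(graph, in, VX_NN_ACTIVATION_RELU,
                                     1.0f, 1.0f, out);

    if (vxGetStatus((vx_reference)node) != VX_SUCCESS) {
        /* node creation failed; inspect the status and release resources */
    }
    return node;
}
```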
VX_API_ENTRY vx_node VX_API_CALL vxConvolutionLayer (vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, const vx_nn_convolution_params_t *convolution_params, vx_size size_of_convolution_params, vx_tensor outputs)
#include <vx_khr_nn.h>
[Graph] Creates a Convolutional Network Convolution Layer Node.
This function implements the Convolutional Network Convolution layer. For fixed-point data types, a fixed-point calculation is performed with round and saturate according to the number of accumulator bits. The number of accumulator bits is implementation defined, and should be at least 16.
round: rounding according to the vx_round_policy_e enumeration.
saturate: saturation according to the vx_convert_policy_e enumeration. The following equation is implemented:
\( outputs[j,k,i] = saturate(round(\sum_{l} (\sum_{m,n} inputs[j+m,k+n,l] \times weights[m,n,l,i]) + biases[j,k,i])) \)
where \(m,n\) are indexes into the convolution matrices, \(l\) is an index over all the convolutions per input, \(i\) is an index per output, and \(j,k\) are the input/output spatial indexes. Convolution is done on the width and height dimensions of the vx_tensor. Therefore, we use the term x for the index along the width dimension and y for the index along the height dimension.
Before the convolution is performed, the width and height input dimensions are padded with zeros. Then a down scale is done by picking the results according to a skip jump. The skip in x and y is determined by the output size dimensions. The relation between input and output is as follows:
\( width_{output} = round(\frac{width_{input} + 2 \cdot padding_x - kernel_x - (kernel_x - 1) \cdot dilation_x}{skip_x} + 1) \)
and
\( height_{output} = round(\frac{height_{input} + 2 \cdot padding_y - kernel_y - (kernel_y - 1) \cdot dilation_y}{skip_y} + 1) \)
where \(width_{input}\) is the size of the input width dimension, \(height_{input}\) is the size of the input height dimension, \(width_{output}\) is the size of the output width dimension, \(height_{output}\) is the size of the output height dimension, and \(kernel_x\) and \(kernel_y\) are the convolution sizes in the width and height dimensions. The skip is calculated from the relation between input and output; in case of ambiguity in the inverse calculation of the skip, the minimum solution is chosen. Skip must be a positive non-zero integer. Rounding is done according to vx_nn_rounding_type_e. Notice that this node creation function has more parameters than the corresponding kernel. Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here.
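As a worked instance of the width relation above (assuming \(dilation_x\) counts inserted zeros, so \(dilation_x = 0\) means an ordinary, non-dilated kernel): a 224-wide input with \(kernel_x = 3\), \(padding_x = 1\) and \(skip_x = 1\) gives \( width_{output} = round(\frac{224 + 2 - 3 - 0}{1} + 1) = 224 \), while the same configuration with \(skip_x = 2\) gives \( round(\frac{223}{2} + 1) = round(112.5) \), i.e. 112 with VX_NN_DS_SIZE_ROUNDING_FLOOR or 113 with VX_NN_DS_SIZE_ROUNDING_CEILING.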
[in] | graph | The handle to the graph. |
[in] | inputs | The input tensor data. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested. The dimension order is [width, height, IFM, batches] . Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. (Kernel parameter #0) |
[in] | weights | [static] Weights are a 4d tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. See vxCreateTensor and vxCreateVirtualTensor. Weights data type must match the data type of the inputs. (Kernel parameter #1) |
[in] | biases | [static] Optional, ignored if NULL. The biases, which may be shared (one per ofm) or unshared (one per ofm * output location). The possible layouts are either [OFM] or [width, height, OFM]. Biases data type must match the data type of the inputs. (Kernel parameter #2) |
[in] | convolution_params | [static] Pointer to parameters of type vx_nn_convolution_params_t . (Kernel parameter #3) |
[in] | size_of_convolution_params | [static] Size in bytes of convolution_params. Note that this parameter is not counted as one of the kernel parameters. |
[out] | outputs | The output tensor data. Output will have the same number and structure of dimensions as input. Output tensor data type must be same as the inputs. (Kernel parameter #4) |
Returns: A node reference vx_node. Any possible errors preventing a successful creation should be checked using vxGetStatus.
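The sketch below continues the 224-wide worked example above and creates a convolution node. The vx_nn_convolution_params_t field names follow the Khronos NN extension header and are not documented on this page, so treat them, along with the shapes, data types and policies, as assumptions.

```c
#include <VX/vx.h>
#include <VX/vx_khr_nn.h>

/* Sketch: 3x3 convolution, 3 IFMs -> 16 OFMs; the 224 -> 112 output size implies skip 2. */
vx_node add_conv(vx_context context, vx_graph graph)
{
    vx_size in_dims[3]  = {224, 224, 3};       /* [width, height, IFM] */
    vx_size w_dims[4]   = {3, 3, 3, 16};       /* [kernel_x, kernel_y, IFM, OFM] */
    vx_size b_dims[1]   = {16};                /* shared biases, one per OFM */
    vx_size out_dims[3] = {112, 112, 16};      /* output size selects skip_x = skip_y = 2 */

    vx_tensor in      = vxCreateTensor(context, 3, in_dims,  VX_TYPE_INT16, 8);
    vx_tensor weights = vxCreateTensor(context, 4, w_dims,   VX_TYPE_INT16, 8);
    vx_tensor biases  = vxCreateTensor(context, 1, b_dims,   VX_TYPE_INT16, 8);
    vx_tensor out     = vxCreateTensor(context, 3, out_dims, VX_TYPE_INT16, 8);

    vx_nn_convolution_params_t params;        /* field names assumed from the extension header */
    params.padding_x = 1;
    params.padding_y = 1;
    params.overflow_policy = VX_CONVERT_POLICY_SATURATE;
    params.rounding_policy = VX_ROUND_POLICY_TO_ZERO;
    params.down_scale_size_rounding = VX_NN_DS_SIZE_ROUNDING_FLOOR;
    params.dilation_x = 0;                     /* assumed: 0 inserted zeros, i.e. no dilation */
    params.dilation_y = 0;

    return vxConvolutionLayer(graph, in, weights, biases,
                              &params, sizeof(params), out);
}
```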
VX_API_ENTRY vx_node VX_API_CALL vxDeconvolutionLayer (vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, const vx_nn_deconvolution_params_t *deconvolution_params, vx_size size_of_deconv_params, vx_tensor outputs)
#include <vx_khr_nn.h>
[Graph] Creates a Convolutional Network Deconvolution Layer Node.
Deconvolution denotes a sort of reverse convolution, which, importantly and confusingly, is not actually a proper mathematical deconvolution. Convolutional Network Deconvolution is up-sampling of an image by learned deconvolution coefficients. The operation is similar to convolution but can be implemented by up-sampling the inputs with zero insertions between the inputs, and convolving the deconvolution kernels on the up-sampled result. For fixed-point data types, a fixed-point calculation is performed with round and saturate according to the number of accumulator bits. The number of accumulator bits is implementation defined, and should be at least 16.
round: rounding according to the vx_round_policy_e enumeration.
saturate: saturation according to the vx_convert_policy_e enumeration. The following equation is implemented:
\( outputs[j,k,i] = saturate(round(\sum_{l} \sum_{m,n} (inputs_{upscaled}[j+m,k+n,l] \times weights[m,n,l,i]) + biases[j,k,i])) \)
where \(m,n\) are indexes into the convolution matrices, \(l\) is an index over all the convolutions per input, \(i\) is an index per output, and \(j,k\) are the input/output spatial indexes. Deconvolution is done on the width and height dimensions of the vx_tensor. Therefore, we use the term x for the width dimension and y for the height dimension.
Before the deconvolution is performed, the width and height dimensions are up-scaled by inserting zeros. The relation between input and output is as follows:
\( width_{output} = (width_{input} - 1) \cdot upscale_x - 2 \cdot padding_x + kernel_x + a_x \)
and
\( height_{output} = (height_{input} - 1) \cdot upscale_y - 2 \cdot padding_y + kernel_y + a_y \)
where \(width_{input}\) is the size of the input width dimension, \(height_{input}\) is the size of the input height dimension, \(width_{output}\) is the size of the output width dimension, \(height_{output}\) is the size of the output height dimension, and \(kernel_x\) and \(kernel_y\) are the convolution sizes in width and height. \(a_x\) and \(a_y\) are user-specified quantities used to distinguish between the \(upscale_x\) and \(upscale_y\) different possible output sizes. \(upscale_x\) and \(upscale_y\) are calculated from the relation between input and output. \(a_x\) and \(a_y\) must be positive and smaller than \(upscale_x\) and \(upscale_y\) respectively. Since the padding parameter is applied on the output, the effective input padding is:
\( padding_{input_x} = kernel_x - padding_x - 1 \)
\( padding_{input_y} = kernel_y - padding_y - 1 \)
Therefore the following constraints apply: \(kernel_x >= padding_x - 1\) and \(kernel_y >= padding_y - 1\). Rounding is done according to vx_nn_rounding_type_e. Notice that this node creation function has more parameters than the corresponding kernel. Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here.
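As a worked instance of the width relation above (reading "positive" as non-negative for \(a_x\)): with \(width_{input} = 14\), \(upscale_x = 2\), \(padding_x = 1\) and \(kernel_x = 4\), \( width_{output} = (14 - 1) \cdot 2 - 2 + 4 + a_x = 28 + a_x \), so the choice of \(a_x\) selects among the \(upscale_x\) possible output widths (28 or 29 here).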
[in] | graph | The handle to the graph. |
[in] | inputs | The input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Dimension layout is [width, height, IFM, batches]. See vxCreateTensor and vxCreateVirtualTensor . Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. (Kernel parameter #0) |
[in] | weights | [static] The 4d weights with dimensions [width, height, IFM, OFM]. See vxCreateTensor and vxCreateVirtualTensor . (Kernel parameter #1) |
[in] | biases | [static] Optional, ignored if NULL. The biases have one dimension [OFM]. Implementations must support input tensor data type same as the inputs. (Kernel parameter #2) |
[in] | deconvolution_params | [static] Pointer to parameters of type vx_nn_deconvolution_params_t (Kernel parameter #3) |
[in] | size_of_deconv_params | [static] Size in bytes of deconvolution_params. Note that this parameter is not counted as one of the kernel parameters. |
[out] | outputs | The output tensor. The output has the same number of dimensions as the input. (Kernel parameter #4) |
Returns: A node reference vx_node. Any possible errors preventing a successful creation should be checked using vxGetStatus.
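A sketch of a 2x up-sampling deconvolution node, continuing the worked example above. The vx_nn_deconvolution_params_t field names follow the Khronos NN extension header and are not documented on this page; they, and all shapes and data types, are assumptions.

```c
#include <VX/vx.h>
#include <VX/vx_khr_nn.h>

/* Sketch: 4x4 deconvolution kernels, 16 IFMs -> 16 OFMs, doubling width and height. */
vx_node add_deconv(vx_context context, vx_graph graph)
{
    vx_size in_dims[3]  = {14, 14, 16};        /* [width, height, IFM] */
    vx_size w_dims[4]   = {4, 4, 16, 16};      /* [kernel_x, kernel_y, IFM, OFM] */
    vx_size b_dims[1]   = {16};                /* [OFM] */
    vx_size out_dims[3] = {28, 28, 16};        /* (14-1)*2 - 2*1 + 4 + 0 = 28 */

    vx_tensor in      = vxCreateTensor(context, 3, in_dims,  VX_TYPE_INT16, 8);
    vx_tensor weights = vxCreateTensor(context, 4, w_dims,   VX_TYPE_INT16, 8);
    vx_tensor biases  = vxCreateTensor(context, 1, b_dims,   VX_TYPE_INT16, 8);
    vx_tensor out     = vxCreateTensor(context, 3, out_dims, VX_TYPE_INT16, 8);

    vx_nn_deconvolution_params_t params;       /* field names assumed from the extension header */
    params.padding_x = 1;
    params.padding_y = 1;
    params.overflow_policy = VX_CONVERT_POLICY_SATURATE;
    params.rounding_policy = VX_ROUND_POLICY_TO_ZERO;
    params.a_x = 0;                            /* selects the smaller possible output width (28) */
    params.a_y = 0;

    return vxDeconvolutionLayer(graph, in, weights, biases,
                                &params, sizeof(params), out);
}
```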
VX_API_ENTRY vx_node VX_API_CALL vxFullyConnectedLayer (vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, vx_enum overflow_policy, vx_enum rounding_policy, vx_tensor outputs)
#include <vx_khr_nn.h>
[Graph] Creates a Fully connected Convolutional Network Layer Node.
This function implements Fully connected Convolutional Network layers. For fixed-point data types, a fixed-point calculation is performed with round and saturate according to the number of accumulator bits. The number of accumulator bits is implementation defined, and should be at least 16.
round: rounding according to the vx_round_policy_e enumeration.
saturate: saturation according to the vx_convert_policy_e enumeration. The equation for the Fully connected layer is:
\( outputs[i] = saturate(round(\sum_{j} (inputs[j] \times weights[j,i]) + biases[i])) \)
where \(j\) is an index over the input features and \(i\) is an index over the outputs.
[in] | graph | The handle to the graph. |
[in] | inputs | The input tensor data. There are two possible input layouts. |
[in] | weights | [static] Number of dimensions is 2. Dimensions are [IFM, OFM]. See vxCreateTensor and vxCreateVirtualTensor. The weights data type must be the same as the inputs. |
[in] | biases | [static] Optional, ignored if NULL. The biases have one dimension [OFM]. Implementations must support input tensor data type same as the inputs. |
[in] | overflow_policy | [static] A VX_TYPE_ENUM of the vx_convert_policy_e enumeration. |
[in] | rounding_policy | [static] A VX_TYPE_ENUM of the vx_round_policy_e enumeration. |
[out] | outputs | The output tensor data. Output dimension layout is [OFM,batches]. See vxCreateTensor and vxCreateVirtualTensor , where batches may be multidimensional. Output tensor data type must be same as the inputs. |
Returns: A node reference vx_node. Any possible errors preventing a successful creation should be checked using vxGetStatus.
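A minimal sketch of a fully connected node mapping 256 input features to 10 outputs. Because the input-layout list above is incomplete on this page, the single-sample 1-D [IFM] input and [OFM] output used here are assumptions, as are the shapes, data type and policies.

```c
#include <VX/vx.h>
#include <VX/vx_khr_nn.h>

/* Sketch: fully connected layer, 256 input features -> 10 outputs, no batch dimension. */
vx_node add_fc(vx_context context, vx_graph graph)
{
    vx_size in_dims[1]  = {256};               /* assumed single-sample [IFM] layout */
    vx_size w_dims[2]   = {256, 10};           /* [IFM, OFM] */
    vx_size b_dims[1]   = {10};                /* [OFM] */
    vx_size out_dims[1] = {10};                /* [OFM]; no batch in this sketch */

    vx_tensor in      = vxCreateTensor(context, 1, in_dims,  VX_TYPE_INT16, 8);
    vx_tensor weights = vxCreateTensor(context, 2, w_dims,   VX_TYPE_INT16, 8);
    vx_tensor biases  = vxCreateTensor(context, 1, b_dims,   VX_TYPE_INT16, 8);
    vx_tensor out     = vxCreateTensor(context, 1, out_dims, VX_TYPE_INT16, 8);

    return vxFullyConnectedLayer(graph, in, weights, biases,
                                 VX_CONVERT_POLICY_SATURATE,
                                 VX_ROUND_POLICY_TO_ZERO, out);
}
```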
VX_API_ENTRY vx_node VX_API_CALL vxLocalResponseNormalizationLayer (vx_graph graph, vx_tensor inputs, vx_enum type, vx_size normalization_size, vx_float32 alpha, vx_float32 beta, vx_float32 bias, vx_tensor outputs)
#include <vx_khr_nn.h>
[Graph] Creates a Convolutional Network Local Response Normalization Layer Node. This function is optional for 8-bit extension with the extension string 'KHR_NN_8'.
Normalizing over local input regions. Each input value is divided by \( (bias + \frac{\alpha}{n}\sum_i x_i^2)^\beta \), where \(n\) is the number of elements to normalize across, and the sum is taken over a rectangular region centred at that value (zero padding is added where necessary).
[in] | graph | The handle to the graph. |
[in] | inputs | The input tensor data. 3 lower dimensions represent a single input, a 4th dimension for a batch of inputs is optional. Dimension layout is [width, height, IFM, batches]. See vxCreateTensor and vxCreateVirtualTensor. Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8 KHR_NN_16'. Since this function is optional for 'KHR_NN_8', implementations are only required to support VX_TYPE_INT16 with fixed_point_position 8. |
[in] | type | [static] Either same map or across maps (see vx_nn_norm_type_e ). |
[in] | normalization_size | [static] Number of elements to normalize across. Must be a positive odd number with maximum size of 7 and minimum of 3. |
[in] | alpha | [static] Alpha parameter in the local response normalization equation. Must be positive. |
[in] | beta | [static] Beta parameter in the local response normalization equation. Must be positive. |
[in] | bias | [static] Bias parameter in the local response normalization equation. Must be positive. |
[out] | outputs | The output tensor data. Output will have the same number of dimensions as input. |
Returns: A node reference vx_node. Any possible errors preventing a successful creation should be checked using vxGetStatus.
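A minimal sketch of a local response normalization node. The AlexNet-style constants, the tensor shape and the Q7.8 data type are illustrative assumptions.

```c
#include <VX/vx.h>
#include <VX/vx_khr_nn.h>

/* Sketch: normalization across 5 neighbouring feature maps of a [56, 56, 64] tensor. */
vx_node add_lrn(vx_context context, vx_graph graph)
{
    vx_size dims[3] = {56, 56, 64};            /* [width, height, IFM] */
    vx_tensor in  = vxCreateTensor(context, 3, dims, VX_TYPE_INT16, 8);
    vx_tensor out = vxCreateTensor(context, 3, dims, VX_TYPE_INT16, 8);

    return vxLocalResponseNormalizationLayer(graph, in,
                                             VX_NN_NORMALIZATION_ACROSS_MAPS,
                                             5,        /* normalization_size: odd, within [3, 7] */
                                             1e-4f,    /* alpha */
                                             0.75f,    /* beta */
                                             1.0f,     /* bias */
                                             out);
}
```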
VX_API_ENTRY vx_node VX_API_CALL vxPoolingLayer (vx_graph graph, vx_tensor inputs, vx_enum pooling_type, vx_size pooling_size_x, vx_size pooling_size_y, vx_size pooling_padding_x, vx_size pooling_padding_y, vx_enum rounding, vx_tensor outputs)
#include <vx_khr_nn.h>
[Graph] Creates a Convolutional Network Pooling Layer Node.
Pooling is done on the width and height dimensions of the vx_tensor. Therefore, we use the term x for the width dimension and y for the height dimension.
The pooling operation applies a function over a rectangular window and is followed by a nearest-neighbour down scale. Here pooling_size_x and pooling_size_y specify the size of the rectangle on which the operation is performed.
Before the operation (average or maximum value) is performed, the data is padded with zeros in the width and height dimensions. The down scale is done by picking the results according to a skip jump. The skip in the x and y dimensions is determined by the output size dimensions. The first pixel of the down-scaled output is the first pixel in the input.
[in] | graph | The handle to the graph. |
[in] | inputs | The input tensor data. 3 lower dimensions represent a single input, a 4th dimension for a batch of inputs is optional. Dimension layout is [width, height, IFM, batches]. See vxCreateTensor and vxCreateVirtualTensor. Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. |
[in] | pooling_type | [static] Either max pooling or average pooling (see vx_nn_pooling_type_e ). |
[in] | pooling_size_x | [static] Size of the pooling region in the x dimension. |
[in] | pooling_size_y | [static] Size of the pooling region in the y dimension. |
[in] | pooling_padding_x | [static] Padding size in the x dimension. |
[in] | pooling_padding_y | [static] Padding size in the y dimension. |
[in] | rounding | [static] Rounding method for calculating output dimensions. See vx_nn_rounding_type_e. |
[out] | outputs | The output tensor data. Output will have the same number of dimensions as input. Output tensor data type must be same as the inputs. |
Returns: A node reference vx_node. Any possible errors preventing a successful creation should be checked using vxGetStatus.
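A minimal sketch of a 2x2 max-pooling node; the 112-to-56 output size implies a skip of 2 in each dimension. Shapes and data type are illustrative assumptions.

```c
#include <VX/vx.h>
#include <VX/vx_khr_nn.h>

/* Sketch: 2x2 max pooling over a [112, 112, 16] tensor, no padding. */
vx_node add_pool(vx_context context, vx_graph graph)
{
    vx_size in_dims[3]  = {112, 112, 16};
    vx_size out_dims[3] = {56, 56, 16};        /* (112 - 2)/2 + 1 = 56 */

    vx_tensor in  = vxCreateTensor(context, 3, in_dims,  VX_TYPE_INT16, 8);
    vx_tensor out = vxCreateTensor(context, 3, out_dims, VX_TYPE_INT16, 8);

    return vxPoolingLayer(graph, in, VX_NN_POOLING_MAX,
                          2, 2,      /* pooling_size_x, pooling_size_y */
                          0, 0,      /* pooling_padding_x, pooling_padding_y */
                          VX_NN_DS_SIZE_ROUNDING_FLOOR, out);
}
```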
VX_API_ENTRY vx_node VX_API_CALL vxROIPoolingLayer (vx_graph graph, vx_tensor input_data, vx_tensor input_rois, const vx_nn_roi_pool_params_t *roi_pool_params, vx_size size_of_roi_params, vx_tensor output_arr)
#include <vx_khr_nn.h>
[Graph] Creates a Convolutional Network ROI pooling node.
Pooling is done on the width and height dimensions of the vx_tensor. ROI pooling takes an array of ROI rectangles and an input tensor. The kernel crops the width and height dimensions of the input tensor with the ROI rectangles and down-scales the result to the size of the output tensor. The output tensor width and height are the pooled width and pooled height. The down-scale method is determined by the pool_type. Notice that this node creation function has more parameters than the corresponding kernel. Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here.
[in] | graph | The handle to the graph. |
[in] | input_data | The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional. Dimension layout is [width, height, IFM, batches]. See vxCreateTensor and vxCreateVirtualTensor . Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. (Kernel parameter #0) |
[in] | input_rois | The roi array tensor. ROI array with dimensions [4, roi_count, batches] where the first dimension represents 4 coordinates of the top left and bottom right corners of the roi rectangles, based on the input tensor width and height. batches is optional and must be the same as in inputs. roi_count is the number of ROI rectangles. (Kernel parameter #1) |
[in] | roi_pool_params | [static] Pointer to parameters of type vx_nn_roi_pool_params_t; its pooling type is of type vx_nn_pooling_type_e. Only VX_NN_POOLING_MAX pooling is supported. (Kernel parameter #2) |
[in] | size_of_roi_params | [static] Size in bytes of roi_pool_params. Note that this parameter is not counted as one of the kernel parameters. |
[out] | output_arr | The output tensor. Output will have [output_width, output_height, IFM, batches] dimensions. batches is optional and must be the same as in inputs. (Kernel parameter #3) |
Returns: A node reference vx_node. Any possible errors preventing a successful creation should be checked using vxGetStatus.
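A minimal sketch of an ROI pooling node. The pool_type field name of vx_nn_roi_pool_params_t follows the Khronos NN extension header and is not documented on this page, so treat it, the ROI coordinate data type, the output layout and all shapes as assumptions.

```c
#include <VX/vx.h>
#include <VX/vx_khr_nn.h>

/* Sketch: max-pool 32 ROI rectangles from a [56, 56, 64] feature map into 7x7 windows. */
vx_node add_roi_pool(vx_context context, vx_graph graph)
{
    vx_size feat_dims[3] = {56, 56, 64};       /* [width, height, IFM], no batch dimension */
    vx_size roi_dims[2]  = {4, 32};            /* [coordinates, roi_count] */
    vx_size out_dims[4]  = {7, 7, 64, 32};     /* assumed: one pooled [7, 7, 64] map per ROI */

    vx_tensor feats = vxCreateTensor(context, 3, feat_dims, VX_TYPE_INT16, 8);
    vx_tensor rois  = vxCreateTensor(context, 2, roi_dims,  VX_TYPE_INT16, 8);
    vx_tensor out   = vxCreateTensor(context, 4, out_dims,  VX_TYPE_INT16, 8);

    vx_nn_roi_pool_params_t params;            /* field name assumed from the extension header */
    params.pool_type = VX_NN_POOLING_MAX;      /* only max pooling is supported (see above) */

    return vxROIPoolingLayer(graph, feats, rois, &params, sizeof(params), out);
}
```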
VX_API_ENTRY vx_node VX_API_CALL vxSoftmaxLayer (vx_graph graph, vx_tensor inputs, vx_tensor outputs)
#include <vx_khr_nn.h>
[Graph] Creates a Convolutional Network Softmax Layer Node.
The softmax function is a generalization of the logistic function that "squashes" a K-dimensional vector \( z \) of arbitrary real values into a K-dimensional vector \( \sigma(z) \) of real values in the range (0, 1) that add up to 1. The function is given by: \( \sigma(z)_j = \frac{e^{z_j}}{\sum_i e^{z_i}} \)
[in] | graph | The handle to the graph. |
[in] | inputs | The input tensor, with the number of dimensions interpreted as follows: if the IFM dimension is 1, Softmax is calculated on that dimension; if it is 2, Softmax is calculated on the first dimension and the second dimension is batching; if it is 3, dimensions are [Width, Height, Classes] and Softmax is calculated on the third dimension; if it is 4, dimensions are [Width, Height, Classes, batching] and Softmax is calculated on the third dimension. Regarding the layout specification, see vxCreateTensor and vxCreateVirtualTensor. In all cases implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. |
[out] | outputs | The output tensor. Output will have the same number of dimensions as input. Output tensor data type must be same as the inputs. |
Returns: A node reference vx_node. Any possible errors preventing a successful creation should be checked using vxGetStatus.
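A minimal sketch of a softmax node over a 10-class score vector (the single-dimension case described above). Shape and data type are illustrative assumptions.

```c
#include <VX/vx.h>
#include <VX/vx_khr_nn.h>

/* Sketch: softmax over a 1-D tensor of 10 class scores. */
vx_node add_softmax(vx_context context, vx_graph graph)
{
    vx_size dims[1] = {10};
    vx_tensor in  = vxCreateTensor(context, 1, dims, VX_TYPE_INT16, 8);
    vx_tensor out = vxCreateTensor(context, 1, dims, VX_TYPE_INT16, 8);

    vx_node node = vxSoftmaxLayer(graph, in, out);
    if (vxGetStatus((vx_reference)node) != VX_SUCCESS) {
        /* node creation failed; inspect the status and release resources */
    }
    return node;
}
```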