1 /*
2  * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Licensed under the Apache License, Version 2.0 (the License); you may
7  * not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 
19 /* ----------------------------------------------------------------------
20  * Project:      CMSIS NN Library
21  * Title:        arm_nn_mat_mult_kernel_q7_q15_reordered.c
22  * Description:  Matrix-multiplication function for convolution with reordered columns
23  *
24  * $Date:        17. January 2018
25  * $Revision:    V.1.0.0
26  *
27  * Target Processor:  Cortex-M cores
28  * -------------------------------------------------------------------- */
29 
30 #include "arm_nnfunctions.h"
31 #include "arm_math.h"
32 
33   /**
34    * @brief Matrix-multiplication function for convolution with reordered columns
35    * @param[in]       pA          pointer to operand A
 * @param[in]       pInBuffer   pointer to operand B, always consists of 2 vectors
37    * @param[in]       ch_im_out   numRow of A
38    * @param[in]       numCol_A    numCol of A
39    * @param[in]       bias_shift  amount of left-shift for bias
40    * @param[in]       out_shift   amount of right-shift for output
41    * @param[in]       bias        the bias
42    * @param[in,out]   pOut        pointer to output
43    * @return     The function returns the incremented output pointer
44    *
45    * @details
46    *
47    * This function assumes that data in pInBuffer are reordered
48    */
49 
q7_t     *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t * pA,
                                                  const q15_t * pInBuffer,
                                                  const uint16_t ch_im_out,
                                                  const uint16_t numCol_A,
                                                  const uint16_t bias_shift,
                                                  const uint16_t out_shift,
                                                  const q7_t * bias,
                                                  q7_t * pOut)
{

#if defined (ARM_MATH_DSP)
    /* pInBuffer holds two input vectors back-to-back (each numCol_A q15
     * elements); the results for the second vector go one full output
     * row (ch_im_out elements) after the first. */
    q7_t     *pOut2 = pOut + ch_im_out;
    int       i;

    /* Loop over rows of A, two rows per iteration (2x2 output block:
     * 2 rows of A x 2 input vectors).
     * NOTE(review): this assumes ch_im_out is even -- an odd value would
     * read bias[i + 1] and one weight row past the end; confirm callers
     * guarantee this. */
    for (i = 0; i < ch_im_out; i += 2)
    {
        /* setup pointers for B: first and second input vector */
        const q15_t *pB = pInBuffer;
        const q15_t *pB2 = pB + numCol_A;

        /* second row of A, processed in the same pass */
        const q7_t *pA2 = pA + numCol_A;

        /* init the accumulators with the (shifted) bias plus the
         * rounding constant for the final right-shift */
        q31_t     sum =  ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift);
        q31_t     sum2 = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift);
        q31_t     sum3 = ((q31_t)(bias[i + 1]) << bias_shift) + NN_ROUND(out_shift);
        q31_t     sum4 = ((q31_t)(bias[i + 1]) << bias_shift) + NN_ROUND(out_shift);

        /* main loop: 4 columns per iteration using SIMD MACs */
        uint16_t  colCnt = numCol_A >> 2;
        /* accumulate over the vector */
        while (colCnt)
        {
            q31_t     inA11, inA12, inA21, inA22;
            /* two q15 values of each input vector per 32-bit load */
            q31_t     inB1 = *__SIMD32(pB)++;
            q31_t     inB2 = *__SIMD32(pB2)++;

            /* expand 4 q7 weights of each A row into 2x2 packed q15;
             * the "reordered" helper matches the reordered pInBuffer
             * layout, so no unpacking shuffle is needed here */
            pA = (q7_t *) read_and_pad_reordered((void *)pA, &inA11, &inA12);
            pA2 = (q7_t *) read_and_pad_reordered((void *)pA2, &inA21, &inA22);

            /* dual 16-bit multiply-accumulate: 2 MACs per __SMLAD */
            sum = __SMLAD(inA11, inB1, sum);
            sum2 = __SMLAD(inA11, inB2, sum2);
            sum3 = __SMLAD(inA21, inB1, sum3);
            sum4 = __SMLAD(inA21, inB2, sum4);

            inB1 = *__SIMD32(pB)++;
            inB2 = *__SIMD32(pB2)++;

            sum = __SMLAD(inA12, inB1, sum);
            sum2 = __SMLAD(inA12, inB2, sum2);
            sum3 = __SMLAD(inA22, inB1, sum3);
            sum4 = __SMLAD(inA22, inB2, sum4);

            colCnt--;
        }                       /* while over colCnt */

        /* tail loop: remaining 0..3 columns, scalar MACs */
        colCnt = numCol_A & 0x3;
        while (colCnt)
        {
            q7_t      inA1 = *pA++;
            q15_t     inB1 = *pB++;
            q7_t      inA2 = *pA2++;
            q15_t     inB2 = *pB2++;

            sum += inA1 * inB1;
            sum2 += inA1 * inB2;
            sum3 += inA2 * inB1;
            sum4 += inA2 * inB2;
            colCnt--;
        }                       /* while over colCnt */

        /* scale back to q7 with saturation; row i then row i+1 for each
         * of the two output columns */
        *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8);
        *pOut++ = (q7_t) __SSAT((sum3 >> out_shift), 8);
        *pOut2++ = (q7_t) __SSAT((sum2 >> out_shift), 8);
        *pOut2++ = (q7_t) __SSAT((sum4 >> out_shift), 8);

        /* pA already advanced through row i; skip row i+1, which was
         * consumed via pA2 */
        pA += numCol_A;
    }                           /* for over ch_im_out */

    /* advance past the second set of outputs (written via pOut2) */
    pOut += ch_im_out;

    /* return the new output pointer with offset */
    return pOut;
#else
    /* To be completed: no plain-C fallback for this kernel yet */
    return NULL;
#endif                          /* ARM_MATH_DSP */
}
139