Lines matching dr in xnn_cs16_fftr_ukernel__scalar_x3():

  // Setup: dr starts at the right end of the interleaved (re, im) buffer.
  int16_t* dr = data + samples * 2;

  // Right-end (Nyquist) bin: purely real output, imaginary part forced to zero.
  dr[0] = vdcr - vdci;
  dr[1] = 0;

    // Main x3 loop: step dr back by three (re, im) pairs per iteration.
    dr -= 3 * 2;

    // Load three complex values from the right side in reverse order,
    // negating the imaginary parts (complex conjugate).
    int32_t virr0 = (int32_t) dr[4];
    int32_t viri0 = -(int32_t) dr[5];
    int32_t virr1 = (int32_t) dr[2];
    int32_t viri1 = -(int32_t) dr[3];
    int32_t virr2 = (int32_t) dr[0];
    int32_t viri2 = -(int32_t) dr[1];

    // Store the three right-side results in the same reverse order, halved by
    // an arithmetic shift right of 1; imaginary outputs take the opposite sign.
    dr[4] = math_asr_s32(vacc1r0 - twr0, 1);
    dr[5] = math_asr_s32(twi0 - vacc1i0, 1);
    dr[2] = math_asr_s32(vacc1r1 - twr1, 1);
    dr[3] = math_asr_s32(twi1 - vacc1i1, 1);
    dr[0] = math_asr_s32(vacc1r2 - twr2, 1);
    dr[1] = math_asr_s32(twi2 - vacc1i2, 1);

    // Tail loop: step dr back by one (re, im) pair per iteration.
    dr -= 2;

    // Load one complex value from the right side, imaginary part negated.
    int32_t virr = (int32_t) dr[0];
    int32_t viri = -(int32_t) dr[1];

    // Store the single right-side result, halved by an arithmetic shift right.
    dr[0] = math_asr_s32(vacc1r - twr, 1);
    dr[1] = math_asr_s32(twi - vacc1i, 1);
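
Taken together, the matches show dr's role in the kernel: it starts at the right end of the interleaved (re, im) buffer and walks backward (presumably mirroring a forward pointer over the left half, which is not among the matched lines), with the x3 loop reading and rewriting three complex values per step in reverse order and the tail loop handling one pair at a time. The standalone sketch below only illustrates that addressing pattern on dummy data; it is not XNNPACK code, and the buffer size and trip count are assumptions made for the example.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone sketch, not XNNPACK code: it reproduces only the dr
 * addressing pattern from the matched lines. dr begins at
 * data + samples * 2, the x3 loop steps it back by three (re, im)
 * pairs and reads them in reverse order through dr[4..5], dr[2..3],
 * dr[0..1], and the tail loop steps back one pair at a time.
 * The FFT arithmetic itself is omitted. */
int main(void) {
  const size_t samples = 8;           /* illustrative half-spectrum size */
  int16_t data[18] = {0};             /* 2 * samples + 2 int16 values: interleaved re/im plus the extra right-end bin */

  int16_t* dr = data + samples * 2;   /* matched line: right end of the buffer */
  size_t s = samples - 1;             /* assumed trip count: one mirrored bin per step */

  while (s >= 3) {                    /* x3 main loop */
    dr -= 3 * 2;
    printf("x3: dr[4..5] -> data[%td..%td], dr[2..3] -> data[%td..%td], dr[0..1] -> data[%td..%td]\n",
           dr - data + 4, dr - data + 5,
           dr - data + 2, dr - data + 3,
           dr - data + 0, dr - data + 1);
    s -= 3;
  }
  while (s != 0) {                    /* scalar tail loop */
    dr -= 2;
    printf("x1: dr[0..1] -> data[%td..%td]\n", dr - data, dr - data + 1);
    s -= 1;
  }
  return 0;
}

Compiled as-is, the sketch prints which data[] indices each dr offset touches per iteration, making the reverse-order, right-to-left traversal visible.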