@ Tremolo library
@-----------------------------------------------------------------------
@ Copyright (C) 2002-2009, Xiph.org Foundation
@ Copyright (C) 2010, Robin Watts for Pinknoise Productions Ltd
@ All rights reserved.

@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions
@ are met:

@     * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@     * Redistributions in binary form must reproduce the above
@ copyright notice, this list of conditions and the following disclaimer
@ in the documentation and/or other materials provided with the
@ distribution.
@     * Neither the names of the Xiph.org Foundation nor Pinknoise
@ Productions Ltd nor the names of its contributors may be used to
@ endorse or promote products derived from this software without
@ specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
@ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
@ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
@ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ ----------------------------------------------------------------------

	.text

	@ low accuracy version

	.global mdct_backwardARM
	.global mdct_shift_right
	.global mdct_unroll_prelap
	.global mdct_unroll_part2
	.global mdct_unroll_part3
	.global mdct_unroll_postlap

	.type mdct_backwardARM, %function
	.type mdct_shift_right, %function
	.type mdct_unroll_prelap, %function
	.type mdct_unroll_part2, %function
	.type mdct_unroll_part3, %function
	.type mdct_unroll_postlap, %function

	.extern	sincos_lookup0
	.extern	sincos_lookup1

mdct_unroll_prelap:
	@ r0 = out
	@ r1 = post
	@ r2 = r
	@ r3 = step
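	@ Each sample is shifted right and clamped to 16 bits before the
	@ halfword store: r4 = 0xFFFF7FFF, and the ASR #15 / TEQ / EORNE
	@ sequence replaces any out-of-range value with 0x7FFF (positive
	@ overflow) or 0x8000 (negative overflow). The same idiom is used
	@ throughout this file.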
	STMFD	r13!,{r4-r7,r14}
	MVN	r4, #0x8000
	MOV	r3, r3, LSL #1
	SUB	r1, r2, r1		@ r1 = r - post
	SUBS	r1, r1, #16		@ r1 = r - post - 16
	BLT	unroll_over
unroll_loop:
	LDMDB	r2!,{r5,r6,r7,r12}

	MOV	r5, r5, ASR #9		@ r5 = (*--r)>>9
	MOV	r6, r6, ASR #9		@ r6 = (*--r)>>9
	MOV	r7, r7, ASR #9		@ r7 = (*--r)>>9
	MOV	r12,r12,ASR #9		@ r12= (*--r)>>9

	MOV	r14,r12,ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r12,r4, r14,ASR #31
	STRH	r12,[r0], r3

	MOV	r14,r7, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r7, r4, r14,ASR #31
	STRH	r7, [r0], r3

	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r3

	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3

	SUBS	r1, r1, #16
	BGE	unroll_loop

unroll_over:
	ADDS	r1, r1, #16
	BLE	unroll_end
unroll_loop2:
	LDR	r5,[r2,#-4]!
	@ stall
	@ stall (Xscale)
	MOV	r5, r5, ASR #9		@ r5 = (*--r)>>9
	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3
	SUBS	r1, r1, #4
	BGT	unroll_loop2
unroll_end:
	LDMFD	r13!,{r4-r7,PC}

mdct_unroll_postlap:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = step
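	@ As in mdct_unroll_prelap, but l is read forwards (every other
	@ word) and each value is negated before the >>9 and the clamp.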
	STMFD	r13!,{r4-r7,r14}
	MVN	r4, #0x8000
	MOV	r3, r3, LSL #1
	SUB	r1, r1, r2		@ r1 = post - l
	MOV	r1, r1, ASR #1		@ r1 = (post - l)>>1
	SUBS	r1, r1, #16		@ r1 = ((post - l)>>1) - 16
	BLT	unroll_over3
unroll_loop3:
	LDR	r12,[r2],#8
	LDR	r7, [r2],#8
	LDR	r6, [r2],#8
	LDR	r5, [r2],#8

	RSB	r12,r12,#0
	RSB	r5, r5, #0
	RSB	r6, r6, #0
	RSB	r7, r7, #0

	MOV	r12, r12,ASR #9		@ r12= (-*l)>>9
	MOV	r5,  r5, ASR #9		@ r5 = (-*l)>>9
	MOV	r6,  r6, ASR #9		@ r6 = (-*l)>>9
	MOV	r7,  r7, ASR #9		@ r7 = (-*l)>>9

	MOV	r14,r12,ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r12,r4, r14,ASR #31
	STRH	r12,[r0], r3

	MOV	r14,r7, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r7, r4, r14,ASR #31
	STRH	r7, [r0], r3

	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r3

	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3

	SUBS	r1, r1, #16
	BGE	unroll_loop3

unroll_over3:
	ADDS	r1, r1, #16
	BLE	unroll_over4
unroll_loop4:
	LDR	r5,[r2], #8
	@ stall
	@ stall (Xscale)
	RSB	r5, r5, #0
	MOV	r5, r5, ASR #9		@ r5 = (-*l)>>9
	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3
	SUBS	r1, r1, #4
	BGT	unroll_loop4
unroll_over4:
	LDMFD	r13!,{r4-r7,PC}

mdct_unroll_part2:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = r
	@ <> = step
	@ <> = wL
	@ <> = wR
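	@ Per sample the output is
	@ (((*l>>8) * *wL++) + ((*--r>>8) * *--wR)) >> 9, clamped to
	@ 16 bits, with l stepping back by two words each time.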
	MOV	r12,r13
	STMFD	r13!,{r4,r6-r11,r14}
	LDMFD	r12,{r8,r9,r10}		@ r8 = step
					@ r9 = wL
					@ r10= wR
	MVN	r4, #0x8000
	MOV	r8, r8, LSL #1
	SUBS	r1, r3, r1		@ r1 = (r - post)
	BLE	unroll_over5
unroll_loop5:
	LDR	r12,[r2, #-8]!		@ r12= *l       (but l -= 2 first)
	LDR	r7, [r3, #-4]!		@ r7 = *--r
	LDRB	r6, [r10,#-1]!		@ r6 = *--wR
	LDRB	r11,[r9],#1		@ r11= *wL++

	MOV	r12, r12, ASR #8
	@ Can save a cycle here, at the cost of 1bit errors in rounding
	MUL	r11,r12,r11		@ r11  = *l   * *wL++
	MOV	r7, r7, ASR #8
	MLA	r6, r7, r6, r11		@ r6   = *--r * *--wR + r11
	MOV	r6, r6, ASR #9
	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r8

	SUBS	r1, r1, #4
	BGT	unroll_loop5

unroll_over5:
	LDMFD	r13!,{r4,r6-r11,PC}

mdct_unroll_part3:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = r
	@ <> = step
	@ <> = wL
	@ <> = wR
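	@ Like mdct_unroll_part2, but l and r advance forwards and the
	@ windowed l term is subtracted:
	@ (((*r++>>8) * *--wR) - ((*l>>8) * *wL++)) >> 9, clamped, l += 2.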
	MOV	r12,r13
	STMFD	r13!,{r4,r6-r11,r14}
	LDMFD	r12,{r8,r9,r10}		@ r8 = step
					@ r9 = wL
					@ r10= wR
	MVN	r4, #0x8000
	MOV	r8, r8, LSL #1
	SUBS	r1, r1, r3		@ r1 = (post - r)
	BLE	unroll_over6
unroll_loop6:
	LDR	r12,[r2],#8		@ r12= *l       (and then l += 2)
	LDR	r7, [r3],#4		@ r7 = *r++
	LDRB	r11,[r9],#1		@ r11= *wL++
	LDRB	r6, [r10,#-1]!		@ r6 = *--wR

	@ Can save a cycle here, at the cost of 1bit errors in rounding
	MOV	r12,r12,ASR #8
	MUL	r11,r12,r11		@ r11  = *l   * *wL++
	MOV	r7, r7, ASR #8
	MUL	r6, r7, r6		@ r6   = *r   * *--wR
	SUB	r6, r6, r11
	MOV	r6, r6, ASR #9
	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r8

	SUBS	r1, r1, #4
	BGT	unroll_loop6

unroll_over6:
	LDMFD	r13!,{r4,r6-r11,PC}

mdct_shift_right:
	@ r0 = n
	@ r1 = in
	@ r2 = right
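	@ Copies the n>>2 odd-indexed words in[1], in[3], ... to right[].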
	STMFD	r13!,{r4-r11,r14}

	MOV	r0, r0, LSR #2		@ n >>= 2
	ADD	r1, r1, #4

	SUBS	r0, r0,	#8
	BLT	sr_less_than_8
sr_loop:
	LDR	r3, [r1], #8
	LDR	r4, [r1], #8
	LDR	r5, [r1], #8
	LDR	r6, [r1], #8
	LDR	r7, [r1], #8
	LDR	r8, [r1], #8
	LDR	r12,[r1], #8
	LDR	r14,[r1], #8
	SUBS	r0, r0, #8
	STMIA	r2!,{r3,r4,r5,r6,r7,r8,r12,r14}
	BGE	sr_loop
sr_less_than_8:
	ADDS	r0, r0, #8
	BEQ	sr_end
sr_loop2:
	LDR	r3, [r1], #8
	SUBS	r0, r0, #1
	STR	r3, [r2], #4
	BGT	sr_loop2
sr_end:
	LDMFD	r13!,{r4-r11,PC}

mdct_backwardARM:
	@ r0 = n
	@ r1 = in
	STMFD	r13!,{r4-r11,r14}

	MOV	r2, #1<<4	@ r2 = 1<<shift
	MOV	r3, #13-4	@ r3 = 13-shift
find_shift_loop:
	TST	r0, r2		@ if (n & (1<<shift)) == 0
	MOV	r2, r2, LSL #1
	SUBEQ	r3, r3, #1	@ shift--
	BEQ	find_shift_loop
	MOV	r2, #2
	MOV	r2, r2, LSL r3	@ r2 = step = 2<<shift
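	@ e.g. n=2048 (bit 11 set): r3 is decremented once for each of the
	@ clear bits 4..10, giving shift = 13-11 = 2 and step = 2<<2 = 8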

	@ presymmetry
	@ r0 = n (a multiple of 4)
	@ r1 = in
	@ r2 = step
	@ r3 = shift

	ADD	r4, r1, r0, LSL #1	@ r4 = aX = in+(n>>1)
	ADD	r14,r1, r0		@ r14= in+(n>>2)
	SUB	r4, r4, #3*4		@ r4 = aX = in+n2-3
	LDR	r5, =sincos_lookup0	@ r5 = T=sincos_lookup0

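	@ Low-accuracy XPROD31/XNPROD31: the sincos tables are read as
	@ bytes and the operands are pre-shifted right by 8, so each
	@ product pair needs only 32-bit MUL/MLA instructions.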
presymmetry_loop1:
	LDR	r7, [r4,#8]		@ r7 = s2 = aX[2]
	LDRB	r11,[r5,#1]		@ r11= T[1]
	LDR	r6, [r4],#-16		@ r6 = s0 = aX[0]
	LDRB	r10,[r5],r2		@ r10= T[0]   T += step
	MOV	r6, r6, ASR #8
	MOV	r7, r7, ASR #8

	@ XPROD31(s0, s2, T[0], T[1], &aX[0], &aX[2])
	MUL	r9, r6, r10		@ r9   = s0*T[0]
	RSB	r6, r6, #0
	MLA	r9, r7, r11,r9		@ r9  += s2*T[1]
	CMP	r4, r14
	MUL	r12,r7, r10		@ r12  = s2*T[0]
	STR	r9, [r4,#16]		@ aX[0] = r9
	MLA	r12,r6, r11,r12		@ r12 -= s0*T[1]
	STR	r12,[r4,#8+16]		@ aX[2] = r12

	BGE	presymmetry_loop1	@ while (aX >= in+n4)

presymmetry_loop2:
	LDR	r6, [r4],#-16		@ r6 = s0 = aX[0]
	LDRB	r10,[r5,#1]		@ r10= T[1]
	LDR	r7, [r4,#16+8]		@ r7 = s2 = aX[2]
	LDRB	r11,[r5],-r2		@ r11= T[0]   T -= step
	MOV	r6, r6, ASR #8
	MOV	r7, r7, ASR #8

	@ XPROD31(s0, s2, T[1], T[0], &aX[0], &aX[2])
	MUL	r9, r6, r10		@ r9   = s0*T[1]
	RSB	r6, r6, #0
	MLA	r9, r7, r11,r9		@ r9  += s2*T[0]
	CMP	r4, r1
	MUL	r12,r7, r10		@ r12  = s2*T[1]
	STR	r9, [r4,#16]		@ aX[0] = r9
	MLA	r12,r6, r11,r12		@ r12 -= s0*T[0]
	STR	r12,[r4,#8+16]		@ aX[2] = r12

	BGE	presymmetry_loop2	@ while (aX >= in)

	@ r0 = n
	@ r1 = in
	@ r2 = step
	@ r3 = shift
	STMFD	r13!,{r3}
	LDR	r5, =sincos_lookup0	@ r5 = T=sincos_lookup0
	ADD	r4, r1, r0, LSL #1	@ r4 = aX = in+(n>>1)
	SUB	r4, r4, #4*4		@ r4 = aX = in+(n>>1)-4
	LDRB	r11,[r5,#1]		@ r11= T[1]
	LDRB	r10,[r5],r2		@ r10= T[0]    T += step
presymmetry_loop3:
	LDR	r8, [r1],#16		@ r8 = ro0 = bX[0]
	LDR	r9, [r1,#8-16]		@ r9 = ro2 = bX[2]
	LDR	r6, [r4],#-16		@ r6 = ri0 = aX[0]
	LDR	r7, [r4,#8+16]		@ r7 = ri2 = aX[2]
	MOV	r8, r8, ASR #8
	MOV	r9, r9, ASR #8
	MOV	r6, r6, ASR #8

	@ XNPROD31( ro2, ro0, T[1], T[0], &aX[0], &aX[2] )
	@ aX[0] = (ro2*T[1] - ro0*T[0])>>31 aX[2] = (ro0*T[1] + ro2*T[0])>>31
	MUL	r12,r8, r11		@ r12  = ro0*T[1]
	MOV	r7, r7, ASR #8
	MLA	r12,r9, r10,r12		@ r12 += ro2*T[0]
	RSB	r8, r8, #0		@ r8 = -ro0
	MUL	r3, r9, r11		@ r3   = ro2*T[1]
	LDRB	r11,[r5,#1]		@ r11= T[1]
	MLA	r3, r8, r10,r3		@ r3  -= ro0*T[0]
	LDRB	r10,[r5],r2		@ r10= T[0]    T += step
	STR	r12,[r4,#16+8]
	STR	r3, [r4,#16]

	@ XNPROD31( ri2, ri0, T[0], T[1], &bX[0], &bX[2] )
	@ bX[0] = (ri2*T[0] - ri0*T[1])>>31 bX[2] = (ri0*T[0] + ri2*T[1])>>31
	MUL	r12,r6, r10		@ r12  = ri0*T[0]
	RSB	r6, r6, #0		@ r6 = -ri0
	MLA	r12,r7, r11,r12		@ r12 += ri2*T[1]
	CMP	r4, r1
	MUL	r3, r7, r10		@ r3   = ri2*T[0]
	STR	r12,[r1,#8-16]
	MLA	r3, r6, r11,r3		@ r3  -= ri0*T[1]
	STR	r3, [r1,#-16]

	BGE	presymmetry_loop3

	SUB	r1,r1,r0		@ r1 = in -= n>>2 (i.e. restore in)

	LDR	r3,[r13]
	STR	r2,[r13,#-4]!

	@ mdct_butterflies
	@ r0 = n  = (points * 2)
	@ r1 = in = x
	@ r2 = i
	@ r3 = shift
	STMFD	r13!,{r0-r1}
	RSBS	r4,r3,#6		@ r4 = stages = 7-shift then --stages
	LDR	r5,=sincos_lookup0
	BLE	no_generics
	MOV	r14,#4			@ r14= 4               (i=0)
	MOV	r6, r14,LSL r3		@ r6 = (4<<i)<<shift
mdct_butterflies_loop1:
	MOV	r0, r0, LSR #1		@ r0 = points>>i = POINTS
	MOV	r2, r14,LSR #2		@ r2 = (1<<i)-j        (j=0)
	STMFD	r13!,{r4,r14}
mdct_butterflies_loop2:

	@ mdct_butterfly_generic(x+POINTS*j, POINTS, 4<<(i+shift))
	@ mdct_butterfly_generic(r1, r0, r6)
	@ r0 = points
	@ r1 = x
	@ preserve r2 (external loop counter)
	@ preserve r3
	@ preserve r4 (external loop counter)
	@ r5 = T = sincos_lookup0
	@ r6 = step
	@ preserve r14

	STR	r2,[r13,#-4]!		@ stack r2
	ADD	r1,r1,r0,LSL #1		@ r1 = x2+4 = x + (POINTS>>1)
	ADD	r7,r1,r0,LSL #1		@ r7 = x1+4 = x + POINTS
	ADD	r12,r5,#1024		@ r12= sincos_lookup0+1024

mdct_bufferfly_generic_loop1:
	LDMDB	r7!,{r2,r3,r8,r11}	@ r2 = x1[0]
					@ r3 = x1[1]
					@ r8 = x1[2]
					@ r11= x1[3]    x1 -= 4
	LDMDB	r1!,{r4,r9,r10,r14}	@ r4 = x2[0]
					@ r9 = x2[1]
					@ r10= x2[2]
					@ r14= x2[3]    x2 -= 4

	SUB	r2, r2, r3		@ r2 = s0 = x1[0] - x1[1]
	ADD	r3, r2, r3, LSL #1	@ r3 =      x1[0] + x1[1] (-> x1[0])
	SUB	r11,r11,r8		@ r11= s1 = x1[3] - x1[2]
	ADD	r8, r11,r8, LSL #1	@ r8 =      x1[3] + x1[2] (-> x1[2])
	SUB	r9, r9, r4		@ r9 = s2 = x2[1] - x2[0]
	ADD	r4, r9, r4, LSL #1	@ r4 =      x2[1] + x2[0] (-> x1[1])
	SUB	r14,r14,r10		@ r14= s3 = x2[3] - x2[2]
	ADD	r10,r14,r10,LSL #1	@ r10=      x2[3] + x2[2] (-> x1[3])
	STMIA	r7,{r3,r4,r8,r10}

	@ r0 = points
	@ r1 = x2
	@ r2 = s0
	@ r3 free
	@ r4 free
	@ r5 = T
	@ r6 = step
	@ r7 = x1
	@ r8 free
	@ r9 = s2
	@ r10 free
	@ r11= s1
	@ r12= limit
	@ r14= s3

	LDRB	r8, [r5,#1]		@ r8 = T[1]
	LDRB	r10,[r5],r6		@ r10= T[0]		T += step
	MOV	r2, r2, ASR #8
	MOV	r11,r11,ASR #8
	MOV	r9, r9, ASR #8
	MOV	r14,r14,ASR #8

	@ XPROD31(s1, s0, T[0], T[1], &x2[0], &x2[2])
	@ x2[0] = (s1*T[0] + s0*T[1])>>31     x2[2] = (s0*T[0] - s1*T[1])>>31
	@ stall Xscale
	MUL	r3, r2, r8		@ r3   = s0*T[1]
	MLA	r3, r11,r10,r3		@ r3  += s1*T[0]
	RSB	r11,r11,#0
	MUL	r4, r8, r11		@ r4   = -s1*T[1]
	MLA	r4, r2, r10,r4		@ r4  += s0*T[0] = Value for x2[2]
	MOV	r2, r3			@ r2 = r3 = Value for x2[0]

	@ XPROD31(s2, s3, T[0], T[1], &x2[1], &x2[3])
	@ x2[1] = (s2*T[0] + s3*T[1])>>31     x2[3] = (s3*T[0] - s2*T[1])>>31
	MUL	r3, r9, r10		@ r3   = s2*T[0]
	MLA	r3, r14,r8, r3		@ r3  += s3*T[1] = Value for x2[1]
	RSB	r9, r9, #0
	MUL	r11,r14,r10		@ r11  = s3*T[0]
	MLA	r11,r9, r8, r11		@ r11 -= s2*T[1] = Value for x2[3]
	CMP	r5, r12

	STMIA	r1,{r2,r3,r4,r11}

	BLT	mdct_bufferfly_generic_loop1

	SUB	r12,r12,#1024
mdct_bufferfly_generic_loop2:
	LDMDB	r7!,{r2,r3,r9,r10}	@ r2 = x1[0]
					@ r3 = x1[1]
					@ r9 = x1[2]
					@ r10= x1[3]    x1 -= 4
	LDMDB	r1!,{r4,r8,r11,r14}	@ r4 = x2[0]
					@ r8 = x2[1]
					@ r11= x2[2]
					@ r14= x2[3]    x2 -= 4

	SUB	r2, r2, r3		@ r2 = s0 = x1[0] - x1[1]
	ADD	r3, r2, r3, LSL #1	@ r3 =      x1[0] + x1[1] (-> x1[0])
	SUB	r9, r9,r10		@ r9 = s1 = x1[2] - x1[3]
	ADD	r10,r9,r10, LSL #1	@ r10=      x1[2] + x1[3] (-> x1[2])
	SUB	r4, r4, r8		@ r4 = s2 = x2[0] - x2[1]
	ADD	r8, r4, r8, LSL #1	@ r8 =      x2[0] + x2[1] (-> x1[1])
	SUB	r14,r14,r11		@ r14= s3 = x2[3] - x2[2]
	ADD	r11,r14,r11,LSL #1	@ r11=      x2[3] + x2[2] (-> x1[3])
	STMIA	r7,{r3,r8,r10,r11}

	@ r0 = points
	@ r1 = x2
	@ r2 = s0
	@ r3 free
	@ r4 = s2
	@ r5 = T
	@ r6 = step
	@ r7 = x1
	@ r8 free
	@ r9 = s1
	@ r10 free
	@ r11 free
	@ r12= limit
	@ r14= s3

	LDRB	r8, [r5,#1]		@ r8 = T[1]
	LDRB	r10,[r5],-r6		@ r10= T[0]		T -= step
	MOV	r2, r2, ASR #8
	MOV	r9, r9, ASR #8
	MOV	r4, r4, ASR #8
	MOV	r14,r14,ASR #8

	@ XNPROD31(s0, s1, T[0], T[1], &x2[0], &x2[2])
	@ x2[0] = (s0*T[0] - s1*T[1])>>31     x2[2] = (s1*T[0] + s0*T[1])>>31
	@ stall Xscale
	MUL	r11,r2, r8		@ r11  = s0*T[1]
	MLA	r11,r9, r10,r11		@ r11 += s1*T[0]
	RSB	r9, r9, #0
	MUL	r2, r10,r2		@ r2   = s0*T[0]
	MLA	r2, r9, r8, r2		@ r2  += -s1*T[1] = Value for x2[0]
	MOV	r9, r11			@ r9 = r11 = Value for x2[2]

	@ XNPROD31(s3, s2, T[0], T[1], &x2[1], &x2[3])
	@ x2[1] = (s3*T[0] - s2*T[1])>>31     x2[3] = (s2*T[0] + s3*T[1])>>31
	MUL	r11,r4, r10		@ r11   = s2*T[0]
	MLA	r11,r14,r8, r11		@ r11  += s3*T[1] = Value for x2[3]
	RSB	r4, r4, #0
	MUL	r3, r14,r10		@ r3   = s3*T[0]
	MLA	r3, r4, r8, r3		@ r3  -= s2*T[1] = Value for x2[1]
	CMP	r5, r12

	STMIA	r1,{r2,r3,r9,r11}

	BGT	mdct_bufferfly_generic_loop2

	LDR	r2,[r13],#4		@ unstack r2
	ADD	r1, r1, r0, LSL #2	@ r1 = x+POINTS*j
	@ stall Xscale
	SUBS	r2, r2, #1		@ r2--                 (j++)
	BGT	mdct_butterflies_loop2

	LDMFD	r13!,{r4,r14}

	LDR	r1,[r13,#4]

	SUBS	r4, r4, #1		@ stages--
	MOV	r14,r14,LSL #1		@ r14= 4<<i            (i++)
	MOV	r6, r6, LSL #1		@ r6 = step <<= 1      (i++)
	BGE	mdct_butterflies_loop1
	LDMFD	r13,{r0-r1}

no_generics:
	@ mdct_butterflies part2 (loop around mdct_bufferfly_32)
	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift

mdct_bufferflies_loop3:
	@ mdct_bufferfly_32

	@ block1
	ADD	r4, r1, #16*4		@ r4 = &in[16]
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[16]
					@ r6 = x[17]
					@ r9 = x[18]
					@ r10= x[19]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[16] - x[17]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[16] + x[17]  -> x[16]
	SUB	r9, r9, r10		@ r9 = s1 = x[18] - x[19]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[18] + x[19]  -> x[18]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[17]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[19]
	STMIA	r4!,{r6,r7,r10,r11}

	MOV	r6,#0xed		@ r6 =cPI1_8
	MOV	r7,#0x62		@ r7 =cPI3_8

	MOV	r5, r5, ASR #8
	MOV	r9, r9, ASR #8
	MOV	r8, r8, ASR #8
	MOV	r12,r12,ASR #8

	@ XNPROD31( s0, s1, cPI3_8, cPI1_8, &x[ 0], &x[ 2] )
	@ x[0] = s0*cPI3_8 - s1*cPI1_8     x[2] = s1*cPI3_8 + s0*cPI1_8
	@ stall Xscale
	MUL	r11,r5, r6		@ r11  = s0*cPI1_8
	MLA	r11,r9, r7, r11		@ r11 += s1*cPI3_8
	RSB	r9, r9, #0
	MUL	r5, r7, r5		@ r5   = s0*cPI3_8
	MLA	r5, r9, r6, r5		@ r5  -= s1*cPI1_8

	@ XPROD31 ( s2, s3, cPI1_8, cPI3_8, &x[ 1], &x[ 3] )
	@ x[1] = s2*cPI1_8 + s3*cPI3_8     x[3] = s3*cPI1_8 - s2*cPI3_8
	MUL	r9, r8, r6		@ r9   = s2*cPI1_8
	MLA	r9, r12,r7, r9		@ r9  += s3*cPI3_8
	RSB	r8,r8,#0
	MUL	r12,r6, r12		@ r12  = s3*cPI1_8
	MLA	r12,r8, r7, r12		@ r12 -= s2*cPI3_8
	STMIA	r1!,{r5,r9,r11,r12}

	@ block2
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[20]
					@ r6 = x[21]
					@ r9 = x[22]
					@ r10= x[23]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[4]
					@ r8 = x[5]
					@ r11= x[6]
					@ r12= x[7]
	SUB	r5, r5, r6		@ r5 = s0 = x[20] - x[21]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[20] + x[21]  -> x[20]
	SUB	r9, r9, r10		@ r9 = s1 = x[22] - x[23]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[22] + x[23]  -> x[22]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 5] - x[ 4]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 5] + x[ 4]  -> x[21]
	SUB	r12,r12,r11		@ r12= s3 = x[ 7] - x[ 6]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[23]
	MOV	r14,#0xb5		@ cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	MOV	r5, r5, ASR #8
	MUL	r5, r14,r5		@ r5 = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	MOV	r8, r8, ASR #8
	MUL	r8, r14,r8		@ r8  = (s3+s2)*cPI2_8
	MOV	r9, r9, ASR #8
	MUL	r9, r14,r9		@ r9  = (s0+s1)*cPI2_8
	MOV	r12,r12,ASR #8
	MUL	r12,r14,r12		@ r12 = (s3-s2)*cPI2_8
	STMIA	r1!,{r5,r8,r9,r12}

	@ block3
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[24]
					@ r6 = x[25]
					@ r9 = x[26]
					@ r10= x[27]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[8]
					@ r8 = x[9]
					@ r11= x[10]
					@ r12= x[11]
	SUB	r5, r5, r6		@ r5 = s0 = x[24] - x[25]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[24] + x[25]  -> x[24]
	SUB	r9, r9, r10		@ r9 = s1 = x[26] - x[27]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[26] + x[27]  -> x[26]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 9] - x[ 8]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 9] + x[ 8]  -> x[25]
	SUB	r12,r12,r11		@ r12= s3 = x[11] - x[10]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[11] + x[10]  -> x[27]
	STMIA	r4!,{r6,r7,r10,r11}

	MOV	r6,#0x62		@ r6 = cPI3_8
	MOV	r7,#0xED		@ r7 = cPI1_8

	@ XNPROD31( s0, s1, cPI1_8, cPI3_8, &x[ 8], &x[10] )
	@ x[8] = s0*cPI1_8 - s1*cPI3_8     x[10] = s1*cPI1_8 + s0*cPI3_8
	@ stall Xscale
	MOV	r5, r5, ASR #8
	MUL	r11,r5, r6		@ r11  = s0*cPI3_8
	MOV	r9, r9, ASR #8
	MLA	r11,r9, r7, r11		@ r11 += s1*cPI1_8
	RSB	r9, r9, #0
	MUL	r5, r7, r5		@ r5   = s0*cPI1_8
	MLA	r5, r9, r6, r5		@ r5  -= s1*cPI3_8

	@ XPROD31 ( s2, s3, cPI3_8, cPI1_8, &x[ 9], &x[11] )
	@ x[9] = s2*cPI3_8 + s3*cPI1_8     x[11] = s3*cPI3_8 - s2*cPI1_8
	MOV	r8, r8, ASR #8
	MUL	r9, r8, r6		@ r9   = s2*cPI3_8
	MOV	r12,r12,ASR #8
	MLA	r9, r12,r7, r9		@ r9  += s3*cPI1_8
	RSB	r8,r8,#0
	MUL	r12,r6, r12		@ r12  = s3*cPI3_8
	MLA	r12,r8, r7, r12		@ r12 -= s2*cPI1_8
	STMIA	r1!,{r5,r9,r11,r12}

	@ block4
	LDMIA	r4,{r5,r6,r10,r11}	@ r5 = x[28]
					@ r6 = x[29]
					@ r10= x[30]
					@ r11= x[31]
	LDMIA	r1,{r8,r9,r12,r14}	@ r8 = x[12]
					@ r9 = x[13]
					@ r12= x[14]
					@ r14= x[15]
	SUB	r5, r5, r6		@ r5 = s0 = x[28] - x[29]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[28] + x[29]  -> x[28]
	SUB	r7, r14,r12		@ r7 = s3 = x[15] - x[14]
	ADD	r12,r7, r12, LSL #1	@ r12=      x[15] + x[14]  -> x[31]
	SUB	r10,r10,r11		@ r10= s1 = x[30] - x[31]
	ADD	r11,r10,r11,LSL #1	@ r11=      x[30] + x[31]  -> x[30]
	SUB	r14, r8, r9		@ r14= s2 = x[12] - x[13]
	ADD	r9, r14, r9, LSL #1	@ r9 =      x[12] + x[13]  -> x[29]
	STMIA	r4!,{r6,r9,r11,r12}
	STMIA	r1!,{r5,r7,r10,r14}

	@ mdct_butterfly16 (1st version)
	@ block 1
	SUB	r1,r1,#16*4
	ADD	r4,r1,#8*4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[ 8]
					@ r6 = x[ 9]
					@ r9 = x[10]
					@ r10= x[11]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[ 8] - x[ 9]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[ 8] + x[ 9]  -> x[ 8]
	SUB	r9, r9, r10		@ r9 = s1 = x[10] - x[11]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[10] + x[11]  -> x[10]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[ 9]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[11]
	MOV	r14,#0xB5		@ r14= cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	MOV	r5, r5, ASR #8
	MUL	r5, r14,r5		@ r5  = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	MOV	r8, r8, ASR #8
	MUL	r8, r14,r8		@ r8  = (s3+s2)*cPI2_8
	MOV	r9, r9, ASR #8
	MUL	r9, r14,r9		@ r9  = (s0+s1)*cPI2_8
	MOV	r12,r12,ASR #8
	MUL	r12,r14,r12		@ r12 = (s3-s2)*cPI2_8
	STMIA	r1!,{r5,r8,r9,r12}

	@ block2
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[12]
					@ r6 = x[13]
					@ r9 = x[14]
					@ r10= x[15]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[ 4]
					@ r8 = x[ 5]
					@ r11= x[ 6]
					@ r12= x[ 7]
	SUB	r14,r7, r8		@ r14= s0 = x[ 4] - x[ 5]
	ADD	r8, r14,r8, LSL #1	@ r8 =      x[ 4] + x[ 5]  -> x[13]
	SUB	r7, r12,r11		@ r7 = s1 = x[ 7] - x[ 6]
	ADD	r11,r7, r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[15]
	SUB	r5, r5, r6		@ r5 = s2 = x[12] - x[13]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[12] + x[13]  -> x[12]
	SUB	r12,r9, r10		@ r12= s3 = x[14] - x[15]
	ADD	r10,r12,r10,LSL #1	@ r10=      x[14] + x[15]  -> x[14]
	STMIA	r4!,{r6,r8,r10,r11}
	STMIA	r1!,{r5,r7,r12,r14}

	@ mdct_butterfly_8
	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ mdct_butterfly_8
	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ mdct_butterfly16 (2nd version)
	@ block 1
	ADD	r1,r1,#16*4-8*4
	ADD	r4,r1,#8*4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[ 8]
					@ r6 = x[ 9]
					@ r9 = x[10]
					@ r10= x[11]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[ 8] - x[ 9]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[ 8] + x[ 9]  -> x[ 8]
	SUB	r9, r9, r10		@ r9 = s1 = x[10] - x[11]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[10] + x[11]  -> x[10]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[ 9]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[11]
	MOV	r14,#0xb5		@ r14= cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	MOV	r5, r5, ASR #8
	MUL	r5, r14,r5		@ r5  = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	MOV	r8, r8, ASR #8
	MUL	r8, r14,r8		@ r8  = (s3+s2)*cPI2_8
	MOV	r9, r9, ASR #8
	MUL	r9, r14,r9		@ r9  = (s0+s1)*cPI2_8
	MOV	r12,r12,ASR #8
	MUL	r12,r14,r12		@ r12 = (s3-s2)*cPI2_8
	STMIA	r1!,{r5,r8,r9,r12}

	@ block2
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[12]
					@ r6 = x[13]
					@ r9 = x[14]
					@ r10= x[15]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[ 4]
					@ r8 = x[ 5]
					@ r11= x[ 6]
					@ r12= x[ 7]
	SUB	r5, r5, r6		@ r5 = s2 = x[12] - x[13]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[12] + x[13]  -> x[12]
	SUB	r9, r9, r10		@ r9 = s3 = x[14] - x[15]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[14] + x[15]  -> x[14]
	SUB	r14,r7, r8		@ r14= s0 = x[ 4] - x[ 5]
	ADD	r8, r14,r8, LSL #1	@ r8 =      x[ 4] + x[ 5]  -> x[13]
	SUB	r7, r12,r11		@ r7 = s1 = x[ 7] - x[ 6]
	ADD	r11,r7, r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[15]
	STMIA	r4!,{r6,r8,r10,r11}
	STMIA	r1!,{r5,r7,r9,r14}

	@ mdct_butterfly_8
	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ mdct_butterfly_8
	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	ADD	r1,r1,#8*4
	SUBS	r0,r0,#64
	BGT	mdct_bufferflies_loop3

	LDMFD	r13,{r0-r3}

mdct_bitreverseARM:
	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift

	MOV	r4, #0			@ r4 = bit = 0
	ADD	r5, r1, r0, LSL #1	@ r5 = w = x + (n>>1)
	ADR	r6, bitrev
	SUB	r3, r3, #2		@ r3 = shift -= 2
	SUB	r5, r5, #8
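	@ b = bitrev[bit>>6] | (bitrev[bit&0x3f]<<6): the 64-entry table at
	@ the end of this file reverses 6 bits, and combining the two
	@ halves gives a 12-bit bit-reversed index.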
brev_lp:
	LDRB	r7, [r6, r4, LSR #6]
	AND	r8, r4, #0x3f
	LDRB	r8, [r6, r8]
	ADD	r4, r4, #1		@ bit++
	@ stall XScale
	ORR	r7, r7, r8, LSL #6	@ r7 = bitrev[bit]
	ADD	r9, r1, r7, LSR r3	@ r9 = xx = x + (b>>shift)
	CMP	r5, r9			@ if (w > xx)
	LDR	r10,[r5],#-8		@   r10 = w[0]		w -= 2
	LDRGT	r11,[r5,#12]		@   r11 = w[1]
	LDRGT	r12,[r9]		@   r12 = xx[0]
	LDRGT	r14,[r9,#4]		@   r14 = xx[1]
	STRGT	r10,[r9]		@   xx[0]= w[0]
	STRGT	r11,[r9,#4]		@   xx[1]= w[1]
	STRGT	r12,[r5,#8]		@   w[0] = xx[0]
	STRGT	r14,[r5,#12]		@   w[1] = xx[1]
	CMP	r5,r1
	BGT	brev_lp

	@ mdct_step7
	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift-2

	CMP	r2, #4			@ r5 = T = (step>=4) ?
	LDRGE	r5, =sincos_lookup0	@          sincos_lookup0 +
	LDRLT	r5, =sincos_lookup1	@          sincos_lookup1
	ADD	r7, r1, r0, LSL #1	@ r7 = w1 = x + (n>>1)
	ADDGE	r5, r5, r2, LSR #1	@		            (step>>1)
	ADD	r8, r5, #1024		@ r8 = Ttop
step7_loop1:
	LDR	r6, [r1]		@ r6 = w0[0]
	LDR	r9, [r1,#4]		@ r9 = w0[1]
	LDR	r10,[r7,#-8]!		@ r10= w1[0]	w1 -= 2
	LDR	r11,[r7,#4]		@ r11= w1[1]
	LDRB	r14,[r5,#1]		@ r14= T[1]
	LDRB	r12,[r5],r2		@ r12= T[0]	T += step

	ADD	r6, r6, r10		@ r6 = s0 = w0[0] + w1[0]
	SUB	r10,r6, r10,LSL #1	@ r10= s1b= w0[0] - w1[0]
	SUB	r11,r11,r9		@ r11= s1 = w1[1] - w0[1]
	ADD	r9, r11,r9, LSL #1	@ r9 = s0b= w1[1] + w0[1]

	MOV	r6, r6, ASR #9
	MUL	r3, r6, r14		@ r3   = s0*T[1]
	MOV	r11,r11,ASR #9
	MUL	r4, r11,r12		@ r4  += s1*T[0] = s2
	ADD	r3, r3, r4
	MUL	r14,r11,r14		@ r14  = s1*T[1]
	MUL	r12,r6, r12		@ r12 += s0*T[0] = s3
	SUB	r14,r14,r12

	@ r9 = s0b<<1
	@ r10= s1b<<1
	ADD	r9, r3, r9, ASR #1	@ r9 = s0b + s2
	SUB	r3, r9, r3, LSL #1	@ r3 = s0b - s2

	SUB	r12,r14,r10,ASR #1	@ r12= s3  - s1b
	ADD	r10,r14,r10,ASR #1	@ r10= s3  + s1b
	STR	r9, [r1],#4
	STR	r10,[r1],#4		@ w0 += 2
	STR	r3, [r7]
	STR	r12,[r7,#4]

	CMP	r5,r8
	BLT	step7_loop1

step7_loop2:
	LDR	r6, [r1]		@ r6 = w0[0]
	LDR	r9, [r1,#4]		@ r9 = w0[1]
	LDR	r10,[r7,#-8]!		@ r10= w1[0]	w1 -= 2
	LDR	r11,[r7,#4]		@ r11= w1[1]
	LDRB	r14,[r5,-r2]!		@ r14= T[0]	T -= step
	LDRB	r12,[r5,#1]		@ r12= T[1]

	ADD	r6, r6, r10		@ r6 = s0 = w0[0] + w1[0]
	SUB	r10,r6, r10,LSL #1	@ r10= s1b= w0[0] - w1[0]
	SUB	r11,r11,r9		@ r11= s1 = w1[1] - w0[1]
	ADD	r9, r11,r9, LSL #1	@ r9 = s0b= w1[1] + w0[1]

	MOV	r6, r6, ASR #9
	MUL	r3, r6, r14		@ r3   = s0*T[0]
	MOV	r11,r11,ASR #9
	MUL	r4, r11,r12		@ r4  += s1*T[1] = s2
	ADD	r3, r3, r4
	MUL	r14,r11,r14		@ r14  = s1*T[0]
	MUL	r12,r6, r12		@ r12 += s0*T[1] = s3
	SUB	r14,r14,r12

	@ r9 = s0b<<1
	@ r10= s1b<<1
	ADD	r9, r3, r9, ASR #1	@ r9 = s0b + s2
	SUB	r3, r9, r3, LSL #1	@ r3 = s0b - s2

	SUB	r12,r14,r10,ASR #1	@ r12= s3  - s1b
	ADD	r10,r14,r10,ASR #1	@ r10= s3  + s1b
	STR	r9, [r1],#4
	STR	r10,[r1],#4		@ w0 += 2
	STR	r3, [r7]
	STR	r12,[r7,#4]

	CMP	r1,r7
	BLT	step7_loop2

	LDMFD	r13!,{r0-r3}

	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift
	MOV	r2, r2, ASR #2		@ r2 = step >>= 2
	CMP	r2, #0
	CMPNE	r2, #1
	BEQ	mdct_end

	@ step > 1 (default case)
	CMP	r2, #4			@ r5 = T = (step>=4) ?
	LDRGE	r5, =sincos_lookup0	@          sincos_lookup0 +
	LDRLT	r5, =sincos_lookup1	@          sincos_lookup1
	ADD	r7, r1, r0, LSL #1	@ r7 = iX = x + (n>>1)
	ADDGE	r5, r5, r2, LSR #1	@		            (step>>1)
mdct_step8_default:
	LDR	r6, [r1],#4		@ r6 =  s0 = x[0]
	LDR	r8, [r1],#4		@ r8 = -s1 = x[1]
	LDRB	r12,[r5,#1]		@ r12= T[1]
	LDRB	r14,[r5],r2		@ r14= T[0]	T += step
	RSB	r8, r8, #0		@ r8 = s1

	@ XPROD31(s0, s1, T[0], T[1], x, x+1)
	@ x[0] = s0 * T[0] + s1 * T[1]      x[1] = s1 * T[0] - s0 * T[1]
	MOV	r6, r6, ASR #8
	MOV	r8, r8, ASR #8
	MUL	r10,r8, r12		@ r10  = s1 * T[1]
	CMP	r1, r7
	MLA	r10,r6, r14,r10	@ r10 += s0 * T[0]
	RSB	r6, r6, #0		@ r6 = -s0
	MUL	r11,r8, r14		@ r11  = s1 * T[0]
	MLA	r11,r6, r12,r11	@ r11 -= s0 * T[1]
	STR	r10,[r1,#-8]
	STR	r11,[r1,#-4]
	BLT	mdct_step8_default

mdct_end:
	MOV	r0, r2
	LDMFD	r13!,{r4-r11,PC}

bitrev:
	.byte	0
	.byte	32
	.byte	16
	.byte	48
	.byte	8
	.byte	40
	.byte	24
	.byte	56
	.byte	4
	.byte	36
	.byte	20
	.byte	52
	.byte	12
	.byte	44
	.byte	28
	.byte	60
	.byte	2
	.byte	34
	.byte	18
	.byte	50
	.byte	10
	.byte	42
	.byte	26
	.byte	58
	.byte	6
	.byte	38
	.byte	22
	.byte	54
	.byte	14
	.byte	46
	.byte	30
	.byte	62
	.byte	1
	.byte	33
	.byte	17
	.byte	49
	.byte	9
	.byte	41
	.byte	25
	.byte	57
	.byte	5
	.byte	37
	.byte	21
	.byte	53
	.byte	13
	.byte	45
	.byte	29
	.byte	61
	.byte	3
	.byte	35
	.byte	19
	.byte	51
	.byte	11
	.byte	43
	.byte	27
	.byte	59
	.byte	7
	.byte	39
	.byte	23
	.byte	55
	.byte	15
	.byte	47
	.byte	31
	.byte	63

	@ END
