/* PowerPC 405 cache management routines (libcpu/ppc/ppc405/cache_gcc.S). */
#define	L1_CACHE_SHIFT		5
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)	/* 32-byte cache lines */
#define DCACHE_SIZE		(16 << 10)		/* 16 KiB, for AMCC 405 CPUs */

/*
 * Invalidate the entire instruction cache.
 * (The original header said "Flush", but iccci on the 405 invalidates
 * the whole I-cache; nothing is written back.)
 *
 * In:    none
 * Clobb: none (iccci operands are ignored on the 405)
 */
	.globl invalidate_icache
invalidate_icache:
	iccci	r0,r0			/* invalidate whole I-cache */
	isync				/* discard any prefetched instructions */
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 * In:    r3 = start address, r4 = stop address (exclusive)
 * Clobb: r3, r4, r5, r6, ctr, flags
 */
	.globl flush_icache_range
flush_icache_range:
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5		/* align start down to a line boundary */
	subf	r4,r3,r4		/* r4 = byte count */
	add	r4,r4,r5		/* round count up to whole lines */
	srwi.	r4,r4,L1_CACHE_SHIFT	/* r4 = number of cache lines */
	beqlr				/* nothing to do for an empty range */
	mtctr	r4
	mr	r6,r3			/* keep start for the icbi pass */
1:	dcbst	0,r3			/* write back modified D-cache line */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6			/* invalidate matching I-cache line */
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync				/* refetch with the new contents */
	blr

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 *
 * In:    r3 = start address, r4 = stop address (exclusive)
 * Clobb: r3, r4, r5, ctr, flags
 */
	.globl clean_dcache_range
clean_dcache_range:
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5		/* align start down to a line boundary */
	subf	r4,r3,r4		/* r4 = byte count */
	add	r4,r4,r5		/* round count up to whole lines */
	srwi.	r4,r4,L1_CACHE_SHIFT	/* r4 = number of cache lines */
	beqlr				/* nothing to do for an empty range */
	mtctr	r4

1:	dcbst	0,r3			/* write back line; stays valid in cache */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 * In:    r3 = start address, r4 = stop address (exclusive)
 * Clobb: r3, r4, r5, ctr, flags
 */
	.globl flush_dcache_range
flush_dcache_range:
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5		/* align start down to a line boundary */
	subf	r4,r3,r4		/* r4 = byte count */
	add	r4,r4,r5		/* round count up to whole lines */
	srwi.	r4,r4,L1_CACHE_SHIFT	/* r4 = number of cache lines */
	beqlr				/* nothing to do for an empty range */
	mtctr	r4

1:	dcbf	0,r3			/* write back and invalidate line */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 *
 * In:    r3 = start address, r4 = stop address (exclusive)
 * Clobb: r3, r4, r5, ctr, flags
 *
 * NOTE: dcbi discards lines without writing them back; any modified
 * data in the range is lost.
 */
	.globl invalidate_dcache_range
invalidate_dcache_range:
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5		/* align start down to a line boundary */
	subf	r4,r3,r4		/* r4 = byte count */
	add	r4,r4,r5		/* round count up to whole lines */
	srwi.	r4,r4,L1_CACHE_SHIFT	/* r4 = number of cache lines */
	beqlr				/* nothing to do for an empty range */
	mtctr	r4

1:	dcbi	0,r3			/* invalidate line (no write-back) */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to complete */
	blr

/*
 * Flush the whole data cache by loading enough fresh data to displace
 * every (modified) line.
 *
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
 *
 * Clobb: r3, r4, r5, ctr
 */
#define CACHE_NWAYS     64
#define CACHE_NLINES    32

	.globl flush_dcache
flush_dcache:
	li	r4,(2 * CACHE_NWAYS * CACHE_NLINES)	/* 2x cache capacity in lines */
	mtctr	r4
	lis	r5,0			/* start loading from address 0 */
1:	lwz	r3,0(r5)		/* Load one word from every line */
	addi	r5,r5,L1_CACHE_BYTES
	bdnz	1b
	sync
	blr

/*
 * Invalidate the whole data cache with dccci, one congruence class
 * at a time.  No write-back: modified data is discarded.
 *
 * Clobb: r6, r7, ctr
 */
	.globl invalidate_dcache
invalidate_dcache:
	addi	r6,0,0x0000		/* clear GPR 6 */
	/* Do loop for # of dcache congruence classes. */
	lis	r7,(DCACHE_SIZE / L1_CACHE_BYTES / 2)@ha	/* TBS for large sized cache */
	ori	r7,r7,(DCACHE_SIZE / L1_CACHE_BYTES / 2)@l
					/* NOTE: dccci invalidates both */
	mtctr	r7			/* ways in the D cache */
dcloop:
	dccci	0,r6			/* invalidate line */
	addi	r6,r6,L1_CACHE_BYTES	/* bump to next line */
	bdnz	dcloop
	sync
	blr

/*
 * Cache functions.
 *
 * Icache-related functions are used in POST framework.
 */
	.globl	icache_enable
icache_enable:
	mflr	r8			/* preserve LR across the call */
	bl	invalidate_icache	/* never enable with stale contents */
	mtlr	r8
	isync
	addis	r3,r0, 0xc000	      /* ICCR = 0xc0000000: sets bits 0-1,
					 not just bit 0 as the old comment
					 said -- presumably making the first
					 two regions cacheable; verify against
					 the 405 ICCR region map */
	mticcr	r3
	blr

/*
 * Disable the instruction cache: ICCR = 0 marks every region non-cacheable.
 */
	.globl	icache_disable
icache_disable:
	addis	r3,r0, 0x0000	      /* clear all ICCR bits */
	mticcr	r3
	isync
	blr

/*
 * Return bit 0 (MSB) of ICCR in r3: non-zero if the first region is cached.
 */
	.globl	icache_status
icache_status:
	mficcr	r3
	srwi	r3, r3, 31	/* >>31 => select bit 0 */
	blr

/*
 * Enable the data cache.  The D-cache is invalidated first so no stale
 * lines become visible when caching turns on.
 */
	.globl	dcache_enable
dcache_enable:
	mflr	r8			/* preserve LR across the call */
	bl	invalidate_dcache
	mtlr	r8
	isync
	addis	r3,r0, 0x8000	      /* DCCR = 0x80000000: set bit 0 */
	mtdccr	r3
	blr

/*
 * Disable the data cache.  Modified lines are flushed to memory first
 * so no dirty data is stranded in the cache.
 */
	.globl	dcache_disable
dcache_disable:
	mflr	r8			/* preserve LR across the call */
	bl	flush_dcache
	mtlr	r8
	addis	r3,r0, 0x0000	      /* clear all DCCR bits */
	mtdccr	r3
	blr

/*
 * Return bit 0 (MSB) of DCCR in r3: non-zero if the first region is cached.
 */
	.globl	dcache_status
dcache_status:
	mfdccr	r3
	srwi	r3, r3, 31	/* >>31 => select bit 0 */
	blr