/*
 * File      : mips_cache.h
 * This file is part of RT-Thread RTOS
 * COPYRIGHT (C) 2008 - 2012, RT-Thread Development Team
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Change Logs:
 * Date           Author       Notes
 * 2016-09-10     Urey         the first version
 */

#ifndef _MIPS_CACHE_H_
#define _MIPS_CACHE_H_

#ifndef __ASSEMBLER__
#include <rtdef.h>
#include <mips_cfg.h>

/*
 * Cache operations available on all MIPS processors with R4000-style caches
 */
#define INDEX_INVALIDATE_I      0x00
#define INDEX_WRITEBACK_INV_D   0x01
#define INDEX_LOAD_TAG_I        0x04
#define INDEX_LOAD_TAG_D        0x05
#define INDEX_STORE_TAG_I       0x08
#define INDEX_STORE_TAG_D       0x09
#if defined(CONFIG_CPU_LOONGSON2)
#define HIT_INVALIDATE_I        0x00
#else
#define HIT_INVALIDATE_I        0x10
#endif
#define HIT_INVALIDATE_D        0x11
#define HIT_WRITEBACK_INV_D     0x15

/*
 * The lock state is cleared by executing an Index Invalidate, Index
 * Writeback Invalidate, Hit Invalidate, or Hit Writeback Invalidate
 * operation to the locked line, or via an Index Store Tag operation
 * with the lock bit reset in the TagLo register.
 */
#define FETCH_AND_LOCK_I        0x1c
#define FETCH_AND_LOCK_D        0x1d


enum dma_data_direction
{
    DMA_BIDIRECTIONAL = 0,
    DMA_TO_DEVICE = 1,
    DMA_FROM_DEVICE = 2,
    DMA_NONE = 3,
};
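
/*
 * Note (convention, mirroring the usual DMA API semantics rather than
 * anything defined in this header): DMA_TO_DEVICE means the CPU produced
 * the data and the device will read it, so dirty lines are written back
 * before the transfer; DMA_FROM_DEVICE means the device produces the data
 * and the CPU will read it, so stale lines are invalidated;
 * DMA_BIDIRECTIONAL requires both.
 */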

/*
 * R4000-specific cacheops
 */
#define CREATE_DIRTY_EXCL_D     0x0d
#define FILL                    0x14
#define HIT_WRITEBACK_I         0x18
#define HIT_WRITEBACK_D         0x19

/*
 * R4000SC and R4400SC-specific cacheops
 */
#define INDEX_INVALIDATE_SI     0x02
#define INDEX_WRITEBACK_INV_SD  0x03
#define INDEX_LOAD_TAG_SI       0x06
#define INDEX_LOAD_TAG_SD       0x07
#define INDEX_STORE_TAG_SI      0x0A
#define INDEX_STORE_TAG_SD      0x0B
#define CREATE_DIRTY_EXCL_SD    0x0f
#define HIT_INVALIDATE_SI       0x12
#define HIT_INVALIDATE_SD       0x13
#define HIT_WRITEBACK_INV_SD    0x17
#define HIT_WRITEBACK_SD        0x1b
#define HIT_SET_VIRTUAL_SI      0x1e
#define HIT_SET_VIRTUAL_SD      0x1f

/*
 * R5000-specific cacheops
 */
#define R5K_PAGE_INVALIDATE_S   0x17

/*
 * RM7000-specific cacheops
 */
#define PAGE_INVALIDATE_T       0x16

/*
 * R10000-specific cacheops
 *
 * Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.
 * Most of the _S cacheops are identical to the R4000SC _SD cacheops.
 */
#define INDEX_WRITEBACK_INV_S   0x03
#define INDEX_LOAD_TAG_S        0x07
#define INDEX_STORE_TAG_S       0x0B
#define HIT_INVALIDATE_S        0x13
#define CACHE_BARRIER           0x14
#define HIT_WRITEBACK_INV_S     0x17
#define INDEX_LOAD_DATA_I       0x18
#define INDEX_LOAD_DATA_D       0x19
#define INDEX_LOAD_DATA_S       0x1b
#define INDEX_STORE_DATA_I      0x1c
#define INDEX_STORE_DATA_D      0x1d
#define INDEX_STORE_DATA_S      0x1f

#define cache_op(op, addr)          \
    __asm__ __volatile__(           \
        ".set   push\n"             \
        ".set   noreorder\n"        \
        ".set   mips3\n"            \
        "cache  %0, %1\n"           \
        ".set   pop\n"              \
        :                           \
        : "i" (op), "R" (*(unsigned char *)(addr)))
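
/*
 * Usage sketch (illustrative only): write back and invalidate the D-cache
 * line that holds the byte at virtual address `addr`:
 *
 *     cache_op(HIT_WRITEBACK_INV_D, addr);
 *
 * `op` must be a compile-time constant from the tables above, since the
 * "i" constraint encodes it as an immediate in the cache instruction.
 */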

#define cache16_unroll32(base, op)					\
	__asm__ __volatile__(						\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set mips0					\n"	\
	"	.set reorder					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
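
/*
 * cache16_unroll32 issues 32 cache instructions at a 16-byte stride,
 * covering the 512-byte block that starts at `base` (for caches with a
 * 16-byte line size). Illustrative sketch only: to write back and
 * invalidate the first 512 bytes of KSEG0 by index,
 *
 *     cache16_unroll32(KSEG0BASE, INDEX_WRITEBACK_INV_D);
 */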

static inline void flush_icache_line_indexed(rt_ubase_t addr)
{
	cache_op(INDEX_INVALIDATE_I, addr);
}

static inline void flush_dcache_line_indexed(rt_ubase_t addr)
{
	cache_op(INDEX_WRITEBACK_INV_D, addr);
}

static inline void flush_icache_line(rt_ubase_t addr)
{
	cache_op(HIT_INVALIDATE_I, addr);
}

static inline void lock_icache_line(rt_ubase_t addr)
{
	cache_op(FETCH_AND_LOCK_I, addr);
}

static inline void lock_dcache_line(rt_ubase_t addr)
{
	cache_op(FETCH_AND_LOCK_D, addr);
}
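
/*
 * Usage sketch (illustrative only): pin the first cache line of a hot
 * routine into the I-cache so it cannot be evicted:
 *
 *     lock_icache_line((rt_ubase_t)&mips_irq_handle);
 *
 * `mips_irq_handle` is a hypothetical symbol used only for illustration.
 * As noted above, the lock is released by an invalidate or
 * writeback-invalidate operation on that line.
 */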

static inline void flush_dcache_line(rt_ubase_t addr)
{
	cache_op(HIT_WRITEBACK_INV_D, addr);
}

static inline void invalidate_dcache_line(rt_ubase_t addr)
{
	cache_op(HIT_INVALIDATE_D, addr);
}
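
/*
 * Range-maintenance sketch (an assumption about how the r4k_dcache_*
 * helpers declared below are typically implemented; not part of this
 * header's API). To write back and invalidate an arbitrary buffer, walk
 * it one D-cache line at a time:
 *
 *     rt_ubase_t lsize = g_mips_core.dcache_line_size;
 *     rt_ubase_t a     = addr & ~(lsize - 1);
 *     rt_ubase_t end   = addr + size;
 *
 *     for (; a < end; a += lsize)
 *         flush_dcache_line(a);
 */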

static inline void blast_dcache16(void)
{
	rt_ubase_t start = KSEG0BASE;
	rt_ubase_t end = start + g_mips_core.dcache_size;
	rt_ubase_t addr;

	for (addr = start; addr < end; addr += g_mips_core.dcache_line_size)
		cache16_unroll32(addr, INDEX_WRITEBACK_INV_D);
}

static inline void inv_dcache16(void)
{
	rt_ubase_t start = KSEG0BASE;
	rt_ubase_t end = start + g_mips_core.dcache_size;
	rt_ubase_t addr;

	for (addr = start; addr < end; addr += g_mips_core.dcache_line_size)
		cache16_unroll32(addr, HIT_INVALIDATE_D);
}

static inline void blast_icache16(void)
{
	rt_ubase_t start = KSEG0BASE;
	rt_ubase_t end = start + g_mips_core.icache_size;
	rt_ubase_t addr;

	for (addr = start; addr < end; addr += g_mips_core.icache_line_size)
		cache16_unroll32(addr, INDEX_INVALIDATE_I);
}

void r4k_cache_init(void);
void r4k_cache_flush_all(void);
void r4k_icache_flush_all(void);
void r4k_icache_flush_range(rt_ubase_t addr, rt_ubase_t size);
void r4k_icache_lock_range(rt_ubase_t addr, rt_ubase_t size);
void r4k_dcache_inv(rt_ubase_t addr, rt_ubase_t size);
void r4k_dcache_wback_inv(rt_ubase_t addr, rt_ubase_t size);
void r4k_dma_cache_sync(rt_ubase_t addr, rt_size_t size, enum dma_data_direction direction);
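
/*
 * Example (illustrative only) of keeping a DMA buffer coherent with the
 * r4k_dma_cache_sync() declared above; `buf` and `len` are placeholders:
 *
 *     // CPU filled buf, device is about to read it: write back dirty lines
 *     r4k_dma_cache_sync((rt_ubase_t)buf, len, DMA_TO_DEVICE);
 *
 *     // device wrote buf, CPU is about to read it: discard stale lines
 *     r4k_dma_cache_sync((rt_ubase_t)buf, len, DMA_FROM_DEVICE);
 */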
#endif /* !__ASSEMBLER__ */

#endif /* _MIPS_CACHE_H_ */