/*
 * Copyright (c) 2009 Corey Tabaka
 * Copyright (c) 2019 LK Trusty Authors. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <arch/ops.h>
#include <arch/x86/mmu.h>
#include <bits.h>
#include <lk/init.h>
#include <lk/macros.h>
#include <debug.h>

typedef void (*cache_func_type_t)(addr_t addr);

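/*
 * Per-line cache maintenance routine selected by x86_arch_cache_init()
 * (NULL when no per-line flush instruction is available) and the CPU's
 * cache line size in bytes.
 */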
static cache_func_type_t cache_func;
static uint32_t cache_line_size;

static uint32_t x86_get_cache_line_size(void) {
    uint32_t unused;
    uint32_t ebx = 0;

    /* CPUID.01H:EBX[bits 15-8] reports the CLFLUSH line size in 8-byte units */
    cpuid(X86_CPUID_VERSION_INFO, &unused, &ebx, &unused, &unused);

    return ((ebx >> 8) & 0xff) * 8;
}

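/* Write back all modified lines and invalidate the entire cache hierarchy. */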
static inline void x86_wbinvd(void) {
    __asm__ __volatile__ ("wbinvd");
}

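/* Write back and invalidate the cache line containing addr. */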
static inline void x86_clflush(addr_t addr) {
    __asm__ __volatile__ ("clflush %0" : "+m"(*(volatile char *)addr));
}

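/*
 * Write back and invalidate the cache line containing addr. CLFLUSHOPT is
 * weakly ordered, so a sequence of flushes must be followed by a fence.
 */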
static inline void x86_clflushopt(addr_t addr) {
    __asm__ __volatile__ ("clflushopt %0" : "+m"(*(volatile char *)addr));
}

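/*
 * Write back the cache line containing addr without necessarily invalidating
 * it, so the line may stay cached. Weakly ordered like CLFLUSHOPT.
 */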
static inline void x86_clwb(addr_t addr) {
    __asm__ __volatile__ ("clwb %0" : "+m"(*(volatile char *)addr));
}

static inline bool is_clflush_avail(void) {
    uint32_t edx;
    uint32_t unused;

    /* CPUID.01H:EDX[bit 19] indicates availability of CLFLUSH */
    cpuid(X86_CPUID_VERSION_INFO, &unused, &unused, &unused, &edx);
    return !!BIT(edx, X86_CPUID_CLFLUSH_BIT);
}

static inline bool is_clflushopt_avail(void) {
    uint32_t ebx;
    uint32_t unused;

    /* CPUID.(EAX=7,ECX=0):EBX[bit 23] indicates availability of CLFLUSHOPT */
    cpuid_count(X86_CPUID_EXTEND_FEATURE, 0, &unused, &ebx, &unused, &unused);
    return !!BIT(ebx, X86_CPUID_CLFLUSHOPT_BIT);
}

static inline bool is_clwb_avail(void) {
    uint32_t ebx;
    uint32_t unused;

    /* CPUID.(EAX=7,ECX=0):EBX[bit 24] indicates availability of CLWB */
    cpuid_count(X86_CPUID_EXTEND_FEATURE, 0, &unused, &ebx, &unused, &unused);
    return !!BIT(ebx, X86_CPUID_CLWS_BIT);
}

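/*
 * Apply func to every cache line overlapping [start, start + len), then issue
 * a memory barrier so the (possibly weakly ordered) flushes complete before
 * returning.
 */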
static void x86_cache_operation_inner(addr_t start,
                                      size_t len,
                                      cache_func_type_t func) {
    addr_t ptr = round_down(start, cache_line_size);

    while (ptr < start + len) {
        func(ptr);
        ptr += cache_line_size;
    }
    mb();
}

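/*
 * Clean (write back) the given range. Prefer CLWB, which leaves the lines
 * cached; fall back to clean+invalidate when CLWB is not supported.
 */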
void arch_clean_cache_range(addr_t start, size_t len) {
    if (is_clwb_avail()) {
        x86_cache_operation_inner(start, len, x86_clwb);
    } else {
        arch_clean_invalidate_cache_range(start, len);
    }
}

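/*
 * Clean and invalidate the given range line by line, or fall back to WBINVD
 * (which flushes the entire cache) when no per-line flush instruction exists.
 */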
void arch_clean_invalidate_cache_range(addr_t start, size_t len) {
    if (NULL == cache_func) {
        x86_wbinvd();
    } else {
        x86_cache_operation_inner(start, len, cache_func);
    }
}

/* nothing to do to sync I & D cache on x86 */
void arch_sync_cache_range(addr_t start, size_t len) { }

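/*
 * Probe the cache line size and pick the best available per-line flush
 * instruction, preferring CLFLUSHOPT over CLFLUSH. The level argument comes
 * from the LK init hook and is unused here.
 */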
void x86_arch_cache_init(uint level) {
    cache_line_size = x86_get_cache_line_size();

    if (is_clflushopt_avail()) {
        cache_func = x86_clflushopt;
    } else if (is_clflush_avail()) {
        cache_func = x86_clflush;
    } else {
        cache_func = NULL;
    }
}

LK_INIT_HOOK(x86_cache_init, x86_arch_cache_init, LK_INIT_LEVEL_ARCH_EARLY + 1);