/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IO_H
#define _ASM_X86_IO_H

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) trying to avoid writing the same thing
 * over and over again with slight variations and possibly making a
 * mistake somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 * Linus
 */

/*
 * Bit simplified and optimized by Jan Hubicka
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
 *
 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
 * isa_read[wl] and isa_write[wl] fixed
 * - Arnaldo Carvalho de Melo <[email protected]>
 */

#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/cc_platform.h>
#include <asm/page.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable_types.h>
#include <asm/shared/io.h>
#include <asm/special_insns.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }
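/*
 * As an illustration of what the builders above generate (this comment is
 * explanatory only), build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
 * expands to roughly:
 *
 *	static inline unsigned char readb(const volatile void __iomem *addr)
 *	{
 *		unsigned char ret;
 *		asm volatile("movb %1,%0" : "=q" (ret)
 *			     : "m" (*(volatile unsigned char __force *)addr)
 *			     : "memory");
 *		return ret;
 *	}
 *
 * i.e. a single MOV from the MMIO address, with a "memory" clobber acting
 * as a compiler barrier; the __read*()/__write*() variants omit the clobber.
 */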
build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )

#define readb readb
#define readw readw
#define readl readl
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define writeb writeb
#define writew writew
#define writel writel
#define writeb_relaxed(v, a) __writeb(v, a)
#define writew_relaxed(v, a) __writew(v, a)
#define writel_relaxed(v, a) __writel(v, a)
#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel

#ifdef CONFIG_X86_64

build_mmio_read(readq, "q", u64, "=r", :"memory")
build_mmio_read(__readq, "q", u64, "=r", )
build_mmio_write(writeq, "q", u64, "r", :"memory")
build_mmio_write(__writeq, "q", u64, "r", )

#define readq_relaxed(a) __readq(a)
#define writeq_relaxed(v, a) __writeq(v, a)

#define __raw_readq __readq
#define __raw_writeq __writeq

/* Let people know that we have them */
#define readq readq
#define writeq writeq

#endif

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
	return __pa(address);
}
#define virt_to_phys virt_to_phys

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline void *phys_to_virt(phys_addr_t address)
{
	return __va(address);
}
#define phys_to_virt phys_to_virt
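/*
 * Hypothetical usage sketch (illustrative only, not part of the kernel API
 * surface): the pair only round-trips for directly mapped kernel addresses,
 * e.g. memory obtained from kmalloc():
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	phys_addr_t pa = virt_to_phys(buf);
 *
 *	WARN_ON(phys_to_virt(pa) != buf);
 *
 * vmalloc() or ioremap() addresses must not be passed here, and device
 * drivers should use the DMA API rather than these helpers to obtain bus
 * addresses for DMA.
 */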
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 * However, we truncate the address to unsigned int to avoid undesirable
 * promotions in legacy drivers.
 */
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
	return (unsigned int)virt_to_phys(address);
}
#define isa_bus_to_virt phys_to_virt

/*
 * The default ioremap() behavior is non-cached; if you need something
 * else, you probably want one of the following.
 */
extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
#define ioremap_cache ioremap_cache
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
#define ioremap_prot ioremap_prot
extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
#define ioremap_encrypted ioremap_encrypted

/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
void __iomem *ioremap(resource_size_t offset, unsigned long size);
#define ioremap ioremap

extern void iounmap(volatile void __iomem *addr);
#define iounmap iounmap
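/*
 * Hypothetical driver sketch (illustrative only; the register layout and
 * names CTRL_REG/STATUS_REG/ENABLE_BIT are made up): a typical MMIO mapping
 * lifecycle looks like
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(ENABLE_BIT, regs + CTRL_REG);
 *	status = readl(regs + STATUS_REG);
 *	iounmap(regs);
 *
 * For PCI BARs, pci_iomap()/pcim_iomap() are usually preferable, and
 * write-combining or cached mappings need ioremap_wc()/ioremap_cache()
 * rather than plain ioremap().
 */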
#ifdef __KERNEL__

void memcpy_fromio(void *, const volatile void __iomem *, size_t);
void memcpy_toio(volatile void __iomem *, const void *, size_t);
void memset_io(volatile void __iomem *, int, size_t);

#define memcpy_fromio memcpy_fromio
#define memcpy_toio memcpy_toio
#define memset_io memset_io

#ifdef CONFIG_X86_64
/*
 * Commit 0f07496144c2 ("[PATCH] Add faster __iowrite32_copy routine for
 * x86_64") says that circa 2006 rep movsl is noticeably faster than a copy
 * loop.
 */
static inline void __iowrite32_copy(void __iomem *to, const void *from,
				    size_t count)
{
	asm volatile("rep ; movsl"
		     : "=&c"(count), "=&D"(to), "=&S"(from)
		     : "0"(count), "1"(to), "2"(from)
		     : "memory");
}
#define __iowrite32_copy __iowrite32_copy
#endif
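/*
 * Sketch of how __iowrite32_copy() might be used (illustrative only; the
 * device pointer and descriptor names are hypothetical): posting a small,
 * 4-byte-aligned command block to device memory in one burst:
 *
 *	struct cmd_desc desc;
 *
 *	__iowrite32_copy(dev->cmd_window, &desc, sizeof(desc) / 4);
 *
 * Note that the count is in 32-bit words, not bytes, and both source and
 * destination are expected to be 4-byte aligned.
 */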
/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite large):
 */
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
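/*
 * Illustrative example (not a recommendation for new code): legacy VGA text
 * memory at ISA address 0xb8000 can be reached through this window as
 *
 *	char __iomem *vga = __ISA_IO_base + 0xb8000;
 *
 *	writeb('A', vga);
 *
 * New drivers should ioremap() their resources explicitly instead.
 */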
#endif /* __KERNEL__ */

extern void native_io_delay(void);

extern int io_delay_type;
extern void io_delay_init(void);

#if defined(CONFIG_PARAVIRT)
#include <asm/paravirt.h>
#else

static inline void slow_down_io(void)
{
	native_io_delay();
#ifdef REALLY_SLOW_IO
	native_io_delay();
	native_io_delay();
	native_io_delay();
#endif
}

#endif

#define BUILDIO(bwl, type) \
static inline void out##bwl##_p(type value, u16 port) \
{ \
	out##bwl(value, port); \
	slow_down_io(); \
} \
\
static inline type in##bwl##_p(u16 port) \
{ \
	type value = in##bwl(port); \
	slow_down_io(); \
	return value; \
} \
\
static inline void outs##bwl(u16 port, const void *addr, unsigned long count) \
{ \
	if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) { \
		type *value = (type *)addr; \
		while (count) { \
			out##bwl(*value, port); \
			value++; \
			count--; \
		} \
	} else { \
		asm volatile("rep; outs" #bwl \
			     : "+S"(addr), "+c"(count) \
			     : "d"(port) : "memory"); \
	} \
} \
\
static inline void ins##bwl(u16 port, void *addr, unsigned long count) \
{ \
	if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) { \
		type *value = (type *)addr; \
		while (count) { \
			*value = in##bwl(port); \
			value++; \
			count--; \
		} \
	} else { \
		asm volatile("rep; ins" #bwl \
			     : "+D"(addr), "+c"(count) \
			     : "d"(port) : "memory"); \
	} \
}

BUILDIO(b, u8)
BUILDIO(w, u16)
BUILDIO(l, u32)
#undef BUILDIO

#define inb_p inb_p
#define inw_p inw_p
#define inl_p inl_p
#define insb insb
#define insw insw
#define insl insl

#define outb_p outb_p
#define outw_p outw_p
#define outl_p outl_p
#define outsb outsb
#define outsw outsw
#define outsl outsl
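/*
 * Hypothetical usage sketch for the port I/O helpers generated above (the
 * port numbers, buffer and 'cmd' value are made up for illustration):
 * issuing a command with the traditional post-access delay and then reading
 * a 256-word block from a legacy ATA-style data port:
 *
 *	u16 buf[256];
 *
 *	outb_p(cmd, 0x1f7);
 *	insw(0x1f0, buf, ARRAY_SIZE(buf));
 *
 * The _p variants call slow_down_io() after the access; the string forms
 * use "rep ins"/"rep outs" unless CC_ATTR_GUEST_UNROLL_STRING_IO forces an
 * unrolled per-element loop (e.g. in confidential-computing guests).
 */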
extern void *xlate_dev_mem_ptr(phys_addr_t phys);
extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

#define xlate_dev_mem_ptr xlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
				enum page_cache_mode pcm);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
#define ioremap_wc ioremap_wc
extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
#define ioremap_wt ioremap_wt

extern bool is_early_ioremap_ptep(pte_t *ptep);

#define IO_SPACE_LIMIT 0xffff

#include <asm-generic/io.h>
#undef PCI_IOBASE

#ifdef CONFIG_MTRR
extern int __must_check arch_phys_wc_index(int handle);
#define arch_phys_wc_index arch_phys_wc_index

extern int __must_check arch_phys_wc_add(unsigned long base,
					 unsigned long size);
extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif

#ifdef CONFIG_X86_PAT
extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
extern bool arch_memremap_can_ram_remap(resource_size_t offset,
					unsigned long size,
					unsigned long flags);
#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap

extern bool phys_mem_access_encrypted(unsigned long phys_addr,
				      unsigned long size);
#else
static inline bool phys_mem_access_encrypted(unsigned long phys_addr,
					     unsigned long size)
{
	return true;
}
#endif
/**
 * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
 * @dst: destination, in MMIO space (must be 512-bit aligned)
 * @src: source
 * @count: number of 512-bit quantities to submit
 *
 * Submit data from kernel space to MMIO space, in units of 512 bits at a
 * time. Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 *
 * Warning: Do not use this helper unless your driver has checked that the
 * MOVDIR64B instruction is supported on the platform.
 */
static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
				    size_t count)
{
	const u8 *from = src;
	const u8 *end = from + count * 64;

	while (from < end) {
		movdir64b_io(dst, from);
		from += 64;
	}
}
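/*
 * Hypothetical caller sketch (illustrative only; 'portal' and 'desc' are
 * made up): callers are expected to gate this on the MOVDIR64B feature
 * before use, e.g.
 *
 *	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B))
 *		return -EOPNOTSUPP;
 *	iosubmit_cmds512(portal, &desc, 1);
 *
 * where 'desc' is a 64-byte command descriptor and 'portal' is a 64-byte
 * aligned MMIO submission window.
 */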
#endif /* _ASM_X86_IO_H */