1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
4 */
5 #ifndef _ASM_IO_H
6 #define _ASM_IO_H
7
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10
11 #include <asm/addrspace.h>
12 #include <asm/cpu.h>
13 #include <asm/page.h>
14 #include <asm/pgtable-bits.h>
15 #include <asm/string.h>
16
17 extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
18 extern void __init early_iounmap(void __iomem *addr, unsigned long size);
19
20 #define early_memremap early_ioremap
21 #define early_memunmap early_iounmap
22
23 #ifdef CONFIG_ARCH_IOREMAP
24
ioremap_prot(phys_addr_t offset,unsigned long size,unsigned long prot_val)25 static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
26 unsigned long prot_val)
27 {
28 switch (prot_val & _CACHE_MASK) {
29 case _CACHE_CC:
30 return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
31 case _CACHE_SUC:
32 return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
33 case _CACHE_WUC:
34 return (void __iomem *)(unsigned long)(WRITECOMBINE_BASE + offset);
35 default:
36 return NULL;
37 }
38 }
39
/* Plain ioremap(): strongly-uncached (_CACHE_SUC) mapping via ioremap_prot(). */
#define ioremap(offset, size)		\
		ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC))

/*
 * Window-based mappings need no teardown; evaluate @addr only so callers
 * get the usual "argument is used" semantics without a warning.
 */
#define iounmap(addr)		((void)(addr))
44
45 #endif
46
47 /*
48 * On LoongArch, ioremap() has two variants, ioremap_wc() and ioremap_cache().
49 * They map bus memory into CPU space, the mapped memory is marked uncachable
50 * (_CACHE_SUC), uncachable but accelerated by write-combine (_CACHE_WUC) and
51 * cachable (_CACHE_CC) respectively for CPU access.
52 *
53 * @offset: bus address of the memory
54 * @size: size of the resource to map
55 */
/*
 * Write-combining variant; falls back to strongly-uncached (SUC) when
 * write-combining is not enabled (wc_enabled is declared elsewhere).
 */
#define ioremap_wc(offset, size)	\
	ioremap_prot((offset), (size),	\
		pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))

/* Cached variant: maps with the normal cacheable kernel page protection. */
#define ioremap_cache(offset, size)	\
	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
62
/* mmiowb() is implemented as a write memory barrier on this architecture. */
#define mmiowb() wmb()

/* Generic-io hook run after MMIO writes: reuse mmiowb() for the ordering. */
#define __io_aw() mmiowb()
66
#ifdef CONFIG_KFENCE
/*
 * KFENCE object addresses are not covered by the linear __pa()/__va()
 * translation: addresses below vm_map_base take the fast linear path,
 * anything else is resolved through tlb_virt_to_page() / phys_to_page().
 *
 * Fix: every use of the macro parameter is parenthesized — the original
 * wrote (unsigned long)kaddr, so virt_to_phys(p + off) expanded to
 * (unsigned long)p + off (cast binds tighter than +) and miscomputed for
 * any non-trivial argument expression.  Behavior for simple arguments is
 * unchanged.
 */
#define virt_to_phys(kaddr)								\
({											\
	(likely((unsigned long)(kaddr) < vm_map_base)) ? __pa((unsigned long)(kaddr)) :	\
	page_to_phys(tlb_virt_to_page((unsigned long)(kaddr))) + offset_in_page((unsigned long)(kaddr));\
})

/*
 * Inverse translation: once the KFENCE pool exists, go through
 * phys_to_page()/page_address() instead of the linear __va() path.
 */
#define phys_to_virt(paddr)								\
({											\
	extern char *__kfence_pool;							\
	(unlikely(__kfence_pool == NULL)) ? __va((unsigned long)(paddr)) :		\
	page_address(phys_to_page((unsigned long)(paddr))) + offset_in_page((unsigned long)(paddr));\
})
#endif
81
82 #include <asm-generic/io.h>
83
84 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
85 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
86 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
87
88 #endif /* _ASM_IO_H */
89