// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/kfence.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>

#define SHM_ALIGN_MASK	(SHMLBA - 1)

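/*
 * COLOUR_ALIGN(): round addr up to a SHMLBA boundary, then add the cache
 * colour implied by the file offset, so shared mappings of the same pgoff
 * end up with the same colour.
 */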
#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK)	\
	 + (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))

enum mmap_allocation_direction {UP, DOWN};

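/*
 * Common helper for arch_get_unmapped_area() and
 * arch_get_unmapped_area_topdown(): validate MAP_FIXED requests, honour a
 * caller-supplied address hint when possible, then search for a free area
 * in the requested direction.
 */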
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info = {};

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK))
			return -EINVAL;
		return addr;
	}

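	/* Colour-align file-backed and shared mappings to avoid cache aliasing */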
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp && is_file_hugepages(filp))
		info.align_mask = huge_page_mask_align(filp);
	else
		info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

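/* Allocate bottom-up: search from mm->mmap_base up towards TASK_SIZE */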
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags,
	vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

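/*
 * __virt_addr_valid() - return whether a kernel virtual address is backed
 * by a valid page. KFENCE objects are always accepted; anything else must
 * fall between PAGE_OFFSET and vm_map_base and resolve to a valid pfn.
 */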
int __virt_addr_valid(volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if (is_kfence_address((void *)kaddr))
		return 1;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
		return 0;

	return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
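	/* The whole mapping must fit within the CPU's physical address range */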
	return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
}