// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 */
/*
 * ISP MMU management wrapper code
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>		/* for GFP_KERNEL, GFP_DMA32 */
#include <linux/mm.h>
#include <linux/slab.h>		/* for kmalloc */
#include <linux/list.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "atomisp_internal.h"
#include "mmu/isp_mmu.h"

/*
 * 64-bit x86 processor physical address layout:
 * 0		- 0x7fffffff		DDR RAM	(2GB)
 * 0x80000000	- 0xffffffff		MMIO	(2GB)
 * 0x100000000	- 0x3fffffffffff	DDR RAM	(64TB)
 * So if the system has more than 2GB DDR memory, the lower 2GB occupies
 * physical addresses 0 - 0x7fffffff and the rest starts from 0x100000000.
 * We have to make sure memory is allocated from the lower 2GB for devices
 * that are only 32-bit capable (e.g. the ISP MMU).
 *
 * For any confusion, contact [email protected].
 */
#define NR_PAGES_2GB	(SZ_2G / PAGE_SIZE)
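
/*
 * For this reason, alloc_page_table() below allocates page-table pages
 * with GFP_DMA32, which on the layout above keeps them in the lower 2GB
 * of RAM.
 */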

static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt);

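/*
 * Read or write one 32-bit PTE at index @idx of the page table whose
 * physical address is @pt, through the kernel's linear mapping.
 */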
static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	return *(pt_virt + idx);
}

static void atomisp_set_pte(phys_addr_t pt,
			    unsigned int idx, unsigned int pte)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	*(pt_virt + idx) = pte;
}

static void *isp_pt_phys_to_virt(phys_addr_t phys)
{
	return phys_to_virt(phys);
}

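/*
 * Conversions between ISP PTEs and page physical addresses go through
 * the hooks provided by the MMU client driver (struct isp_mmu_client).
 */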
static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
				     unsigned int pte)
{
	return mmu->driver->pte_to_phys(mmu, pte);
}

static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
	phys_addr_t phys)
{
	unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);

	return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
}

/*
 * Allocate an uncacheable page table.
 * Return its physical address, or NULL_PAGE on failure.
 */
static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
{
	int i;
	phys_addr_t page;
	void *virt;

	virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return (phys_addr_t)NULL_PAGE;

	/*
	 * We need an uncacheable page table.
	 */
#ifdef	CONFIG_X86
	set_memory_uc((unsigned long)virt, 1);
#endif

	page = virt_to_phys(virt);

	/* Fill every entry of the new table with the null PTE. */
	for (i = 0; i < 1024; i++)
		atomisp_set_pte(page, i, mmu->driver->null_pte);

	return page;
}

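/*
 * Free a page table allocated by alloc_page_table(): switch the page
 * back to write-back caching before handing it to the page allocator.
 */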
static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
{
	void *virt;

	page &= ISP_PAGE_MASK;
	/*
	 * Reset the page to write-back caching before freeing it.
	 */
	virt = phys_to_virt(page);

#ifdef	CONFIG_X86
	set_memory_wb((unsigned long)virt, 1);
#endif

	free_page((unsigned long)virt);
}

/* Report a remap attempt: the ISP virtual address is already mapped. */
static void mmu_remap_error(struct isp_mmu *mmu,
			    phys_addr_t l1_pt, unsigned int l1_idx,
			    phys_addr_t l2_pt, unsigned int l2_idx,
			    unsigned int isp_virt, phys_addr_t old_phys,
			    phys_addr_t new_phys)
{
	dev_err(atomisp_dev, "address remap:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\told: isp_virt = 0x%x, phys = 0x%llx\n"
		"\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt),
		(u64)l2_pt, l2_idx, isp_virt,
		(u64)old_phys, isp_virt,
		(u64)new_phys);
}

/* Report an unmap of an L2 entry that holds no valid page mapping. */
static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   phys_addr_t l2_pt, unsigned int l2_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt),
		(u64)l2_pt, l2_idx, isp_virt,
		pte);
}

/* Report an unmap through an L1 entry that points to no valid L2 table. */
static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx, (unsigned int)isp_virt,
		pte);
}

/* Report an unmap request when no valid L1 page table exists. */
static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1PT:\n\n"
		"L1PT = 0x%x\n", (unsigned int)pte);
}

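/*
 * Address-translation sketch (assuming 4 KiB pages and 1024-entry
 * tables, i.e. a 10/10/12 bit split of the 32-bit ISP virtual address;
 * ISP_PTR_TO_L1_IDX()/ISP_PTR_TO_L2_IDX() encapsulate the exact split):
 *
 *	isp_virt = 0x12345678
 *	L1 index = isp_virt >> 22           = 0x048
 *	L2 index = (isp_virt >> 12) & 0x3ff = 0x345
 *	offset   = isp_virt & 0xfff         = 0x678
 */
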
/*
 * Update L2 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int l1_idx, phys_addr_t l2_pt,
		      unsigned int start, unsigned int end, phys_addr_t phys)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			mmu_remap_error(mmu, l1_pt, l1_idx,
					l2_pt, idx, ptr, pte, phys);

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}

		pte = isp_pgaddr_to_pte_valid(mmu, phys);

		atomisp_set_pte(l2_pt, idx, pte);
		mmu->l2_pgt_refcount[l1_idx]++;
		ptr += (1U << ISP_L2PT_OFFSET);
		phys += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	return 0;
}

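/*
 * mmu->l2_pgt_refcount[] counts the valid PTEs in each L2 page table;
 * mmu_l2_unmap() frees a table once its count drops back to zero.
 */
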
/*
 * Update L1 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int start, unsigned int end,
		      phys_addr_t phys)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;
	int ret;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			l2_pt = alloc_page_table(mmu);
			if (l2_pt == NULL_PAGE) {
				dev_err(atomisp_dev,
					"alloc page table fail.\n");

				/* free all mapped pages */
				free_mmu_map(mmu, start, ptr);

				return -ENOMEM;
			}

			l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);

			atomisp_set_pte(l1_pt, idx, l2_pte);
			mmu->l2_pgt_refcount[idx] = 0;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, l1_aligned, phys);
			phys += (l1_aligned - ptr);
			ptr = l1_aligned;
		} else {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, end, phys);
			phys += (end - ptr);
			ptr = end;
		}

		if (ret) {
			dev_err(atomisp_dev, "setup mapping in L2PT fail.\n");

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}
	} while (ptr < end && idx < ISP_L1PT_PTES);

	return 0;
}

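/*
 * Example (under the same 10/10/12 split assumed above): mapping 8 MiB
 * at isp_virt 0x10000000 covers two L1 entries of 4 MiB each, so
 * mmu_l1_map() calls mmu_l2_map() once per L1 slot.
 */
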
/*
 * Update page table according to isp virtual address and page physical
 * address
 */
static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		   phys_addr_t phys, unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;
	int ret;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		/*
		 * allocate 1 new page for L1 page table
		 */
		l1_pt = alloc_page_table(mmu);
		if (l1_pt == NULL_PAGE) {
			dev_err(atomisp_dev, "alloc page table fail.\n");
			mutex_unlock(&mmu->pt_mutex);
			return -ENOMEM;
		}

		/*
		 * setup L1 page table physical addr to MMU
		 */
		mmu->base_address = l1_pt;
		mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
		memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = (isp_virt) & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);
	phys &= ISP_PAGE_MASK;

	ret = mmu_l1_map(mmu, l1_pt, start, end, phys);

	if (ret)
		dev_err(atomisp_dev, "setup mapping in L1PT fail.\n");

	mutex_unlock(&mmu->pt_mutex);
	return ret;
}

/*
 * Free L2 page table according to isp virtual address and page physical
 * address
 */
static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int l1_idx, phys_addr_t l2_pt,
			 unsigned int start, unsigned int end)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (!ISP_PTE_VALID(mmu, pte))
			mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
					       l2_pt, idx, ptr, pte);

		atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
		mmu->l2_pgt_refcount[l1_idx]--;
		ptr += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	/* Free the L2 table once its last valid entry has been cleared. */
	if (mmu->l2_pgt_refcount[l1_idx] == 0) {
		free_page_table(mmu, l2_pt);
		atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
	}
}

/*
 * Free L1 page table according to isp virtual address and page physical
 * address
 */
static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int start, unsigned int end)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
			/*
			 * Advance to the next L1 entry; a bare "continue"
			 * here would loop forever since ptr never moves.
			 */
			ptr = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
			continue;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
			ptr = l1_aligned;
		} else {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
			ptr = end;
		}
		/*
		 * use the same L2 page next time, so we don't
		 * need to invalidate and free this PT.
		 */
		/*      atomisp_set_pte(l1_pt, idx, NULL_PTE); */
	} while (ptr < end && idx < ISP_L1PT_PTES);
}

/*
 * Free page table according to isp virtual address and page physical
 * address
 */
static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		      unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
		mutex_unlock(&mmu->pt_mutex);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = (isp_virt) & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);

	mmu_l1_unmap(mmu, l1_pt, start, end);
	mutex_unlock(&mmu->pt_mutex);
}

/*
 * Free page tables according to isp start virtual address and end virtual
 * address.
 */
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt)
{
	unsigned int pgnr;
	unsigned int start, end;

	start = (start_isp_virt) & ISP_PAGE_MASK;
	end = (end_isp_virt) & ISP_PAGE_MASK;
	pgnr = (end - start) >> ISP_PAGE_OFFSET;
	mmu_unmap(mmu, start, pgnr);
}

int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr)
{
	return mmu_map(mmu, isp_virt, phys, pgnr);
}

void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr)
{
	mmu_unmap(mmu, isp_virt, pgnr);
}
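
/*
 * Typical usage sketch: map @pgnr physically contiguous pages at ISP
 * virtual address @isp_virt, and unmap them again on teardown:
 *
 *	if (isp_mmu_map(mmu, isp_virt, page_to_phys(page), pgnr))
 *		goto err;
 *	...
 *	isp_mmu_unmap(mmu, isp_virt, pgnr);
 *
 * Neither call flushes the ISP TLB; that is left to the caller via the
 * driver's tlb_flush_range/tlb_flush_all hooks.
 */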

/* Fallback used when the client driver provides no ranged TLB flush. */
static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
					    unsigned int start,
					    unsigned int size)
{
	isp_mmu_flush_tlb(mmu);
}

/* MMU init for internal structure */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
{
	if (!mmu)		/* error */
		return -EINVAL;
	if (!driver)		/* error */
		return -EINVAL;

	if (!driver->name)
		dev_warn(atomisp_dev, "NULL name for MMU driver...\n");

	mmu->driver = driver;

	if (!driver->tlb_flush_all) {
		dev_err(atomisp_dev, "tlb_flush_all operation not provided.\n");
		return -EINVAL;
	}

	if (!driver->tlb_flush_range)
		driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;

	if (!driver->pte_valid_mask) {
		dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
		return -EINVAL;
	}

	mmu->l1_pte = driver->null_pte;

	mutex_init(&mmu->pt_mutex);

	return 0;
}
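
/*
 * Initialization sketch: a client driver fills in struct isp_mmu_client
 * (the conversion hooks, null_pte, pte_valid_mask, and at least
 * tlb_flush_all) and then calls isp_mmu_init():
 *
 *	static struct isp_mmu_client my_mmu_driver = {
 *		.name		= "My ISP MMU",
 *		.pte_valid_mask	= ...,
 *		.null_pte	= ...,
 *		.pte_to_phys	= ...,
 *		.phys_to_pte	= ...,
 *		.tlb_flush_all	= ...,
 *	};
 *
 *	ret = isp_mmu_init(&mmu, &my_mmu_driver);
 *
 * The field names are those checked and used in this file;
 * "my_mmu_driver" is a hypothetical example.
 */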

/* Free L1 and L2 page tables */
void isp_mmu_exit(struct isp_mmu *mmu)
{
	unsigned int idx;
	unsigned int pte;
	phys_addr_t l1_pt, l2_pt;

	if (!mmu)
		return;

	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
			 (unsigned int)mmu->l1_pte);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
		pte = atomisp_get_pte(l1_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			l2_pt = isp_pte_to_pgaddr(mmu, pte);

			free_page_table(mmu, l2_pt);
		}
	}

	free_page_table(mmu, l1_pt);
}