1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder_alloc.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2017 Google, Inc.
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/list.h>
12 #include <linux/sched/mm.h>
13 #include <linux/module.h>
14 #include <linux/rtmutex.h>
15 #include <linux/rbtree.h>
16 #include <linux/seq_file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/list_lru.h>
21 #include <linux/ratelimit.h>
22 #include <asm/cacheflush.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/sizes.h>
26 #include "binder_alloc.h"
27 #include "binder_trace.h"
28 
29 struct list_lru binder_freelist;
30 
31 static DEFINE_MUTEX(binder_alloc_mmap_lock);
32 
33 enum {
34 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
35 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
36 	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
37 	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
38 };
39 static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
40 
41 module_param_named(debug_mask, binder_alloc_debug_mask,
42 		   uint, 0644);
43 
44 #define binder_alloc_debug(mask, x...) \
45 	do { \
46 		if (binder_alloc_debug_mask & mask) \
47 			pr_info_ratelimited(x); \
48 	} while (0)
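/*
 * Illustrative note (not part of the original source): debug_mask is a
 * bitwise OR of the BINDER_DEBUG_* flags above, so a value of 0x5 enables
 * BINDER_DEBUG_USER_ERROR and BINDER_DEBUG_BUFFER_ALLOC. It can be changed
 * at runtime through the module parameter interface; the exact sysfs path
 * depends on how this object is built into the kernel.
 */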
49 
50 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
51 {
52 	return list_entry(buffer->entry.next, struct binder_buffer, entry);
53 }
54 
55 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
56 {
57 	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
58 }
59 
60 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
61 				       struct binder_buffer *buffer)
62 {
63 	if (list_is_last(&buffer->entry, &alloc->buffers))
64 		return alloc->vm_start + alloc->buffer_size - buffer->user_data;
65 	return binder_buffer_next(buffer)->user_data - buffer->user_data;
66 }
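/*
 * Worked example (illustrative): buffer sizes are implicit in the buffer
 * list. If a buffer starts at user_data == vm_start + 0x100 and the next
 * buffer in alloc->buffers starts at vm_start + 0x180, its size is 0x80.
 * The last buffer extends to the end of the mapping, i.e.
 * vm_start + buffer_size - user_data.
 */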
67 
68 static void binder_insert_free_buffer(struct binder_alloc *alloc,
69 				      struct binder_buffer *new_buffer)
70 {
71 	struct rb_node **p = &alloc->free_buffers.rb_node;
72 	struct rb_node *parent = NULL;
73 	struct binder_buffer *buffer;
74 	size_t buffer_size;
75 	size_t new_buffer_size;
76 
77 	BUG_ON(!new_buffer->free);
78 
79 	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
80 
81 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
82 		     "%d: add free buffer, size %zd, at %pK\n",
83 		      alloc->pid, new_buffer_size, new_buffer);
84 
85 	while (*p) {
86 		parent = *p;
87 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
88 		BUG_ON(!buffer->free);
89 
90 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
91 
92 		if (new_buffer_size < buffer_size)
93 			p = &parent->rb_left;
94 		else
95 			p = &parent->rb_right;
96 	}
97 	rb_link_node(&new_buffer->rb_node, parent, p);
98 	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
99 }
100 
101 static void binder_insert_allocated_buffer_locked(
102 		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
103 {
104 	struct rb_node **p = &alloc->allocated_buffers.rb_node;
105 	struct rb_node *parent = NULL;
106 	struct binder_buffer *buffer;
107 
108 	BUG_ON(new_buffer->free);
109 
110 	while (*p) {
111 		parent = *p;
112 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
113 		BUG_ON(buffer->free);
114 
115 		if (new_buffer->user_data < buffer->user_data)
116 			p = &parent->rb_left;
117 		else if (new_buffer->user_data > buffer->user_data)
118 			p = &parent->rb_right;
119 		else
120 			BUG();
121 	}
122 	rb_link_node(&new_buffer->rb_node, parent, p);
123 	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
124 }
125 
126 static struct binder_buffer *binder_alloc_prepare_to_free_locked(
127 		struct binder_alloc *alloc,
128 		unsigned long user_ptr)
129 {
130 	struct rb_node *n = alloc->allocated_buffers.rb_node;
131 	struct binder_buffer *buffer;
132 
133 	while (n) {
134 		buffer = rb_entry(n, struct binder_buffer, rb_node);
135 		BUG_ON(buffer->free);
136 
137 		if (user_ptr < buffer->user_data) {
138 			n = n->rb_left;
139 		} else if (user_ptr > buffer->user_data) {
140 			n = n->rb_right;
141 		} else {
142 			/*
143 			 * Guard against user threads attempting to
144 			 * free the buffer when in use by the kernel or
145 			 * after it's already been freed.
146 			 */
147 			if (!buffer->allow_user_free)
148 				return ERR_PTR(-EPERM);
149 			buffer->allow_user_free = 0;
150 			return buffer;
151 		}
152 	}
153 	return NULL;
154 }
155 
156 /**
157  * binder_alloc_prepare_to_free() - get buffer given user ptr
158  * @alloc:	binder_alloc for this proc
159  * @user_ptr:	User pointer to buffer data
160  *
161  * Validate the userspace pointer to the buffer data and return the buffer
162  * corresponding to that user pointer. Search the rb tree for a buffer that
163  * matches the user data pointer.
164  *
165  * Return:	Pointer to buffer, NULL if not found, or ERR_PTR(-EPERM) on a disallowed free
166  */
167 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
168 						   unsigned long user_ptr)
169 {
170 	struct binder_buffer *buffer;
171 
172 	mutex_lock(&alloc->mutex);
173 	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
174 	mutex_unlock(&alloc->mutex);
175 	return buffer;
176 }
177 
178 static inline void
179 binder_set_installed_page(struct binder_alloc *alloc,
180 			  unsigned long index,
181 			  struct page *page)
182 {
183 	/* Pairs with acquire in binder_get_installed_page() */
184 	smp_store_release(&alloc->pages[index], page);
185 }
186 
187 static inline struct page *
188 binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
189 {
190 	/* Pairs with release in binder_set_installed_page() */
191 	return smp_load_acquire(&alloc->pages[index]);
192 }
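/*
 * Ordering note (illustrative): the store-release in
 * binder_set_installed_page() pairs with the load-acquire above, so a
 * thread that observes a non-NULL alloc->pages[index] also observes the
 * fully initialized page and the shrinker metadata that
 * binder_page_alloc() set up before the pointer was published.
 */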
193 
194 static void binder_lru_freelist_add(struct binder_alloc *alloc,
195 				    unsigned long start, unsigned long end)
196 {
197 	unsigned long page_addr;
198 	struct page *page;
199 
200 	trace_binder_update_page_range(alloc, false, start, end);
201 
202 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
203 		size_t index;
204 		int ret;
205 
206 		index = (page_addr - alloc->vm_start) / PAGE_SIZE;
207 		page = binder_get_installed_page(alloc, index);
208 		if (!page)
209 			continue;
210 
211 		trace_binder_free_lru_start(alloc, index);
212 
213 		ret = list_lru_add(&binder_freelist,
214 				   page_to_lru(page),
215 				   page_to_nid(page),
216 				   NULL);
217 		WARN_ON(!ret);
218 
219 		trace_binder_free_lru_end(alloc, index);
220 	}
221 }
222 
223 static inline
224 void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
225 {
226 	/* pairs with smp_load_acquire in binder_alloc_is_mapped() */
227 	smp_store_release(&alloc->mapped, state);
228 }
229 
230 static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
231 {
232 	/* pairs with smp_store_release in binder_alloc_set_mapped() */
233 	return smp_load_acquire(&alloc->mapped);
234 }
235 
236 static struct page *binder_page_lookup(struct binder_alloc *alloc,
237 				       unsigned long addr)
238 {
239 	struct mm_struct *mm = alloc->mm;
240 	struct page *page;
241 	long npages = 0;
242 
243 	/*
244 	 * Find an existing page in the remote mm. If missing,
245 	 * don't attempt to fault it in; just propagate an error.
246 	 */
247 	mmap_read_lock(mm);
248 	if (binder_alloc_is_mapped(alloc))
249 		npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
250 					       &page, NULL);
251 	mmap_read_unlock(mm);
252 
253 	return npages > 0 ? page : NULL;
254 }
255 
256 static int binder_page_insert(struct binder_alloc *alloc,
257 			      unsigned long addr,
258 			      struct page *page)
259 {
260 	struct mm_struct *mm = alloc->mm;
261 	struct vm_area_struct *vma;
262 	int ret = -ESRCH;
263 
264 	/* attempt per-vma lock first */
265 	vma = lock_vma_under_rcu(mm, addr);
266 	if (vma) {
267 		if (binder_alloc_is_mapped(alloc))
268 			ret = vm_insert_page(vma, addr, page);
269 		vma_end_read(vma);
270 		return ret;
271 	}
272 
273 	/* fall back to mmap_lock */
274 	mmap_read_lock(mm);
275 	vma = vma_lookup(mm, addr);
276 	if (vma && binder_alloc_is_mapped(alloc))
277 		ret = vm_insert_page(vma, addr, page);
278 	mmap_read_unlock(mm);
279 
280 	return ret;
281 }
282 
283 static struct page *binder_page_alloc(struct binder_alloc *alloc,
284 				      unsigned long index)
285 {
286 	struct binder_shrinker_mdata *mdata;
287 	struct page *page;
288 
289 	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
290 	if (!page)
291 		return NULL;
292 
293 	/* allocate and install shrinker metadata under page->private */
294 	mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
295 	if (!mdata) {
296 		__free_page(page);
297 		return NULL;
298 	}
299 
300 	mdata->alloc = alloc;
301 	mdata->page_index = index;
302 	INIT_LIST_HEAD(&mdata->lru);
303 	set_page_private(page, (unsigned long)mdata);
304 
305 	return page;
306 }
307 
308 static void binder_free_page(struct page *page)
309 {
310 	kfree((struct binder_shrinker_mdata *)page_private(page));
311 	__free_page(page);
312 }
313 
314 static int binder_install_single_page(struct binder_alloc *alloc,
315 				      unsigned long index,
316 				      unsigned long addr)
317 {
318 	struct page *page;
319 	int ret;
320 
321 	if (!mmget_not_zero(alloc->mm))
322 		return -ESRCH;
323 
324 	page = binder_page_alloc(alloc, index);
325 	if (!page) {
326 		ret = -ENOMEM;
327 		goto out;
328 	}
329 
330 	ret = binder_page_insert(alloc, addr, page);
331 	switch (ret) {
332 	case -EBUSY:
333 		/*
334 		 * EBUSY is ok. Someone installed the pte first but the
335 		 * alloc->pages[index] has not been updated yet. Discard
336 		 * our page and look up the one already installed.
337 		 */
338 		ret = 0;
339 		binder_free_page(page);
340 		page = binder_page_lookup(alloc, addr);
341 		if (!page) {
342 			pr_err("%d: failed to find page at offset %lx\n",
343 			       alloc->pid, addr - alloc->vm_start);
344 			ret = -ESRCH;
345 			break;
346 		}
347 		fallthrough;
348 	case 0:
349 		/* Mark page installation complete and safe to use */
350 		binder_set_installed_page(alloc, index, page);
351 		break;
352 	default:
353 		binder_free_page(page);
354 		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
355 		       alloc->pid, __func__, addr - alloc->vm_start, ret);
356 		break;
357 	}
358 out:
359 	mmput_async(alloc->mm);
360 	return ret;
361 }
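/*
 * Race sketch (illustrative): two threads may try to install the same page
 * concurrently. Thread A wins vm_insert_page(); thread B gets -EBUSY,
 * frees its own page, recovers A's page with binder_page_lookup() and then
 * publishes it via binder_set_installed_page(). Either way exactly one
 * page ends up mapped and recorded in alloc->pages[index].
 */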
362 
363 static int binder_install_buffer_pages(struct binder_alloc *alloc,
364 				       struct binder_buffer *buffer,
365 				       size_t size)
366 {
367 	unsigned long start, final;
368 	unsigned long page_addr;
369 
370 	start = buffer->user_data & PAGE_MASK;
371 	final = PAGE_ALIGN(buffer->user_data + size);
372 
373 	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
374 		unsigned long index;
375 		int ret;
376 
377 		index = (page_addr - alloc->vm_start) / PAGE_SIZE;
378 		if (binder_get_installed_page(alloc, index))
379 			continue;
380 
381 		trace_binder_alloc_page_start(alloc, index);
382 
383 		ret = binder_install_single_page(alloc, index, page_addr);
384 		if (ret)
385 			return ret;
386 
387 		trace_binder_alloc_page_end(alloc, index);
388 	}
389 
390 	return 0;
391 }
392 
393 /* The range of pages should exclude those shared with other buffers */
394 static void binder_lru_freelist_del(struct binder_alloc *alloc,
395 				    unsigned long start, unsigned long end)
396 {
397 	unsigned long page_addr;
398 	struct page *page;
399 
400 	trace_binder_update_page_range(alloc, true, start, end);
401 
402 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
403 		unsigned long index;
404 		bool on_lru;
405 
406 		index = (page_addr - alloc->vm_start) / PAGE_SIZE;
407 		page = binder_get_installed_page(alloc, index);
408 
409 		if (page) {
410 			trace_binder_alloc_lru_start(alloc, index);
411 
412 			on_lru = list_lru_del(&binder_freelist,
413 					      page_to_lru(page),
414 					      page_to_nid(page),
415 					      NULL);
416 			WARN_ON(!on_lru);
417 
418 			trace_binder_alloc_lru_end(alloc, index);
419 			continue;
420 		}
421 
422 		if (index + 1 > alloc->pages_high)
423 			alloc->pages_high = index + 1;
424 	}
425 }
426 
427 static void debug_no_space_locked(struct binder_alloc *alloc)
428 {
429 	size_t largest_alloc_size = 0;
430 	struct binder_buffer *buffer;
431 	size_t allocated_buffers = 0;
432 	size_t largest_free_size = 0;
433 	size_t total_alloc_size = 0;
434 	size_t total_free_size = 0;
435 	size_t free_buffers = 0;
436 	size_t buffer_size;
437 	struct rb_node *n;
438 
439 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
440 		buffer = rb_entry(n, struct binder_buffer, rb_node);
441 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
442 		allocated_buffers++;
443 		total_alloc_size += buffer_size;
444 		if (buffer_size > largest_alloc_size)
445 			largest_alloc_size = buffer_size;
446 	}
447 
448 	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
449 		buffer = rb_entry(n, struct binder_buffer, rb_node);
450 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
451 		free_buffers++;
452 		total_free_size += buffer_size;
453 		if (buffer_size > largest_free_size)
454 			largest_free_size = buffer_size;
455 	}
456 
457 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
458 			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
459 			   total_alloc_size, allocated_buffers,
460 			   largest_alloc_size, total_free_size,
461 			   free_buffers, largest_free_size);
462 }
463 
464 static bool debug_low_async_space_locked(struct binder_alloc *alloc)
465 {
466 	/*
467 	 * Find the number and total size of buffers allocated by the current
468 	 * caller. The idea is that once we cross the threshold, whoever is
469 	 * responsible for the low async space is likely to try to send another
470 	 * async txn, and at some point we'll catch them in the act. This is
471 	 * more efficient than keeping a map per pid.
472 	 */
473 	struct binder_buffer *buffer;
474 	size_t total_alloc_size = 0;
475 	int pid = current->tgid;
476 	size_t num_buffers = 0;
477 	struct rb_node *n;
478 
479 	/*
480 	 * Only start detecting spammers once we have less than 20% of async
481 	 * space left (which is less than 10% of total buffer size).
482 	 */
483 	if (alloc->free_async_space >= alloc->buffer_size / 10) {
484 		alloc->oneway_spam_detected = false;
485 		return false;
486 	}
487 
488 	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
489 		 n = rb_next(n)) {
490 		buffer = rb_entry(n, struct binder_buffer, rb_node);
491 		if (buffer->pid != pid)
492 			continue;
493 		if (!buffer->async_transaction)
494 			continue;
495 		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
496 		num_buffers++;
497 	}
498 
499 	/*
500 	 * Warn if this pid has more than 50 transactions, or more than 50% of
501 	 * async space (which is 25% of total buffer size). Oneway spam is only
502 	 * detected when the threshold is exceeded.
503 	 */
504 	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
505 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
506 			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
507 			      alloc->pid, pid, num_buffers, total_alloc_size);
508 		if (!alloc->oneway_spam_detected) {
509 			alloc->oneway_spam_detected = true;
510 			return true;
511 		}
512 	}
513 	return false;
514 }
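/*
 * Worked example (illustrative): with a 4 MiB mapping, async space is
 * 2 MiB (half of buffer_size). Spam detection only starts once
 * free_async_space drops below buffer_size / 10 (~410 KiB here), and a
 * sender is flagged once it holds more than 50 async buffers or more than
 * buffer_size / 4 (1 MiB here) worth of allocations.
 */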
515 
516 /* Callers preallocate @new_buffer; it is freed by this function if unused */
517 static struct binder_buffer *binder_alloc_new_buf_locked(
518 				struct binder_alloc *alloc,
519 				struct binder_buffer *new_buffer,
520 				size_t size,
521 				int is_async)
522 {
523 	struct rb_node *n = alloc->free_buffers.rb_node;
524 	struct rb_node *best_fit = NULL;
525 	struct binder_buffer *buffer;
526 	unsigned long next_used_page;
527 	unsigned long curr_last_page;
528 	size_t buffer_size;
529 
530 	if (is_async && alloc->free_async_space < size) {
531 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
532 			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
533 			      alloc->pid, size);
534 		buffer = ERR_PTR(-ENOSPC);
535 		goto out;
536 	}
537 
538 	while (n) {
539 		buffer = rb_entry(n, struct binder_buffer, rb_node);
540 		BUG_ON(!buffer->free);
541 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
542 
543 		if (size < buffer_size) {
544 			best_fit = n;
545 			n = n->rb_left;
546 		} else if (size > buffer_size) {
547 			n = n->rb_right;
548 		} else {
549 			best_fit = n;
550 			break;
551 		}
552 	}
553 
554 	if (unlikely(!best_fit)) {
555 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
556 				   "%d: binder_alloc_buf size %zd failed, no address space\n",
557 				   alloc->pid, size);
558 		debug_no_space_locked(alloc);
559 		buffer = ERR_PTR(-ENOSPC);
560 		goto out;
561 	}
562 
563 	if (buffer_size != size) {
564 		/* The best-fit buffer is oversized and needs to be split */
565 		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
566 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
567 
568 		WARN_ON(n || buffer_size == size);
569 		new_buffer->user_data = buffer->user_data + size;
570 		list_add(&new_buffer->entry, &buffer->entry);
571 		new_buffer->free = 1;
572 		binder_insert_free_buffer(alloc, new_buffer);
573 		new_buffer = NULL;
574 	}
575 
576 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
577 		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
578 		      alloc->pid, size, buffer, buffer_size);
579 
580 	/*
581 	 * Now we remove the pages from the freelist. A clever calculation
582 	 * with buffer_size determines if the last page is shared with an
583 	 * adjacent in-use buffer. In that case, the page has already been
584 	 * removed from the freelist, so we trim our range short.
585 	 */
586 	next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
587 	curr_last_page = PAGE_ALIGN(buffer->user_data + size);
588 	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
589 				min(next_used_page, curr_last_page));
590 
591 	rb_erase(&buffer->rb_node, &alloc->free_buffers);
592 	buffer->free = 0;
593 	buffer->allow_user_free = 0;
594 	binder_insert_allocated_buffer_locked(alloc, buffer);
595 	buffer->async_transaction = is_async;
596 	buffer->oneway_spam_suspect = false;
597 	if (is_async) {
598 		alloc->free_async_space -= size;
599 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
600 			     "%d: binder_alloc_buf size %zd async free %zd\n",
601 			      alloc->pid, size, alloc->free_async_space);
602 		if (debug_low_async_space_locked(alloc))
603 			buffer->oneway_spam_suspect = true;
604 	}
605 
606 out:
607 	/* Discard possibly unused new_buffer */
608 	kfree(new_buffer);
609 	return buffer;
610 }
611 
612 /* Calculate the sanitized total size; returns 0 for an invalid request */
613 static inline size_t sanitized_size(size_t data_size,
614 				    size_t offsets_size,
615 				    size_t extra_buffers_size)
616 {
617 	size_t total, tmp;
618 
619 	/* Align to pointer size and check for overflows */
620 	tmp = ALIGN(data_size, sizeof(void *)) +
621 		ALIGN(offsets_size, sizeof(void *));
622 	if (tmp < data_size || tmp < offsets_size)
623 		return 0;
624 	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
625 	if (total < tmp || total < extra_buffers_size)
626 		return 0;
627 
628 	/* Pad 0-sized buffers so they get a unique address */
629 	total = max(total, sizeof(void *));
630 
631 	return total;
632 }
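/*
 * Worked example (illustrative, 64-bit): data_size = 28, offsets_size = 16,
 * extra_buffers_size = 0 gives ALIGN(28, 8) + ALIGN(16, 8) = 32 + 16 = 48
 * bytes. A request of all zeros is padded up to sizeof(void *) so the
 * resulting buffer still gets a unique address, and a request whose aligned
 * sum wraps around SIZE_MAX yields 0 and is rejected by the caller.
 */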
633 
634 /**
635  * binder_alloc_new_buf() - Allocate a new binder buffer
636  * @alloc:              binder_alloc for this proc
637  * @data_size:          size of user data buffer
638  * @offsets_size:       user specified buffer offset
639  * @extra_buffers_size: size of extra space for meta-data (eg, security context)
640  * @is_async:           buffer for async transaction
641  *
642  * Allocate a new buffer given the requested sizes. Returns
643  * the kernel version of the buffer pointer. The size allocated
644  * is the sum of the three given sizes (each rounded up to a
645  * pointer-sized boundary).
646  *
647  * Return:	The allocated buffer or %ERR_PTR(-errno) if error
648  */
649 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
650 					   size_t data_size,
651 					   size_t offsets_size,
652 					   size_t extra_buffers_size,
653 					   int is_async)
654 {
655 	struct binder_buffer *buffer, *next;
656 	size_t size;
657 	int ret;
658 
659 	/* Check binder_alloc is fully initialized */
660 	if (!binder_alloc_is_mapped(alloc)) {
661 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
662 				   "%d: binder_alloc_buf, no vma\n",
663 				   alloc->pid);
664 		return ERR_PTR(-ESRCH);
665 	}
666 
667 	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
668 	if (unlikely(!size)) {
669 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
670 				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
671 				   alloc->pid, data_size, offsets_size,
672 				   extra_buffers_size);
673 		return ERR_PTR(-EINVAL);
674 	}
675 
676 	/* Preallocate the next buffer */
677 	next = kzalloc(sizeof(*next), GFP_KERNEL);
678 	if (!next)
679 		return ERR_PTR(-ENOMEM);
680 
681 	mutex_lock(&alloc->mutex);
682 	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
683 	if (IS_ERR(buffer)) {
684 		mutex_unlock(&alloc->mutex);
685 		goto out;
686 	}
687 
688 	buffer->data_size = data_size;
689 	buffer->offsets_size = offsets_size;
690 	buffer->extra_buffers_size = extra_buffers_size;
691 	buffer->pid = current->tgid;
692 	mutex_unlock(&alloc->mutex);
693 
694 	ret = binder_install_buffer_pages(alloc, buffer, size);
695 	if (ret) {
696 		binder_alloc_free_buf(alloc, buffer);
697 		buffer = ERR_PTR(ret);
698 	}
699 out:
700 	return buffer;
701 }
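/*
 * Usage sketch (illustrative, simplified from what a caller such as the
 * transaction path in binder.c would do; variable names are hypothetical):
 *
 *	buffer = binder_alloc_new_buf(alloc, data_size, offsets_size,
 *				      secctx_sz, is_oneway);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	if (binder_alloc_copy_user_to_buffer(alloc, buffer, 0, user_ptr,
 *					     data_size))
 *		binder_alloc_free_buf(alloc, buffer);	// copy failed
 */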
702 
703 static unsigned long buffer_start_page(struct binder_buffer *buffer)
704 {
705 	return buffer->user_data & PAGE_MASK;
706 }
707 
708 static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
709 {
710 	return (buffer->user_data - 1) & PAGE_MASK;
711 }
712 
713 static void binder_delete_free_buffer(struct binder_alloc *alloc,
714 				      struct binder_buffer *buffer)
715 {
716 	struct binder_buffer *prev, *next;
717 
718 	if (PAGE_ALIGNED(buffer->user_data))
719 		goto skip_freelist;
720 
721 	BUG_ON(alloc->buffers.next == &buffer->entry);
722 	prev = binder_buffer_prev(buffer);
723 	BUG_ON(!prev->free);
724 	if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
725 		goto skip_freelist;
726 
727 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
728 		next = binder_buffer_next(buffer);
729 		if (buffer_start_page(next) == buffer_start_page(buffer))
730 			goto skip_freelist;
731 	}
732 
733 	binder_lru_freelist_add(alloc, buffer_start_page(buffer),
734 				buffer_start_page(buffer) + PAGE_SIZE);
735 skip_freelist:
736 	list_del(&buffer->entry);
737 	kfree(buffer);
738 }
739 
740 static void binder_free_buf_locked(struct binder_alloc *alloc,
741 				   struct binder_buffer *buffer)
742 {
743 	size_t size, buffer_size;
744 
745 	buffer_size = binder_alloc_buffer_size(alloc, buffer);
746 
747 	size = ALIGN(buffer->data_size, sizeof(void *)) +
748 		ALIGN(buffer->offsets_size, sizeof(void *)) +
749 		ALIGN(buffer->extra_buffers_size, sizeof(void *));
750 
751 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
752 		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
753 		      alloc->pid, buffer, size, buffer_size);
754 
755 	BUG_ON(buffer->free);
756 	BUG_ON(size > buffer_size);
757 	BUG_ON(buffer->transaction != NULL);
758 	BUG_ON(buffer->user_data < alloc->vm_start);
759 	BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
760 
761 	if (buffer->async_transaction) {
762 		alloc->free_async_space += buffer_size;
763 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
764 			     "%d: binder_free_buf size %zd async free %zd\n",
765 			      alloc->pid, size, alloc->free_async_space);
766 	}
767 
768 	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
769 				(buffer->user_data + buffer_size) & PAGE_MASK);
770 
771 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
772 	buffer->free = 1;
773 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
774 		struct binder_buffer *next = binder_buffer_next(buffer);
775 
776 		if (next->free) {
777 			rb_erase(&next->rb_node, &alloc->free_buffers);
778 			binder_delete_free_buffer(alloc, next);
779 		}
780 	}
781 	if (alloc->buffers.next != &buffer->entry) {
782 		struct binder_buffer *prev = binder_buffer_prev(buffer);
783 
784 		if (prev->free) {
785 			binder_delete_free_buffer(alloc, buffer);
786 			rb_erase(&prev->rb_node, &alloc->free_buffers);
787 			buffer = prev;
788 		}
789 	}
790 	binder_insert_free_buffer(alloc, buffer);
791 }
792 
793 /**
794  * binder_alloc_get_page() - get kernel pointer for given buffer offset
795  * @alloc: binder_alloc for this proc
796  * @buffer: binder buffer to be accessed
797  * @buffer_offset: offset into @buffer data
798  * @pgoffp: address to copy final page offset to
799  *
800  * Lookup the struct page corresponding to the address
801  * at @buffer_offset into @buffer->user_data. If @pgoffp is not
802  * NULL, the byte-offset into the page is written there.
803  *
804  * The caller is responsible for ensuring that the offset points
805  * to a valid address within @buffer and that @buffer is
806  * not freeable by the user. Since it can't be freed, we are
807  * guaranteed that the corresponding elements of @alloc->pages[]
808  * cannot change.
809  *
810  * Return: struct page
811  */
812 static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
813 					  struct binder_buffer *buffer,
814 					  binder_size_t buffer_offset,
815 					  pgoff_t *pgoffp)
816 {
817 	binder_size_t buffer_space_offset = buffer_offset +
818 		(buffer->user_data - alloc->vm_start);
819 	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
820 	size_t index = buffer_space_offset >> PAGE_SHIFT;
821 
822 	*pgoffp = pgoff;
823 
824 	return alloc->pages[index];
825 }
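/*
 * Worked example (illustrative): with PAGE_SIZE == 4096, a buffer whose
 * user_data sits at vm_start + 0x1800 and a buffer_offset of 0x900 gives
 * buffer_space_offset = 0x2100, i.e. index 2 and a page offset of 0x100.
 */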
826 
827 /**
828  * binder_alloc_clear_buf() - zero out buffer
829  * @alloc: binder_alloc for this proc
830  * @buffer: binder buffer to be cleared
831  *
832  * memset the given buffer to 0
833  */
834 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
835 				   struct binder_buffer *buffer)
836 {
837 	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
838 	binder_size_t buffer_offset = 0;
839 
840 	while (bytes) {
841 		unsigned long size;
842 		struct page *page;
843 		pgoff_t pgoff;
844 
845 		page = binder_alloc_get_page(alloc, buffer,
846 					     buffer_offset, &pgoff);
847 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
848 		memset_page(page, pgoff, 0, size);
849 		bytes -= size;
850 		buffer_offset += size;
851 	}
852 }
853 
854 /**
855  * binder_alloc_free_buf() - free a binder buffer
856  * @alloc:	binder_alloc for this proc
857  * @buffer:	kernel pointer to buffer
858  *
859  * Free the buffer allocated via binder_alloc_new_buf()
860  */
861 void binder_alloc_free_buf(struct binder_alloc *alloc,
862 			    struct binder_buffer *buffer)
863 {
864 	/*
865 	 * We could eliminate the call to binder_alloc_clear_buf()
866 	 * from binder_alloc_deferred_release() by moving this to
867 	 * binder_free_buf_locked(). However, that could
868 	 * increase contention for the alloc mutex if clear_on_free
869 	 * is used frequently for large buffers. The mutex is not
870 	 * needed for correctness here.
871 	 */
872 	if (buffer->clear_on_free) {
873 		binder_alloc_clear_buf(alloc, buffer);
874 		buffer->clear_on_free = false;
875 	}
876 	mutex_lock(&alloc->mutex);
877 	binder_free_buf_locked(alloc, buffer);
878 	mutex_unlock(&alloc->mutex);
879 }
880 
881 /**
882  * binder_alloc_mmap_handler() - map virtual address space for proc
883  * @alloc:	alloc structure for this proc
884  * @vma:	vma passed to mmap()
885  *
886  * Called by binder_mmap() to initialize the space specified in
887  * vma for allocating binder buffers
888  *
889  * Return:
890  *      0 = success
891  *      -EBUSY = address space already mapped
892  *      -ENOMEM = failed to map memory to given address space
893  */
894 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
895 			      struct vm_area_struct *vma)
896 {
897 	struct binder_buffer *buffer;
898 	const char *failure_string;
899 	int ret;
900 
901 	if (unlikely(vma->vm_mm != alloc->mm)) {
902 		ret = -EINVAL;
903 		failure_string = "invalid vma->vm_mm";
904 		goto err_invalid_mm;
905 	}
906 
907 	mutex_lock(&binder_alloc_mmap_lock);
908 	if (alloc->buffer_size) {
909 		ret = -EBUSY;
910 		failure_string = "already mapped";
911 		goto err_already_mapped;
912 	}
913 	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
914 				   SZ_4M);
915 	mutex_unlock(&binder_alloc_mmap_lock);
916 
917 	alloc->vm_start = vma->vm_start;
918 
919 	alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
920 				sizeof(alloc->pages[0]),
921 				GFP_KERNEL);
922 	if (!alloc->pages) {
923 		ret = -ENOMEM;
924 		failure_string = "alloc page array";
925 		goto err_alloc_pages_failed;
926 	}
927 
928 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
929 	if (!buffer) {
930 		ret = -ENOMEM;
931 		failure_string = "alloc buffer struct";
932 		goto err_alloc_buf_struct_failed;
933 	}
934 
935 	buffer->user_data = alloc->vm_start;
936 	list_add(&buffer->entry, &alloc->buffers);
937 	buffer->free = 1;
938 	binder_insert_free_buffer(alloc, buffer);
939 	alloc->free_async_space = alloc->buffer_size / 2;
940 
941 	/* Signal binder_alloc is fully initialized */
942 	binder_alloc_set_mapped(alloc, true);
943 
944 	return 0;
945 
946 err_alloc_buf_struct_failed:
947 	kvfree(alloc->pages);
948 	alloc->pages = NULL;
949 err_alloc_pages_failed:
950 	alloc->vm_start = 0;
951 	mutex_lock(&binder_alloc_mmap_lock);
952 	alloc->buffer_size = 0;
953 err_already_mapped:
954 	mutex_unlock(&binder_alloc_mmap_lock);
955 err_invalid_mm:
956 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
957 			   "%s: %d %lx-%lx %s failed %d\n", __func__,
958 			   alloc->pid, vma->vm_start, vma->vm_end,
959 			   failure_string, ret);
960 	return ret;
961 }
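/*
 * Worked example (illustrative): a 1 MiB mmap() request results in
 * buffer_size = 1 MiB (requests larger than SZ_4M are capped to 4 MiB),
 * free_async_space = 512 KiB, a pages[] array of 256 entries with 4 KiB
 * pages, and a single free buffer spanning the whole mapping.
 */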
962 
963 
964 void binder_alloc_deferred_release(struct binder_alloc *alloc)
965 {
966 	struct rb_node *n;
967 	int buffers, page_count;
968 	struct binder_buffer *buffer;
969 
970 	buffers = 0;
971 	mutex_lock(&alloc->mutex);
972 	BUG_ON(alloc->mapped);
973 
974 	while ((n = rb_first(&alloc->allocated_buffers))) {
975 		buffer = rb_entry(n, struct binder_buffer, rb_node);
976 
977 		/* Transaction should already have been freed */
978 		BUG_ON(buffer->transaction);
979 
980 		if (buffer->clear_on_free) {
981 			binder_alloc_clear_buf(alloc, buffer);
982 			buffer->clear_on_free = false;
983 		}
984 		binder_free_buf_locked(alloc, buffer);
985 		buffers++;
986 	}
987 
988 	while (!list_empty(&alloc->buffers)) {
989 		buffer = list_first_entry(&alloc->buffers,
990 					  struct binder_buffer, entry);
991 		WARN_ON(!buffer->free);
992 
993 		list_del(&buffer->entry);
994 		WARN_ON_ONCE(!list_empty(&alloc->buffers));
995 		kfree(buffer);
996 	}
997 
998 	page_count = 0;
999 	if (alloc->pages) {
1000 		int i;
1001 
1002 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
1003 			struct page *page;
1004 			bool on_lru;
1005 
1006 			page = binder_get_installed_page(alloc, i);
1007 			if (!page)
1008 				continue;
1009 
1010 			on_lru = list_lru_del(&binder_freelist,
1011 					      page_to_lru(page),
1012 					      page_to_nid(page),
1013 					      NULL);
1014 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
1015 				     "%s: %d: page %d %s\n",
1016 				     __func__, alloc->pid, i,
1017 				     on_lru ? "on lru" : "active");
1018 			binder_free_page(page);
1019 			page_count++;
1020 		}
1021 	}
1022 	mutex_unlock(&alloc->mutex);
1023 	kvfree(alloc->pages);
1024 	if (alloc->mm)
1025 		mmdrop(alloc->mm);
1026 
1027 	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
1028 		     "%s: %d buffers %d, pages %d\n",
1029 		     __func__, alloc->pid, buffers, page_count);
1030 }
1031 
1032 /**
1033  * binder_alloc_print_allocated() - print buffer info
1034  * @m:     seq_file for output via seq_printf()
1035  * @alloc: binder_alloc for this proc
1036  *
1037  * Prints information about every buffer associated with
1038  * the binder_alloc state to the given seq_file
1039  */
1040 void binder_alloc_print_allocated(struct seq_file *m,
1041 				  struct binder_alloc *alloc)
1042 {
1043 	struct binder_buffer *buffer;
1044 	struct rb_node *n;
1045 
1046 	mutex_lock(&alloc->mutex);
1047 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
1048 		buffer = rb_entry(n, struct binder_buffer, rb_node);
1049 		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
1050 			   buffer->debug_id,
1051 			   buffer->user_data - alloc->vm_start,
1052 			   buffer->data_size, buffer->offsets_size,
1053 			   buffer->extra_buffers_size,
1054 			   buffer->transaction ? "active" : "delivered");
1055 	}
1056 	mutex_unlock(&alloc->mutex);
1057 }
1058 
1059 /**
1060  * binder_alloc_print_pages() - print page usage
1061  * @m:     seq_file for output via seq_printf()
1062  * @alloc: binder_alloc for this proc
1063  */
1064 void binder_alloc_print_pages(struct seq_file *m,
1065 			      struct binder_alloc *alloc)
1066 {
1067 	struct page *page;
1068 	int i;
1069 	int active = 0;
1070 	int lru = 0;
1071 	int free = 0;
1072 
1073 	mutex_lock(&alloc->mutex);
1074 	/*
1075 	 * Make sure the binder_alloc is fully initialized, otherwise we might
1076 	 * read inconsistent state.
1077 	 */
1078 	if (binder_alloc_is_mapped(alloc)) {
1079 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
1080 			page = binder_get_installed_page(alloc, i);
1081 			if (!page)
1082 				free++;
1083 			else if (list_empty(page_to_lru(page)))
1084 				active++;
1085 			else
1086 				lru++;
1087 		}
1088 	}
1089 	mutex_unlock(&alloc->mutex);
1090 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
1091 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
1092 }
1093 
1094 /**
1095  * binder_alloc_get_allocated_count() - return count of buffers
1096  * @alloc: binder_alloc for this proc
1097  *
1098  * Return: count of allocated buffers
1099  */
1100 int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
1101 {
1102 	struct rb_node *n;
1103 	int count = 0;
1104 
1105 	mutex_lock(&alloc->mutex);
1106 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
1107 		count++;
1108 	mutex_unlock(&alloc->mutex);
1109 	return count;
1110 }
1111 
1112 
1113 /**
1114  * binder_alloc_vma_close() - invalidate address space
1115  * @alloc: binder_alloc for this proc
1116  *
1117  * Called from binder_vma_close() when releasing address space.
1118  * Clears alloc->mapped to prevent new incoming transactions from
1119  * allocating more buffers.
1120  */
1121 void binder_alloc_vma_close(struct binder_alloc *alloc)
1122 {
1123 	binder_alloc_set_mapped(alloc, false);
1124 }
1125 
1126 /**
1127  * binder_alloc_free_page() - shrinker callback to free pages
1128  * @item:   item to free
1129  * @lru:    list_lru instance of the item
1130  * @cb_arg: callback argument
1131  *
1132  * Called from list_lru_walk() in binder_shrink_scan() to free
1133  * up pages when the system is under memory pressure.
1134  */
1135 enum lru_status binder_alloc_free_page(struct list_head *item,
1136 				       struct list_lru_one *lru,
1137 				       void *cb_arg)
1138 	__must_hold(&lru->lock)
1139 {
1140 	struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
1141 	struct binder_alloc *alloc = mdata->alloc;
1142 	struct mm_struct *mm = alloc->mm;
1143 	struct vm_area_struct *vma;
1144 	struct page *page_to_free;
1145 	unsigned long page_addr;
1146 	int mm_locked = 0;
1147 	size_t index;
1148 
1149 	if (!mmget_not_zero(mm))
1150 		goto err_mmget;
1151 
1152 	index = mdata->page_index;
1153 	page_addr = alloc->vm_start + index * PAGE_SIZE;
1154 
1155 	/* attempt per-vma lock first */
1156 	vma = lock_vma_under_rcu(mm, page_addr);
1157 	if (!vma) {
1158 		/* fall back to mmap_lock */
1159 		if (!mmap_read_trylock(mm))
1160 			goto err_mmap_read_lock_failed;
1161 		mm_locked = 1;
1162 		vma = vma_lookup(mm, page_addr);
1163 	}
1164 
1165 	if (!mutex_trylock(&alloc->mutex))
1166 		goto err_get_alloc_mutex_failed;
1167 
1168 	/*
1169 	 * Since a binder_alloc can only be mapped once, we ensure
1170 	 * the vma corresponds to this mapping by checking whether
1171 	 * the binder_alloc is still mapped.
1172 	 */
1173 	if (vma && !binder_alloc_is_mapped(alloc))
1174 		goto err_invalid_vma;
1175 
1176 	trace_binder_unmap_kernel_start(alloc, index);
1177 
1178 	page_to_free = alloc->pages[index];
1179 	binder_set_installed_page(alloc, index, NULL);
1180 
1181 	trace_binder_unmap_kernel_end(alloc, index);
1182 
1183 	list_lru_isolate(lru, item);
1184 	spin_unlock(&lru->lock);
1185 
1186 	if (vma) {
1187 		trace_binder_unmap_user_start(alloc, index);
1188 
1189 		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
1190 
1191 		trace_binder_unmap_user_end(alloc, index);
1192 	}
1193 
1194 	mutex_unlock(&alloc->mutex);
1195 	if (mm_locked)
1196 		mmap_read_unlock(mm);
1197 	else
1198 		vma_end_read(vma);
1199 	mmput_async(mm);
1200 	binder_free_page(page_to_free);
1201 
1202 	return LRU_REMOVED_RETRY;
1203 
1204 err_invalid_vma:
1205 	mutex_unlock(&alloc->mutex);
1206 err_get_alloc_mutex_failed:
1207 	if (mm_locked)
1208 		mmap_read_unlock(mm);
1209 	else
1210 		vma_end_read(vma);
1211 err_mmap_read_lock_failed:
1212 	mmput_async(mm);
1213 err_mmget:
1214 	return LRU_SKIP;
1215 }
1216 
1217 static unsigned long
1218 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1219 {
1220 	return list_lru_count(&binder_freelist);
1221 }
1222 
1223 static unsigned long
1224 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1225 {
1226 	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
1227 			    NULL, sc->nr_to_scan);
1228 }
1229 
1230 static struct shrinker *binder_shrinker;
1231 
1232 /**
1233  * binder_alloc_init() - called by binder_open() for per-proc initialization
1234  * @alloc: binder_alloc for this proc
1235  *
1236  * Called from binder_open() to initialize binder_alloc fields for
1237  * new binder proc
1238  */
1239 void binder_alloc_init(struct binder_alloc *alloc)
1240 {
1241 	alloc->pid = current->group_leader->pid;
1242 	alloc->mm = current->mm;
1243 	mmgrab(alloc->mm);
1244 	mutex_init(&alloc->mutex);
1245 	INIT_LIST_HEAD(&alloc->buffers);
1246 }
1247 
1248 int binder_alloc_shrinker_init(void)
1249 {
1250 	int ret;
1251 
1252 	ret = list_lru_init(&binder_freelist);
1253 	if (ret)
1254 		return ret;
1255 
1256 	binder_shrinker = shrinker_alloc(0, "android-binder");
1257 	if (!binder_shrinker) {
1258 		list_lru_destroy(&binder_freelist);
1259 		return -ENOMEM;
1260 	}
1261 
1262 	binder_shrinker->count_objects = binder_shrink_count;
1263 	binder_shrinker->scan_objects = binder_shrink_scan;
1264 
1265 	shrinker_register(binder_shrinker);
1266 
1267 	return 0;
1268 }
1269 
1270 void binder_alloc_shrinker_exit(void)
1271 {
1272 	shrinker_free(binder_shrinker);
1273 	list_lru_destroy(&binder_freelist);
1274 }
1275 
1276 /**
1277  * check_buffer() - verify that buffer/offset is safe to access
1278  * @alloc: binder_alloc for this proc
1279  * @buffer: binder buffer to be accessed
1280  * @offset: offset into @buffer data
1281  * @bytes: bytes to access from offset
1282  *
1283  * Check that the @offset/@bytes are within the size of the given
1284  * @buffer and that the buffer is currently active and not freeable.
1285  * Offsets must also be multiples of sizeof(u32). The kernel is
1286  * allowed to touch the buffer in two cases:
1287  *
1288  * 1) when the buffer is being created:
1289  *     (buffer->free == 0 && buffer->allow_user_free == 0)
1290  * 2) when the buffer is being torn down:
1291  *     (buffer->free == 0 && buffer->transaction == NULL).
1292  *
1293  * Return: true if the buffer is safe to access
1294  */
1295 static inline bool check_buffer(struct binder_alloc *alloc,
1296 				struct binder_buffer *buffer,
1297 				binder_size_t offset, size_t bytes)
1298 {
1299 	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1300 
1301 	return buffer_size >= bytes &&
1302 		offset <= buffer_size - bytes &&
1303 		IS_ALIGNED(offset, sizeof(u32)) &&
1304 		!buffer->free &&
1305 		(!buffer->allow_user_free || !buffer->transaction);
1306 }
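/*
 * Worked example (illustrative): for a buffer whose total size is 64 bytes,
 * offset = 60 with bytes = 4 is accepted, offset = 62 is rejected (not
 * u32-aligned), and offset = 64 with bytes = 4 is rejected because it would
 * run past the end of the buffer.
 */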
1307 
1308 /**
1309  * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1310  * @alloc: binder_alloc for this proc
1311  * @buffer: binder buffer to be accessed
1312  * @buffer_offset: offset into @buffer data
1313  * @from: userspace pointer to source buffer
1314  * @bytes: bytes to copy
1315  *
1316  * Copy bytes from source userspace to target buffer.
1317  *
1318  * Return: bytes remaining to be copied
1319  */
1320 unsigned long
1321 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1322 				 struct binder_buffer *buffer,
1323 				 binder_size_t buffer_offset,
1324 				 const void __user *from,
1325 				 size_t bytes)
1326 {
1327 	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1328 		return bytes;
1329 
1330 	while (bytes) {
1331 		unsigned long size;
1332 		unsigned long ret;
1333 		struct page *page;
1334 		pgoff_t pgoff;
1335 		void *kptr;
1336 
1337 		page = binder_alloc_get_page(alloc, buffer,
1338 					     buffer_offset, &pgoff);
1339 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1340 		kptr = kmap_local_page(page) + pgoff;
1341 		ret = copy_from_user(kptr, from, size);
1342 		kunmap_local(kptr);
1343 		if (ret)
1344 			return bytes - size + ret;
1345 		bytes -= size;
1346 		from += size;
1347 		buffer_offset += size;
1348 	}
1349 	return 0;
1350 }
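/*
 * Usage note (illustrative): like copy_from_user(), a return value of 0
 * means the whole range was copied; any non-zero return is the number of
 * bytes that could not be copied and is treated as a failure by callers,
 * e.g.:
 *
 *	if (binder_alloc_copy_user_to_buffer(alloc, buffer, offset,
 *					     uptr, len))
 *		return -EFAULT;
 */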
1351 
1352 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1353 				       bool to_buffer,
1354 				       struct binder_buffer *buffer,
1355 				       binder_size_t buffer_offset,
1356 				       void *ptr,
1357 				       size_t bytes)
1358 {
1359 	/* All copies must be 32-bit aligned and 32-bit size */
1360 	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1361 		return -EINVAL;
1362 
1363 	while (bytes) {
1364 		unsigned long size;
1365 		struct page *page;
1366 		pgoff_t pgoff;
1367 
1368 		page = binder_alloc_get_page(alloc, buffer,
1369 					     buffer_offset, &pgoff);
1370 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1371 		if (to_buffer)
1372 			memcpy_to_page(page, pgoff, ptr, size);
1373 		else
1374 			memcpy_from_page(ptr, page, pgoff, size);
1375 		bytes -= size;
1376 		pgoff = 0;
1377 		ptr = ptr + size;
1378 		buffer_offset += size;
1379 	}
1380 	return 0;
1381 }
1382 
1383 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1384 				struct binder_buffer *buffer,
1385 				binder_size_t buffer_offset,
1386 				void *src,
1387 				size_t bytes)
1388 {
1389 	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1390 					   src, bytes);
1391 }
1392 
1393 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1394 				  void *dest,
1395 				  struct binder_buffer *buffer,
1396 				  binder_size_t buffer_offset,
1397 				  size_t bytes)
1398 {
1399 	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1400 					   dest, bytes);
1401 }
1402