Lines matching full:pages (mm/gup.c)

35 static inline void sanity_check_pinned_pages(struct page **pages,  in sanity_check_pinned_pages()  argument
42 * We only pin anonymous pages if they are exclusive. Once pinned, we in sanity_check_pinned_pages()
46 * We'd like to verify that our pinned anonymous pages are still mapped in sanity_check_pinned_pages()
53 for (; npages; npages--, pages++) { in sanity_check_pinned_pages()
54 struct page *page = *pages; in sanity_check_pinned_pages()
186 * Pages that were pinned via pin_user_pages*() must be released via either
188 * that such pages can be separately tracked and uniquely handled. In
269 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
270 * @pages: array of pages to be maybe marked dirty, and definitely released.
271 * @npages: number of pages in the @pages array.
272 * @make_dirty: whether to mark the pages dirty
277 * For each page in the @pages array, make that page (or its head page, if a
279 * listed as clean. In any case, releases all pages using unpin_user_page(),
290 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
298 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
302 sanity_check_pinned_pages(pages, npages); in unpin_user_pages_dirty_lock()
304 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_dirty_lock()
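The kerneldoc above pairs naturally with one of the pin_user_pages*() calls. Below is a minimal sketch, not taken from gup.c itself, of the intended round trip: pin with FOLL_WRITE, modify the pages from the kernel, then release them with make_dirty == true so the writes are not lost. The demo_* name and the zero-fill workload are illustrative assumptions.

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/highmem.h>

    /* Illustrative only: zero-fill npages of user memory at uaddr. */
    static int demo_zero_user_range(unsigned long uaddr, int npages)
    {
            struct page **pages;
            int pinned, i;

            pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            /* FOLL_WRITE: we are going to modify the page contents. */
            pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
            if (pinned < 0) {
                    kvfree(pages);
                    return pinned;
            }

            for (i = 0; i < pinned; i++) {
                    void *kaddr = kmap_local_page(pages[i]);

                    memset(kaddr, 0, PAGE_SIZE);
                    kunmap_local(kaddr);
            }

            /* make_dirty == true: the pages were written to above. */
            unpin_user_pages_dirty_lock(pages, pinned, true);
            kvfree(pages);
            return pinned == npages ? 0 : -EFAULT;
    }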
340 * @npages: number of consecutive pages to release.
341 * @make_dirty: whether to mark the pages dirty
343 * "gup-pinned page range" refers to a range of pages that has had one of the
347 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
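These fragments appear to belong to unpin_user_page_range_dirty_lock(), the variant that releases a physically contiguous run of pinned pages in one call. A one-call sketch under that assumption, with illustrative parameter names:

    #include <linux/mm.h>

    /*
     * Illustrative only: release npages consecutive pages that were pinned
     * with a pin_user_pages*() call, marking them dirty if they were written.
     */
    static void demo_unpin_contiguous(struct page *first_page,
                                      unsigned long npages, bool written)
    {
            unpin_user_page_range_dirty_lock(first_page, npages, written);
    }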
375 static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages) in gup_fast_unpin_user_pages() argument
383 * fork() and some anonymous pages might now actually be shared -- in gup_fast_unpin_user_pages()
387 folio = gup_folio_next(pages, npages, i, &nr); in gup_fast_unpin_user_pages()
393 * unpin_user_pages() - release an array of gup-pinned pages.
394 * @pages: array of pages to be marked dirty and released.
395 * @npages: number of pages in the @pages array.
397 * For each page in the @pages array, release the page using unpin_user_page().
401 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
408 * If this WARN_ON() fires, then the system *might* be leaking pages (by in unpin_user_pages()
415 sanity_check_pinned_pages(pages, npages); in unpin_user_pages()
417 if (!pages[i]) { in unpin_user_pages()
421 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages()
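A shorter sketch for the read-only case: without FOLL_WRITE nothing needs dirtying, so the plain unpin_user_pages() documented above is the matching release call. Names are illustrative; assumes the caller only reads the data.

    #include <linux/mm.h>
    #include <linux/highmem.h>

    /* Illustrative only: checksum one user page without marking it dirty. */
    static int demo_sum_user_page(unsigned long uaddr, u32 *sum)
    {
            struct page *page;
            const u8 *kaddr;
            int pinned, i;

            pinned = pin_user_pages_fast(uaddr, 1, 0, &page); /* read-only */
            if (pinned != 1)
                    return pinned < 0 ? pinned : -EFAULT;

            kaddr = kmap_local_page(page);
            for (*sum = 0, i = 0; i < PAGE_SIZE; i++)
                    *sum += kaddr[i];
            kunmap_local(kaddr);

            unpin_user_pages(&page, 1);     /* nothing was written */
            return 0;
    }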
428 * unpin_user_folio() - release pages of a folio
430 * @npages: number of pages of same folio
490 struct page **pages) in record_subpages() argument
497 pages[nr] = nth_page(start_page, nr); in record_subpages()
633 * When core dumping, we don't want to allocate unnecessary pages or in no_page_table()
688 * device mapped pages can only be returned if the caller in follow_huge_pud()
867 * We only care about anon pages in can_follow_write_pte() and don't in follow_page_pte()
878 * Only return device mapping pages in the FOLL_GET or FOLL_PIN in follow_page_pte()
889 /* Avoid special (like zero) pages in core dumps */ in follow_page_pte()
1060 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
1109 /* user gate pages are read-only */ in get_gate_page()
1306 * Anon pages in shared mappings are surprising: now in check_vma_flags()
1371 * __get_user_pages() - pin user pages in memory
1374 * @nr_pages: number of pages from start to pin
1376 * @pages: array that receives pointers to the pages pinned.
1378 * only intends to ensure the pages are faulted in.
1381 * Returns either number of pages pinned (which may be less than the
1385 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1386 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1387 * pages pinned. Again, this may be less than nr_pages.
1390 * The caller is responsible for releasing returned @pages, via put_page().
1426 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1438 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1466 pages ? &page : NULL); in __get_user_pages()
1483 * If we have a pending SIGKILL, don't keep faulting pages and in __get_user_pages()
1512 * struct page. If the caller expects **pages to be in __get_user_pages()
1516 if (pages) { in __get_user_pages()
1529 if (pages) { in __get_user_pages()
1541 * pages. in __get_user_pages()
1564 pages[i + j] = subpage; in __get_user_pages()
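The fragments above come from the kerneldoc of the internal __get_user_pages() worker; the return convention it spells out (possibly fewer pages than requested, a negative errno when nothing was pinned, FOLL_GET references dropped with put_page()) also applies to the exported callers. A minimal sketch using the exported get_user_pages(), which expects the caller to hold mmap_lock for read; the demo_* name is illustrative.

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Illustrative only: grab npages of the current task's memory at uaddr,
     * coping with get_user_pages() returning fewer pages than requested. */
    static long demo_get_pages(unsigned long uaddr, int npages,
                               struct page **pages)
    {
            long got;

            mmap_read_lock(current->mm);
            got = get_user_pages(uaddr, npages, FOLL_WRITE, pages);
            mmap_read_unlock(current->mm);

            if (got < 0)
                    return got;             /* no pages pinned at all */
            if (got < npages) {
                    /* Partial success: release what we got and bail out. */
                    while (got--)
                            put_page(pages[got]);
                    return -EFAULT;
            }
            return got;
    }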
1720 struct page **pages, in __get_user_pages_locked() argument
1748 * is to set FOLL_GET if the caller wants pages[] filled in (but has in __get_user_pages_locked()
1752 * FOLL_PIN always expects pages to be non-null, but no need to assert in __get_user_pages_locked()
1755 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1760 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1791 * For the prefault case (!pages) we only update counts. in __get_user_pages_locked()
1793 if (likely(pages)) in __get_user_pages_locked()
1794 pages += ret; in __get_user_pages_locked()
1825 pages, locked); in __get_user_pages_locked()
1841 if (likely(pages)) in __get_user_pages_locked()
1842 pages++; in __get_user_pages_locked()
1866 * populate_vma_page_range() - populate a range of pages in the vma.
1872 * This takes care of mlocking the pages too if VM_LOCKED is set.
1874 * Return either number of pages pinned in the vma, or a negative error
1907 /* ... similarly, we've never faulted in PROT_NONE pages */ in populate_vma_page_range()
1942 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1950 * Returns either number of processed pages in the MM, or a negative error
1991 * __mm_populate - populate and/or mlock pages within a range of address space.
2009 * We want to fault in pages for [nstart; end) address range. in __mm_populate()
2031 * Now fault in a range of pages. populate_vma_page_range() in __mm_populate()
2032 * double checks the vma flags, so that it won't mlock pages in __mm_populate()
2052 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
2092 if (pages) { in __get_user_pages_locked()
2093 pages[i] = virt_to_page((void *)start); in __get_user_pages_locked()
2094 if (pages[i]) in __get_user_pages_locked()
2095 get_page(pages[i]); in __get_user_pages_locked()
2181 * already know that some or all of the pages in the address range aren't in
2186 * Note that we don't pin or otherwise hold the pages referenced that we fault
2287 * An array of either pages or folios ("pofs"). Although it may seem tempting to
2289 * pages, that approach won't work in the longer term, because eventually the
2295 struct page **pages; member
2307 return page_folio(pofs->pages[i]); in pofs_get_folio()
2320 unpin_user_pages(pofs->pages, pofs->nr_entries); in pofs_unpin()
2487 struct page **pages) in check_and_migrate_movable_pages() argument
2490 .pages = pages, in check_and_migrate_movable_pages()
2499 struct page **pages) in check_and_migrate_movable_pages() argument
2518 struct page **pages, in __gup_longterm_locked() argument
2526 return __get_user_pages_locked(mm, start, nr_pages, pages, in __gup_longterm_locked()
2532 pages, locked, in __gup_longterm_locked()
2540 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); in __gup_longterm_locked()
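check_and_migrate_movable_pages() and __gup_longterm_locked() implement the FOLL_LONGTERM policy: pages destined for a long-lived pin are first migrated out of movable/CMA memory so the pin cannot block those allocators indefinitely. A hedged sketch of a caller requesting such a pin through pin_user_pages_fast(); names are illustrative.

    #include <linux/mm.h>

    /* Illustrative only: a pin expected to outlive the current call chain,
     * e.g. pages handed to a device for ongoing DMA. */
    static int demo_longterm_pin(unsigned long uaddr, int npages,
                                 struct page **pages)
    {
            int pinned;

            pinned = pin_user_pages_fast(uaddr, npages,
                                         FOLL_WRITE | FOLL_LONGTERM, pages);
            if (pinned < 0)
                    return pinned;

            /* ... use the pinned pages for as long as needed ... */

            unpin_user_pages_dirty_lock(pages, pinned, true);
            return 0;
    }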
2550 static bool is_valid_gup_args(struct page **pages, int *locked, in is_valid_gup_args() argument
2583 /* Pages input must be given if using GET/PIN */ in is_valid_gup_args()
2584 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) in is_valid_gup_args()
2598 * get_user_pages_remote() - pin user pages in memory
2601 * @nr_pages: number of pages from start to pin
2603 * @pages: array that receives pointers to the pages pinned.
2605 * only intends to ensure the pages are faulted in.
2610 * Returns either number of pages pinned (which may be less than the
2614 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2615 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2616 * pages pinned. Again, this may be less than nr_pages.
2618 * The caller is responsible for releasing returned @pages, via put_page().
2642 * via the user virtual addresses. The pages may be submitted for
2655 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
2660 if (!is_valid_gup_args(pages, locked, &gup_flags, in get_user_pages_remote()
2664 return __get_user_pages_locked(mm, start, nr_pages, pages, in get_user_pages_remote()
2673 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
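A hedged sketch of the remote calling convention described above: the caller supplies the target mm, holds mmap_lock for read, and passes a locked flag that GUP clears if it had to drop the lock. The get_task_mm()/mmput() framing and the demo_* name are illustrative, not part of gup.c.

    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <linux/sched/mm.h>

    /* Illustrative only: take a reference on one page of another task. */
    static int demo_peek_remote_page(struct task_struct *task,
                                     unsigned long uaddr, struct page **pagep)
    {
            struct mm_struct *mm;
            int locked = 1;
            long got;

            mm = get_task_mm(task);
            if (!mm)
                    return -ESRCH;

            mmap_read_lock(mm);
            got = get_user_pages_remote(mm, uaddr, 1, 0, pagep, &locked);
            if (locked)
                    mmap_read_unlock(mm);   /* GUP may have dropped it */
            mmput(mm);

            if (got <= 0)
                    return got ? got : -EFAULT;
            return 0;       /* caller releases the page with put_page() */
    }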
2681 * get_user_pages() - pin user pages in memory
2683 * @nr_pages: number of pages from start to pin
2685 * @pages: array that receives pointers to the pages pinned.
2687 * only intends to ensure the pages are faulted in.
2695 unsigned int gup_flags, struct page **pages) in get_user_pages() argument
2699 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) in get_user_pages()
2702 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages()
2711 * get_user_pages(mm, ..., pages, NULL);
2716 * get_user_pages_unlocked(mm, ..., pages);
2723 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2727 if (!is_valid_gup_args(pages, NULL, &gup_flags, in get_user_pages_unlocked()
2731 return __get_user_pages_locked(current->mm, start, nr_pages, pages, in get_user_pages_unlocked()
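The comment above shows the pattern this helper replaces (mmap_read_lock(); get_user_pages(); mmap_read_unlock()). A minimal sketch of the equivalent single call; note the argument order, with @pages before @gup_flags. The wrapper name is illustrative.

    #include <linux/mm.h>

    static long demo_fault_in_and_get(unsigned long uaddr, unsigned long npages,
                                      struct page **pages)
    {
            /* Takes and releases mmap_lock internally; may fault and retry.
             * Returned pages carry ordinary references: drop with put_page(). */
            return get_user_pages_unlocked(uaddr, npages, pages, FOLL_WRITE);
    }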
2739 * get_user_pages_fast attempts to pin user pages by walking the page
2741 * protected from page table pages being freed from under it, and should
2746 * pages are freed. This is unsuitable for architectures that do not need
2749 * Another way to achieve this is to batch up page table containing pages
2751 * pages. Disabling interrupts will allow the gup_fast() walker to both block
2759 * free pages containing page tables or TLB flushing requires IPI broadcast.
2860 unsigned int flags, struct page **pages) in gup_fast_undo_dev_pagemap() argument
2863 struct folio *folio = page_folio(pages[--(*nr)]); in gup_fast_undo_dev_pagemap()
2891 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pte_range() argument
2907 * Always fallback to ordinary GUP on PROT_NONE-mapped pages: in gup_fast_pte_range()
2908 * pte_access_permitted() better should reject these pages in gup_fast_pte_range()
2925 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_pte_range()
2968 pages[*nr] = page; in gup_fast_pte_range()
2988 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2992 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pte_range() argument
3001 unsigned long end, unsigned int flags, struct page **pages, int *nr) in gup_fast_devmap_leaf() argument
3012 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_leaf()
3017 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_leaf()
3023 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_leaf()
3027 pages[*nr] = page; in gup_fast_devmap_leaf()
3037 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pmd_leaf() argument
3044 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) in gup_fast_devmap_pmd_leaf()
3048 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_pmd_leaf()
3055 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pud_leaf() argument
3062 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) in gup_fast_devmap_pud_leaf()
3066 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); in gup_fast_devmap_pud_leaf()
3073 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pmd_leaf() argument
3081 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_devmap_pud_leaf() argument
3090 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pmd_leaf() argument
3107 pages, nr); in gup_fast_pmd_leaf()
3111 refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr); in gup_fast_pmd_leaf()
3137 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pud_leaf() argument
3154 pages, nr); in gup_fast_pud_leaf()
3158 refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr); in gup_fast_pud_leaf()
3185 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pgd_leaf() argument
3198 refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr); in gup_fast_pgd_leaf()
3225 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pmd_range() argument
3245 pages, nr)) in gup_fast_pmd_range()
3249 pages, nr)) in gup_fast_pmd_range()
3257 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_pud_range() argument
3272 pages, nr)) in gup_fast_pud_range()
3275 pages, nr)) in gup_fast_pud_range()
3283 unsigned long end, unsigned int flags, struct page **pages, in gup_fast_p4d_range() argument
3298 pages, nr)) in gup_fast_p4d_range()
3306 unsigned int flags, struct page **pages, int *nr) in gup_fast_pgd_range() argument
3320 pages, nr)) in gup_fast_pgd_range()
3323 pages, nr)) in gup_fast_pgd_range()
3329 unsigned int flags, struct page **pages, int *nr) in gup_fast_pgd_range() argument
3346 unsigned int gup_flags, struct page **pages) in gup_fast() argument
3365 * With interrupts disabled, we block page table pages from being freed in gup_fast()
3373 gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned); in gup_fast()
3377 * When pinning pages for DMA there could be a concurrent write protect in gup_fast()
3382 gup_fast_unpin_user_pages(pages, nr_pinned); in gup_fast()
3385 sanity_check_pinned_pages(pages, nr_pinned); in gup_fast()
3392 unsigned int gup_flags, struct page **pages) in gup_fast_fallback() argument
3420 nr_pinned = gup_fast(start, end, gup_flags, pages); in gup_fast_fallback()
3424 /* Slow path: try to get the remaining pages with get_user_pages */ in gup_fast_fallback()
3426 pages += nr_pinned; in gup_fast_fallback()
3428 pages, &locked, in gup_fast_fallback()
3432 * The caller has to unpin the pages we already pinned so in gup_fast_fallback()
3443 * get_user_pages_fast_only() - pin user pages in memory
3445 * @nr_pages: number of pages from start to pin
3447 * @pages: array that receives pointers to the pages pinned.
3454 * pages pinned.
3461 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
3470 if (!is_valid_gup_args(pages, NULL, &gup_flags, in get_user_pages_fast_only()
3474 return gup_fast_fallback(start, nr_pages, gup_flags, pages); in get_user_pages_fast_only()
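A hedged sketch of the opportunistic pattern this _only variant enables: try the non-sleeping fast walk first and fall back to a variant that may fault only when the fast path could not pin everything. The fallback choice and names are illustrative assumptions.

    #include <linux/mm.h>

    static int demo_get_page_opportunistic(unsigned long uaddr,
                                           struct page **pagep)
    {
            int got;

            /* Never sleeps and never faults; may simply pin nothing. */
            got = get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, pagep);
            if (got == 1)
                    return 0;

            /* Slow path: may fault, so only valid in sleepable context. */
            got = get_user_pages_fast(uaddr, 1, FOLL_WRITE, pagep);
            return got == 1 ? 0 : (got < 0 ? got : -EFAULT);
    }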
3479 * get_user_pages_fast() - pin user pages in memory
3481 * @nr_pages: number of pages from start to pin
3483 * @pages: array that receives pointers to the pages pinned.
3486 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3490 * Returns number of pages pinned. This may be fewer than the number requested.
3491 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3495 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
3503 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) in get_user_pages_fast()
3505 return gup_fast_fallback(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
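In contrast to the pin_user_pages*() family, pages obtained from get_user_pages_fast() carry ordinary FOLL_GET references, so the matching release sequence is set_page_dirty_lock() (if the pages were written) followed by put_page(). A minimal sketch under that assumption; the name is illustrative.

    #include <linux/mm.h>

    /* Illustrative only: write into a user buffer via the fast-GUP path. */
    static int demo_gup_fast_write(unsigned long uaddr, int npages,
                                   struct page **pages)
    {
            int got, i;

            got = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
            if (got < 0)
                    return got;

            /* ... write into the pages here ... */

            for (i = 0; i < got; i++) {
                    set_page_dirty_lock(pages[i]);  /* pages were written */
                    put_page(pages[i]);             /* drop FOLL_GET ref */
            }
            return got;
    }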
3510 * pin_user_pages_fast() - pin user pages in memory without taking locks
3513 * @nr_pages: number of pages from start to pin
3515 * @pages: array that receives pointers to the pages pinned.
3522 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3525 * Note that if a zero_page is amongst the returned pages, it will not have
3529 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
3531 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) in pin_user_pages_fast()
3533 return gup_fast_fallback(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
3538 * pin_user_pages_remote() - pin pages of a remote process
3542 * @nr_pages: number of pages from start to pin
3544 * @pages: array that receives pointers to the pages pinned.
3554 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3557 * Note that if a zero_page is amongst the returned pages, it will not have
3562 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
3567 if (!is_valid_gup_args(pages, locked, &gup_flags, in pin_user_pages_remote()
3570 return __gup_longterm_locked(mm, start, nr_pages, pages, in pin_user_pages_remote()
3577 * pin_user_pages() - pin user pages in memory for use by other devices
3580 * @nr_pages: number of pages from start to pin
3582 * @pages: array that receives pointers to the pages pinned.
3588 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3591 * Note that if a zero_page is amongst the returned pages, it will not have
3595 unsigned int gup_flags, struct page **pages) in pin_user_pages() argument
3599 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) in pin_user_pages()
3602 pages, &locked, gup_flags); in pin_user_pages()
3611 * Note that if a zero_page is amongst the returned pages, it will not have
3615 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
3619 if (!is_valid_gup_args(pages, NULL, &gup_flags, in pin_user_pages_unlocked()
3623 return __gup_longterm_locked(current->mm, start, nr_pages, pages, in pin_user_pages_unlocked()
3777 * (e.g., individual pages) of the folio later, for example, using