Lines Matching full:cma

15 #define pr_fmt(fmt) "cma: " fmt
25 #include <linux/cma.h>
29 #include <trace/events/cma.h>
32 #include "cma.h"
34 struct cma cma_areas[MAX_CMA_AREAS];
38 phys_addr_t cma_get_base(const struct cma *cma) in cma_get_base() argument
40 return PFN_PHYS(cma->base_pfn); in cma_get_base()
43 unsigned long cma_get_size(const struct cma *cma) in cma_get_size() argument
45 return cma->count << PAGE_SHIFT; in cma_get_size()
48 const char *cma_get_name(const struct cma *cma) in cma_get_name() argument
50 return cma->name; in cma_get_name()
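
These accessors are the intended way for callers to learn an area's placement without touching struct cma internals. A minimal sketch of a caller (the function name and log wording are illustrative, not from the source):

	/* Hedged sketch: report a CMA area using only the public accessors. */
	static void report_cma_area(const struct cma *area)
	{
		phys_addr_t base = cma_get_base(area);

		pr_info("CMA area %s: base %pa, size %lu bytes\n",
			cma_get_name(area), &base, cma_get_size(area));
	}
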
53 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, in cma_bitmap_aligned_mask() argument
56 if (align_order <= cma->order_per_bit) in cma_bitmap_aligned_mask()
58 return (1UL << (align_order - cma->order_per_bit)) - 1; in cma_bitmap_aligned_mask()
65 static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, in cma_bitmap_aligned_offset() argument
68 return (cma->base_pfn & ((1UL << align_order) - 1)) in cma_bitmap_aligned_offset()
69 >> cma->order_per_bit; in cma_bitmap_aligned_offset()
72 static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, in cma_bitmap_pages_to_bits() argument
75 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
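
The three helpers above contain all of the pages-to-bitmap arithmetic: one bitmap bit covers 2^order_per_bit pages, so the mask, offset and bit count are just shifted page values. A small userspace sketch of the same expressions with made-up numbers (the constants are illustrative only):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long order_per_bit = 2;       /* one bitmap bit per 4 pages     */
		unsigned long align_order   = 4;       /* allocation aligned to 16 pages */
		unsigned long base_pfn      = 0x12345; /* deliberately unaligned base    */
		unsigned long pages         = 10;      /* requested size, in pages       */

		/* cma_bitmap_aligned_mask(): low bits that encode the alignment,
		 * in bitmap-bit units (0 when align_order <= order_per_bit). */
		unsigned long mask = (1UL << (align_order - order_per_bit)) - 1;

		/* cma_bitmap_aligned_offset(): where the area's base pfn falls
		 * inside an align_order-sized window, again in bitmap bits. */
		unsigned long offset = (base_pfn & ((1UL << align_order) - 1)) >> order_per_bit;

		/* cma_bitmap_pages_to_bits(): pages rounded up to whole bits. */
		unsigned long bits = ((pages + (1UL << order_per_bit) - 1) &
				      ~((1UL << order_per_bit) - 1)) >> order_per_bit;

		assert(mask == 3);    /* 16-page alignment spans 4 bits -> mask 0b11 */
		assert(offset == 1);  /* 0x12345 & 0xf = 5 pages in = bit 1          */
		assert(bits == 3);    /* 10 pages round up to 12 = 3 bits of 4 pages */
		printf("mask=%lu offset=%lu bits=%lu\n", mask, offset, bits);
		return 0;
	}
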
78 static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, in cma_clear_bitmap() argument
84 bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit; in cma_clear_bitmap()
85 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_clear_bitmap()
87 spin_lock_irqsave(&cma->lock, flags); in cma_clear_bitmap()
88 bitmap_clear(cma->bitmap, bitmap_no, bitmap_count); in cma_clear_bitmap()
89 spin_unlock_irqrestore(&cma->lock, flags); in cma_clear_bitmap()
92 static void __init cma_activate_area(struct cma *cma) in cma_activate_area() argument
94 unsigned long base_pfn = cma->base_pfn, pfn; in cma_activate_area()
97 cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL); in cma_activate_area()
98 if (!cma->bitmap) in cma_activate_area()
103 * same zone. Simplify by forcing the entire CMA resv range to be in the in cma_activate_area()
108 for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) { in cma_activate_area()
114 for (pfn = base_pfn; pfn < base_pfn + cma->count; in cma_activate_area()
118 spin_lock_init(&cma->lock); in cma_activate_area()
121 INIT_HLIST_HEAD(&cma->mem_head); in cma_activate_area()
122 spin_lock_init(&cma->mem_head_lock); in cma_activate_area()
128 bitmap_free(cma->bitmap); in cma_activate_area()
130 /* Expose all pages to the buddy, they are useless for CMA. */ in cma_activate_area()
131 if (!cma->reserve_pages_on_error) { in cma_activate_area()
132 for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++) in cma_activate_area()
135 totalcma_pages -= cma->count; in cma_activate_area()
136 cma->count = 0; in cma_activate_area()
137 pr_err("CMA area %s could not be activated\n", cma->name); in cma_activate_area()
151 void __init cma_reserve_pages_on_error(struct cma *cma) in cma_reserve_pages_on_error() argument
153 cma->reserve_pages_on_error = true; in cma_reserve_pages_on_error()
164 * @res_cma: Pointer to store the created cma region.
171 struct cma **res_cma) in cma_init_reserved_mem()
173 struct cma *cma; in cma_init_reserved_mem() local
177 pr_err("Not enough slots for CMA reserved regions!\n"); in cma_init_reserved_mem()
185 * CMA uses CMA_MIN_ALIGNMENT_BYTES as alignment requirement which in cma_init_reserved_mem()
201 cma = &cma_areas[cma_area_count]; in cma_init_reserved_mem()
204 snprintf(cma->name, CMA_MAX_NAME, name); in cma_init_reserved_mem()
206 snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); in cma_init_reserved_mem()
208 cma->base_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
209 cma->count = size >> PAGE_SHIFT; in cma_init_reserved_mem()
210 cma->order_per_bit = order_per_bit; in cma_init_reserved_mem()
211 *res_cma = cma; in cma_init_reserved_mem()
213 totalcma_pages += cma->count; in cma_init_reserved_mem()
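
A hedged sketch of how early-boot code might hand an already-reserved, suitably aligned range over to CMA with cma_init_reserved_mem(), paired with the cma_reserve_pages_on_error() opt-out shown above (all names are illustrative, and the range is assumed to already meet CMA_MIN_ALIGNMENT_BYTES):

	static struct cma *my_area;	/* illustrative */

	static int __init my_cma_setup(phys_addr_t base, phys_addr_t size)
	{
		int ret;

		/* order_per_bit = 0: track the area with one bitmap bit per page. */
		ret = cma_init_reserved_mem(base, size, 0, "my-area", &my_area);
		if (ret) {
			pr_err("my-area: cma_init_reserved_mem() failed: %d\n", ret);
			return ret;
		}

		/* If activation fails later, keep the range reserved rather than
		 * handing the pages to the buddy allocator. */
		cma_reserve_pages_on_error(my_area);
		return 0;
	}
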
223 * @alignment: Alignment for the CMA area, should be power of 2 or zero
227 * @res_cma: Pointer to store the created cma region.
241 bool fixed, const char *name, struct cma **res_cma, in cma_declare_contiguous_nid()
259 pr_err("Not enough slots for CMA reserved regions!\n"); in cma_declare_contiguous_nid()
329 * It will place the new cma area close to the start of the node in cma_declare_contiguous_nid()
331 * cma area and not into it. in cma_declare_contiguous_nid()
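
cma_declare_contiguous_nid() differs from cma_init_reserved_mem() in that it also carves the memory out of memblock itself. Only part of its parameter list is visible in the matches above; the sketch below assumes the upstream ordering (base, size, limit, alignment, order_per_bit, fixed, name, res_cma, nid), with base/limit/alignment of 0 letting CMA choose the placement and NUMA_NO_NODE leaving the node unspecified (names are illustrative):

	static struct cma *my_dma_area;	/* illustrative */

	static void __init my_reserve_dma_cma(phys_addr_t size)
	{
		int ret;

		ret = cma_declare_contiguous_nid(0, size, 0, 0, 0, false,
						 "my-dma", &my_dma_area, NUMA_NO_NODE);
		if (ret)
			pr_warn("my-dma: CMA reservation of %pa bytes failed: %d\n",
				&size, ret);
	}
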
389 static void cma_debug_show_areas(struct cma *cma) in cma_debug_show_areas() argument
394 unsigned long nbits = cma_bitmap_maxno(cma); in cma_debug_show_areas()
396 spin_lock_irq(&cma->lock); in cma_debug_show_areas()
399 next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); in cma_debug_show_areas()
402 next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); in cma_debug_show_areas()
404 nr_part = nr_zero << cma->order_per_bit; in cma_debug_show_areas()
410 pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); in cma_debug_show_areas()
411 spin_unlock_irq(&cma->lock); in cma_debug_show_areas()
414 static struct page *__cma_alloc(struct cma *cma, unsigned long count, in __cma_alloc() argument
424 const char *name = cma ? cma->name : NULL; in __cma_alloc()
428 if (!cma || !cma->count || !cma->bitmap) in __cma_alloc()
431 pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__, in __cma_alloc()
432 (void *)cma, cma->name, count, align); in __cma_alloc()
437 mask = cma_bitmap_aligned_mask(cma, align); in __cma_alloc()
438 offset = cma_bitmap_aligned_offset(cma, align); in __cma_alloc()
439 bitmap_maxno = cma_bitmap_maxno(cma); in __cma_alloc()
440 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in __cma_alloc()
446 spin_lock_irq(&cma->lock); in __cma_alloc()
447 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, in __cma_alloc()
451 spin_unlock_irq(&cma->lock); in __cma_alloc()
454 bitmap_set(cma->bitmap, bitmap_no, bitmap_count); in __cma_alloc()
460 spin_unlock_irq(&cma->lock); in __cma_alloc()
462 pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); in __cma_alloc()
471 cma_clear_bitmap(cma, pfn, count); in __cma_alloc()
478 trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), in __cma_alloc()
485 * CMA can allocate multiple page blocks, which results in different in __cma_alloc()
496 __func__, cma->name, count, ret); in __cma_alloc()
497 cma_debug_show_areas(cma); in __cma_alloc()
504 cma_sysfs_account_success_pages(cma, count); in __cma_alloc()
507 cma_sysfs_account_fail_pages(cma, count); in __cma_alloc()
515 * @cma: Contiguous memory region for which the allocation is performed.
523 struct page *cma_alloc(struct cma *cma, unsigned long count, in cma_alloc() argument
526 return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0)); in cma_alloc()
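
cma_alloc() takes the size in pages and the alignment as a page order; no_warn just folds __GFP_NOWARN into the internal GFP mask as seen above. A hedged sketch of a driver-side allocation (buffer size and alignment are illustrative):

	/* Hedged sketch: grab 16 physically contiguous pages, aligned to 16 pages. */
	static struct page *my_grab_buffer(struct cma *area)
	{
		struct page *page;

		page = cma_alloc(area, 16, 4 /* order-4 alignment */, false);
		if (!page)
			return NULL;	/* area exhausted or too fragmented */

		/* ... use the buffer; release later with cma_release(area, page, 16) ... */
		return page;
	}
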
529 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp) in cma_alloc_folio() argument
536 page = __cma_alloc(cma, 1 << order, order, gfp); in cma_alloc_folio()
541 bool cma_pages_valid(struct cma *cma, const struct page *pages, in cma_pages_valid() argument
546 if (!cma || !pages) in cma_pages_valid()
551 if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) { in cma_pages_valid()
562 * @cma: Contiguous memory region for which the allocation is performed.
570 bool cma_release(struct cma *cma, const struct page *pages, in cma_release() argument
575 if (!cma_pages_valid(cma, pages, count)) in cma_release()
582 VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); in cma_release()
585 cma_clear_bitmap(cma, pfn, count); in cma_release()
586 cma_sysfs_account_release_pages(cma, count); in cma_release()
587 trace_cma_release(cma->name, pfn, pages, count); in cma_release()
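
Because cma_release() validates the range with cma_pages_valid() and returns false for pages it does not own, callers that mix CMA-backed and normal allocations can use the return value to pick the right free path, a pattern also used by the DMA contiguous glue. A hedged sketch (names and the fallback policy are illustrative):

	/* Hedged sketch: free a buffer that may or may not have come from the area. */
	static void my_put_buffer(struct cma *area, struct page *page, unsigned long count)
	{
		/* cma_release() returns false when the pages are not part of the area. */
		if (cma_release(area, page, count))
			return;

		/* Fall back to the normal page allocator for non-CMA buffers. */
		__free_pages(page, get_order(count << PAGE_SHIFT));
	}
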
592 bool cma_free_folio(struct cma *cma, const struct folio *folio) in cma_free_folio() argument
597 return cma_release(cma, &folio->page, folio_nr_pages(folio)); in cma_free_folio()
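
The folio variants wrap the same bitmap paths: cma_alloc_folio() requests 1 << order pages and cma_free_folio() hands them back. A hedged sketch of a round trip (order-4 is illustrative, and __GFP_COMP is included on the assumption that folio allocations must be compound):

	static int my_folio_roundtrip(struct cma *area)
	{
		struct folio *folio;

		folio = cma_alloc_folio(area, 4, GFP_KERNEL | __GFP_COMP);
		if (!folio)
			return -ENOMEM;

		/* ... touch folio_address(folio) here ... */

		if (!cma_free_folio(area, folio))
			pr_warn("folio was not part of %s\n", cma_get_name(area));
		return 0;
	}
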
600 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) in cma_for_each_area() argument
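
cma_for_each_area() walks every registered area and passes each one to the callback together with the opaque data pointer. A hedged sketch that totals up the reserved space using the accessors from the top of the file (assuming the usual convention that a non-zero callback return stops the walk):

	static int report_one_area(struct cma *area, void *data)
	{
		unsigned long *total = data;

		*total += cma_get_size(area);
		pr_info("%s: %lu bytes\n", cma_get_name(area), cma_get_size(area));
		return 0;	/* keep iterating */
	}

	static void report_all_areas(void)
	{
		unsigned long total = 0;

		cma_for_each_area(report_one_area, &total);
		pr_info("total CMA reserved: %lu bytes\n", total);
	}
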