Lines Matching +full:mm +-0 (drivers/gpu/drm/tests/drm_buddy_test.c)

1 // SPDX-License-Identifier: MIT
31 struct drm_buddy mm; in drm_test_buddy_alloc_range_bias() local
37 mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */ in drm_test_buddy_alloc_range_bias()
41 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), in drm_test_buddy_alloc_range_bias()
55 for (i = 0; i < count; i++) { in drm_test_buddy_alloc_range_bias()
65 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
69 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
74 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
78 "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
83 drm_buddy_alloc_blocks(&mm, bias_start + ps, in drm_test_buddy_alloc_range_bias()
87 "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
92 drm_buddy_alloc_blocks(&mm, bias_start + ps, in drm_test_buddy_alloc_range_bias()
93 bias_end - ps, in drm_test_buddy_alloc_range_bias()
97 "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
98 bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1); in drm_test_buddy_alloc_range_bias()
102 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
106 "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
108 drm_buddy_free_list(&mm, &tmp, 0); in drm_test_buddy_alloc_range_bias()
112 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
116 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
118 drm_buddy_free_list(&mm, &tmp, 0); in drm_test_buddy_alloc_range_bias()
124 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
128 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
131 bias_rem -= size; in drm_test_buddy_alloc_range_bias()
134 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
138 "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
147 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
151 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
158 drm_buddy_free_list(&mm, &tmp, 0); in drm_test_buddy_alloc_range_bias()
165 drm_buddy_free_list(&mm, &allocated, 0); in drm_test_buddy_alloc_range_bias()
166 drm_buddy_fini(&mm); in drm_test_buddy_alloc_range_bias()
169 * Something more free-form. Idea is to pick a random starting bias in drm_test_buddy_alloc_range_bias()
177 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), in drm_test_buddy_alloc_range_bias()
180 bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps); in drm_test_buddy_alloc_range_bias()
181 bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps); in drm_test_buddy_alloc_range_bias()
183 bias_rem = bias_end - bias_start; in drm_test_buddy_alloc_range_bias()
189 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
193 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
195 bias_rem -= size; in drm_test_buddy_alloc_range_bias()
206 bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps); in drm_test_buddy_alloc_range_bias()
208 bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps); in drm_test_buddy_alloc_range_bias()
210 bias_rem += old_bias_start - bias_start; in drm_test_buddy_alloc_range_bias()
211 bias_rem += bias_end - old_bias_end; in drm_test_buddy_alloc_range_bias()
215 KUNIT_ASSERT_EQ(test, bias_start, 0); in drm_test_buddy_alloc_range_bias()
218 drm_buddy_alloc_blocks(&mm, bias_start, bias_end, in drm_test_buddy_alloc_range_bias()
222 "buddy_alloc passed with bias(%x-%x), size=%u\n", in drm_test_buddy_alloc_range_bias()
225 drm_buddy_free_list(&mm, &allocated, 0); in drm_test_buddy_alloc_range_bias()
226 drm_buddy_fini(&mm); in drm_test_buddy_alloc_range_bias()
235 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), in drm_test_buddy_alloc_range_bias()
238 bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps); in drm_test_buddy_alloc_range_bias()
239 bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps); in drm_test_buddy_alloc_range_bias()
241 bias_rem = bias_end - bias_start; in drm_test_buddy_alloc_range_bias()
247 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
251 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", in drm_test_buddy_alloc_range_bias()
257 drm_buddy_free_list(&mm, &allocated, 0); in drm_test_buddy_alloc_range_bias()
258 drm_buddy_fini(&mm); in drm_test_buddy_alloc_range_bias()
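
Every assertion in the drm_test_buddy_alloc_range_bias() matches above funnels through drm_buddy_alloc_blocks() with DRM_BUDDY_RANGE_ALLOCATION, which confines the allocation to the requested bias window. A minimal sketch of that calling pattern outside the KUnit harness; the wrapper name and error handling are illustrative, not taken from the test:

#include <drm/drm_buddy.h>

/* Illustrative helper: allocate `size` bytes somewhere inside [start, end). */
static int example_range_alloc(struct drm_buddy *mm, u64 start, u64 end, u64 size)
{
        LIST_HEAD(blocks);
        int err;

        /* Passing chunk_size as the minimum block size lets the allocator split freely. */
        err = drm_buddy_alloc_blocks(mm, start, end, size, mm->chunk_size,
                                     &blocks, DRM_BUDDY_RANGE_ALLOCATION);
        if (err)
                return err;

        /* ... use the blocks ... */

        drm_buddy_free_list(mm, &blocks, 0);
        return 0;
}

The listing instead passes ps and bias_size combinations as the minimum block size to pin down specific size/alignment cases inside fixed and randomly grown bias windows.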
263 unsigned long n_pages, total, i = 0; in drm_test_buddy_alloc_clear()
269 struct drm_buddy mm; in drm_test_buddy_alloc_clear() local
276 KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); in drm_test_buddy_alloc_clear()
278 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); in drm_test_buddy_alloc_clear()
282 * returning those pages as non-dirty and randomly alternate between in drm_test_buddy_alloc_clear()
283 * requesting dirty and non-dirty pages (not going over the limit in drm_test_buddy_alloc_clear()
284 * we freed as non-dirty), putting that into two separate lists. in drm_test_buddy_alloc_clear()
289 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_clear()
293 drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); in drm_test_buddy_alloc_clear()
301 if (slot == 0) { in drm_test_buddy_alloc_clear()
303 flags = 0; in drm_test_buddy_alloc_clear()
309 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_clear()
321 drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); in drm_test_buddy_alloc_clear()
325 * The allocation should never fail with reasonable page-size. in drm_test_buddy_alloc_clear()
327 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_clear()
332 drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); in drm_test_buddy_alloc_clear()
333 drm_buddy_free_list(&mm, &dirty, 0); in drm_test_buddy_alloc_clear()
334 drm_buddy_fini(&mm); in drm_test_buddy_alloc_clear()
336 KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); in drm_test_buddy_alloc_clear()
339 * Create a new mm. Intentionally fragment the address space by creating in drm_test_buddy_alloc_clear()
347 i = 0; in drm_test_buddy_alloc_clear()
353 if (slot == 0) in drm_test_buddy_alloc_clear()
358 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_clear()
359 ps, ps, list, 0), in drm_test_buddy_alloc_clear()
363 drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); in drm_test_buddy_alloc_clear()
364 drm_buddy_free_list(&mm, &dirty, 0); in drm_test_buddy_alloc_clear()
370 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_clear()
374 total = 0; in drm_test_buddy_alloc_clear()
378 total += drm_buddy_block_size(&mm, block); in drm_test_buddy_alloc_clear()
382 drm_buddy_free_list(&mm, &allocated, 0); in drm_test_buddy_alloc_clear()
385 drm_buddy_fini(&mm); in drm_test_buddy_alloc_clear()
388 * Create a new mm with a non power-of-two size. Allocate a random size, free as in drm_test_buddy_alloc_clear()
389 * cleared and then call fini. This will ensure the multi-root force merge during in drm_test_buddy_alloc_clear()
394 KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); in drm_test_buddy_alloc_clear()
395 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_clear()
399 drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); in drm_test_buddy_alloc_clear()
400 drm_buddy_fini(&mm); in drm_test_buddy_alloc_clear()
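
The drm_test_buddy_alloc_clear() matches above revolve around two flags: DRM_BUDDY_CLEARED, passed to drm_buddy_free_list() when the caller returns blocks it knows are zeroed, and DRM_BUDDY_CLEAR_ALLOCATION, passed to drm_buddy_alloc_blocks() when it wants blocks that are already clear. A minimal sketch of that round trip; the wrapper name is illustrative and the zeroing step is assumed to happen where the comment marks it:

#include <drm/drm_buddy.h>

/* Illustrative helper: free blocks as cleared, then ask for cleared memory. */
static int example_clear_cycle(struct drm_buddy *mm, u64 size)
{
        LIST_HEAD(blocks);
        int err;

        err = drm_buddy_alloc_blocks(mm, 0, mm->size, size, mm->chunk_size,
                                     &blocks, 0);
        if (err)
                return err;

        /* ... zero the memory backing the blocks ... */

        /* Tell the allocator these blocks are clean when handing them back. */
        drm_buddy_free_list(mm, &blocks, DRM_BUDDY_CLEARED);

        /* A later request for pre-cleared memory can now be served from them. */
        err = drm_buddy_alloc_blocks(mm, 0, mm->size, size, mm->chunk_size,
                                     &blocks, DRM_BUDDY_CLEAR_ALLOCATION);
        if (err)
                return err;

        drm_buddy_free_list(mm, &blocks, 0);
        return 0;
}

The final init/alloc/free/fini sequence in the listing (lines 394-400) deliberately uses a non power-of-two mm_size so that drm_buddy_fini() has to force-merge across multiple roots while cleared state is still attached to the freed blocks.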
408 struct drm_buddy mm; in drm_test_buddy_alloc_contiguous() local
414 KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); in drm_test_buddy_alloc_contiguous()
424 i = 0; in drm_test_buddy_alloc_contiguous()
430 if (slot == 0) in drm_test_buddy_alloc_contiguous()
437 drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_contiguous()
438 ps, ps, list, 0), in drm_test_buddy_alloc_contiguous()
443 KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_contiguous()
448 drm_buddy_free_list(&mm, &middle, 0); in drm_test_buddy_alloc_contiguous()
449 KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_contiguous()
453 KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_contiguous()
458 drm_buddy_free_list(&mm, &right, 0); in drm_test_buddy_alloc_contiguous()
459 KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_contiguous()
468 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_contiguous()
473 drm_buddy_free_list(&mm, &left, 0); in drm_test_buddy_alloc_contiguous()
474 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, in drm_test_buddy_alloc_contiguous()
479 total = 0; in drm_test_buddy_alloc_contiguous()
481 total += drm_buddy_block_size(&mm, block); in drm_test_buddy_alloc_contiguous()
485 drm_buddy_free_list(&mm, &allocated, 0); in drm_test_buddy_alloc_contiguous()
486 drm_buddy_fini(&mm); in drm_test_buddy_alloc_contiguous()
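
The drm_test_buddy_alloc_contiguous() matches above fragment the space into ps-sized blocks spread across left/middle/right lists and then probe DRM_BUDDY_CONTIGUOUS_ALLOCATION, which requires the request to be satisfied by one physically contiguous span rather than a collection of disjoint blocks. A minimal sketch of the flag; the wrapper name is illustrative:

#include <drm/drm_buddy.h>

/* Illustrative helper: request one physically contiguous span of `size` bytes. */
static int example_contiguous_alloc(struct drm_buddy *mm, u64 size,
                                    struct list_head *blocks)
{
        return drm_buddy_alloc_blocks(mm, 0, mm->size, size, mm->chunk_size,
                                      blocks, DRM_BUDDY_CONTIGUOUS_ALLOCATION);
}

The KUNIT_ASSERT_TRUE/FALSE alternation in the listing follows from that: contiguous requests are expected to fail while the freed pieces are still separated by allocated blocks, and to start succeeding as neighbouring ranges are returned.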
491 u64 mm_size, size, start = 0; in drm_test_buddy_alloc_pathological()
494 unsigned long flags = 0; in drm_test_buddy_alloc_pathological()
496 struct drm_buddy mm; in drm_test_buddy_alloc_pathological() local
502 * Create a pot-sized mm, then allocate one of each possible in drm_test_buddy_alloc_pathological()
503 * order within. This should leave the mm with exactly one in drm_test_buddy_alloc_pathological()
505 * Eventually we will have a fully 50% fragmented mm. in drm_test_buddy_alloc_pathological()
509 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), in drm_test_buddy_alloc_pathological()
512 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); in drm_test_buddy_alloc_pathological()
514 for (top = max_order; top; top--) { in drm_test_buddy_alloc_pathological()
518 list_del(&block->link); in drm_test_buddy_alloc_pathological()
519 drm_buddy_free_block(&mm, block); in drm_test_buddy_alloc_pathological()
522 for (order = top; order--;) { in drm_test_buddy_alloc_pathological()
523 size = get_size(order, mm.chunk_size); in drm_test_buddy_alloc_pathological()
524 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, in drm_test_buddy_alloc_pathological()
527 "buddy_alloc hit -ENOMEM with order=%d, top=%d\n", in drm_test_buddy_alloc_pathological()
533 list_move_tail(&block->link, &blocks); in drm_test_buddy_alloc_pathological()
536 /* There should be one final page for this sub-allocation */ in drm_test_buddy_alloc_pathological()
537 size = get_size(0, mm.chunk_size); in drm_test_buddy_alloc_pathological()
538 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, in drm_test_buddy_alloc_pathological()
540 "buddy_alloc hit -ENOMEM for hole\n"); in drm_test_buddy_alloc_pathological()
545 list_move_tail(&block->link, &holes); in drm_test_buddy_alloc_pathological()
547 size = get_size(top, mm.chunk_size); in drm_test_buddy_alloc_pathological()
548 KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, in drm_test_buddy_alloc_pathological()
550 "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!", in drm_test_buddy_alloc_pathological()
554 drm_buddy_free_list(&mm, &holes, 0); in drm_test_buddy_alloc_pathological()
558 size = get_size(order, mm.chunk_size); in drm_test_buddy_alloc_pathological()
559 KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, in drm_test_buddy_alloc_pathological()
566 drm_buddy_free_list(&mm, &blocks, 0); in drm_test_buddy_alloc_pathological()
567 drm_buddy_fini(&mm); in drm_test_buddy_alloc_pathological()
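
drm_test_buddy_alloc_pathological() and the tests that follow size every request with get_size(order, mm.chunk_size). The helper's definition is not part of this match list; assuming it follows the usual shape of the buddy selftests, it is simply the byte size of one block of the given order:

/* Assumed definition of the helper used throughout the listing:
 * the size in bytes of a single block of the given buddy order.
 */
static inline u64 get_size(int order, u64 chunk_size)
{
        return (1 << order) * chunk_size;
}

With that in hand the pathological test reads naturally: after carving out one block of every order below `top`, only a single chunk-sized page remains free in that sub-allocation, so the top-order request at line 548 is asserted to fail.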
572 u64 mm_size, size, start = 0; in drm_test_buddy_alloc_pessimistic()
575 unsigned long flags = 0; in drm_test_buddy_alloc_pessimistic()
576 struct drm_buddy mm; in drm_test_buddy_alloc_pessimistic() local
582 * Create a pot-sized mm, then allocate one of each possible in drm_test_buddy_alloc_pessimistic()
583 * order within. This should leave the mm with exactly one in drm_test_buddy_alloc_pessimistic()
588 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), in drm_test_buddy_alloc_pessimistic()
591 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); in drm_test_buddy_alloc_pessimistic()
593 for (order = 0; order < max_order; order++) { in drm_test_buddy_alloc_pessimistic()
594 size = get_size(order, mm.chunk_size); in drm_test_buddy_alloc_pessimistic()
595 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, in drm_test_buddy_alloc_pessimistic()
597 "buddy_alloc hit -ENOMEM with order=%d\n", in drm_test_buddy_alloc_pessimistic()
603 list_move_tail(&block->link, &blocks); in drm_test_buddy_alloc_pessimistic()
607 size = get_size(0, mm.chunk_size); in drm_test_buddy_alloc_pessimistic()
608 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, in drm_test_buddy_alloc_pessimistic()
610 "buddy_alloc hit -ENOMEM on final alloc\n"); in drm_test_buddy_alloc_pessimistic()
615 list_move_tail(&block->link, &blocks); in drm_test_buddy_alloc_pessimistic()
618 for (order = max_order; order--;) { in drm_test_buddy_alloc_pessimistic()
619 size = get_size(order, mm.chunk_size); in drm_test_buddy_alloc_pessimistic()
620 KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, in drm_test_buddy_alloc_pessimistic()
626 list_del(&block->link); in drm_test_buddy_alloc_pessimistic()
627 drm_buddy_free_block(&mm, block); in drm_test_buddy_alloc_pessimistic()
632 list_del(&block->link); in drm_test_buddy_alloc_pessimistic()
633 drm_buddy_free_block(&mm, block); in drm_test_buddy_alloc_pessimistic()
635 size = get_size(order, mm.chunk_size); in drm_test_buddy_alloc_pessimistic()
636 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, in drm_test_buddy_alloc_pessimistic()
638 "buddy_alloc hit -ENOMEM with order=%d\n", in drm_test_buddy_alloc_pessimistic()
644 list_del(&block->link); in drm_test_buddy_alloc_pessimistic()
645 drm_buddy_free_block(&mm, block); in drm_test_buddy_alloc_pessimistic()
649 /* To confirm, now the whole mm should be available */ in drm_test_buddy_alloc_pessimistic()
650 size = get_size(max_order, mm.chunk_size); in drm_test_buddy_alloc_pessimistic()
651 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, in drm_test_buddy_alloc_pessimistic()
653 "buddy_alloc (realloc) hit -ENOMEM with order=%d\n", in drm_test_buddy_alloc_pessimistic()
659 list_del(&block->link); in drm_test_buddy_alloc_pessimistic()
660 drm_buddy_free_block(&mm, block); in drm_test_buddy_alloc_pessimistic()
661 drm_buddy_free_list(&mm, &blocks, 0); in drm_test_buddy_alloc_pessimistic()
662 drm_buddy_fini(&mm); in drm_test_buddy_alloc_pessimistic()
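
The pessimistic matches above mix two ways of returning memory: drm_buddy_free_list() for whole lists, and list_del() followed by drm_buddy_free_block() when a single block is peeled off so that exactly one order's worth of space becomes free again. A minimal sketch of the single-block variant; the wrapper name is illustrative:

#include <drm/drm_buddy.h>

/* Illustrative helper: pop one block off a list and return it to the mm. */
static void example_free_one(struct drm_buddy *mm, struct list_head *blocks)
{
        struct drm_buddy_block *block;

        block = list_first_entry_or_null(blocks, struct drm_buddy_block, link);
        if (!block)
                return;

        list_del(&block->link);
        drm_buddy_free_block(mm, block);
}

Peeling blocks off one at a time lets the test control exactly which orders are free at each step before it finally checks that the whole mm can be reallocated as a single max_order block (lines 649-653).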
667 u64 mm_size, size, start = 0; in drm_test_buddy_alloc_optimistic()
669 unsigned long flags = 0; in drm_test_buddy_alloc_optimistic()
671 struct drm_buddy mm; in drm_test_buddy_alloc_optimistic() local
677 * Create a mm with one block of each order available, and in drm_test_buddy_alloc_optimistic()
681 mm_size = SZ_4K * ((1 << (max_order + 1)) - 1); in drm_test_buddy_alloc_optimistic()
683 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), in drm_test_buddy_alloc_optimistic()
686 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); in drm_test_buddy_alloc_optimistic()
688 for (order = 0; order <= max_order; order++) { in drm_test_buddy_alloc_optimistic()
689 size = get_size(order, mm.chunk_size); in drm_test_buddy_alloc_optimistic()
690 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, in drm_test_buddy_alloc_optimistic()
692 "buddy_alloc hit -ENOMEM with order=%d\n", in drm_test_buddy_alloc_optimistic()
698 list_move_tail(&block->link, &blocks); in drm_test_buddy_alloc_optimistic()
702 size = get_size(0, mm.chunk_size); in drm_test_buddy_alloc_optimistic()
703 KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, in drm_test_buddy_alloc_optimistic()
707 drm_buddy_free_list(&mm, &blocks, 0); in drm_test_buddy_alloc_optimistic()
708 drm_buddy_fini(&mm); in drm_test_buddy_alloc_optimistic()
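
The mm_size chosen at line 681 is what makes the "one block of each order" comment literal: SZ_4K * ((1 << (max_order + 1)) - 1) = SZ_4K * (1 + 2 + 4 + ... + 2^max_order), so the space decomposes into exactly one block of every order from 0 up to max_order with nothing left over. The loop at line 688 can therefore take each of those blocks once, after which the final chunk-sized request at lines 702-703 is expected to fail.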
713 u64 size = U64_MAX, start = 0; in drm_test_buddy_alloc_limit()
715 unsigned long flags = 0; in drm_test_buddy_alloc_limit()
717 struct drm_buddy mm; in drm_test_buddy_alloc_limit() local
719 KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K)); in drm_test_buddy_alloc_limit()
721 KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER, in drm_test_buddy_alloc_limit()
722 "mm.max_order(%d) != %d\n", mm.max_order, in drm_test_buddy_alloc_limit()
725 size = mm.chunk_size << mm.max_order; in drm_test_buddy_alloc_limit()
726 KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size, in drm_test_buddy_alloc_limit()
727 mm.chunk_size, &allocated, flags)); in drm_test_buddy_alloc_limit()
732 KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order, in drm_test_buddy_alloc_limit()
734 drm_buddy_block_order(block), mm.max_order); in drm_test_buddy_alloc_limit()
736 KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block), in drm_test_buddy_alloc_limit()
737 BIT_ULL(mm.max_order) * mm.chunk_size, in drm_test_buddy_alloc_limit()
739 drm_buddy_block_size(&mm, block), in drm_test_buddy_alloc_limit()
740 BIT_ULL(mm.max_order) * mm.chunk_size); in drm_test_buddy_alloc_limit()
742 drm_buddy_free_list(&mm, &allocated, 0); in drm_test_buddy_alloc_limit()
743 drm_buddy_fini(&mm); in drm_test_buddy_alloc_limit()
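
drm_test_buddy_alloc_limit() initialises the mm with size = U64_MAX and expects max_order to be capped at DRM_BUDDY_MAX_ORDER (lines 721-722); the largest single block such an mm can hand out is then the chunk size shifted by that order, which is what the order and size expectations at lines 732-740 verify. A trivial restatement of that bound; the helper name is illustrative:

#include <drm/drm_buddy.h>

/* Illustrative helper: the largest single block this mm can allocate. */
static u64 example_max_block_size(const struct drm_buddy *mm)
{
        return mm->chunk_size << mm->max_order;
}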
751 kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", in drm_buddy_suite_init()
754 return 0; in drm_buddy_suite_init()