/linux-6.14.4/include/linux/
  huge_mm.h
    69:  * Mask of all large folio orders supported for anonymous THP; all orders up to
    76:  * Mask of all large folio orders supported for file THP. Folios in a DAX
    86:  * Mask of all large folio orders supported for THP.
    189: static inline int highest_order(unsigned long orders)    [in highest_order(), argument]
    191: return fls_long(orders) - 1;    [in highest_order()]
    194: static inline int next_order(unsigned long *orders, int prev)    [in next_order(), argument]
    196: *orders &= ~BIT(prev);    [in next_order()]
    197: return highest_order(*orders);    [in next_order()]
    232:  * Filter the bitfield of input orders to the ones suitable for use in the vma.
    234:  * All orders that pass the checks are returned as a bitfield.
    [all …]
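The huge_mm.h hits above show the folio-order bitmask convention: bit N set in "orders" means a folio of order N (2^N pages) is a candidate, and callers walk the mask from the highest order downwards. Below is a minimal standalone userspace sketch of that pattern, assuming a 64-bit unsigned long; fls_long() and BIT() are replaced with portable stand-ins and main() is only illustrative, not kernel code.

/*
 * Userspace sketch of the orders-bitmask iteration pattern from huge_mm.h.
 * Assumes unsigned long is 64 bits; helper names mirror the kernel ones but
 * the bodies here are portable stand-ins.
 */
#include <stdio.h>

#define BIT(n) (1UL << (n))

static int highest_order(unsigned long orders)
{
	/* index of the highest set bit, like fls_long(orders) - 1 */
	return orders ? 63 - __builtin_clzl(orders) : -1;
}

static int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);          /* drop the order just tried */
	return highest_order(*orders);  /* fall back to the next lower order */
}

int main(void)
{
	/* pretend orders 9, 4 and 0 are enabled */
	unsigned long orders = BIT(9) | BIT(4) | BIT(0);
	int order = highest_order(orders);

	while (orders) {
		printf("try order %d (%lu KiB with 4K pages)\n",
		       order, 4UL << order);
		order = next_order(&orders, order);
	}
	return 0;
}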
  folio_queue.h
    31:  u8 orders[PAGEVEC_SIZE]; /* Order of each folio */    [member]
    244: folioq->orders[slot] = __folio_order(folio);    [in folioq_append()]
    266: folioq->orders[slot] = __folio_order(folio);    [in folioq_append_mark()]
    297: return folioq->orders[slot];    [in folioq_folio_order()]
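The folio_queue.h hits record the order of each queued folio so its byte size can be recovered later as page size shifted by the order. A tiny sketch of that bookkeeping, with made-up structure and constant names rather than the real folio_queue API:

/*
 * Sketch only: a fixed-size queue that remembers each entry's order so the
 * byte size can be recomputed as PAGE_SIZE << order. SLOTS and PAGE_SIZE are
 * illustrative constants, not the kernel's.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SLOTS     15
#define PAGE_SIZE 4096UL

struct order_queue {
	uint8_t orders[SLOTS];   /* order of the entry in each slot */
};

static void oq_set(struct order_queue *q, unsigned int slot, uint8_t order)
{
	q->orders[slot] = order;
}

static size_t oq_bytes(const struct order_queue *q, unsigned int slot)
{
	return PAGE_SIZE << q->orders[slot];   /* 2^order pages */
}

int main(void)
{
	struct order_queue q;

	oq_set(&q, 0, 2);                      /* slot 0 holds an order-2 entry */
	printf("slot 0 holds %zu bytes\n", oq_bytes(&q, 0));
	return 0;
}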
  refcount.h
    167:  * and thereby orders future stores. See the comment on top.
    204:  * and thereby orders future stores. See the comment on top.
    230:  * and thereby orders future stores. See the comment on top.
/linux-6.14.4/drivers/gpu/drm/ttm/tests/
  ttm_pool_test.c
    250: pt = &pool->caching[caching].orders[order];    [in ttm_pool_alloc_order_caching_match()]
    279: pt_pool = &pool->caching[pool_caching].orders[order];    [in ttm_pool_alloc_caching_mismatch()]
    280: pt_tt = &pool->caching[tt_caching].orders[order];    [in ttm_pool_alloc_caching_mismatch()]
    313: pt_pool = &pool->caching[caching].orders[order];    [in ttm_pool_alloc_order_mismatch()]
    314: pt_tt = &pool->caching[caching].orders[0];    [in ttm_pool_alloc_order_mismatch()]
    354: pt = &pool->caching[caching].orders[order];    [in ttm_pool_free_dma_alloc()]
    385: pt = &pool->caching[caching].orders[order];    [in ttm_pool_free_no_dma_alloc()]
    405: pt = &pool->caching[caching].orders[order];    [in ttm_pool_fini_basic()]
/linux-6.14.4/tools/testing/selftests/mm/
  thp_settings.c
    203: unsigned long orders = thp_supported_orders();    [in thp_read_settings(), local]
    230: if (!((1 << i) & orders)) {    [in thp_read_settings()]
    255: unsigned long orders = thp_supported_orders();    [in thp_write_settings(), local]
    281: if (!((1 << i) & orders))    [in thp_write_settings()]
    352: unsigned long orders = 0;    [in __thp_supported_orders(), local]
    369: orders |= 1UL << i;    [in __thp_supported_orders()]
    372: return orders;    [in __thp_supported_orders()]
  uffd-wp-mremap.c
    30:  unsigned long orders;    [in detect_thp_sizes(), local]
    38:  orders = thp_supported_orders();    [in detect_thp_sizes()]
    40:  for (i = 0; orders && count < max; i++) {    [in detect_thp_sizes()]
    41:  if (!(orders & (1UL << i)))    [in detect_thp_sizes()]
    43:  orders &= ~(1UL << i);    [in detect_thp_sizes()]
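Both selftests above turn a supported-orders bitmask into concrete THP sizes by walking the set bits and shifting the page size. A hedged standalone sketch of that loop; the bitmask value is made up here rather than coming from the selftests' thp_supported_orders() helper:

/*
 * Sketch of the selftest pattern: walk an "orders" bitmask and convert each
 * set bit into a THP size in bytes. The bitmask below is hard-coded for
 * illustration; the real tests derive it from sysfs.
 */
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

static int detect_thp_sizes(size_t sizes[], int max)
{
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	unsigned long orders = (1UL << 9) | (1UL << 4);  /* pretend supported orders */
	int i, count = 0;

	for (i = 0; orders && count < max; i++) {
		if (!(orders & (1UL << i)))
			continue;
		sizes[count++] = page_size << i;   /* order i => 2^i pages */
		orders &= ~(1UL << i);
	}
	return count;
}

int main(void)
{
	size_t sizes[16];
	int n = detect_thp_sizes(sizes, 16);

	for (int i = 0; i < n; i++)
		printf("supported THP size: %zu bytes\n", sizes[i]);
	return 0;
}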
/linux-6.14.4/drivers/dma-buf/heaps/
  system_heap.c
    49:  * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
    54:  static const unsigned int orders[] = {8, 4, 0};    [variable]
    55:  #define NUM_ORDERS ARRAY_SIZE(orders)
    321: if (size < (PAGE_SIZE << orders[i]))    [in alloc_largest_available()]
    323: if (max_order < orders[i])    [in alloc_largest_available()]
    326: page = alloc_pages(order_flags[i], orders[i]);    [in alloc_largest_available()]
    342: unsigned int max_order = orders[0];    [in system_heap_allocate()]
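The system_heap.c hits show the "largest order that still fits" strategy: with 4K pages, orders {8, 4, 0} correspond to 1MB, 64K and 4K chunks, and the allocator tries them in that sequence. The sketch below mirrors only the order-selection logic; the allocation itself is replaced by a printout and all values are illustrative:

/*
 * Userspace sketch of the alloc_largest_available() selection logic from
 * system_heap.c: pick the largest order that fits the remaining size and
 * does not exceed the caller's cap.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* with 4K pages: order 8 = 1MB, order 4 = 64K, order 0 = 4K */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS (sizeof(orders) / sizeof(orders[0]))

static unsigned int pick_order(unsigned long size, unsigned int max_order)
{
	for (unsigned int i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;               /* chunk would overshoot */
		if (max_order < orders[i])
			continue;               /* caller capped the order */
		return orders[i];
	}
	return 0;                               /* fall back to single pages */
}

int main(void)
{
	unsigned long remaining = 3UL * 1024 * 1024 + 80 * 1024;  /* 3MB + 80K */
	unsigned int max_order = orders[0];

	while (remaining >= PAGE_SIZE) {
		unsigned int order = pick_order(remaining, max_order);

		printf("allocate order %u (%lu bytes)\n", order, PAGE_SIZE << order);
		remaining -= PAGE_SIZE << order;
		max_order = order;   /* never retry a larger order, as the driver does */
	}
	return 0;
}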
/linux-6.14.4/tools/memory-model/Documentation/
  glossary.txt
    29:  a special operation that includes a load and which orders that
    117: Fully Ordered: An operation such as smp_mb() that orders all of
    120: that orders all of its CPU's prior accesses, itself, and
    167: a special operation that includes a store and which orders that
  cheatsheet.txt
    34:  SELF: Orders self, as opposed to accesses before and/or after
    35:  SV: Orders later accesses to the same variable
  recipes.txt
    232: The smp_store_release() macro orders any prior accesses against the
    233: store, while the smp_load_acquire macro orders the load against any
    273: smp_store_release(), but the rcu_dereference() macro orders the load only
    310: The smp_wmb() macro orders prior stores against later stores, and the
    311: smp_rmb() macro orders prior loads against later loads. Therefore, if
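The recipes.txt lines describe release/acquire message passing: the release store orders the writer's prior accesses before the flag, and the acquire load orders the reader's later accesses after it. A userspace analogue using C11 atomics and pthreads rather than the kernel's smp_store_release()/smp_load_acquire(); the variable names and thread layout are illustrative only:

/*
 * Release/acquire message passing with C11 atomics: the consumer is
 * guaranteed to observe payload = 42 once it sees flag = 1.
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;        /* plain data, published via the flag */
static atomic_int flag;    /* publication flag */

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;                                            /* prior access... */
	atomic_store_explicit(&flag, 1, memory_order_release);   /* ...ordered before this store */
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&flag, memory_order_acquire))
		;                            /* acquire orders the payload read after it */
	printf("payload = %d\n", payload);   /* sees 42 */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}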
/linux-6.14.4/arch/s390/kvm/
  sigp.c
    266: /* handle unknown orders in user space */    [in __prepare_sigp_unknown()]
    280:  * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders    [in handle_sigp_dst()]
    283:  * interrupt, we need to return any new non-reset orders "busy".    [in handle_sigp_dst()]
    298:  * their orders, while the guest cannot observe a    [in handle_sigp_dst()]
    299:  * difference when issuing other orders from two    [in handle_sigp_dst()]
/linux-6.14.4/include/drm/ttm/
  ttm_pool.h
    62:  * struct ttm_pool - Pool for all caching and orders
    78:  struct ttm_pool_type orders[NR_PAGE_ORDERS];    [member]
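The ttm_pool.h and ttm_pool_test.c hits all index the same two-dimensional structure: one sub-pool per (caching mode, page order) pair, looked up as pool->caching[caching].orders[order]. A simplified sketch of that table layout; the structures, enum values and MAX_ORDER_SKETCH constant below are stand-ins, not the real TTM definitions:

/*
 * Sketch of a per-caching, per-order sub-pool table and the lookup helper
 * pattern seen in ttm_pool_select_type().
 */
#include <stdio.h>

#define MAX_ORDER_SKETCH 11    /* stand-in for NR_PAGE_ORDERS */

enum caching_mode { CACHED, WRITE_COMBINED, UNCACHED, NUM_CACHING };

struct pool_type {
	int free_pages;        /* toy bookkeeping */
};

struct pool {
	struct {
		struct pool_type orders[MAX_ORDER_SKETCH];
	} caching[NUM_CACHING];
};

static struct pool_type *pool_select_type(struct pool *pool,
					  enum caching_mode caching,
					  unsigned int order)
{
	return &pool->caching[caching].orders[order];
}

int main(void)
{
	struct pool pool = {0};
	struct pool_type *pt = pool_select_type(&pool, WRITE_COMBINED, 4);

	pt->free_pages = 16;
	printf("WC order-4 sub-pool holds %d free pages\n", pt->free_pages);
	return 0;
}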
/linux-6.14.4/arch/powerpc/mm/
  mmu_context.c
    56:  * This full barrier orders the store to the cpumask above vs    [in switch_mm_irqs_off()]
    69:  * radix which orders earlier stores to clear the PTEs before    [in switch_mm_irqs_off()]
/linux-6.14.4/arch/arm64/boot/dts/renesas/
  white-hawk-csi-dsi.dtsi
    24:  line-orders = <0 3 0>;
    45:  line-orders = <0 3 0>;
/linux-6.14.4/drivers/gpu/drm/ttm/
  ttm_pool.c
    292: return &pool->caching[caching].orders[order];    [in ttm_pool_select_type()]
    298: return &pool->caching[caching].orders[order];    [in ttm_pool_select_type()]
    306: return &pool->caching[caching].orders[order];    [in ttm_pool_select_type()]
    581: if (pt != &pool->caching[i].orders[j])    [in ttm_pool_init()]
    619: if (pt != &pool->caching[i].orders[j])    [in ttm_pool_fini()]
    755: ttm_pool_debugfs_orders(pool->caching[i].orders, m);    [in ttm_pool_debugfs()]
/linux-6.14.4/Documentation/devicetree/bindings/media/
  video-interfaces.yaml
    213: line-orders:
    226: An array of line orders of the CSI-2 C-PHY data lanes. The order of the
    230: data-lanes property. If the line-orders property is omitted, the value
/linux-6.14.4/Documentation/
  atomic_t.txt
    194: smp_mb__before_atomic() orders all earlier accesses against the RMW op
    195: itself and all accesses following it, and smp_mb__after_atomic() orders all
    226: a RELEASE because it orders preceding instructions against both the read
/linux-6.14.4/drivers/media/v4l2-core/
  v4l2-fwnode.c
    200: rval = fwnode_property_count_u32(fwnode, "line-orders");    [in v4l2_fwnode_endpoint_parse_csi2_bus()]
    203: pr_warn("invalid number of line-orders entries (need %u, got %u)\n",    [in v4l2_fwnode_endpoint_parse_csi2_bus()]
    267: "line-orders", array,    [in v4l2_fwnode_endpoint_parse_csi2_bus()]
    271: static const char * const orders[] = {    [in v4l2_fwnode_endpoint_parse_csi2_bus(), local]
    275: if (array[i] >= ARRAY_SIZE(orders)) {    [in v4l2_fwnode_endpoint_parse_csi2_bus()]
    285: orders[array[i]]);    [in v4l2_fwnode_endpoint_parse_csi2_bus()]
    292: pr_debug("no line orders defined, assuming ABC\n");    [in v4l2_fwnode_endpoint_parse_csi2_bus()]
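The v4l2-fwnode.c hits sketch a common firmware-property pattern: count the line-orders entries, require one per data lane, reject out-of-range values, and default to "ABC" when the property is absent (matching the video-interfaces.yaml binding above). A hedged userspace sketch of that flow; the names[] table, its ordering, and the lane count are placeholders for illustration, so consult the binding for the authoritative value-to-order mapping:

/*
 * Sketch of parse/validate/default handling for a per-lane "line-orders"
 * style array. Not the V4L2 fwnode API; names[] ordering is assumed.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const names[] = {
	"ABC", "ACB", "BAC", "BCA", "CAB", "CBA",   /* assumed ordering */
};

static void parse_line_orders(const unsigned int *vals, int count, int num_lanes)
{
	if (count <= 0) {
		printf("no line orders defined, assuming ABC for all %d lanes\n",
		       num_lanes);
		return;
	}
	if (count != num_lanes) {
		printf("invalid number of line-orders entries (need %d, got %d)\n",
		       num_lanes, count);
		return;
	}
	for (int i = 0; i < count; i++) {
		if (vals[i] >= ARRAY_SIZE(names)) {
			printf("lane %d: invalid line order %u\n", i, vals[i]);
			continue;
		}
		printf("lane %d: line order %s\n", i, names[vals[i]]);
	}
}

int main(void)
{
	/* mirrors the dtsi example above: line-orders = <0 3 0> for 3 lanes */
	const unsigned int vals[] = {0, 3, 0};

	parse_line_orders(vals, 3, 3);
	return 0;
}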
/linux-6.14.4/Documentation/userspace-api/media/v4l/
  field-order.rst
    80:  If multiple field orders are possible the
    81:  driver must choose one of the possible field orders during
  pixfmt-bayer.rst
    15:  orders. See also `the Wikipedia article on Bayer filter
/linux-6.14.4/Documentation/admin-guide/mm/
  transhuge.rst
    579: lower orders or small pages.
    583: instead falls back to using huge pages with lower orders or
    596: and instead falls back to using huge pages with lower orders or
    601: falls back to using huge pages with lower orders or small pages
    628: smaller orders. This can happen for a variety of reasons but a
/linux-6.14.4/samples/bpf/
  tc_l2_redirect_user.c
    59:  /* bpf_tunnel_key.remote_ipv4 expects host byte orders */    [in main()]
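The comment above is about byte order: an IPv4 address parsed with inet_addr() is returned in network byte order and must go through ntohl() before being stored in a field that expects host byte order. A minimal illustration with an arbitrary address, separate from the sample itself:

/*
 * Convert a dotted-quad IPv4 address to host byte order before use.
 */
#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	in_addr_t net = inet_addr("10.1.2.3");     /* network byte order */
	unsigned int host = ntohl(net);            /* host byte order */

	printf("network order: 0x%08x, host order: 0x%08x\n",
	       (unsigned int)net, host);
	return 0;
}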
/linux-6.14.4/mm/
  huge_memory.c
    104: unsigned long orders)    [in __thp_vma_allowable_orders(), argument]
    111: /* Check the intersection of requested and supported orders. */    [in __thp_vma_allowable_orders()]
    119: orders &= supported_orders;    [in __thp_vma_allowable_orders()]
    120: if (!orders)    [in __thp_vma_allowable_orders()]
    131: return in_pf ? orders : 0;    [in __thp_vma_allowable_orders()]
    143:  * filtering out the unsuitable orders.    [in __thp_vma_allowable_orders()]
    149: int order = highest_order(orders);    [in __thp_vma_allowable_orders()]
    152: while (orders) {    [in __thp_vma_allowable_orders()]
    156: order = next_order(&orders, order);    [in __thp_vma_allowable_orders()]
    159: if (!orders)    [in __thp_vma_allowable_orders()]
    [all …]
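The __thp_vma_allowable_orders() hits, together with the huge_mm.h comment at line 232, suggest the filtering shape: intersect the requested orders with the supported ones, then walk the surviving bits from highest to lowest and drop each order that fails a per-order check. The sketch below captures only that shape; the is_suitable() predicate is a stand-in, since the real checks depend on the VMA and fault context:

/*
 * Sketch of an orders-bitfield filter: intersect, then clear unsuitable
 * high orders until one passes (it and all lower bits survive).
 */
#include <stdio.h>

#define BIT(n) (1UL << (n))

static int highest_order(unsigned long orders)
{
	return orders ? 63 - __builtin_clzl(orders) : -1;
}

static int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);
	return highest_order(*orders);
}

static int is_suitable(int order)
{
	return order <= 4;               /* pretend only orders <= 4 pass */
}

static unsigned long filter_orders(unsigned long requested, unsigned long supported)
{
	unsigned long orders = requested & supported;   /* intersection first */
	int order = highest_order(orders);

	while (orders) {
		if (is_suitable(order))
			break;                          /* this and lower bits survive */
		order = next_order(&orders, order);     /* drop the unsuitable order */
	}
	return orders;
}

int main(void)
{
	unsigned long out = filter_orders(BIT(9) | BIT(4) | BIT(2), ~0UL);

	printf("surviving orders bitfield: 0x%lx\n", out);   /* 0x14: orders 4 and 2 */
	return 0;
}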
  memory.c
    4179: unsigned long orders)    [in thp_swap_suitable_orders(), argument]
    4183: order = highest_order(orders);    [in thp_swap_suitable_orders()]
    4190: while (orders) {    [in thp_swap_suitable_orders()]
    4194: order = next_order(&orders, order);    [in thp_swap_suitable_orders()]
    4197: return orders;    [in thp_swap_suitable_orders()]
    4203: unsigned long orders;    [in alloc_swap_folio(), local]
    4229:  * Get a list of all the (large) orders below PMD_ORDER that are enabled    [in alloc_swap_folio()]
    4232: orders = thp_vma_allowable_orders(vma, vma->vm_flags,    [in alloc_swap_folio()]
    4234: orders = thp_vma_suitable_orders(vma, vmf->address, orders);    [in alloc_swap_folio()]
    4235: orders = thp_swap_suitable_orders(swp_offset(entry),    [in alloc_swap_folio()]
    [all …]
/linux-6.14.4/drivers/android/
  binder_alloc_selftest.c
    208: /* Generate BUFFER_NUM factorial free orders. */
    283:  * then free them in all orders possible. Check that pages are