/linux/include/linux/ |
huge_mm.h
    70   * Mask of all large folio orders supported for anonymous THP; all orders up to
    77   * Mask of all large folio orders supported for file THP. Folios in a DAX
    87   * Mask of all large folio orders supported for THP.
    186  static inline int highest_order(unsigned long orders)  in highest_order() argument
    188  return fls_long(orders) - 1;  in highest_order()
    191  static inline int next_order(unsigned long *orders, int prev)  in next_order() argument
    193  *orders &= ~BIT(prev);  in next_order()
    194  return highest_order(*orders);  in next_order()
    229  * Filter the bitfield of input orders to the ones suitable for use in the vma.
    231  * All orders that pass the checks are returned as a bitfield.
    [all …]
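The highest_order()/next_order() pair above walks a bitmask of folio orders from the largest set bit downward. A minimal user-space sketch of that iteration (the kernel helpers use fls_long() and BIT(); the example mask and the 64-bit long assumption are illustrative only):

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    static int highest_order(unsigned long orders)
    {
        /* index of the most-significant set bit, i.e. the largest order */
        return orders ? 63 - __builtin_clzl(orders) : -1;
    }

    static int next_order(unsigned long *orders, int prev)
    {
        *orders &= ~BIT(prev);          /* drop the order just tried */
        return highest_order(*orders);  /* move to the next-largest one */
    }

    int main(void)
    {
        unsigned long orders = BIT(9) | BIT(4) | BIT(2);  /* illustrative mask */
        int order = highest_order(orders);

        while (orders) {
            printf("trying order %d\n", order);  /* prints 9, then 4, then 2 */
            order = next_order(&orders, order);
        }
        return 0;
    }

The mm/ entries further down (huge_memory.c, memory.c, shmem.c) use this same loop shape to try each candidate order in turn.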
|
folio_queue.h
    31   u8 orders[PAGEVEC_SIZE]; /* Order of each folio */  member
    238  folioq->orders[slot] = __folio_order(folio);  in folioq_append()
    260  folioq->orders[slot] = __folio_order(folio);  in folioq_append_mark()
    291  return folioq->orders[slot];  in folioq_folio_order()
|
refcount.h
    167  * and thereby orders future stores. See the comment on top.
    204  * and thereby orders future stores. See the comment on top.
    230  * and thereby orders future stores. See the comment on top.
|
ihex.h
    4  * firmware loaded in strange orders rather than a single big blob, but
|
/linux/drivers/gpu/drm/ttm/tests/ |
ttm_pool_test.c
    250  pt = &pool->caching[caching].orders[order];  in ttm_pool_alloc_order_caching_match()
    279  pt_pool = &pool->caching[pool_caching].orders[order];  in ttm_pool_alloc_caching_mismatch()
    280  pt_tt = &pool->caching[tt_caching].orders[order];  in ttm_pool_alloc_caching_mismatch()
    313  pt_pool = &pool->caching[caching].orders[order];  in ttm_pool_alloc_order_mismatch()
    314  pt_tt = &pool->caching[caching].orders[0];  in ttm_pool_alloc_order_mismatch()
    354  pt = &pool->caching[caching].orders[order];  in ttm_pool_free_dma_alloc()
    385  pt = &pool->caching[caching].orders[order];  in ttm_pool_free_no_dma_alloc()
    405  pt = &pool->caching[caching].orders[order];  in ttm_pool_fini_basic()
|
/linux/tools/testing/selftests/mm/ |
thp_settings.c
    203  unsigned long orders = thp_supported_orders();  in thp_read_settings() local
    230  if (!((1 << i) & orders)) {  in thp_read_settings()
    255  unsigned long orders = thp_supported_orders();  in thp_write_settings() local
    281  if (!((1 << i) & orders))  in thp_write_settings()
    352  unsigned long orders = 0;  in __thp_supported_orders() local
    369  orders |= 1UL << i;  in __thp_supported_orders()
    372  return orders;  in __thp_supported_orders()
|
/linux/drivers/dma-buf/heaps/ |
system_heap.c
    49   * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
    54   static const unsigned int orders[] = {8, 4, 0};  variable
    55   #define NUM_ORDERS ARRAY_SIZE(orders)
    321  if (size < (PAGE_SIZE << orders[i]))  in alloc_largest_available()
    323  if (max_order < orders[i])  in alloc_largest_available()
    326  page = alloc_pages(order_flags[i], orders[i]);  in alloc_largest_available()
    342  unsigned int max_order = orders[0];  in system_heap_allocate()
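The orders[] = {8, 4, 0} table above drives a "largest chunk that still fits" policy in alloc_largest_available(). A hedged user-space sketch of that selection, with malloc() standing in for alloc_pages() and all names illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    static const unsigned int orders[] = {8, 4, 0};    /* 1MB, 64K, 4K chunks */
    #define NUM_ORDERS (sizeof(orders) / sizeof(orders[0]))

    static void *alloc_largest_available(size_t size, unsigned int max_order,
                                         unsigned int *picked)
    {
        for (unsigned int i = 0; i < NUM_ORDERS; i++) {
            if (size < (PAGE_SIZE << orders[i]))    /* chunk would overshoot */
                continue;
            if (max_order < orders[i])              /* caller forbids this order */
                continue;
            *picked = orders[i];
            return malloc(PAGE_SIZE << orders[i]);  /* stand-in for alloc_pages() */
        }
        return NULL;
    }

    int main(void)
    {
        unsigned int order;
        void *chunk = alloc_largest_available(300 * 1024, orders[0], &order);

        if (chunk) {
            printf("allocated a %lu-byte chunk (order %u)\n",
                   PAGE_SIZE << order, order);      /* 64K for a 300K request */
            free(chunk);
        }
        return 0;
    }

The real heap calls this repeatedly until the request is filled, subtracting each chunk from the remaining size and capping max_order at the order it just obtained.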
|
/linux/tools/memory-model/Documentation/ |
glossary.txt
    29   a special operation that includes a load and which orders that
    117  Fully Ordered: An operation such as smp_mb() that orders all of
    120  that orders all of its CPU's prior accesses, itself, and
    167  a special operation that includes a store and which orders that
|
cheatsheet.txt
    34  SELF: Orders self, as opposed to accesses before and/or after
    35  SV: Orders later accesses to the same variable
|
recipes.txt
    232  The smp_store_release() macro orders any prior accesses against the
    233  store, while the smp_load_acquire macro orders the load against any
    273  smp_store_release(), but the rcu_dereference() macro orders the load only
    310  The smp_wmb() macro orders prior stores against later stores, and the
    311  smp_rmb() macro orders prior loads against later loads. Therefore, if
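The release/acquire recipe described above is the classic message-passing pattern. A user-space analogue with C11 atomics, where memory_order_release/memory_order_acquire stand in for smp_store_release()/smp_load_acquire() (build with -pthread; the payload value is illustrative):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    static int payload;            /* plain data, published via the flag */
    static atomic_int ready;

    static void *producer(void *arg)
    {
        (void)arg;
        payload = 42;                                   /* prior access... */
        atomic_store_explicit(&ready, 1,                /* ...ordered before the store */
                              memory_order_release);
        return NULL;
    }

    static void *consumer(void *arg)
    {
        (void)arg;
        while (!atomic_load_explicit(&ready, memory_order_acquire))
            ;                                           /* acquire orders the later read */
        printf("payload = %d\n", payload);              /* guaranteed to see 42 */
        return NULL;
    }

    int main(void)
    {
        pthread_t p, c;

        pthread_create(&p, NULL, producer, NULL);
        pthread_create(&c, NULL, consumer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
    }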
|
ordering.txt
    11   1. Barriers (also known as "fences"). A barrier orders some or
    67   First, the smp_mb() full memory barrier orders all of the CPU's prior
    115  synchronize_srcu() and so on. However, these primitives have orders
|
/linux/arch/s390/kvm/ |
sigp.c
    266  /* handle unknown orders in user space */  in __prepare_sigp_unknown()
    280  * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders  in handle_sigp_dst()
    283  * interrupt, we need to return any new non-reset orders "busy".  in handle_sigp_dst()
    298  * their orders, while the guest cannot observe a  in handle_sigp_dst()
    299  * difference when issuing other orders from two  in handle_sigp_dst()
|
/linux/include/drm/ttm/ |
ttm_pool.h
    62  * struct ttm_pool - Pool for all caching and orders
    78  struct ttm_pool_type orders[NR_PAGE_ORDERS];  member
|
/linux/arch/powerpc/mm/ |
mmu_context.c
    56  * This full barrier orders the store to the cpumask above vs  in switch_mm_irqs_off()
    69  * radix which orders earlier stores to clear the PTEs before  in switch_mm_irqs_off()
|
/linux/drivers/gpu/drm/ttm/ |
ttm_pool.c
    292  return &pool->caching[caching].orders[order];  in ttm_pool_select_type()
    298  return &pool->caching[caching].orders[order];  in ttm_pool_select_type()
    306  return &pool->caching[caching].orders[order];  in ttm_pool_select_type()
    581  if (pt != &pool->caching[i].orders[j])  in ttm_pool_init()
    619  if (pt != &pool->caching[i].orders[j])  in ttm_pool_fini()
    755  ttm_pool_debugfs_orders(pool->caching[i].orders, m);  in ttm_pool_debugfs()
|
/linux/Documentation/ |
atomic_t.txt
    194  smp_mb__before_atomic() orders all earlier accesses against the RMW op
    195  itself and all accesses following it, and smp_mb__after_atomic() orders all
    226  a RELEASE because it orders preceding instructions against both the read
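A rough user-space analogue of the rule above: an otherwise-relaxed RMW is unordered on its own, and a full fence placed before or after it plays the role of smp_mb__before_atomic()/smp_mb__after_atomic(). Here C11's atomic_thread_fence(memory_order_seq_cst) stands in for smp_mb(); the scenario and names are illustrative only:

    #include <stdatomic.h>

    static atomic_int refs = 1;
    static int shared_state;

    void publish_then_count(void)
    {
        shared_state = 1;                               /* earlier store */
        atomic_thread_fence(memory_order_seq_cst);      /* ~ smp_mb__before_atomic() */
        atomic_fetch_add_explicit(&refs, 1,             /* relaxed RMW, unordered */
                                  memory_order_relaxed);/* by itself */
    }

    void count_then_read(int *out)
    {
        atomic_fetch_sub_explicit(&refs, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* ~ smp_mb__after_atomic() */
        *out = shared_state;                            /* later load, now ordered */
    }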
|
/linux/Documentation/userspace-api/media/v4l/ |
field-order.rst
    80  If multiple field orders are possible the
    81  driver must choose one of the possible field orders during
|
pixfmt-bayer.rst
    15  orders. See also `the Wikipedia article on Bayer filter
|
/linux/mm/ |
huge_memory.c
    90   unsigned long orders)  in __thp_vma_allowable_orders() argument
    97   /* Check the intersection of requested and supported orders. */  in __thp_vma_allowable_orders()
    105  orders &= supported_orders;  in __thp_vma_allowable_orders()
    106  if (!orders)  in __thp_vma_allowable_orders()
    117  return in_pf ? orders : 0;  in __thp_vma_allowable_orders()
    129  * filtering out the unsuitable orders.  in __thp_vma_allowable_orders()
    135  int order = highest_order(orders);  in __thp_vma_allowable_orders()
    138  while (orders) {  in __thp_vma_allowable_orders()
    142  order = next_order(&orders, order);  in __thp_vma_allowable_orders()
    145  if (!orders)  in __thp_vma_allowable_orders()
    [all …]
|
memory.c
    4085  unsigned long orders)  in thp_swap_suitable_orders() argument
    4089  order = highest_order(orders);  in thp_swap_suitable_orders()
    4096  while (orders) {  in thp_swap_suitable_orders()
    4100  order = next_order(&orders, order);  in thp_swap_suitable_orders()
    4103  return orders;  in thp_swap_suitable_orders()
    4109  unsigned long orders;  in alloc_swap_folio() local
    4135  * Get a list of all the (large) orders below PMD_ORDER that are enabled  in alloc_swap_folio()
    4138  orders = thp_vma_allowable_orders(vma, vma->vm_flags,  in alloc_swap_folio()
    4140  orders = thp_vma_suitable_orders(vma, vmf->address, orders);  in alloc_swap_folio()
    4141  orders = thp_swap_suitable_orders(swp_offset(entry),  in alloc_swap_folio()
    [all …]
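alloc_swap_folio() above first narrows a candidate order mask through several filters and only then tries the largest surviving order, falling back on failure. A hedged, self-contained sketch of that shape; the filter and allocator functions here are hypothetical stand-ins, not the mm/ helpers:

    #include <stdlib.h>

    #define BIT(n) (1UL << (n))

    static int highest_order(unsigned long orders)
    {
        return orders ? 63 - __builtin_clzl(orders) : -1;
    }

    /* hypothetical policy filters, playing the role of the thp_*_orders() checks */
    static unsigned long filter_enabled(unsigned long orders)
    {
        return orders & (BIT(9) | BIT(4) | BIT(0));   /* orders the admin enabled */
    }

    static unsigned long filter_suitable(unsigned long orders)
    {
        return orders & ~BIT(9);          /* address range too small for order 9 */
    }

    /* stand-in allocator: pretend the largest orders can fail */
    static void *try_alloc(int order)
    {
        return order > 4 ? NULL : malloc(4096UL << order);
    }

    void *alloc_best_effort(void)
    {
        unsigned long orders = BIT(9) | BIT(4) | BIT(2) | BIT(0);
        int order;
        void *p;

        orders = filter_enabled(orders);     /* intersect with enabled orders */
        orders = filter_suitable(orders);    /* drop orders the VMA cannot hold */

        for (order = highest_order(orders); orders;
             orders &= ~BIT(order), order = highest_order(orders)) {
            p = try_alloc(order);            /* largest first, fall back on failure */
            if (p)
                return p;
        }
        return try_alloc(0);                 /* final order-0 fallback */
    }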
|
shmem.c
    1690  * Only allow inherit orders if the top-level value is 'force', which  in shmem_allowable_huge_orders()
    1720  unsigned long orders)  in shmem_suitable_orders() argument
    1728  orders = thp_vma_suitable_orders(vma, vmf->address, orders);  in shmem_suitable_orders()
    1729  if (!orders)  in shmem_suitable_orders()
    1734  order = highest_order(orders);  in shmem_suitable_orders()
    1735  while (orders) {  in shmem_suitable_orders()
    1749  order = next_order(&orders, order);  in shmem_suitable_orders()
    1752  return orders;  in shmem_suitable_orders()
    1757  unsigned long orders)  in shmem_suitable_orders() argument
    1779  struct mm_struct *fault_mm, unsigned long orders)  in shmem_alloc_and_add_folio() argument
    [all …]
|
/linux/drivers/s390/block/ |
dasd_eckd.h
    56  /* Define Subsystem Function / Orders */
    60  * Perform Subsystem Function / Orders
    67  * Perform Subsystem Function / Sub-Orders
|
/linux/samples/bpf/ |
tc_l2_redirect_user.c
    59  /* bpf_tunnel_key.remote_ipv4 expects host byte orders */  in main()
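The comment above points at the usual IPv4 byte-order trap: inet_pton() yields network byte order, while a field documented as taking host byte order needs an ntohl() first. A minimal illustration (not the bpf sample itself):

    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
        struct in_addr addr;
        unsigned int host_ip;

        if (inet_pton(AF_INET, "10.1.2.3", &addr) != 1)
            return 1;

        host_ip = ntohl(addr.s_addr);           /* network -> host byte order */
        printf("network 0x%08x  host 0x%08x\n",
               (unsigned int)addr.s_addr, host_ip);
        return 0;
    }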
|
/linux/drivers/android/ |
binder_alloc_selftest.c
    208  /* Generate BUFFER_NUM factorial free orders. */
    283  * then free them in all orders possible. Check that pages are
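The selftest above exercises every possible free order, i.e. all BUFFER_NUM! permutations of the buffer indices. A generic sketch of that enumeration (the real test allocates and frees binder buffers at each completed permutation; names here are illustrative):

    #include <stdio.h>

    #define BUFFER_NUM 4

    static void visit(const int order[BUFFER_NUM])
    {
        for (int i = 0; i < BUFFER_NUM; i++)
            printf("%d ", order[i]);        /* one complete free order */
        printf("\n");
    }

    static void permute(int order[BUFFER_NUM], int used, int depth)
    {
        if (depth == BUFFER_NUM) {
            visit(order);
            return;
        }
        for (int i = 0; i < BUFFER_NUM; i++) {
            if (used & (1 << i))
                continue;                   /* index already placed earlier */
            order[depth] = i;
            permute(order, used | (1 << i), depth + 1);
        }
    }

    int main(void)
    {
        int order[BUFFER_NUM];

        permute(order, 0, 0);               /* prints 4! = 24 orders */
        return 0;
    }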
|
/linux/arch/s390/include/uapi/asm/ |
raw3270.h
    13  /* Buffer Control Orders */
|