/linux/drivers/iommu/iommu-pages.h
    26: * @order: order of the page
    28: static inline void __iommu_alloc_account(struct page *page, int order) in __iommu_alloc_account() argument
    30: const long pgcnt = 1l << order; in __iommu_alloc_account()
    39: * @order: order of the page
    41: static inline void __iommu_free_account(struct page *page, int order) in __iommu_free_account() argument
    43: const long pgcnt = 1l << order; in __iommu_free_account()
    50: * __iommu_alloc_pages - allocate a zeroed page of a given order.
    52: * @order: page order
    56: static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order) in __iommu_alloc_pages() argument
    60: page = alloc_pages(gfp | __GFP_ZERO, order); in __iommu_alloc_pages()
    [all …]

/linux/arch/arm64/kvm/hyp/nvhe/page_alloc.c
    25: * Order 2 1 0
    28: * __find_buddy_nocheck(pool, page 0, order 0) => page 1
    29: * __find_buddy_nocheck(pool, page 0, order 1) => page 2
    30: * __find_buddy_nocheck(pool, page 1, order 0) => page 0
    31: * __find_buddy_nocheck(pool, page 2, order 0) => page 3
    35: unsigned short order) in __find_buddy_nocheck() argument
    39: addr ^= (PAGE_SIZE << order); in __find_buddy_nocheck()
    54: unsigned short order) in __find_buddy_avail() argument
    56: struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order); in __find_buddy_avail()
    58: if (!buddy || buddy->order != order || buddy->refcount) in __find_buddy_avail()
    [all …]

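The hits above show the core of the nVHE hypervisor's buddy finder: a block's buddy at a given order is found by XOR-ing its address with the block size. A minimal standalone sketch of that identity (plain C, assuming 4 KiB pages; not the hypervisor code itself) reproduces the page pairings listed in the source comment:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* A block of size (PAGE_SIZE << order) and its buddy differ in exactly one address bit. */
static unsigned long find_buddy_addr(unsigned long addr, unsigned int order)
{
	return addr ^ (PAGE_SIZE << order);
}

int main(void)
{
	printf("order 0 buddy of page 0: page %lu\n", find_buddy_addr(0 * PAGE_SIZE, 0) / PAGE_SIZE); /* => 1 */
	printf("order 1 buddy of page 0: page %lu\n", find_buddy_addr(0 * PAGE_SIZE, 1) / PAGE_SIZE); /* => 2 */
	printf("order 0 buddy of page 1: page %lu\n", find_buddy_addr(1 * PAGE_SIZE, 0) / PAGE_SIZE); /* => 0 */
	printf("order 0 buddy of page 2: page %lu\n", find_buddy_addr(2 * PAGE_SIZE, 0) / PAGE_SIZE); /* => 3 */
	return 0;
}
```
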
/linux/include/trace/events/compaction.h
    168: int order,
    172: TP_ARGS(order, gfp_mask, prio),
    175: __field(int, order)
    181: __entry->order = order;
    186: TP_printk("order=%d gfp_mask=%s priority=%d",
    187: __entry->order,
    195: int order,
    198: TP_ARGS(zone, order, ret),
    203: __field(int, order)
    210: __entry->order = order;
    [all …]

/linux/include/trace/events/vmscan.h
    68: TP_PROTO(int nid, int zid, int order),
    70: TP_ARGS(nid, zid, order),
    75: __field( int, order )
    81: __entry->order = order;
    84: TP_printk("nid=%d order=%d",
    86: __entry->order)
    91: TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
    93: TP_ARGS(nid, zid, order, gfp_flags),
    98: __field( int, order )
    105: __entry->order = order;
    [all …]

/linux/include/trace/events/kmem.h
    138: TP_PROTO(struct page *page, unsigned int order),
    140: TP_ARGS(page, order),
    144: __field( unsigned int, order )
    149: __entry->order = order;
    152: TP_printk("page=%p pfn=0x%lx order=%d",
    155: __entry->order)
    172: TP_printk("page=%p pfn=0x%lx order=0",
    179: TP_PROTO(struct page *page, unsigned int order,
    182: TP_ARGS(page, order, gfp_flags, migratetype),
    186: __field( unsigned int, order )
    [all …]

/linux/scripts/atomic/gen-atomic-fallback.sh
    8: #gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
    16: local order="$1"; shift
    28: #gen_order_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
    35: local order="$1"; shift
    37: local tmpl_order=${order#_}
    39: gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
    42: #gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
    49: local order="$1"; shift
    51: local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
    52: gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
    [all …]

/linux/Documentation/netlink/specs/nftables.yaml
    23: byte-order: big-endian
    242: byte-order: big-endian
    253: byte-order: big-endian
    260: byte-order: big-endian
    265: byte-order: big-endian
    281: byte-order: big-endian
    295: byte-order: big-endian
    300: byte-order: big-endian
    314: byte-order: big-endian
    321: byte-order: big-endian
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
    29: /* Allocating max_order bitmaps, one for each order */ in mlx5dr_buddy_init()
    39: /* In the beginning, we have only one order that is available for in mlx5dr_buddy_init()
    75: unsigned int *order) in dr_buddy_find_free_seg() argument
    88: "ICM Buddy: failed finding free mem for order %d\n", in dr_buddy_find_free_seg()
    99: *order = order_iter; in dr_buddy_find_free_seg()
    106: * @order: Order of the buddy to update.
    110: * It uses the data structures of the buddy system in order to find the first
    111: * area of free place, starting from the current order till the maximum order
    120: unsigned int order, in mlx5dr_buddy_alloc_mem() argument
    126: err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter); in mlx5dr_buddy_alloc_mem()
    [all …]

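The mlx5dr_buddy_alloc_mem() comment above describes the classic two-step buddy allocation: scan the per-order free bitmaps upward from the requested order until a free segment turns up, then split it back down to the requested size. A hedged plain-C sketch of that algorithm (illustrative bitset and MAX_ORDER, not the driver's data structures):

```c
#include <stdbool.h>

#define MAX_ORDER 8

/* free_bits[o][s] is true when segment s of order o is free (illustrative bitset). */
static bool free_bits[MAX_ORDER + 1][1u << MAX_ORDER];

static int buddy_alloc(unsigned int order, unsigned int *out_seg)
{
	unsigned int o, seg;

	/* Scan upward from the requested order until a free segment is found. */
	for (o = order; o <= MAX_ORDER; o++)
		for (seg = 0; seg < (1u << (MAX_ORDER - o)); seg++)
			if (free_bits[o][seg])
				goto found;
	return -1;					/* nothing free at any order */

found:
	free_bits[o][seg] = false;
	/* Split the larger block back down, returning one buddy to the free set each step. */
	while (o > order) {
		o--;
		seg <<= 1;
		free_bits[o][seg ^ 1] = true;
	}
	*out_seg = seg;
	return 0;
}
```

Seeding `free_bits[MAX_ORDER][0] = true` before the first call models the driver's initial state of one single top-order block.
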
/linux/lib/test_xarray.c
    72: unsigned order, void *entry, gfp_t gfp) in xa_store_order() argument
    74: XA_STATE_ORDER(xas, xa, index, order); in xa_store_order()
    177: unsigned int order; in check_xa_mark_1() local
    207: for (order = 2; order < max_order; order++) { in check_xa_mark_1()
    208: unsigned long base = round_down(index, 1UL << order); in check_xa_mark_1()
    209: unsigned long next = base + (1UL << order); in check_xa_mark_1()
    217: xa_store_order(xa, index, order, xa_mk_index(index), in check_xa_mark_1()
    328: unsigned int order; in check_xa_shrink() local
    353: for (order = 0; order < max_order; order++) { in check_xa_shrink()
    354: unsigned long max = (1UL << order) - 1; in check_xa_shrink()
    [all …]

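check_xa_mark_1() above derives the index span covered by a multi-order XArray entry from `base = round_down(index, 1UL << order)` and `next = base + (1UL << order)`. A tiny standalone check of that arithmetic (plain C; the values are chosen only for illustration):

```c
#include <stdio.h>

int main(void)
{
	unsigned long index = 70, order = 3;

	/* Equivalent of round_down(index, 1UL << order), plus the exclusive end of the range. */
	unsigned long base = index & ~((1UL << order) - 1);
	unsigned long next = base + (1UL << order);

	/* An order-3 entry stored at index 70 covers indices 64..71. */
	printf("entry at index %lu, order %lu covers [%lu, %lu)\n", index, order, base, next);
	return 0;
}
```
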
/linux/mm/page_alloc.c
    214: static void __free_pages_ok(struct page *page, unsigned int order,
    288: static bool page_contains_unaccepted(struct page *page, unsigned int order);
    289: static bool cond_accept_memory(struct zone *zone, unsigned int order);
    314: _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
    316: return deferred_grow_zone(zone, order); in _deferred_grow_zone()
    324: static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
    509: static inline unsigned int order_to_pindex(int migratetype, int order) in order_to_pindex() argument
    514: if (order > PAGE_ALLOC_COSTLY_ORDER) { in order_to_pindex()
    515: VM_BUG_ON(order != HPAGE_PMD_ORDER); in order_to_pindex()
    522: VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in order_to_pindex()
    [all …]

/linux/mm/compaction.c
    46: * order == -1 is expected when compacting proactively via
    51: static inline bool is_via_compact_memory(int order) in is_via_compact_memory() argument
    53: return order == -1; in is_via_compact_memory()
    59: static inline bool is_via_compact_memory(int order) { return false; } in is_via_compact_memory() argument
    67: #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
    68: #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
    71: * Page order with-respect-to which proactive compaction
    83: static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags) in mark_allocated_noprof() argument
    85: post_alloc_hook(page, order, __GFP_MOVABLE); in mark_allocated_noprof()
    92: int order; in release_free_list() local
    [all …]

/linux/drivers/gpu/drm/ttm/ttm_pool.c
    54: * @vaddr: original vaddr return for the mapping and order in the lower bits
    79: /* Allocate pages of size 1 << order with the given gfp_flags */
    81: unsigned int order) in ttm_pool_alloc_page() argument
    88: /* Don't set the __GFP_COMP flag for higher order allocations. in ttm_pool_alloc_page()
    92: if (order) in ttm_pool_alloc_page()
    97: p = alloc_pages_node(pool->nid, gfp_flags, order); in ttm_pool_alloc_page()
    99: p->private = order; in ttm_pool_alloc_page()
    107: if (order) in ttm_pool_alloc_page()
    110: vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE, in ttm_pool_alloc_page()
    123: dma->vaddr = (unsigned long)vaddr | order; in ttm_pool_alloc_page()
    [all …]

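Line 123 above stores the allocation order in the low bits of the page-aligned DMA virtual address, as the @vaddr kerneldoc notes: an aligned pointer has zero low bits, so a small integer fits there. A small user-space sketch of that packing trick, assuming only that the address is at least 4 KiB aligned (illustrative values, not TTM code):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* Pretend page-aligned mapping address; the low log2(PAGE_SIZE) bits are zero. */
	void *vaddr = (void *)(uintptr_t)0x7f0000000000UL;
	unsigned int order = 3;

	uintptr_t packed = (uintptr_t)vaddr | order;	/* order rides in the spare low bits */

	/* Recover both halves later. */
	void *addr = (void *)(packed & ~(uintptr_t)(PAGE_SIZE - 1));
	unsigned int ord = packed & (PAGE_SIZE - 1);

	printf("addr=%p order=%u\n", addr, ord);
	return 0;
}
```
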
/linux/tools/testing/radix-tree/multiorder.c
    3: * multiorder.c: Multi-order radix tree entry testing
    16: unsigned order) in item_insert_order() argument
    18: XA_STATE_ORDER(xas, xa, index, order); in item_insert_order()
    19: struct item *item = item_create(index, order); in item_insert_order()
    42: int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7}; in multiorder_iteration() local
    47: err = item_insert_order(xa, index[i], order[i]); in multiorder_iteration()
    53: if (j <= (index[i] | ((1 << order[i]) - 1))) in multiorder_iteration()
    58: int height = order[i] / XA_CHUNK_SHIFT; in multiorder_iteration()
    60: unsigned long mask = (1UL << order[i]) - 1; in multiorder_iteration()
    66: assert(item->order == order[i]); in multiorder_iteration()
    [all …]

/linux/arch/arm/lib/lib1funcs.S
    106: .macro ARM_DIV2_ORDER divisor, order argument
    110: clz \order, \divisor
    111: rsb \order, \order, #31
    117: movhs \order, #16
    118: movlo \order, #0
    122: addhs \order, \order, #8
    126: addhs \order, \order, #4
    129: addhi \order, \order, #3
    130: addls \order, \order, \divisor, lsr #1
    137: .macro ARM_MOD_BODY dividend, divisor, order, spare
    [all …]

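ARM_DIV2_ORDER above turns a power-of-two divisor into its shift amount: on CPUs with `clz` it is simply `31 - clz(divisor)`, and the division becomes a right shift; the movhs/addhs sequence is the fallback for cores without `clz`. The same computation in C, as a hedged illustration (`__builtin_clz` is the GCC/Clang builtin and is undefined for 0):

```c
#include <stdio.h>

/* For a power-of-two divisor, the shift amount ("order") is 31 - clz(divisor). */
static unsigned int div2_order(unsigned int divisor)
{
	return 31 - __builtin_clz(divisor);
}

int main(void)
{
	unsigned int divisor = 64, dividend = 1000;

	/* Dividing by 64 becomes a right shift by its order (6): 1000 >> 6 == 15. */
	printf("order=%u quotient=%u\n", div2_order(divisor), dividend >> div2_order(divisor));
	return 0;
}
```
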
/linux/mm/kmsan/init.c
    108: * by their order: when kmsan_memblock_free_pages() is called for the first
    109: * time with a certain order, it is reserved as a shadow block, for the second
    112: * after which held_back[order] can be used again.
    117: bool kmsan_memblock_free_pages(struct page *page, unsigned int order) in kmsan_memblock_free_pages() argument
    121: if (!held_back[order].shadow) { in kmsan_memblock_free_pages()
    122: held_back[order].shadow = page; in kmsan_memblock_free_pages()
    125: if (!held_back[order].origin) { in kmsan_memblock_free_pages()
    126: held_back[order].origin = page; in kmsan_memblock_free_pages()
    129: shadow = held_back[order].shadow; in kmsan_memblock_free_pages()
    130: origin = held_back[order].origin; in kmsan_memblock_free_pages()
    [all …]

/linux/kernel/bpf/cgroup_iter.c
    13: * 1. Walk the descendants of a cgroup in pre-order.
    14: * 2. Walk the descendants of a cgroup in post-order.
    18: * For walking descendants, cgroup_iter can walk in either pre-order or
    19: * post-order. For walking ancestors, the iter walks up from a cgroup to
    40: * EOPNOTSUPP. In order to work around, the user may have to update their
    54: int order; member
    77: if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_start()
    79: else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_start()
    110: if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_next()
    112: else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_next()
    [all …]

/linux/drivers/media/pci/cx18/cx18-mailbox.c
    231: static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order) in epu_dma_done() argument
    240: mb = &order->mb; in epu_dma_done()
    247: (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ? in epu_dma_done()
    253: mdl_ack = order->mdl_ack; in epu_dma_done()
    277: if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) && in epu_dma_done()
    324: static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order) in epu_debug() argument
    327: char *str = order->str; in epu_debug()
    329: CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str); in epu_debug()
    335: static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order) in epu_cmd() argument
    337: switch (order->rpu) { in epu_cmd()
    [all …]

/linux/drivers/gpu/drm/lib/drm_random.c
    16: void drm_random_reorder(unsigned int *order, unsigned int count, in drm_random_reorder() argument
    24: swap(order[i], order[j]); in drm_random_reorder()
    31: unsigned int *order, i; in drm_random_order() local
    33: order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); in drm_random_order()
    34: if (!order) in drm_random_order()
    35: return order; in drm_random_order()
    38: order[i] = i; in drm_random_order()
    40: drm_random_reorder(order, count, state); in drm_random_order()
    41: return order; in drm_random_order()

/linux/arch/riscv/mm/hugetlbpage.c
    35: unsigned long order; in huge_pte_alloc() local
    68: for_each_napot_order(order) { in huge_pte_alloc()
    69: if (napot_cont_size(order) == sz) { in huge_pte_alloc()
    70: pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order)); in huge_pte_alloc()
    88: unsigned long order; in huge_pte_offset() local
    119: for_each_napot_order(order) { in huge_pte_offset()
    120: if (napot_cont_size(order) == sz) { in huge_pte_offset()
    121: pte = pte_offset_huge(pmd, addr & napot_cont_mask(order)); in huge_pte_offset()
    186: unsigned long order; in arch_make_huge_pte() local
    188: for_each_napot_order(order) { in arch_make_huge_pte()
    [all …]

/linux/drivers/gpu/drm/drm_buddy.c
    16: unsigned int order, in drm_block_alloc() argument
    21: BUG_ON(order > DRM_BUDDY_MAX_ORDER); in drm_block_alloc()
    28: block->header |= order; in drm_block_alloc()
    125: unsigned int order; in __drm_buddy_free() local
    158: order = drm_buddy_block_order(block); in __drm_buddy_free()
    161: return order; in __drm_buddy_free()
    169: unsigned int order; in __force_merge() local
    213: order = __drm_buddy_free(mm, block, true); in __force_merge()
    214: if (order >= min_order) in __force_merge()
    284: unsigned int order; in drm_buddy_init() local
    [all …]

/linux/include/linux/gfp.h
    66: * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
    199: static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument
    202: static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument
    205: struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
    209: struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
    225: /* Bulk allocate order-0 pages */
    264: __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node_noprof() argument
    269: return __alloc_pages_noprof(gfp_mask, order, nid, NULL); in __alloc_pages_node_noprof()
    275: struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid) in __folio_alloc_node_noprof() argument
    280: return __folio_alloc_noprof(gfp, order, nid, NULL); in __folio_alloc_node_noprof()
    [all …]

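gfp.h is where the order-based page allocator entry points are declared. A hedged, kernel-style sketch of the usual calling pattern (not code from gfp.h itself; names and error handling trimmed to the essentials): allocate 2^order physically contiguous pages, touch them, then free them with the same order.

```c
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>

static int demo_order2_alloc(void)
{
	const unsigned int order = 2;		/* 1 << 2 = 4 physically contiguous pages */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!page)
		return -ENOMEM;

	/* GFP_KERNEL pages live in the direct map, so page_address() is valid here. */
	memset(page_address(page), 0xaa, PAGE_SIZE << order);

	__free_pages(page, order);		/* free with the same order it was allocated with */
	return 0;
}
```
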
/linux/include/linux/compaction.h
    61: * Number of free order-0 pages that should be available above given watermark
    65: static inline unsigned long compact_gap(unsigned int order) in compact_gap() argument
    69: * free scanner may have up to 1 << order pages on its list and then in compact_gap()
    70: * try to split an (order - 1) free page. At that point, a gap of in compact_gap()
    71: * 1 << order might not be enough, so it's safer to require twice that in compact_gap()
    80: return 2UL << order; in compact_gap()
    85: extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
    86: extern int fragmentation_index(struct zone *zone, unsigned int order);
    88: unsigned int order, unsigned int alloc_flags,
    92: extern bool compaction_suitable(struct zone *zone, int order,
    [all …]

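compact_gap() above returns `2UL << order`: room for the `1 << order` pages the free scanner may already hold plus a same-sized block being split. A quick standalone check of the numbers, assuming an order-9 request (a 2 MiB THP with 4 KiB pages on x86-64; the order and page size are assumptions here, not part of compaction.h):

```c
#include <stdio.h>

int main(void)
{
	unsigned int order = 9;
	unsigned long gap = 2UL << order;	/* same computation as compact_gap() */

	/* order 9 -> 1024 order-0 pages, i.e. 4 MiB of headroom with 4 KiB pages. */
	printf("order=%u -> compact_gap of %lu order-0 pages (%lu KiB)\n",
	       order, gap, gap * 4);
	return 0;
}
```
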
/linux/arch/riscv/kvm/tlb.c
    22: unsigned long order) in kvm_riscv_local_hfence_gvma_vmid_gpa() argument
    26: if (PTRS_PER_PTE < (gpsz >> order)) { in kvm_riscv_local_hfence_gvma_vmid_gpa()
    33: for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa()
    38: for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa()
    50: unsigned long order) in kvm_riscv_local_hfence_gvma_gpa() argument
    54: if (PTRS_PER_PTE < (gpsz >> order)) { in kvm_riscv_local_hfence_gvma_gpa()
    61: for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa()
    66: for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa()
    81: unsigned long order) in kvm_riscv_local_hfence_vvma_asid_gva() argument
    85: if (PTRS_PER_PTE < (gvsz >> order)) { in kvm_riscv_local_hfence_vvma_asid_gva()
    [all …]

/linux/drivers/gpu/drm/tests/drm_buddy_test.c
    19: static inline u64 get_size(int order, u64 chunk_size) in get_size() argument
    21: return (1 << order) * chunk_size; in get_size()
    28: unsigned int i, count, *order; in drm_test_buddy_alloc_range_bias() local
    45: order = drm_random_order(count, &prng); in drm_test_buddy_alloc_range_bias()
    46: KUNIT_EXPECT_TRUE(test, order); in drm_test_buddy_alloc_range_bias()
    50: * in some random order allocate within each bias, using various in drm_test_buddy_alloc_range_bias()
    59: bias_start = order[i] * bias_size; in drm_test_buddy_alloc_range_bias()
    164: kfree(order); in drm_test_buddy_alloc_range_bias()
    270: unsigned int order; in drm_test_buddy_alloc_clear() local
    344: * repeat the whole thing, increment the order until we hit the max_order. in drm_test_buddy_alloc_clear()
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
    85: u32 *order) in hws_buddy_find_free_seg() argument
    98: "ICM Buddy: failed finding free mem for order %d\n", in hws_buddy_find_free_seg()
    109: *order = order_iter; in hws_buddy_find_free_seg()
    113: int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order) in mlx5hws_buddy_alloc_mem() argument
    117: err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter); in mlx5hws_buddy_alloc_mem()
    124: while (order_iter > order) { in mlx5hws_buddy_alloc_mem()
    131: seg <<= order; in mlx5hws_buddy_alloc_mem()
    136: void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order) in mlx5hws_buddy_free_mem() argument
    138: seg >>= order; in mlx5hws_buddy_free_mem()
    140: while (test_bit(seg ^ 1, buddy->bitmap[order])) { in mlx5hws_buddy_free_mem()
    [all …]

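mlx5hws_buddy_free_mem() above shows the merge side of the buddy scheme: as long as the buddy `seg ^ 1` is also marked free in the bitmap for the current order, the pair is coalesced and the search moves up one order. A hedged plain-C sketch of that loop (illustrative bitset and MAX_ORDER, not the driver's data structures):

```c
#include <stdbool.h>

#define MAX_ORDER 8

/* seg_free[o][s] is true when segment s of order o is free (illustrative bitset). */
static bool seg_free[MAX_ORDER + 1][1u << MAX_ORDER];

static void buddy_free(unsigned int seg, unsigned int order)
{
	/* Coalesce with the buddy (seg ^ 1) for as long as it is also free. */
	while (order < MAX_ORDER && seg_free[order][seg ^ 1]) {
		seg_free[order][seg ^ 1] = false;	/* absorb the buddy */
		seg >>= 1;				/* index of the merged block one order up */
		order++;
	}
	seg_free[order][seg] = true;			/* mark the (possibly merged) block free */
}
```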