/linux/scripts/atomic/
gen-atomic-fallback.sh
    16  local order="$1"; shift
    35  local order="$1"; shift
    37  local tmpl_order=${order#_}
    39  gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
    49  local order="$1"; shift
    51  local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
    52  gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
    62  local order="$1"; shift
    66  local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
    69  local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
    [all …]
|
/linux/mm/
page_alloc.c
    218  static void __free_pages_ok(struct page *page, unsigned int order,
    293  static bool page_contains_unaccepted(struct page *page, unsigned int order);
    294  static bool cond_accept_memory(struct zone *zone, unsigned int order,
    320  _deferred_grow_zone(struct zone *zone, unsigned int order)    in _deferred_grow_zone() argument
    322  return deferred_grow_zone(zone, order);    in _deferred_grow_zone()
    330  static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)    in _deferred_grow_zone() argument
    658  static inline unsigned int order_to_pindex(int migratetype, int order)    in order_to_pindex() argument
    663  if (order > PAGE_ALLOC_COSTLY_ORDER) {    in order_to_pindex()
    664  VM_BUG_ON(order != HPAGE_PMD_ORDER);    in order_to_pindex()
    671  VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);    in order_to_pindex()
    [all …]
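
A note for readers skimming these hits: throughout the page allocator, an order-n request means 2^n contiguous pages, so expressions like PAGE_SIZE << order give the block size in bytes. A minimal userspace sketch of that arithmetic (the PAGE_SHIFT value is an assumption, not taken from these results):

#include <stdio.h>

#define PAGE_SHIFT 12              /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* An order-n block is 2^n contiguous pages. */
static unsigned long order_to_bytes(unsigned int order)
{
	return PAGE_SIZE << order;
}

int main(void)
{
	for (unsigned int order = 0; order <= 10; order++)
		printf("order %2u -> %8lu KiB\n", order, order_to_bytes(order) >> 10);
	return 0;
}

The order_to_pindex() hits at lines 658-671 then combine the same order with a migratetype to pick a per-cpu free list, special-casing orders above PAGE_ALLOC_COSTLY_ORDER for THP-sized blocks.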
|
compaction.c
    51  static inline bool is_via_compact_memory(int order)    in is_via_compact_memory() argument
    53  return order == -1;    in is_via_compact_memory()
    59  static inline bool is_via_compact_memory(int order) { return false; }    in is_via_compact_memory() argument
    67  #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))    argument
    68  #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))    argument
    83  static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags)    in mark_allocated_noprof() argument
    85  post_alloc_hook(page, order, __GFP_MOVABLE);    in mark_allocated_noprof()
    93  int order;    in release_free_list() local
    96  for (order = 0; order < NR_PAGE_ORDERS; order++) {    in release_free_list()
    99  list_for_each_entry_safe(page, next, &freepages[order], lru) {    in release_free_list()
    [all …]
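
block_start_pfn()/block_end_pfn() above round a page frame number down and up to the 2^order-aligned block that contains it, which is how compaction walks pageblock-sized chunks. A self-contained sketch of the same rounding (round_down()/ALIGN() re-created here for illustration; both require a power-of-two alignment):

#include <stdio.h>

/* Power-of-two rounding helpers, mirroring the kernel's round_down()/ALIGN(). */
#define round_down(x, y)  ((x) & ~((y) - 1))
#define ALIGN(x, y)       (((x) + (y) - 1) & ~((y) - 1))

#define block_start_pfn(pfn, order)  round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)    ALIGN((pfn) + 1, 1UL << (order))

int main(void)
{
	unsigned long pfn = 1234;
	unsigned int order = 9;   /* a 512-page block, e.g. one pageblock */

	printf("pfn %lu sits in block [%lu, %lu)\n", pfn,
	       block_start_pfn(pfn, order), block_end_pfn(pfn, order));
	return 0;
}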
|
page_reporting.c
    117  unsigned int order = get_order(sg->length);    in page_reporting_drain() local
    119  __putback_isolated_page(page, order, mt);    in page_reporting_drain()
    132  if (PageBuddy(page) && buddy_order(page) == order)    in page_reporting_drain()
    147  unsigned int order, unsigned int mt,    in page_reporting_cycle() argument
    150  struct free_area *area = &zone->free_area[order];    in page_reporting_cycle()
    152  unsigned int page_len = PAGE_SIZE << order;    in page_reporting_cycle()
    201  if (!__isolate_free_page(page, order)) {    in page_reporting_cycle()
    263  unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY;    in page_reporting_process_zone() local
    279  for (order = page_reporting_order; order < NR_PAGE_ORDERS; order++) {    in page_reporting_process_zone()
    285  err = page_reporting_cycle(prdev, zone, order, mt,    in page_reporting_process_zone()
    [all …]
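
get_order(sg->length) at line 117 is the inverse mapping of PAGE_SIZE << order: the smallest order whose block covers a given byte length. A standalone approximation (the real get_order() in asm-generic is fls()-based; this loop is an illustrative equivalent):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed */

/* Smallest order such that (PAGE_SIZE << order) >= size; sketch only. */
static unsigned int get_order_sketch(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	printf("%u %u %u\n",
	       get_order_sketch(4096),      /* 0: exactly one page */
	       get_order_sketch(4097),      /* 1: spills into a second page */
	       get_order_sketch(2 << 20));  /* 9: 2 MiB of 4 KiB pages */
	return 0;
}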
|
hugetlb_cma.c
    29  struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,    in hugetlb_cma_alloc_folio() argument
    36  folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);    in hugetlb_cma_alloc_folio()
    43  folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);    in hugetlb_cma_alloc_folio()
    137  void __init hugetlb_cma_reserve(int order)    in hugetlb_cma_reserve() argument
    149  VM_WARN_ON(order <= MAX_PAGE_ORDER);    in hugetlb_cma_reserve()
    168  if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {    in hugetlb_cma_reserve()
    170  nid, (PAGE_SIZE << order) / SZ_1M);    in hugetlb_cma_reserve()
    182  if (hugetlb_cma_size < (PAGE_SIZE << order)) {    in hugetlb_cma_reserve()
    184  (PAGE_SIZE << order) / SZ_1M);    in hugetlb_cma_reserve()
    214  size = round_up(size, PAGE_SIZE << order);    in hugetlb_cma_reserve()
    [all …]
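
The checks at lines 168-214 size the CMA area in whole huge pages: a request smaller than one PAGE_SIZE << order block is rejected, anything else is rounded up. A hedged sketch of that sizing logic (PAGE_SHIFT, the order value, and the request size are illustrative assumptions):

#include <stdio.h>

#define PAGE_SHIFT 12                       /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define round_up(x, y)  ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int order = 18;                  /* hypothetical: 1 GiB gigantic page */
	unsigned long hpage = PAGE_SIZE << order;
	unsigned long requested = 1536UL << 20;   /* e.g. hugetlb_cma=1536M */

	/* Too small a request cannot hold even one huge page. */
	if (requested < hpage) {
		printf("reservation smaller than one %lu MiB page, skipped\n",
		       hpage >> 20);
		return 0;
	}

	/* Otherwise round up to a whole number of huge pages. */
	printf("reserving %lu MiB\n", round_up(requested, hpage) >> 20);
	return 0;
}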
|
/linux/include/trace/events/
compaction.h
    168  int order,
    172  TP_ARGS(order, gfp_mask, prio),
    175  __field(int, order)
    181  __entry->order = order;
    187  __entry->order,
    195  int order,
    198  TP_ARGS(zone, order, ret),
    203  __field(int, order)
    210  __entry->order = order;
    217  __entry->order,
    [all …]
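
These fragments follow the standard TRACE_EVENT() anatomy: TP_PROTO() declares the tracepoint's C prototype, TP_ARGS() names its arguments, TP_STRUCT__entry() lays out the ring-buffer record, TP_fast_assign() copies the arguments into it, and TP_printk() formats the record. A skeleton showing how an order field travels through the five stages (the event name and field set are invented for illustration, not the actual compaction events):

/* Sketch of a trace event definition; not a real kernel event. */
TRACE_EVENT(sample_compact_begin,

	TP_PROTO(int order, gfp_t gfp_mask),

	TP_ARGS(order, gfp_mask),

	TP_STRUCT__entry(
		__field(int,	order)
		__field(gfp_t,	gfp_mask)
	),

	TP_fast_assign(
		__entry->order = order;
		__entry->gfp_mask = gfp_mask;
	),

	TP_printk("order=%d gfp_mask=0x%x",
		  __entry->order, (unsigned int)__entry->gfp_mask)
);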
|
kmem.h
    141  TP_PROTO(struct page *page, unsigned int order),
    143  TP_ARGS(page, order),
    147  __field( unsigned int, order )
    152  __entry->order = order;
    158  __entry->order)
    182  TP_PROTO(struct page *page, unsigned int order,
    185  TP_ARGS(page, order, gfp_flags, migratetype),
    189  __field( unsigned int, order )
    196  __entry->order = order;
    204  __entry->order,
    [all …]
|
migrate.h
    115  TP_PROTO(unsigned long addr, unsigned long pte, int order),
    117  TP_ARGS(addr, pte, order),
    122  __field(int, order)
    128  __entry->order = order;
    131  TP_printk("addr=%lx, pte=%lx order=%d", __entry->addr, __entry->pte, __entry->order)
    135  TP_PROTO(unsigned long addr, unsigned long pte, int order),
    136  TP_ARGS(addr, pte, order)
    140  TP_PROTO(unsigned long addr, unsigned long pte, int order),
    141  TP_ARGS(addr, pte, order)
|
oom.h
    37  int order,
    44  TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
    49  __field( int, order)
    60  __entry->order = order;
    70  __entry->order,
    185  TP_PROTO(int order,
    192  TP_ARGS(order, priority, result, retries, max_retries, ret),
    195  __field( int, order)
    204  __entry->order = order;
    213  __entry->order,
|
/linux/lib/
test_xarray.c
    72  unsigned order, void *entry, gfp_t gfp)    in xa_store_order() argument
    74  XA_STATE_ORDER(xas, xa, index, order);    in xa_store_order()
    177  unsigned int order;    in check_xa_mark_1() local
    207  for (order = 2; order < max_order; order++) {    in check_xa_mark_1()
    208  unsigned long base = round_down(index, 1UL << order);    in check_xa_mark_1()
    209  unsigned long next = base + (1UL << order);    in check_xa_mark_1()
    217  xa_store_order(xa, index, order, xa_mk_index(index),    in check_xa_mark_1()
    328  unsigned int order;    in check_xa_shrink() local
    353  for (order = 0; order < max_order; order++) {    in check_xa_shrink()
    354  unsigned long max = (1UL << order) - 1;    in check_xa_shrink()
    [all …]
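
xa_store_order() is the test suite's helper for storing a single entry that covers an entire 2^order-aligned index range. A hedged sketch of its usual shape (kernel-only code; XA_STATE_ORDER(), xas_store() and xas_nomem() are the real XArray API, but the body here is paraphrased from memory rather than copied):

#include <linux/xarray.h>

/* Store @entry over the 2^order indices containing @index; sketch. */
static void *store_order_sketch(struct xarray *xa, unsigned long index,
				unsigned int order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));	/* retry after allocating nodes */

	return curr;
}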
|
/linux/include/linux/
gfp.h
    219  static inline void arch_free_page(struct page *page, int order) { }    in arch_free_page() argument
    222  static inline void arch_alloc_page(struct page *page, int order) { }    in arch_alloc_page() argument
    225  struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
    229  struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
    280  __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)    in __alloc_pages_node_noprof() argument
    285  return __alloc_pages_noprof(gfp_mask, order, nid, NULL);    in __alloc_pages_node_noprof()
    291  struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)    in __folio_alloc_node_noprof() argument
    296  return __folio_alloc_noprof(gfp, order, nid, NULL);    in __folio_alloc_node_noprof()
    307  unsigned int order)    in alloc_pages_node_noprof() argument
    312  return __alloc_pages_node_noprof(nid, gfp_mask, order);    in alloc_pages_node_noprof()
    [all …]
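
The _noprof variants above are the memory-allocation-profiling back ends behind the usual entry points; ordinary callers go through alloc_pages() and pair it with __free_pages() at the same order. A hedged kernel-side sketch of the common pattern (error handling trimmed to the essentials):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Grab and release 2^order contiguous pages; sketch of the common pattern. */
static int grab_block_sketch(unsigned int order)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL, order);	/* may sleep, may fail */
	if (!page)
		return -ENOMEM;

	/* ... use page_address(page) over PAGE_SIZE << order bytes ... */

	__free_pages(page, order);		/* order must match the allocation */
	return 0;
}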
|
compaction.h
    65  static inline unsigned long compact_gap(unsigned int order)    in compact_gap() argument
    80  return 2UL << order;    in compact_gap()
    90  extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
    91  extern int fragmentation_index(struct zone *zone, unsigned int order);
    93  unsigned int order, unsigned int alloc_flags,
    97  extern bool compaction_suitable(struct zone *zone, int order,
    100  extern void compaction_defer_reset(struct zone *zone, int order,
    103  bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
    108  extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
    115  static inline bool compaction_suitable(struct zone *zone, int order,    in compaction_suitable() argument
    [all …]
|
/linux/Documentation/trace/postprocess/
trace-vmscan-postprocess.pl
    315  my $order = $1;
    316  $perprocesspid{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN_PERORDER}[$order]++;
    317  $perprocesspid{$process_pid}->{STATE_DIRECT_ORDER} = $order;
    326  my $order = $perprocesspid{$process_pid}->{STATE_DIRECT_ORDER};
    328  $perprocesspid{$process_pid}->{HIGH_DIRECT_RECLAIM_LATENCY}[$index] = "$order-$latency";
    339  my $order = $2;
    340  $perprocesspid{$process_pid}->{STATE_KSWAPD_ORDER} = $order;
    345  $perprocesspid{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE_PERORDER}[$order]++;
    348  $perprocesspid{$process_pid}->{HIGH_KSWAPD_REWAKEUP_PERORDER}[$order]++;
    358  my $order = $perprocesspid{$process_pid}->{STATE_KSWAPD_ORDER};
    [all …]
|
/linux/mm/kmsan/
init.c
    116  bool kmsan_memblock_free_pages(struct page *page, unsigned int order)    in kmsan_memblock_free_pages() argument
    120  if (!held_back[order].shadow) {    in kmsan_memblock_free_pages()
    121  held_back[order].shadow = page;    in kmsan_memblock_free_pages()
    124  if (!held_back[order].origin) {    in kmsan_memblock_free_pages()
    125  held_back[order].origin = page;    in kmsan_memblock_free_pages()
    128  shadow = held_back[order].shadow;    in kmsan_memblock_free_pages()
    129  origin = held_back[order].origin;    in kmsan_memblock_free_pages()
    130  kmsan_setup_meta(page, shadow, origin, order);    in kmsan_memblock_free_pages()
    132  held_back[order].shadow = NULL;    in kmsan_memblock_free_pages()
    133  held_back[order].origin = NULL;    in kmsan_memblock_free_pages()
    [all …]
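
The held_back[] logic is a small per-order state machine: the first freed block of a given order is retained as future shadow memory, the second as origin memory, and only on the third does a block actually go back, carrying the two held blocks as its KMSAN metadata. A userspace rendering of that bookkeeping (struct page flattened to void * for the sketch):

#include <stdio.h>

#define MAX_ORDER_SKETCH 11   /* assumed bound, for the sketch only */

struct held { void *shadow, *origin; };
static struct held held_back[MAX_ORDER_SKETCH];

/* Returns nonzero when @block may actually be freed to the system. */
static int memblock_free_sketch(void *block, unsigned int order)
{
	if (!held_back[order].shadow) {
		held_back[order].shadow = block;   /* 1st block: future shadow */
		return 0;
	}
	if (!held_back[order].origin) {
		held_back[order].origin = block;   /* 2nd block: future origin */
		return 0;
	}
	/* 3rd block: release it, consuming the two held blocks as metadata. */
	printf("block %p released with shadow %p origin %p (order %u)\n",
	       block, held_back[order].shadow, held_back[order].origin, order);
	held_back[order].shadow = NULL;
	held_back[order].origin = NULL;
	return 1;
}

int main(void)
{
	char a, b, c;

	memblock_free_sketch(&a, 3);
	memblock_free_sketch(&b, 3);
	memblock_free_sketch(&c, 3);   /* only this call reports a release */
	return 0;
}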
|
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/therm/
gk104.c
    34  const struct gk104_clkgate_engine_info *order = therm->clkgate_order;    in gk104_clkgate_enable() local
    38  for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {    in gk104_clkgate_enable()
    39  if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))    in gk104_clkgate_enable()
    42  nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);    in gk104_clkgate_enable()
    50  for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {    in gk104_clkgate_enable()
    51  if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))    in gk104_clkgate_enable()
    54  nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);    in gk104_clkgate_enable()
    63  const struct gk104_clkgate_engine_info *order = therm->clkgate_order;    in gk104_clkgate_fini() local
    67  for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {    in gk104_clkgate_fini()
    68  if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))    in gk104_clkgate_fini()
    [all …]
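
In this driver, order is not an allocation order but a table describing the sequence in which engines are clock-gated, terminated by an NVKM_SUBDEV_NR sentinel rather than carrying a length. The walking pattern is worth isolating (all names below except the sentinel idea are invented for the sketch):

#include <stdio.h>

enum subdev { SUBDEV_GR, SUBDEV_CE, SUBDEV_NR };   /* _NR acts as sentinel */

struct gate_info {
	enum subdev type;
	unsigned int offset;
};

/* Table ends with a sentinel entry instead of storing a count. */
static const struct gate_info order[] = {
	{ SUBDEV_GR, 0x00 },
	{ SUBDEV_CE, 0x04 },
	{ SUBDEV_NR, 0 },
};

int main(void)
{
	for (int i = 0; order[i].type != SUBDEV_NR; i++)
		printf("gate engine %d via reg 0x%x\n", order[i].type,
		       0x20200 + order[i].offset);
	return 0;
}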
|
/linux/arch/riscv/kvm/
tlb.c
    25  unsigned long order)    in kvm_riscv_local_hfence_gvma_vmid_gpa() argument
    29  if (PTRS_PER_PTE < (gpsz >> order)) {    in kvm_riscv_local_hfence_gvma_vmid_gpa()
    36  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))    in kvm_riscv_local_hfence_gvma_vmid_gpa()
    41  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))    in kvm_riscv_local_hfence_gvma_vmid_gpa()
    53  unsigned long order)    in kvm_riscv_local_hfence_gvma_gpa() argument
    57  if (PTRS_PER_PTE < (gpsz >> order)) {    in kvm_riscv_local_hfence_gvma_gpa()
    64  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))    in kvm_riscv_local_hfence_gvma_gpa()
    69  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))    in kvm_riscv_local_hfence_gvma_gpa()
    84  unsigned long order)    in kvm_riscv_local_hfence_vvma_asid_gva() argument
    88  if (PTRS_PER_PTE < (gvsz >> order)) {    in kvm_riscv_local_hfence_vvma_asid_gva()
    [all …]
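
All of these hfence helpers share one shape: if the range holds more than PTRS_PER_PTE strides of size 2^order, a full flush is cheaper than per-entry fences; otherwise the loop fences one stride at a time. A standalone sketch of that decision with the fence instruction replaced by printf (the PTRS_PER_PTE value is an assumption):

#include <stdio.h>

#define PTRS_PER_PTE 512UL   /* assumed, as with 4 KiB pages on Sv39/Sv48 */
#define BIT(n) (1UL << (n))

static void flush_range_sketch(unsigned long base, unsigned long size,
			       unsigned long order)
{
	unsigned long pos;

	/* Too many strides: fall back to a full flush. */
	if (PTRS_PER_PTE < (size >> order)) {
		printf("full flush\n");
		return;
	}

	for (pos = base; pos < base + size; pos += BIT(order))
		printf("fence address 0x%lx\n", pos);
}

int main(void)
{
	flush_range_sketch(0x80000000UL, 4 * BIT(12), 12);     /* 4 fences */
	flush_range_sketch(0x80000000UL, 1024 * BIT(12), 12);  /* full flush */
	return 0;
}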
|
/linux/arch/riscv/mm/
hugetlbpage.c
    35  unsigned long order;    in huge_pte_alloc() local
    68  for_each_napot_order(order) {    in huge_pte_alloc()
    69  if (napot_cont_size(order) == sz) {    in huge_pte_alloc()
    70  pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order));    in huge_pte_alloc()
    88  unsigned long order;    in huge_pte_offset() local
    119  for_each_napot_order(order) {    in huge_pte_offset()
    120  if (napot_cont_size(order) == sz) {    in huge_pte_offset()
    121  pte = pte_offset_huge(pmd, addr & napot_cont_mask(order));    in huge_pte_offset()
    189  unsigned long order;    in arch_make_huge_pte() local
    191  for_each_napot_order(order) {    in arch_make_huge_pte()
    [all …]
|
/linux/kernel/bpf/
cgroup_iter.c
    54  int order;    member
    77  if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE)    in cgroup_iter_seq_start()
    79  else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST)    in cgroup_iter_seq_start()
    110  if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE)    in cgroup_iter_seq_next()
    112  else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST)    in cgroup_iter_seq_next()
    114  else if (p->order == BPF_CGROUP_ITER_ANCESTORS_UP)    in cgroup_iter_seq_next()
    176  p->order = aux->cgroup.order;    in BTF_ID_LIST_GLOBAL_SINGLE()
    200  int order = linfo->cgroup.order;    in bpf_iter_attach_cgroup() local
    203  if (order != BPF_CGROUP_ITER_DESCENDANTS_PRE &&    in bpf_iter_attach_cgroup()
    204  order != BPF_CGROUP_ITER_DESCENDANTS_POST &&    in bpf_iter_attach_cgroup()
    [all …]
|
/linux/drivers/gpu/drm/i915/selftests/
i915_syncmap.c
    274  unsigned int pass, order;    in igt_syncmap_join_above() local
    296  for (order = 0; order < 64; order += SHIFT) {    in igt_syncmap_join_above()
    297  u64 context = BIT_ULL(order);    in igt_syncmap_join_above()
    335  unsigned int step, order, idx;    in igt_syncmap_join_below() local
    345  for (order = 64 - SHIFT; order > 0; order -= SHIFT) {    in igt_syncmap_join_below()
    346  u64 context = step * BIT_ULL(order);    in igt_syncmap_join_below()
    354  context, order, step, sync->height, sync->prefix);    in igt_syncmap_join_below()
    362  for (order = SHIFT; order < 64; order += SHIFT) {    in igt_syncmap_join_below()
    363  u64 context = step * BIT_ULL(order);    in igt_syncmap_join_below()
    367  context, order, step);    in igt_syncmap_join_below()
    [all …]
|
i915_random.c
    70  void i915_random_reorder(unsigned int *order, unsigned int count,    in i915_random_reorder() argument
    73  i915_prandom_shuffle(order, sizeof(*order), count, state);    in i915_random_reorder()
    78  unsigned int *order, i;    in i915_random_order() local
    80  order = kmalloc_array(count, sizeof(*order),    in i915_random_order()
    82  if (!order)    in i915_random_order()
    83  return order;    in i915_random_order()
    86  order[i] = i;    in i915_random_order()
    88  i915_random_reorder(order, count, state);    in i915_random_order()
    89  return order;    in i915_random_order()
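
i915_random_order() builds the identity permutation 0..count-1 and then shuffles it, producing a random visiting order for selftests. A self-contained equivalent, with a plain Fisher-Yates shuffle standing in for i915_prandom_shuffle():

#include <stdio.h>
#include <stdlib.h>

/* Fisher-Yates shuffle; stands in for i915_prandom_shuffle() here. */
static void shuffle(unsigned int *order, unsigned int count)
{
	for (unsigned int i = count - 1; i > 0; i--) {
		unsigned int j = rand() % (i + 1);
		unsigned int tmp = order[i];

		order[i] = order[j];
		order[j] = tmp;
	}
}

static unsigned int *random_order(unsigned int count)
{
	unsigned int *order = malloc(count * sizeof(*order));

	if (!order)
		return NULL;
	for (unsigned int i = 0; i < count; i++)
		order[i] = i;           /* identity permutation */
	shuffle(order, count);
	return order;
}

int main(void)
{
	unsigned int *order = random_order(8);

	for (unsigned int i = 0; order && i < 8; i++)
		printf("%u ", order[i]);
	printf("\n");
	free(order);
	return 0;
}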
|
/linux/arch/arm/lib/
lib1funcs.S
    106  .macro ARM_DIV2_ORDER divisor, order    argument
    110  clz \order, \divisor
    111  rsb \order, \order, #31
    117  movhs \order, #16
    118  movlo \order, #0
    122  addhs \order, \order, #8
    126  addhs \order, \order, #4
    129  addhi \order, \order, #3
    130  addls \order, \order, \divisor, lsr #1
    137  .macro ARM_MOD_BODY dividend, divisor, order, spare
    [all …]
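
ARM_DIV2_ORDER computes the order, i.e. floor(log2), of a power-of-two divisor: with a clz instruction it is simply 31 - clz(divisor) (the clz/rsb pair above), and the movhs/addhs ladder is a branch-based binary search fallback for cores without clz. The same computation in C, assuming a GCC/Clang builtin and a nonzero 32-bit input:

#include <stdio.h>

/* order = floor(log2(x)) = 31 - clz(x); x must be nonzero. */
static unsigned int div2_order(unsigned int x)
{
	return 31 - __builtin_clz(x);
}

int main(void)
{
	printf("%u %u %u\n", div2_order(1), div2_order(64), div2_order(65536));
	/* prints: 0 6 16 */
	return 0;
}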
|
/linux/kernel/liveupdate/
kexec_handover.c
    49  unsigned int order;    member
    150  unsigned int order)    in __kho_unpreserve_order() argument
    154  const unsigned long pfn_high = pfn >> order;    in __kho_unpreserve_order()
    156  physxa = xa_load(&track->orders, order);    in __kho_unpreserve_order()
    170  unsigned int order;    in __kho_unpreserve() local
    173  order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));    in __kho_unpreserve()
    175  __kho_unpreserve_order(track, pfn, order);    in __kho_unpreserve()
    177  pfn += 1 << order;    in __kho_unpreserve()
    182  unsigned int order)    in __kho_preserve_order() argument
    186  const unsigned long pfn_high = pfn >> order;    in __kho_preserve_order()
    [all …]
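
Line 173 is a classic greedy range decomposition: each step takes the largest power-of-two block that is both naturally aligned at pfn (count_trailing_zeros) and still fits in what remains (ilog2 of the length), so an arbitrary pfn range is handled as a short list of (pfn, order) pairs. A standalone sketch:

#include <stdio.h>

static unsigned int ctz(unsigned long x)   { return __builtin_ctzl(x); }
static unsigned int ilog2(unsigned long x) { return 63 - __builtin_clzl(x); }
#define min(a, b) ((a) < (b) ? (a) : (b))

/* Cover [pfn, end_pfn) with naturally aligned power-of-two blocks.
 * Sketch only: pfn must be nonzero here, since ctz(0) is undefined. */
static void decompose(unsigned long pfn, unsigned long end_pfn)
{
	while (pfn < end_pfn) {
		/* Largest order that keeps the block aligned and in range. */
		unsigned int order = min(ctz(pfn), ilog2(end_pfn - pfn));

		printf("block at pfn %lu, order %u (%lu pages)\n",
		       pfn, order, 1UL << order);
		pfn += 1UL << order;
	}
}

int main(void)
{
	decompose(6, 27);   /* yields blocks of 2, 8, 8, 2, 1 pages */
	return 0;
}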
|
/linux/tools/testing/radix-tree/
iteration_check.c
    25  int order;    in my_item_insert() local
    29  for (order = max_order; order >= 0; order--) {    in my_item_insert()
    30  xas_set_order(&xas, index, order);    in my_item_insert()
    31  item->order = order;    in my_item_insert()
    41  if (order < 0)    in my_item_insert()
    165  void iteration_test(unsigned order, unsigned test_duration)    in iteration_test() argument
    170  order > 0 ? "multiorder " : "", test_duration);    in iteration_test()
    172  max_order = order;    in iteration_test()
|
/linux/arch/riscv/include/asm/
kvm_tlb.h
    25  unsigned long order;    member
    36  unsigned long order);
    39  unsigned long order);
    45  unsigned long order);
    50  unsigned long order);
    65  unsigned long order, unsigned long vmid);
    72  unsigned long order, unsigned long asid,
    80  unsigned long order, unsigned long vmid);
|
pgtable-64.h
    97  #define for_each_napot_order(order) \    argument
    98  for (order = NAPOT_CONT_ORDER_BASE; order < NAPOT_ORDER_MAX; order++)
    99  #define for_each_napot_order_rev(order) \    argument
    100  for (order = NAPOT_ORDER_MAX - 1; \
    101  order >= NAPOT_CONT_ORDER_BASE; order--)
    104  #define napot_cont_shift(order) ((order) + PAGE_SHIFT)    argument
    105  #define napot_cont_size(order) BIT(napot_cont_shift(order))    argument
    106  #define napot_cont_mask(order) (~(napot_cont_size(order) - 1UL))    argument
    107  #define napot_pte_num(order) BIT(order)    argument
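
These macros define RISC-V's NAPOT (naturally aligned power-of-two) contiguous mappings: an order-n region is 2^n PTEs covering 2^(n + PAGE_SHIFT) bytes, and napot_cont_mask() aligns an address down to its region, which is exactly how the hugetlbpage.c hits above match a huge-page size to an order. A standalone rendering of the arithmetic (the NAPOT_CONT_ORDER_BASE and NAPOT_ORDER_MAX values are assumptions):

#include <stdio.h>

#define PAGE_SHIFT 12
#define BIT(n) (1UL << (n))
#define NAPOT_CONT_ORDER_BASE 4   /* assumed: 64 KiB as the smallest NAPOT size */
#define NAPOT_ORDER_MAX 9         /* assumed upper bound */

#define napot_cont_shift(order)  ((order) + PAGE_SHIFT)
#define napot_cont_size(order)   BIT(napot_cont_shift(order))
#define napot_cont_mask(order)   (~(napot_cont_size(order) - 1UL))
#define napot_pte_num(order)     BIT(order)

int main(void)
{
	unsigned long sz = 1UL << 16;       /* looking for a 64 KiB mapping */
	unsigned long addr = 0x12345678UL;

	for (unsigned long order = NAPOT_CONT_ORDER_BASE;
	     order < NAPOT_ORDER_MAX; order++) {
		if (napot_cont_size(order) != sz)
			continue;
		/* Same size-to-order match as the huge_pte_alloc() hits. */
		printf("order %lu: %lu PTEs, region base 0x%lx\n", order,
		       napot_pte_num(order), addr & napot_cont_mask(order));
	}
	return 0;
}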
|