
Searched refs:order (Results 1 – 25 of 1497) sorted by relevance


/linux/scripts/atomic/
gen-atomic-fallback.sh
16 local order="$1"; shift
35 local order="$1"; shift
37 local tmpl_order=${order#_}
39 gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
49 local order="$1"; shift
51 local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
52 gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
62 local order="$1"; shift
66 local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
69 local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
[all …]
/linux/include/trace/events/
compaction.h
168 int order,
172 TP_ARGS(order, gfp_mask, prio),
175 __field(int, order)
181 __entry->order = order;
187 __entry->order,
195 int order,
198 TP_ARGS(zone, order, ret),
203 __field(int, order)
210 __entry->order = order;
217 __entry->order,
[all …]
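
The TP_PROTO/TP_ARGS/__field fragments above are pieces of TRACE_EVENT() definitions. A minimal sketch of that pattern, with an invented event name ("sample_alloc") and the usual TRACE_SYSTEM header boilerplate omitted:

/*
 * Sketch only: the macro skeleton mirrors the kernel's TRACE_EVENT()
 * API; the event name, fields, and format string are illustrative.
 */
TRACE_EVENT(sample_alloc,
	TP_PROTO(unsigned int order, unsigned long gfp_mask),
	TP_ARGS(order, gfp_mask),
	TP_STRUCT__entry(
		__field(unsigned int, order)
		__field(unsigned long, gfp_mask)
	),
	TP_fast_assign(
		__entry->order = order;
		__entry->gfp_mask = gfp_mask;
	),
	TP_printk("order=%u gfp=%#lx", __entry->order, __entry->gfp_mask)
);
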
kmem.h
141 TP_PROTO(struct page *page, unsigned int order),
143 TP_ARGS(page, order),
147 __field( unsigned int, order )
152 __entry->order = order;
158 __entry->order)
182 TP_PROTO(struct page *page, unsigned int order,
185 TP_ARGS(page, order, gfp_flags, migratetype),
189 __field( unsigned int, order )
196 __entry->order = order;
204 __entry->order,
[all …]
migrate.h
115 TP_PROTO(unsigned long addr, unsigned long pte, int order),
117 TP_ARGS(addr, pte, order),
122 __field(int, order)
128 __entry->order = order;
131 TP_printk("addr=%lx, pte=%lx order=%d", __entry->addr, __entry->pte, __entry->order)
135 TP_PROTO(unsigned long addr, unsigned long pte, int order),
136 TP_ARGS(addr, pte, order)
140 TP_PROTO(unsigned long addr, unsigned long pte, int order),
141 TP_ARGS(addr, pte, order)
oom.h
37 int order,
44 TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
49 __field( int, order)
60 __entry->order = order;
70 __entry->order,
185 TP_PROTO(int order,
192 TP_ARGS(order, priority, result, retries, max_retries, ret),
195 __field( int, order)
204 __entry->order = order;
213 __entry->order,
/linux/mm/
page_alloc.c
244 static void __free_pages_ok(struct page *page, unsigned int order,
319 static bool page_contains_unaccepted(struct page *page, unsigned int order);
320 static bool cond_accept_memory(struct zone *zone, unsigned int order,
346 _deferred_grow_zone(struct zone *zone, unsigned int order)
348 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
356 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
684 static inline unsigned int order_to_pindex(int migratetype, int order)
689 if (order > PAGE_ALLOC_COSTLY_ORDER) { in order_to_pindex()
690 VM_BUG_ON(order != HPAGE_PMD_ORDER); in order_to_pindex()
697 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in order_to_pindex()
347 _deferred_grow_zone(struct zone * zone,unsigned int order) _deferred_grow_zone() argument
357 _deferred_grow_zone(struct zone * zone,unsigned int order) _deferred_grow_zone() argument
685 order_to_pindex(int migratetype,int order) order_to_pindex() argument
706 int order = pindex / MIGRATE_PCPTYPES; pindex_to_order() local
718 pcp_allowed_order(unsigned int order) pcp_allowed_order() argument
741 prep_compound_page(struct page * page,unsigned int order) prep_compound_page() argument
753 set_buddy_order(struct page * page,unsigned int order) set_buddy_order() argument
772 compaction_capture(struct capture_control * capc,struct page * page,int order,int migratetype) compaction_capture() argument
809 compaction_capture(struct capture_control * capc,struct page * page,int order,int migratetype) compaction_capture() argument
834 __add_to_free_list(struct page * page,struct zone * zone,unsigned int order,int migratetype,bool tail) __add_to_free_list() argument
860 move_to_free_list(struct page * page,struct zone * zone,unsigned int order,int old_mt,int new_mt) move_to_free_list() argument
884 __del_page_from_free_list(struct page * page,struct zone * zone,unsigned int order,int migratetype) __del_page_from_free_list() argument
906 del_page_from_free_list(struct page * page,struct zone * zone,unsigned int order,int migratetype) del_page_from_free_list() argument
929 buddy_merge_likely(unsigned long pfn,unsigned long buddy_pfn,struct page * page,unsigned int order) buddy_merge_likely() argument
981 __free_one_page(struct page * page,unsigned long pfn,struct zone * zone,unsigned int order,int migratetype,fpi_t fpi_flags) __free_one_page() argument
1344 free_pages_prepare(struct page * page,unsigned int order) free_pages_prepare() argument
1486 unsigned int order; free_pcppages_bulk() local
1536 split_large_buddy(struct zone * zone,struct page * page,unsigned long pfn,int order,fpi_t fpi) split_large_buddy() argument
1559 add_page_to_zone_llist(struct zone * zone,struct page * page,unsigned int order) add_page_to_zone_llist() argument
1568 free_one_page(struct zone * zone,struct page * page,unsigned long pfn,unsigned int order,fpi_t fpi_flags) free_one_page() argument
1603 __free_pages_ok(struct page * page,unsigned int order,fpi_t fpi_flags) __free_pages_ok() argument
1613 __free_pages_core(struct page * page,unsigned int order,enum meminit_context context) __free_pages_core() argument
1792 check_new_pages(struct page * page,unsigned int order) check_new_pages() argument
1834 post_alloc_hook(struct page * page,unsigned int order,gfp_t gfp_flags) post_alloc_hook() argument
1889 prep_new_page(struct page * page,unsigned int order,gfp_t gfp_flags,unsigned int alloc_flags) prep_new_page() argument
1914 __rmqueue_smallest(struct zone * zone,unsigned int order,int migratetype) __rmqueue_smallest() argument
1954 __rmqueue_cma_fallback(struct zone * zone,unsigned int order) __rmqueue_cma_fallback() argument
1960 __rmqueue_cma_fallback(struct zone * zone,unsigned int order) __rmqueue_cma_fallback() argument
1972 unsigned int order; __move_freepages_block() local
2076 int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER; find_large_buddy() local
2149 int order = buddy_order(buddy); __move_freepages_block_isolate() local
2230 should_try_claim_block(unsigned int order,int start_mt) should_try_claim_block() argument
2278 find_suitable_fallback(struct free_area * area,unsigned int order,int migratetype,bool claimable) find_suitable_fallback() argument
2308 try_to_claim_block(struct zone * zone,struct page * page,int current_order,int order,int start_type,int block_type,unsigned int alloc_flags) try_to_claim_block() argument
2382 __rmqueue_claim(struct zone * zone,int order,int start_migratetype,unsigned int alloc_flags) __rmqueue_claim() argument
2437 __rmqueue_steal(struct zone * zone,int order,int start_migratetype) __rmqueue_steal() argument
2473 __rmqueue(struct zone * zone,unsigned int order,int migratetype,unsigned int alloc_flags,enum rmqueue_mode * mode) __rmqueue() argument
2542 rmqueue_bulk(struct zone * zone,unsigned int order,unsigned long count,struct list_head * list,int migratetype,unsigned int alloc_flags) rmqueue_bulk() argument
2856 free_frozen_page_commit(struct zone * zone,struct per_cpu_pages * pcp,struct page * page,int migratetype,unsigned int order,fpi_t fpi_flags,unsigned long * UP_flags) free_frozen_page_commit() argument
2959 __free_frozen_pages(struct page * page,unsigned int order,fpi_t fpi_flags) __free_frozen_pages() argument
3009 free_frozen_pages(struct page * page,unsigned int order) free_frozen_pages() argument
3028 unsigned int order = folio_order(folio); free_unref_folios() local
3052 unsigned int order = (unsigned long)folio->private; free_unref_folios() local
3118 split_page(struct page * page,unsigned int order) split_page() argument
3133 __isolate_free_page(struct page * page,unsigned int order) __isolate_free_page() argument
3183 __putback_isolated_page(struct page * page,unsigned int order,int mt) __putback_isolated_page() argument
3223 rmqueue_buddy(struct zone * preferred_zone,struct zone * zone,unsigned int order,unsigned int alloc_flags,int migratetype) rmqueue_buddy() argument
3267 nr_pcp_alloc(struct per_cpu_pages * pcp,struct zone * zone,int order) nr_pcp_alloc() argument
3319 __rmqueue_pcplist(struct zone * zone,unsigned int order,int migratetype,unsigned int alloc_flags,struct per_cpu_pages * pcp,struct list_head * list) __rmqueue_pcplist() argument
3351 rmqueue_pcplist(struct zone * preferred_zone,struct zone * zone,unsigned int order,int migratetype,unsigned int alloc_flags) rmqueue_pcplist() argument
3394 rmqueue(struct zone * preferred_zone,struct zone * zone,unsigned int order,gfp_t gfp_flags,unsigned int alloc_flags,int migratetype) rmqueue() argument
3427 reserve_highatomic_pageblock(struct page * page,int order,struct zone * zone) reserve_highatomic_pageblock() argument
3487 int order; unreserve_highatomic_pageblock() local
3559 __zone_watermark_unusable_free(struct zone * z,unsigned int order,unsigned int alloc_flags) __zone_watermark_unusable_free() argument
3585 __zone_watermark_ok(struct zone * z,unsigned int order,unsigned long mark,int highest_zoneidx,unsigned int alloc_flags,long free_pages) __zone_watermark_ok() argument
3663 zone_watermark_ok(struct zone * z,unsigned int order,unsigned long mark,int highest_zoneidx,unsigned int alloc_flags) zone_watermark_ok() argument
3670 zone_watermark_fast(struct zone * z,unsigned int order,unsigned long mark,int highest_zoneidx,unsigned int alloc_flags,gfp_t gfp_mask) zone_watermark_fast() argument
3791 get_page_from_freelist(gfp_t gfp_mask,unsigned int order,int alloc_flags,const struct alloc_context * ac) get_page_from_freelist() argument
4034 __alloc_pages_cpuset_fallback(gfp_t gfp_mask,unsigned int order,unsigned int alloc_flags,const struct alloc_context * ac) __alloc_pages_cpuset_fallback() argument
4053 __alloc_pages_may_oom(gfp_t gfp_mask,unsigned int order,const struct alloc_context * ac,unsigned long * did_some_progress) __alloc_pages_may_oom() argument
4148 __alloc_pages_direct_compact(gfp_t gfp_mask,unsigned int order,unsigned int alloc_flags,const struct alloc_context * ac,enum compact_priority prio,enum compact_result * compact_result) __alloc_pages_direct_compact() argument
4207 should_compact_retry(struct alloc_context * ac,int order,int alloc_flags,enum compact_result compact_result,enum compact_priority * compact_priority,int * compaction_retries) should_compact_retry() argument
4273 __alloc_pages_direct_compact(gfp_t gfp_mask,unsigned int order,unsigned int alloc_flags,const struct alloc_context * ac,enum compact_priority prio,enum compact_result * compact_result) __alloc_pages_direct_compact() argument
4282 should_compact_retry(struct alloc_context * ac,int order,int alloc_flags,enum compact_result compact_result,enum compact_priority * compact_priority,int * compaction_retries) should_compact_retry() argument
4394 __perform_reclaim(gfp_t gfp_mask,unsigned int order,const struct alloc_context * ac) __perform_reclaim() argument
4420 __alloc_pages_direct_reclaim(gfp_t gfp_mask,unsigned int order,unsigned int alloc_flags,const struct alloc_context * ac,unsigned long * did_some_progress) __alloc_pages_direct_reclaim() argument
4453 wake_all_kswapds(unsigned int order,gfp_t gfp_mask,const struct alloc_context * ac) wake_all_kswapds() argument
4479 gfp_to_alloc_flags(gfp_t gfp_mask,unsigned int order) gfp_to_alloc_flags() argument
4583 should_reclaim_retry(gfp_t gfp_mask,unsigned order,struct alloc_context * ac,int alloc_flags,bool did_some_progress,int * no_progress_loops) should_reclaim_retry() argument
4693 __alloc_pages_slowpath(gfp_t gfp_mask,unsigned int order,struct alloc_context * ac) __alloc_pages_slowpath() argument
4986 prepare_alloc_pages(gfp_t gfp_mask,unsigned int order,int preferred_nid,nodemask_t * nodemask,struct alloc_context * ac,gfp_t * alloc_gfp,unsigned int * alloc_flags) prepare_alloc_pages() argument
5204 __alloc_frozen_pages_noprof(gfp_t gfp,unsigned int order,int preferred_nid,nodemask_t * nodemask) __alloc_frozen_pages_noprof() argument
5269 __alloc_pages_noprof(gfp_t gfp,unsigned int order,int preferred_nid,nodemask_t * nodemask) __alloc_pages_noprof() argument
5281 __folio_alloc_noprof(gfp_t gfp,unsigned int order,int preferred_nid,nodemask_t * nodemask) __folio_alloc_noprof() argument
5295 get_free_pages_noprof(gfp_t gfp_mask,unsigned int order) get_free_pages_noprof() argument
5312 ___free_pages(struct page * page,unsigned int order,fpi_t fpi_flags) ___free_pages() argument
5357 __free_pages(struct page * page,unsigned int order) __free_pages() argument
5367 free_pages_nolock(struct page * page,unsigned int order) free_pages_nolock() argument
5381 free_pages(unsigned long addr,unsigned int order) free_pages() argument
5391 make_alloc_exact(unsigned long addr,unsigned int order,size_t size) make_alloc_exact() argument
5429 unsigned int order = get_order(size); alloc_pages_exact_noprof() local
5454 unsigned int order = get_order(size); alloc_pages_exact_nid_noprof() local
6896 int order; split_free_pages() local
6977 const unsigned int order = ilog2(end - start); alloc_contig_range_noprof() local
7299 unsigned int order; __offline_isolated_pages() local
7345 unsigned int order; is_free_buddy_page() local
7361 add_to_free_list(struct page * page,struct zone * zone,unsigned int order,int migratetype,bool tail) add_to_free_list() argument
7406 unsigned int order; take_page_off_buddy() local
7489 page_contains_unaccepted(struct page * page,unsigned int order) page_contains_unaccepted() argument
7544 cond_accept_memory(struct zone * zone,unsigned int order,int alloc_flags) cond_accept_memory() argument
7603 page_contains_unaccepted(struct page * page,unsigned int order) page_contains_unaccepted() argument
7608 cond_accept_memory(struct zone * zone,unsigned int order,int alloc_flags) cond_accept_memory() argument
7622 alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags,int nid,unsigned int order) alloc_frozen_pages_nolock_noprof() argument
7710 alloc_pages_nolock_noprof(gfp_t gfp_flags,int nid,unsigned int order) alloc_pages_nolock_noprof() argument
[all …]
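
Throughout page_alloc.c, "order" is the log2 of the number of contiguous pages: an order-n request covers 2^n pages. A standalone userspace sketch of the kernel's get_order() rounding, assuming 4 KiB pages (the helper name get_order_demo is made up; size must be non-zero):

#include <stdio.h>

#define PAGE_SHIFT 12

/* Round a byte count up to whole pages, then take ceil(log2). */
static unsigned int get_order_demo(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* 64 KiB rounds to order 4, i.e. 16 pages of 4 KiB. */
	printf("order=%u pages=%lu\n", get_order_demo(64 * 1024),
	       1UL << get_order_demo(64 * 1024));
	return 0;
}
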
compaction.c
51 static inline bool is_via_compact_memory(int order) in is_via_compact_memory() argument
53 return order == -1; in is_via_compact_memory()
59 static inline bool is_via_compact_memory(int order) { return false; } in is_via_compact_memory() argument
67 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
68 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
83 static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags) in mark_allocated_noprof() argument
85 post_alloc_hook(page, order, __GFP_MOVABLE); in mark_allocated_noprof()
93 int order; in release_free_list() local
96 for (order = 0; order < NR_PAGE_ORDERS; order++) { in release_free_list()
99 list_for_each_entry_safe(page, next, &freepages[order], lru) { in release_free_list()
[all …]
page_reporting.c
20 * If param is set beyond this limit, order is set to default in page_order_update_notify()
38 MODULE_PARM_DESC(page_reporting_order, "Set page reporting order");
42 * symbol so that other drivers can access it to control order values without
117 unsigned int order = get_order(sg->length); in page_reporting_drain() local
119 __putback_isolated_page(page, order, mt); in page_reporting_drain()
130 * up to that higher order. in page_reporting_drain()
132 if (PageBuddy(page) && buddy_order(page) == order) in page_reporting_drain()
147 unsigned int order, unsigned int mt, in page_reporting_cycle() argument
150 struct free_area *area = &zone->free_area[order]; in page_reporting_cycle()
152 unsigned int page_len = PAGE_SIZE << order; in page_reporting_cycle()
263 unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY; page_reporting_process_zone() local
[all …]
hugetlb_cma.c
27 struct folio *hugetlb_cma_alloc_frozen_folio(int order, gfp_t gfp_mask,
38 page = cma_alloc_frozen_compound(hugetlb_cma[nid], order); in hugetlb_cma_alloc_folio()
45 page = cma_alloc_frozen_compound(hugetlb_cma[node], order); in hugetlb_cma_alloc_folio()
145 unsigned long size, reserved, per_node, order; in hugetlb_cma_reserve()
152 order = arch_hugetlb_cma_order(); in hugetlb_cma_reserve()
153 if (!order) { in hugetlb_cma_reserve()
164 VM_WARN_ON(order <= MAX_PAGE_ORDER); in hugetlb_cma_reserve()
179 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { in hugetlb_cma_reserve()
181 nid, (PAGE_SIZE << order) / SZ_1M); in hugetlb_cma_reserve()
193 if (hugetlb_cma_size < (PAGE_SIZE << order)) { in hugetlb_cma_reserve()
29 hugetlb_cma_alloc_folio(int order,gfp_t gfp_mask,int nid,nodemask_t * nodemask) hugetlb_cma_alloc_folio() argument
137 hugetlb_cma_reserve(int order) hugetlb_cma_reserve() argument
[all …]
/linux/lib/
test_xarray.c
72 unsigned order, void *entry, gfp_t gfp) in xa_store_order() argument
74 XA_STATE_ORDER(xas, xa, index, order); in xa_store_order()
177 unsigned int order; in check_xa_mark_1() local
207 for (order = 2; order < max_order; order++) { in check_xa_mark_1()
208 unsigned long base = round_down(index, 1UL << order); in check_xa_mark_1()
209 unsigned long next = base + (1UL << order); in check_xa_mark_1()
217 xa_store_order(xa, index, order, xa_mk_index(index), in check_xa_mark_1()
328 unsigned int order; in check_xa_shrink() local
353 for (order = 0; order < max_order; order++) { in check_xa_shrink()
354 unsigned long max = (1UL << order) - 1; in check_xa_shrink()
[all …]
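
xa_store_order() above is a helper local to the test; the multi-order store loop it wraps is the standard XArray idiom of binding an xa_state to (index, order) and retrying after node allocation. A sketch built from that public API (error handling elided):

/* Store one entry covering 2^order indices starting at a naturally
 * aligned index; xas_nomem() allocates and retries as needed. */
static void store_order(struct xarray *xa, unsigned long index,
			unsigned int order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));
}
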
/linux/include/linux/
gfp.h
86 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
219 static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument
222 static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument
225 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
229 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
244 /* Bulk allocate order-0 pages */
280 __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node_noprof() argument
285 return __alloc_pages_noprof(gfp_mask, order, nid, NULL); in __alloc_pages_node_noprof()
291 struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid) in __folio_alloc_node_noprof() argument
296 return __folio_alloc_noprof(gfp, order, nid, NULL); in __folio_alloc_node_noprof()
307 alloc_pages_node_noprof(int nid,gfp_t gfp_mask,unsigned int order) alloc_pages_node_noprof() argument
325 alloc_pages_noprof(gfp_t gfp_mask,unsigned int order) alloc_pages_noprof() argument
329 folio_alloc_noprof(gfp_t gfp,unsigned int order) folio_alloc_noprof() argument
333 folio_alloc_mpol_noprof(gfp_t gfp,unsigned int order,struct mempolicy * mpol,pgoff_t ilx,int nid) folio_alloc_mpol_noprof() argument
338 vma_alloc_folio_noprof(gfp,order,vma,addr) global() argument
379 __get_dma_pages(gfp_mask,order) global() argument
445 folio_alloc_gigantic_noprof(int order,gfp_t gfp,int nid,nodemask_t * node) folio_alloc_gigantic_noprof() argument
458 folio_alloc_gigantic_noprof(int order,gfp_t gfp,int nid,nodemask_t * node) folio_alloc_gigantic_noprof() argument
[all …]
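
For callers, the order parameter threaded through these declarations surfaces in the public entry points. A hedged usage sketch (kernel context assumed; the demo function is hypothetical):

#include <linux/gfp.h>
#include <linux/mm.h>

static int demo_alloc(void)
{
	/* Order 2: four physically contiguous pages. */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return -ENOMEM;

	/* GFP_KERNEL allocations come from lowmem, so page_address() works. */
	memset(page_address(page), 0, 4 * PAGE_SIZE);
	__free_pages(page, 2);
	return 0;
}
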
compaction.h
65 static inline unsigned long compact_gap(unsigned int order) in compact_gap() argument
80 return 2UL << order; in compact_gap()
90 extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
91 extern int fragmentation_index(struct zone *zone, unsigned int order);
93 unsigned int order, unsigned int alloc_flags,
97 extern bool compaction_suitable(struct zone *zone, int order,
100 extern void compaction_defer_reset(struct zone *zone, int order,
103 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
108 extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
115 static inline bool compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument
[all …]
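
compact_gap() above sizes the free-page headroom compaction wants: space for the allocation itself plus an equal amount for pages migrated out of the way, hence 2UL << order. A standalone check of the arithmetic:

#include <assert.h>

static unsigned long compact_gap(unsigned int order)
{
	return 2UL << order;	/* as in the header above */
}

int main(void)
{
	assert(compact_gap(0) == 2);	/* even order 0 keeps a two-page gap */
	assert(compact_gap(9) == 1024);	/* order 9 = 2 MiB with 4 KiB pages */
	return 0;
}
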
/linux/mm/kmsan/
init.c
116 bool kmsan_memblock_free_pages(struct page *page, unsigned int order) in kmsan_memblock_free_pages() argument
120 if (!held_back[order].shadow) { in kmsan_memblock_free_pages()
121 held_back[order].shadow = page; in kmsan_memblock_free_pages()
124 if (!held_back[order].origin) { in kmsan_memblock_free_pages()
125 held_back[order].origin = page; in kmsan_memblock_free_pages()
128 shadow = held_back[order].shadow; in kmsan_memblock_free_pages()
129 origin = held_back[order].origin; in kmsan_memblock_free_pages()
130 kmsan_setup_meta(page, shadow, origin, order); in kmsan_memblock_free_pages()
132 held_back[order].shadow = NULL; in kmsan_memblock_free_pages()
133 held_back[order].origin = NULL; in kmsan_memblock_free_pages()
[all …]
/linux/Documentation/trace/postprocess/
trace-vmscan-postprocess.pl
315 my $order = $1;
316 $perprocesspid{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN_PERORDER}[$order]++;
317 $perprocesspid{$process_pid}->{STATE_DIRECT_ORDER} = $order;
326 my $order = $perprocesspid{$process_pid}->{STATE_DIRECT_ORDER};
328 $perprocesspid{$process_pid}->{HIGH_DIRECT_RECLAIM_LATENCY}[$index] = "$order-$latency";
339 my $order = $2;
340 $perprocesspid{$process_pid}->{STATE_KSWAPD_ORDER} = $order;
345 $perprocesspid{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE_PERORDER}[$order]++;
348 $perprocesspid{$process_pid}->{HIGH_KSWAPD_REWAKEUP_PERORDER}[$order]++;
358 my $order = $perprocesspid{$process_pid}->{STATE_KSWAPD_ORDER};
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/therm/
gk104.c
34 const struct gk104_clkgate_engine_info *order = therm->clkgate_order; in gk104_clkgate_enable() local
38 for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_enable()
39 if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) in gk104_clkgate_enable()
42 nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500); in gk104_clkgate_enable()
50 for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_enable()
51 if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) in gk104_clkgate_enable()
54 nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045); in gk104_clkgate_enable()
63 const struct gk104_clkgate_engine_info *order = therm->clkgate_order; in gk104_clkgate_fini() local
67 for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_fini()
68 if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) in gk104_clkgate_fini()
[all …]
/linux/arch/riscv/kvm/
tlb.c
25 unsigned long order) in kvm_riscv_local_hfence_gvma_vmid_gpa() argument
29 if (PTRS_PER_PTE < (gpsz >> order)) { in kvm_riscv_local_hfence_gvma_vmid_gpa()
36 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa()
41 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa()
53 unsigned long order) in kvm_riscv_local_hfence_gvma_gpa() argument
57 if (PTRS_PER_PTE < (gpsz >> order)) { in kvm_riscv_local_hfence_gvma_gpa()
64 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa()
69 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa()
84 unsigned long order) in kvm_riscv_local_hfence_vvma_asid_gva() argument
88 if (PTRS_PER_PTE < (gvsz >> order)) { in kvm_riscv_local_hfence_vvma_asid_gva()
[all …]
/linux/arch/riscv/mm/
hugetlbpage.c
35 unsigned long order; in huge_pte_alloc() local
68 for_each_napot_order(order) { in huge_pte_alloc()
69 if (napot_cont_size(order) == sz) { in huge_pte_alloc()
70 pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order)); in huge_pte_alloc()
88 unsigned long order; in huge_pte_offset() local
119 for_each_napot_order(order) { in huge_pte_offset()
120 if (napot_cont_size(order) == sz) { in huge_pte_offset()
121 pte = pte_offset_huge(pmd, addr & napot_cont_mask(order)); in huge_pte_offset()
189 unsigned long order; in arch_make_huge_pte() local
191 for_each_napot_order(order) { in arch_make_huge_pte()
281 unsigned long order; huge_ptep_set_access_flags() local
326 unsigned long order; huge_ptep_set_wrprotect() local
383 unsigned long order; is_napot_size() local
398 unsigned long order; napot_hugetlbpages_init() local
[all …]
/linux/kernel/bpf/
cgroup_iter.c
13 * 1. Walk the descendants of a cgroup in pre-order.
14 * 2. Walk the descendants of a cgroup in post-order.
19 * For walking descendants, cgroup_iter can walk in either pre-order or
20 * post-order. For walking ancestors, the iter walks up from a cgroup to
41 * EOPNOTSUPP. In order to work around, the user may have to update their
55 int order;
78 if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_start()
80 else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_start()
82 else if (p->order == BPF_CGROUP_ITER_CHILDREN) in cgroup_iter_seq_start()
113 if (p->order in cgroup_iter_seq_next()
54 int order; global() member
200 int order = linfo->cgroup.order; bpf_iter_attach_cgroup() local
[all …]
/linux/drivers/gpu/drm/i915/selftests/
i915_random.c
70 void i915_random_reorder(unsigned int *order, unsigned int count, in i915_random_reorder() argument
73 i915_prandom_shuffle(order, sizeof(*order), count, state); in i915_random_reorder()
78 unsigned int *order, i; in i915_random_order() local
80 order = kmalloc_array(count, sizeof(*order), in i915_random_order()
82 if (!order) in i915_random_order()
83 return order; in i915_random_order()
86 order[i] = i; in i915_random_order()
88 i915_random_reorder(order, count, state); in i915_random_order()
89 return order; in i915_random_order()
i915_syncmap.c
274 unsigned int pass, order; in igt_syncmap_join_above() local
296 for (order = 0; order < 64; order += SHIFT) { in igt_syncmap_join_above()
297 u64 context = BIT_ULL(order); in igt_syncmap_join_above()
335 unsigned int step, order, idx; in igt_syncmap_join_below() local
345 for (order = 64 - SHIFT; order > 0; order -= SHIFT) { in igt_syncmap_join_below()
346 u64 context = step * BIT_ULL(order); in igt_syncmap_join_below()
354 context, order, step, sync->height, sync->prefix); in igt_syncmap_join_below()
362 for (order = SHIFT; order < 64; order += SHIFT) { in igt_syncmap_join_below()
363 u64 context = step * BIT_ULL(order); in igt_syncmap_join_below()
367 context, order, step); in igt_syncmap_join_below()
[all …]
/linux/arch/arm/lib/
lib1funcs.S
106 .macro ARM_DIV2_ORDER divisor, order argument
110 clz \order, \divisor
111 rsb \order, \order, #31
117 movhs \order, #16
118 movlo \order, #0
122 addhs \order, \order, #8
126 addhs \order, \order, #4
129 addhi \order, \order, #3
130 addls \order, \order, \divisor, lsr #1
137 .macro ARM_MOD_BODY dividend, divisor, order, spare
[all …]
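
In C terms, the clz/rsb pair in ARM_DIV2_ORDER computes the index of the divisor's highest set bit, which is log2 for the power-of-two divisors this path handles. A sketch, assuming a non-zero 32-bit divisor:

/* order = 31 - clz(divisor): the bit position of the top set bit. */
static unsigned int div2_order(unsigned int divisor)
{
	return 31 - __builtin_clz(divisor);
}
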
/linux/kernel/liveupdate/
kexec_handover.c
41 * Use it to store both the magic and the order.
46 unsigned int order;
70 * The serializing side uses two levels of xarrays to manage chunks of per-order
71 * PAGE_SIZE byte bitmaps. For instance if PAGE_SIZE = 4096, the entire 1G order
72 * of a 8TB system would fit inside a single 4096 byte bitmap. For order 0
74 * memory at most 512K of bitmap memory will be needed for order 0.
93 * to order.
99 /* Points to kho_mem_phys, each order gets its own bitmap tree */
147 unsigned int order) in xa_load_or_alloc()
151 const unsigned long pfn_high = pfn >> order; in __kho_unpreserve_order()
49 unsigned int order; global() member
150 __kho_unpreserve_order(struct kho_mem_track * track,unsigned long pfn,unsigned int order) __kho_unpreserve_order() argument
170 unsigned int order; __kho_unpreserve() local
182 __kho_preserve_order(struct kho_mem_track * track,unsigned long pfn,unsigned int order) __kho_preserve_order() argument
301 const unsigned int order = kho_restore_pages() local
335 unsigned int order; global() member
351 new_chunk(struct khoser_mem_chunk * cur_chunk,unsigned long order) new_chunk() argument
406 unsigned long order; kho_mem_serialize() local
450 deserialize_bitmap(unsigned int order,struct khoser_mem_bitmap_ptr * elm) deserialize_bitmap() argument
796 const unsigned int order = folio_order(folio); kho_preserve_folio() local
817 const unsigned int order = folio_order(folio); kho_unpreserve_folio() local
849 const unsigned int order = kho_preserve_pages() local
956 kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk * chunk,unsigned short order) kho_vmalloc_unpreserve_chunk() argument
988 unsigned int order, flags, nr_contig_pages; kho_preserve_vmalloc() local
1072 unsigned int align, order, shift, vm_flags; kho_restore_vmalloc() local
1165 int order, ret; kho_alloc_preserve() local
[all …]
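
The sizing claim in the comment above checks out: one PAGE_SIZE bitmap holds PAGE_SIZE * 8 bits, one bit per unit of the given order. A standalone sketch of the arithmetic for the 8 TB system at 1 GiB (order 18) granularity:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long mem = 8UL << 40;		/* 8 TiB of RAM */
	unsigned int order = 18;		/* 1 GiB units: 2^18 pages */
	unsigned long units = mem >> (PAGE_SHIFT + order);
	unsigned long bits = PAGE_SIZE * 8;

	/* 8192 units need 8192 bits; one 32768-bit bitmap easily fits them. */
	printf("units=%lu bits_per_bitmap=%lu\n", units, bits);
	return 0;
}
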
/linux/tools/testing/radix-tree/
iteration_check.c
25 int order; in my_item_insert() local
29 for (order = max_order; order >= 0; order--) { in my_item_insert()
30 xas_set_order(&xas, index, order); in my_item_insert()
31 item->order = order; in my_item_insert()
41 if (order < 0) in my_item_insert()
165 void iteration_test(unsigned order, unsigned test_duration) in iteration_test() argument
170 order > 0 ? "multiorder " : "", test_duration); in iteration_test()
172 max_order = order; in iteration_test()
/linux/arch/riscv/include/asm/
kvm_tlb.h
25 unsigned long order; member
36 unsigned long order);
39 unsigned long order);
45 unsigned long order);
50 unsigned long order);
65 unsigned long order, unsigned long vmid);
72 unsigned long order, unsigned long asid,
80 unsigned long order, unsigned long vmid);
pgtable-64.h
97 #define for_each_napot_order(order) \ argument
98 for (order = NAPOT_CONT_ORDER_BASE; order < NAPOT_ORDER_MAX; order++)
99 #define for_each_napot_order_rev(order) \ argument
100 for (order = NAPOT_ORDER_MAX - 1; \
101 order >= NAPOT_CONT_ORDER_BASE; order--)
104 #define napot_cont_shift(order) ((order) + PAGE_SHIFT) argument
105 #define napot_cont_size(order) BIT(napot_cont_shift(order)) argument
106 #define napot_cont_mask(order) (~(napot_cont_size(order) - 1UL)) argument
107 #define napot_pte_num(order) BIT(order) argument
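
A worked example of the NAPOT helpers above, assuming 4 KiB base pages (PAGE_SHIFT = 12) and with the kernel's BIT() spelled out as a plain shift; standalone sketch:

#include <stdio.h>

#define PAGE_SHIFT 12
#define napot_cont_shift(order)	((order) + PAGE_SHIFT)
#define napot_cont_size(order)	(1UL << napot_cont_shift(order))
#define napot_cont_mask(order)	(~(napot_cont_size(order) - 1UL))
#define napot_pte_num(order)	(1UL << (order))

int main(void)
{
	/* Order 4: a 64 KiB naturally aligned region backed by 16 PTEs. */
	printf("size=%lu KiB ptes=%lu mask=%#lx\n",
	       napot_cont_size(4) >> 10, napot_pte_num(4),
	       napot_cont_mask(4));
	return 0;
}
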
