| /linux/include/trace/events/ |
| compaction.h | 168 int order, 172 TP_ARGS(order, gfp_mask, prio), 175 __field(int, order) 181 __entry->order = order; 186 TP_printk("order=%d gfp_mask=%s priority=%d", 187 __entry->order, 195 int order, 198 TP_ARGS(zone, order, ret), 203 __field(int, order) 210 __entry->order = order; [all …]
|
| kmem.h | 141 TP_PROTO(struct page *page, unsigned int order), 143 TP_ARGS(page, order), 147 __field( unsigned int, order ) 152 __entry->order = order; 155 TP_printk("page=%p pfn=0x%lx order=%d", 158 __entry->order) 175 TP_printk("page=%p pfn=0x%lx order=0", 182 TP_PROTO(struct page *page, unsigned int order, 185 TP_ARGS(page, order, gfp_flags, migratetype), 189 __field( unsigned int, order ) [all …]
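Both trace headers above are pieces of TRACE_EVENT() definitions in which order is captured as an event field and rendered by TP_printk(). A minimal sketch of that pattern, with an illustrative event name and the TRACE_SYSTEM/define_trace.h header boilerplate omitted (so this is the shape, not one of the kernel's actual events):

    #include <linux/tracepoint.h>

    /* Sketch only: illustrative event, mirroring the kmem.h style of
     * storing the pfn rather than the struct page pointer. */
    TRACE_EVENT(sample_order_event,

            TP_PROTO(struct page *page, unsigned int order),

            TP_ARGS(page, order),

            TP_STRUCT__entry(
                    __field(unsigned long, pfn)
                    __field(unsigned int, order)
            ),

            TP_fast_assign(
                    __entry->pfn = page_to_pfn(page);
                    __entry->order = order;
            ),

            /* Renders as e.g. "pfn=0x1234 order=2" in the trace buffer. */
            TP_printk("pfn=0x%lx order=%u", __entry->pfn, __entry->order)
    );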
|
| /linux/scripts/atomic/ |
| gen-atomic-fallback.sh | 8 #gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...) 16 local order="$1"; shift 28 #gen_order_fallback(meta, pfx, name, sfx, order, atomic, int, args...) 35 local order="$1"; shift 37 local tmpl_order=${order#_} 39 gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@" 42 #gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...) 49 local order="$1"; shift 51 local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" 52 gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@" [all …]
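Here order is not an allocation size but a memory-ordering suffix (`_relaxed`, `_acquire`, `_release`, or empty for fully ordered); `${order#_}` strips the leading underscore to locate the matching fallback template. A sketch of what the generated acquire fallback typically looks like (assumption: shape inferred from the generated include/linux/atomic/atomic-arch-fallback.h; the relaxed primitive plus an explicit fence):

    /* Sketch: an _acquire variant synthesized from the _relaxed op. */
    static __always_inline int
    atomic_fetch_add_acquire(int i, atomic_t *v)
    {
            int ret = atomic_fetch_add_relaxed(i, v);

            __atomic_acquire_fence();       /* upgrade relaxed to acquire */
            return ret;
    }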
|
| /linux/lib/ |
| test_xarray.c | 72 unsigned order, void *entry, gfp_t gfp) in xa_store_order() argument 74 XA_STATE_ORDER(xas, xa, index, order); in xa_store_order() 177 unsigned int order; in check_xa_mark_1() local 207 for (order = 2; order < max_order; order++) { in check_xa_mark_1() 208 unsigned long base = round_down(index, 1UL << order); in check_xa_mark_1() 209 unsigned long next = base + (1UL << order); in check_xa_mark_1() 217 xa_store_order(xa, index, order, xa_mk_index(index), in check_xa_mark_1() 328 unsigned int order; in check_xa_shrink() local 353 for (order = 0; order < max_order; order++) { in check_xa_shrink() 354 unsigned long max = (1UL << order) - 1; in check_xa_shrink() [all …]
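xa_store_order() stores a single multi-order entry covering 2^order aligned indices. The body elided above follows the standard XArray advanced-API retry loop; a sketch of the helper's shape:

    /* Sketch: store one entry over a 2^order-aligned range of indices. */
    static void *xa_store_order(struct xarray *xa, unsigned long index,
                                unsigned order, void *entry, gfp_t gfp)
    {
            XA_STATE_ORDER(xas, xa, index, order);
            void *curr;

            do {
                    xas_lock(&xas);
                    curr = xas_store(&xas, entry);
                    xas_unlock(&xas);
            } while (xas_nomem(&xas, gfp));  /* retry after allocating nodes */

            return curr;
    }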
|
| /linux/mm/ |
| compaction.c | 46 * order == -1 is expected when compacting proactively via 51 static inline bool is_via_compact_memory(int order) in is_via_compact_memory() argument 53 return order == -1; in is_via_compact_memory() 59 static inline bool is_via_compact_memory(int order) { return false; } in is_via_compact_memory() argument 67 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument 68 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument 71 * Page order with-respect-to which proactive compaction 83 static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags) in mark_allocated_noprof() argument 85 post_alloc_hook(page, order, __GFP_MOVABLE); in mark_allocated_noprof() 93 int order; in release_free_list() local [all …]
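The two pageblock macros bracket a pfn to its enclosing 2^order-aligned block; a worked check, assuming order = 9 (512-page blocks):

    /* block_start_pfn(pfn, 9) = round_down(pfn, 512)
     * block_end_pfn(pfn, 9)   = ALIGN(pfn + 1, 512)
     *
     *   pfn = 1000 -> start =  512, end = 1024
     *   pfn =  512 -> start =  512, end = 1024
     *   pfn =  511 -> start =    0, end =  512   (end is exclusive)
     */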
|
| page_alloc.c | 218 static void __free_pages_ok(struct page *page, unsigned int order, 293 static bool page_contains_unaccepted(struct page *page, unsigned int order); 294 static bool cond_accept_memory(struct zone *zone, unsigned int order, 320 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument 322 return deferred_grow_zone(zone, order); in _deferred_grow_zone() 330 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument 658 static inline unsigned int order_to_pindex(int migratetype, int order) in order_to_pindex() argument 663 if (order > PAGE_ALLOC_COSTLY_ORDER) { in order_to_pindex() 664 VM_BUG_ON(order != HPAGE_PMD_ORDER); in order_to_pindex() 671 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in order_to_pindex() [all …]
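Throughout page_alloc.c, order is the buddy allocator's size exponent: an order-n block is 2^n contiguous pages, and per-CPU lists only cache orders up to PAGE_ALLOC_COSTLY_ORDER plus the THP order, per the VM_BUG_ONs above. For scale (assuming 4 KiB pages; the last two values are x86-64 defaults):

    /* order  0 ->    1 page      4 KiB
     * order  3 ->    8 pages    32 KiB  (PAGE_ALLOC_COSTLY_ORDER)
     * order  9 ->  512 pages     2 MiB  (HPAGE_PMD_ORDER on x86-64)
     * order 10 -> 1024 pages     4 MiB  (default MAX_PAGE_ORDER)
     */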
|
| /linux/arch/arm/lib/ |
| lib1funcs.S | 106 .macro ARM_DIV2_ORDER divisor, order argument 110 clz \order, \divisor 111 rsb \order, \order, #31 117 movhs \order, #16 118 movlo \order, #0 122 addhs \order, \order, #8 126 addhs \order, \order, #4 129 addhi \order, \order, #3 130 addls \order, \order, \divisor, lsr #1 137 .macro ARM_MOD_BODY dividend, divisor, order, spare [all …]
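ARM_DIV2_ORDER computes the shift amount for a power-of-two divisor: on cores with clz the order is simply 31 minus the count of leading zeros, while the movhs/addhs ladder below it is the binary-search fallback for older cores without clz. C equivalent of the clz path (sketch; div2_order is an illustrative name):

    /* clz \order, \divisor ; rsb \order, \order, #31
     * ==> order = 31 - clz(divisor), i.e. log2 of a power-of-two divisor. */
    static inline unsigned int div2_order(unsigned int divisor)
    {
            return 31 - __builtin_clz(divisor);
    }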
|
| /linux/mm/kmsan/ |
| init.c | 107 * by their order: when kmsan_memblock_free_pages() is called for the first 108 * time with a certain order, it is reserved as a shadow block, for the second 111 * after which held_back[order] can be used again. 116 bool kmsan_memblock_free_pages(struct page *page, unsigned int order) in kmsan_memblock_free_pages() argument 120 if (!held_back[order].shadow) { in kmsan_memblock_free_pages() 121 held_back[order].shadow = page; in kmsan_memblock_free_pages() 124 if (!held_back[order].origin) { in kmsan_memblock_free_pages() 125 held_back[order].origin = page; in kmsan_memblock_free_pages() 128 shadow = held_back[order].shadow; in kmsan_memblock_free_pages() 129 origin = held_back[order].origin; in kmsan_memblock_free_pages() [all …]
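The comment and code above describe a small per-order state machine: the first same-order block freed by memblock is parked as the shadow, the second as the origin, and only the third free actually releases pages, with the parked pair attached as its metadata. A simplified model of the elided control flow (assumption: return values and the kmsan_setup_meta() hookup are abridged reconstructions, not the verbatim function):

    /* Sketch: false = block is held back, true = pages may be freed. */
    bool kmsan_memblock_free_pages(struct page *page, unsigned int order)
    {
            struct page *shadow, *origin;

            if (!held_back[order].shadow) {
                    held_back[order].shadow = page;  /* 1st: park as shadow */
                    return false;
            }
            if (!held_back[order].origin) {
                    held_back[order].origin = page;  /* 2nd: park as origin */
                    return false;
            }
            shadow = held_back[order].shadow;
            origin = held_back[order].origin;
            kmsan_setup_meta(page, shadow, origin, order); /* 3rd: wire up */
            held_back[order].shadow = NULL;   /* slot is reusable again */
            held_back[order].origin = NULL;
            return true;
    }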
|
| /linux/kernel/bpf/ |
| cgroup_iter.c | 13 * 1. Walk the descendants of a cgroup in pre-order. 14 * 2. Walk the descendants of a cgroup in post-order. 18 * For walking descendants, cgroup_iter can walk in either pre-order or 19 * post-order. For walking ancestors, the iter walks up from a cgroup to 40 * EOPNOTSUPP. In order to work around, the user may have to update their 54 int order; member 77 if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_start() 79 else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_start() 110 if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_next() 112 else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_next() [all …]
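Pre-order visits a cgroup before its descendants and post-order visits it after them; the iterator's order member just selects which css helper to dispatch to. The generic recursive picture (a sketch, not the kernel's iterator — node/walk/visit are illustrative):

    struct node { struct node *child, *sibling; };

    static void walk(struct node *n, bool pre_order,
                     void (*visit)(struct node *))
    {
            if (pre_order)
                    visit(n);        /* parent first, then children */

            for (struct node *c = n->child; c; c = c->sibling)
                    walk(c, pre_order, visit);

            if (!pre_order)
                    visit(n);        /* children first, then parent */
    }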
|
| /linux/arch/riscv/mm/ |
| hugetlbpage.c | 35 unsigned long order; in huge_pte_alloc() local 68 for_each_napot_order(order) { in huge_pte_alloc() 69 if (napot_cont_size(order) == sz) { in huge_pte_alloc() 70 pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order)); in huge_pte_alloc() 88 unsigned long order; in huge_pte_offset() local 119 for_each_napot_order(order) { in huge_pte_offset() 120 if (napot_cont_size(order) == sz) { in huge_pte_offset() 121 pte = pte_offset_huge(pmd, addr & napot_cont_mask(order)); in huge_pte_offset() 189 unsigned long order; in arch_make_huge_pte() local 191 for_each_napot_order(order) { in arch_make_huge_pte() [all …]
|
| /linux/arch/riscv/kvm/ |
| tlb.c | 25 unsigned long order) in kvm_riscv_local_hfence_gvma_vmid_gpa() argument 29 if (PTRS_PER_PTE < (gpsz >> order)) { in kvm_riscv_local_hfence_gvma_vmid_gpa() 36 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa() 41 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa() 53 unsigned long order) in kvm_riscv_local_hfence_gvma_gpa() argument 57 if (PTRS_PER_PTE < (gpsz >> order)) { in kvm_riscv_local_hfence_gvma_gpa() 64 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa() 69 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa() 84 unsigned long order) in kvm_riscv_local_hfence_vvma_asid_gva() argument 88 if (PTRS_PER_PTE < (gvsz >> order)) { in kvm_riscv_local_hfence_vvma_asid_gva() [all …]
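Each helper flushes a guest range in BIT(order)-sized strides, falling back to a full flush when the range would take more than PTRS_PER_PTE individual fences. The shared loop shape, as a sketch (flush_all()/flush_one() are hypothetical stand-ins for the hfence instructions):

    /* Sketch of the common range-fence pattern above. */
    static void hfence_range(unsigned long base, unsigned long size,
                             unsigned long order)
    {
            unsigned long pos;

            if (PTRS_PER_PTE < (size >> order)) {
                    flush_all();            /* too many entries: global fence */
                    return;
            }

            for (pos = base; pos < base + size; pos += BIT(order))
                    flush_one(pos);         /* one fence per 2^order block */
    }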
|
| /linux/include/linux/ |
| gfp.h | 86 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. 219 static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument 222 static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument 225 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 229 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, 244 /* Bulk allocate order-0 pages */ 280 __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node_noprof() argument 285 return __alloc_pages_noprof(gfp_mask, order, nid, NULL); in __alloc_pages_node_noprof() 291 struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid) in __folio_alloc_node_noprof() argument 296 return __folio_alloc_noprof(gfp, order, nid, NULL); in __folio_alloc_node_noprof() [all …]
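In every prototype here order requests 2^order contiguous pages, and the same order must be passed back when freeing. Typical use (a minimal sketch; grab_four_pages is an illustrative name):

    #include <linux/gfp.h>

    static int grab_four_pages(void)
    {
            struct page *page;

            page = alloc_pages(GFP_KERNEL, 2);   /* 2^2 = 4 contiguous pages */
            if (!page)
                    return -ENOMEM;

            __free_pages(page, 2);               /* free with the same order */
            return 0;
    }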
|
| compaction.h | 61 * Number of free order-0 pages that should be available above given watermark 65 static inline unsigned long compact_gap(unsigned int order) in compact_gap() argument 69 * free scanner may have up to 1 << order pages on its list and then in compact_gap() 70 * try to split an (order - 1) free page. At that point, a gap of in compact_gap() 71 * 1 << order might not be enough, so it's safer to require twice that in compact_gap() 80 return 2UL << order; in compact_gap() 90 extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); 91 extern int fragmentation_index(struct zone *zone, unsigned int order); 93 unsigned int order, unsigned int alloc_flags, 97 extern bool compaction_suitable(struct zone *zone, int order, [all …]
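The factor of two comes straight from the comment: the migration and free scanners may each hold up to 1 << order pages, so the gap is doubled. Worked values of compact_gap(order) = 2UL << order, assuming 4 KiB pages:

    /* order  3 ->   16 order-0 pages   (64 KiB)
     * order  9 -> 1024 order-0 pages   (4 MiB)
     * order 10 -> 2048 order-0 pages   (8 MiB)
     */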
|
| /linux/kernel/liveupdate/ |
| kexec_handover.c | 44 * Use it to store both the magic and the order. 49 unsigned int order; member 73 * The serializing side uses two levels of xarrays to manage chunks of per-order 74 * PAGE_SIZE byte bitmaps. For instance if PAGE_SIZE = 4096, the entire 1G order 75 * of a 8TB system would fit inside a single 4096 byte bitmap. For order 0 77 * memory at most 512K of bitmap memory will be needed for order 0. 96 * to order. 102 /* Points to kho_mem_phys, each order gets its own bitmap tree */ 150 unsigned int order) in __kho_unpreserve_order() argument 154 const unsigned long pfn_high = pfn >> order; in __kho_unpreserve_order() [all …]
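A sanity check of the sizing claims, assuming PAGE_SIZE = 4096 and 4 KiB pages: one page-sized bitmap holds 4096 * 8 = 32768 bits. At 1 GiB granularity an 8 TB system has only 8192 blocks, so the whole machine fits in a single bitmap (8192 bits < 32768). At order 0 each bit covers one 4 KiB page, so one bitmap spans 32768 * 4 KiB = 128 MiB; 16 GiB of memory then needs 128 bitmaps, i.e. 128 * 4096 bytes = 512 KiB of bitmap memory, which is consistent with the 512K figure in the truncated comment.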
|
| /linux/drivers/gpu/drm/ttm/tests/ |
| ttm_pool_test.c | 15 unsigned int order; member 104 .order = 0, 108 .order = 2, 112 .order = MAX_PAGE_ORDER + 1, 116 .order = 0, 121 .order = MAX_PAGE_ORDER + 1, 144 unsigned int expected_num_pages = 1 << params->order; in ttm_pool_alloc_basic() 167 if (params->order <= MAX_PAGE_ORDER) { in ttm_pool_alloc_basic() 172 KUNIT_ASSERT_EQ(test, fst_page->private, params->order); in ttm_pool_alloc_basic() 181 * order in ttm_pool_alloc_basic() 244 unsigned int order = 0; in ttm_pool_alloc_order_caching_match() local 274 unsigned int order = 0; in ttm_pool_alloc_caching_mismatch() local 306 unsigned int order = 2; in ttm_pool_alloc_order_mismatch() local 342 unsigned int order = 2; in ttm_pool_free_dma_alloc() local 373 unsigned int order = 2; in ttm_pool_free_no_dma_alloc() local 401 unsigned int order = 0; in ttm_pool_fini_basic() local [all …]
| /linux/Documentation/userspace-api/media/v4l/ |
| field-order.rst | 4 .. _field-order: 7 Field Order 26 which field of a frame is older, the *temporal order*. 31 even) fields, the *spatial order*: The first line of the top field is 40 creating a natural order. 45 and spatial order of fields. The diagrams below should make this 49 bus in the same order they were captured, so if the top field was 54 order. Some drivers may permit the selection of a different order, to 77 - Applications request this field order when any field format 79 e.g. the requested image size, and return the actual field order. [all …]
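In practice an application requests a field order through the field member of struct v4l2_format and reads back what the driver actually chose. A hedged sketch of that negotiation using the standard VIDIOC_S_FMT call (parameters illustrative; error handling trimmed):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Ask for "any field format" and read back the actual field order. */
    static int negotiate_field(int fd)
    {
            struct v4l2_format fmt;

            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            fmt.fmt.pix.width = 640;
            fmt.fmt.pix.height = 480;
            fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
            fmt.fmt.pix.field = V4L2_FIELD_ANY;

            if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
                    return -1;

            return fmt.fmt.pix.field;   /* the field order the driver chose */
    }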
|
| /linux/Documentation/trace/postprocess/ |
| trace-vmscan-postprocess.pl | 31 # Per-order events 110 my $regex_direct_begin_default = 'order=([0-9]*) gfp_flags=([A-Z_|]*)'; 112 my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)'; 114 my $regex_wakeup_kswapd_default = 'nid=([0-9]*) order=([0-9]*) gfp_flags=([A-Z_|]*)'; 115 my $regex_lru_isolate_default = 'classzone=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned… 166 # Verify fields are in the right order 187 "order", "gfp_flags"); 195 "nid", "order"); 203 "nid", "order", "gfp_flags"); 207 "classzone", "order", [all …]
|
| /linux/tools/testing/selftests/tc-testing/tc-tests/actions/ |
| mpls.json | 23 "matchPattern": "action order [0-9]+: mpls.*dec_ttl.*pipe.*index 8 ref", 50 "matchPattern": "action order [0-9]+: mpls.*dec_ttl.*pass.*index 8 ref", 77 "matchPattern": "action order [0-9]+: mpls.*dec_ttl.*drop.*index 8 ref", 104 "matchPattern": "action order [0-9]+: mpls.*dec_ttl.*reclassify.*index 8 ref", 131 "matchPattern": "action order [0-9]+: mpls.*dec_ttl.*continue.*index 8 ref", 158 "matchPattern": "action order [0-9]+: mpls.*jump 10.*index 8 ref", 185 "matchPattern": "action order [0-9]+: mpls.*dec_ttl trap.*index 8 ref", 212 "matchPattern": "action order [0-9]+: mpls.*dec_ttl pipe.*index 8 ref.*cookie aabbccddeeff", 239 …"matchPattern": "action order [0-9]+: mpls.*dec_ttl continue.*index 8 ref.*cookie aa11bb22cc33dd44… 266 "matchPattern": "action order [0-9]+: mpls.*dec_ttl.*foo.*index 8 ref", [all …]
|
| ife.json | 23 …"matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2", 50 … "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2", 77 …"matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*inde… 104 …"matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index … 131 …"matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 65676… 158 …"matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index… 185 …"matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 42949… 212 …"matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 42949672959… 237 …"matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9", 264 …"matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9", [all …]
|
| sample.json | 23 "matchPattern": "action order [0-9]+: sample rate 1/10 group 1.*index 2 ref", 50 "matchPattern": "action order [0-9]+: sample rate 1/700 group 2 continue.*index 2 ref", 77 "matchPattern": "action order [0-9]+: sample rate 1/10000 group 11 drop.*index 22 ref", 104 … "matchPattern": "action order [0-9]+: sample rate 1/20000 group 72 reclassify.*index 100 ref", 131 "matchPattern": "action order [0-9]+: sample rate 1/20 group 2 pipe.*index 100 ref", 158 "matchPattern": "action order [0-9]+: sample rate 1/700 group 25 jump 4.*index 200 ref", 185 "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref", 212 "matchPattern": "action order [0-9]+: sample rate 1/200000 group 52 foo.*index 1 ref", 237 "matchPattern": "action order [0-9]+: sample.*index 1 ref", 262 "matchPattern": "action order [0-9]+: sample.*group 10.*index 1 ref", [all …]
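In these tdc test files each matchPattern is a regex applied to the dumped action list, and "action order N:" is the position tc prints for each action in the dump. For example, the 1/10 pattern above corresponds to a test command along the lines of `tc actions add action sample rate 10 group 1 index 2` (a sketch of the test's cmdUnderTest; the exact command lives in the elided JSON), whose dump line the regex then pins down.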
|
| police.json | 23 "matchPattern": "action order [0-9]*: police 0x1 rate 1Kbit burst 10Kb", 51 "matchPattern": "action order [0-9]*: police 0x9", 78 "matchPattern": "action order [0-9]*: police 0x62 rate 90Kbit burst 10Kb mtu 1Kb", 105 "matchPattern": "action order [0-9]*: police 0x3 rate 90Kbit burst 10Kb mtu 2Kb peakrate 100Kbit", 132 "matchPattern": "action order [0-9]*: police 0x9 rate 5Kb burst 10Kb", 159 "matchPattern": "action order [0-9]*: police 0x40 rate 1Mbit burst 100Kb mtu 2Kb action reclassify overhead 64b", 186 "matchPattern": "action order [0-9]*: police 0x8 rate 2Mbit burst 200Kb mtu 2Kb action reclassify overhead 0b", 213 "matchPattern": "action order [0-9]*: police 0x8 rate 2Mbit burst 200Kb mtu 2Kb action reclassify overhead 0b linklayer atm", 240 "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action continue/drop", 267 "matchPattern": "action order [… [all …]
| skbedit.json | 23 "matchPattern": "action order [0-9]*: skbedit mark 1", 50 "matchPattern": "action order [0-9]*: skbedit mark 4294967295.*pipe.*index 1", 77 "matchPattern": "action order [0-9]*: skbedit mark", 102 "matchPattern": "action order [0-9]*: skbedit mark 1/0xaabb", 129 "matchPattern": "action order [0-9]*: skbedit mark 1/0xffffffff", 156 "matchPattern": "action order [0-9]*: skbedit mark 1/0xaabbccddeeff112233", 181 "matchPattern": "action order [0-9]*: skbedit mark 1/-1234", 207 "matchPattern": "action order [0-9]*: skbedit mark 1/0xaabb", 234 "matchPattern": "action order [0-9]*: skbedit priority :99", 261 "matchPattern": "action order [0-9]*: skbedit priority", [all …]
|
| /linux/drivers/gpu/drm/i915/selftests/ |
| i915_syncmap.c | 274 unsigned int pass, order; in igt_syncmap_join_above() local 296 for (order = 0; order < 64; order += SHIFT) { in igt_syncmap_join_above() 297 u64 context = BIT_ULL(order); in igt_syncmap_join_above() 335 unsigned int step, order, idx; in igt_syncmap_join_below() local 345 for (order = 64 - SHIFT; order > 0; order -= SHIFT) { in igt_syncmap_join_below() 346 u64 context = step * BIT_ULL(order); in igt_syncmap_join_below() 353 … pr_err("Inserting context=%llx (order=%d, step=%d) did not return leaf (height=%d, prefix=%llx\n", in igt_syncmap_join_below() 354 context, order, step, sync->height, sync->prefix); in igt_syncmap_join_below() 362 for (order = SHIFT; order < 64; order += SHIFT) { in igt_syncmap_join_below() 363 u64 context = step * BIT_ULL(order); in igt_syncmap_join_below() [all …]
|
| i915_random.c | 70 void i915_random_reorder(unsigned int *order, unsigned int count, in i915_random_reorder() argument 73 i915_prandom_shuffle(order, sizeof(*order), count, state); in i915_random_reorder() 78 unsigned int *order, i; in i915_random_order() local 80 order = kmalloc_array(count, sizeof(*order), in i915_random_order() 82 if (!order) in i915_random_order() 83 return order; in i915_random_order() 86 order[i] = i; in i915_random_order() 88 i915_random_reorder(order, count, state); in i915_random_order() 89 return order; in i915_random_order()
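i915_random_order() fills the identity permutation and hands it to i915_random_reorder(), which shuffles it in place. The underlying idiom is a Fisher-Yates shuffle; a standalone sketch (rand() stands in for the kernel's seeded PRNG state, and random_order is an illustrative name):

    #include <stdlib.h>

    static void random_order(unsigned int *order, unsigned int count)
    {
            for (unsigned int i = 0; i < count; i++)
                    order[i] = i;                    /* identity permutation */

            if (count < 2)
                    return;

            for (unsigned int i = count - 1; i > 0; i--) {
                    unsigned int j = rand() % (i + 1);   /* 0 <= j <= i */
                    unsigned int tmp = order[i];

                    order[i] = order[j];    /* classic Fisher-Yates swap */
                    order[j] = tmp;
            }
    }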
|
| /linux/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ |
| gk104.c | 34 const struct gk104_clkgate_engine_info *order = therm->clkgate_order; in gk104_clkgate_enable() local 38 for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_enable() 39 if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) in gk104_clkgate_enable() 42 nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500); in gk104_clkgate_enable() 50 for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_enable() 51 if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) in gk104_clkgate_enable() 54 nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045); in gk104_clkgate_enable() 63 const struct gk104_clkgate_engine_info *order = therm->clkgate_order; in gk104_clkgate_fini() local 67 for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_fini() 68 if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) in gk104_clkgate_fini() [all …]
|