Cross-reference: NUMA_NO_NODE in the Linux source tree (numbers are line positions within each file)

/linux/tools/testing/selftests/bpf/progs/
verifier_arena.c
     31  page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_alloc1()
     35  page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_alloc1()
     39  no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_alloc1()
     51  page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_alloc1()
     70  page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);  in basic_alloc2()
    112  pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0);  in basic_alloc3()
    126  page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_reserve1()
    138  page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);  in basic_reserve1()
    143  page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_reserve1()
    163  page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);  in basic_reserve2()
|
verifier_arena_large.c
     28  page1 = base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in big_alloc1()
     33  1, NUMA_NO_NODE, 0);  in big_alloc1()
     38  1, NUMA_NO_NODE, 0);  in big_alloc1()
     50  page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in big_alloc1()
    126  page = bpf_arena_alloc_pages(&arena, base, 5, NUMA_NO_NODE, 0);  in request_partially_reserved()
    145  page = bpf_arena_alloc_pages(&arena, addr, 2, NUMA_NO_NODE, 0);  in free_reserved()
    161  page = bpf_arena_alloc_pages(&arena, addr + __PAGE_SIZE, 2, NUMA_NO_NODE, 0);  in free_reserved()
    185  NUMA_NO_NODE, 0);  in alloc_pages()
    212  base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in big_alloc2()
    261  pg = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE,  in big_alloc2()
    [all …]
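All of these selftests drive the bpf_arena_alloc_pages() kfunc, whose node_id parameter accepts NUMA_NO_NODE to request backing pages from any node. A minimal sketch of the pattern, assuming the selftests' usual arena map layout and the kfunc declarations from bpf_arena_common.h (the program name and page count are illustrative):

    /* Sketch only: allocate one arena page with no NUMA preference,
     * touch it, then free it. */
    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_arena_common.h"

    struct {
        __uint(type, BPF_MAP_TYPE_ARENA);
        __uint(map_flags, BPF_F_MMAPABLE);
        __uint(max_entries, 2);        /* arena size in pages */
    } arena SEC(".maps");

    SEC("syscall")
    int alloc_one_page(void *ctx)
    {
        char __arena *page;

        page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
        if (!page)
            return 1;
        page[0] = 0x5a;                /* arena pages are directly addressable */
        bpf_arena_free_pages(&arena, page, 1);
        return 0;
    }

    char _license[] SEC("license") = "GPL";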
/linux/arch/x86/mm/
numa.c
     48  [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
     57  return NUMA_NO_NODE;  in numa_cpu_node()
     66  DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
     93  numa_set_node(cpu, NUMA_NO_NODE);  in numa_clear_node()
    160  if (early_cpu_to_node(i) != NUMA_NO_NODE)  in numa_init_array()
    173  set_apicid_to_node(i, NUMA_NO_NODE);  in numa_init()
    186  if (nid == NUMA_NO_NODE)  in numa_init()
    298  if (node == NUMA_NO_NODE)  in init_cpu_to_node()
    358  return NUMA_NO_NODE;  in early_cpu_to_node()
    367  if (node == NUMA_NO_NODE) {  in debug_cpumask_set_cpu()
    [all …]
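The x86 hits share one convention: node maps start out filled with the NUMA_NO_NODE sentinel, and lookups test for it before trusting a node id. A hedged sketch of that check; the helper below is illustrative, not a kernel function:

    #include <linux/numa.h>
    #include <linux/nodemask.h>
    #include <linux/topology.h>

    /* Illustrative only: early in boot, early_cpu_to_node() can still
     * return NUMA_NO_NODE, so callers fall back to a known-good node. */
    static int pick_node_for_cpu(int cpu)
    {
        int node = early_cpu_to_node(cpu);

        return (node == NUMA_NO_NODE) ? first_online_node : node;
    }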
|
/linux/mm/
numa_emulation.c
     66  if (emu_nid_to_phys[nid] == NUMA_NO_NODE)  in emu_setup_memblk()
    335  *dfl_phys_nid = NUMA_NO_NODE;  in setup_emu2phys_nid()
    337  if (emu_nid_to_phys[i] != NUMA_NO_NODE) {  in setup_emu2phys_nid()
    339  if (*dfl_phys_nid == NUMA_NO_NODE)  in setup_emu2phys_nid()
    392  emu_nid_to_phys[i] = NUMA_NO_NODE;  in numa_emulation()
    473  ei.blk[i].nid != NUMA_NO_NODE)  in numa_emulation()
    491  if (emu_nid_to_phys[i] == NUMA_NO_NODE)  in numa_emulation()
    519  if (emu_nid_to_phys[i] != NUMA_NO_NODE &&  in numa_emulation()
    520  emu_nid_to_phys[j] != NUMA_NO_NODE)  in numa_emulation()
    522  if (emu_nid_to_phys[i] != NUMA_NO_NODE)  in numa_emulation()
    [all …]
|
numa_memblks.c
     30  mi->blk[i].nid != NUMA_NO_NODE)  in numa_nodemask_from_meminfo()
    325  mi->blk[i].nid = NUMA_NO_NODE;  in numa_cleanup_meminfo()
    455  WARN_ON(memblock_set_node(0, max_addr, &memblock.memory, NUMA_NO_NODE));  in numa_memblks_init()
    457  NUMA_NO_NODE));  in numa_memblks_init()
    567  return NUMA_NO_NODE;  in meminfo_to_nid()
    578  if (nid != NUMA_NO_NODE)  in phys_to_target_node()
    589  if (nid == NUMA_NO_NODE)  in memory_add_physaddr_to_nid()
|
hugetlb_sysfs.c
     35  *nidp = NUMA_NO_NODE;  in kobj_to_hstate()
     50  if (nid == NUMA_NO_NODE)  in nr_hugepages_show_common()
    147  if (nid == NUMA_NO_NODE)  in free_hugepages_show()
    172  if (nid == NUMA_NO_NODE)  in surplus_hugepages_show()
    196  if (nid != NUMA_NO_NODE) {  in demote_store()
    214  if (nid != NUMA_NO_NODE)  in demote_store()
|
memory-tiers.c
    140  static int default_dram_perf_ref_nid = NUMA_NO_NODE;
    335  return NUMA_NO_NODE;  in next_demotion_node()
    418  int target = NUMA_NO_NODE, node;  in establish_demotion_targets()
    456  if (target == NUMA_NO_NODE)  in establish_demotion_targets()
    747  if (default_dram_perf_ref_nid == NUMA_NO_NODE) {  in mt_set_default_dram_perf()
    795  if (default_dram_perf_ref_nid == NUMA_NO_NODE)  in mt_perf_to_adistance()
|
/linux/drivers/acpi/numa/
srat.c
     28  = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
     45  return NUMA_NO_NODE;  in pxm_to_node()
     60  if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])  in __acpi_map_pxm_to_node()
     71  return NUMA_NO_NODE;  in acpi_map_pxm_to_node()
     75  if (node == NUMA_NO_NODE) {  in acpi_map_pxm_to_node()
     78  return NUMA_NO_NODE;  in acpi_map_pxm_to_node()
     95  = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };  in fix_pxm_node_maps()
    165  pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE)  in fix_pxm_node_maps()
    339  if (from_node == NUMA_NO_NODE)  in acpi_parse_slit()
    345  if (to_node == NUMA_NO_NODE)  in acpi_parse_slit()
    [all …]
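The SRAT code keeps a proximity-domain-to-node translation table that is initialized to NUMA_NO_NODE in every slot. A sketch of the translation pattern, assuming ACPI NUMA support is built in (the wrapper function is illustrative):

    #include <linux/acpi.h>
    #include <linux/numa.h>

    /* Illustrative only: map an ACPI proximity domain to a Linux node id,
     * allocating a node on first use; NUMA_NO_NODE signals failure
     * (invalid proximity domain or translation table full). */
    static int example_map_pxm(int pxm)
    {
        int node = acpi_map_pxm_to_node(pxm);

        if (node == NUMA_NO_NODE)
            pr_warn("SRAT: PXM %d could not be mapped to a node\n", pxm);
        return node;
    }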
|
/linux/drivers/base/
arch_numa.c
     19  static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
     47  if (node == NUMA_NO_NODE)  in cpumask_of_node()
     67  if (nid == NUMA_NO_NODE)  in numa_update_cpu()
     90  set_cpu_numa_node(cpu, NUMA_NO_NODE);  in numa_clear_node()
    342  if (cpu_to_node_map[i] == NUMA_NO_NODE)  in numa_emu_update_cpu_to_node()
    360  if (node == NUMA_NO_NODE)  in debug_cpumask_set_cpu()
|
/linux/include/linux/
memblock.h
    135  return __memblock_reserve(base, size, NUMA_NO_NODE, 0);  in memblock_reserve()
    140  return __memblock_reserve(base, size, NUMA_NO_NODE, MEMBLOCK_RSRV_KERN);  in memblock_reserve_kern()
    184  __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,  in __next_physmem_range()
    249  __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
    261  __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
    275  __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
    428  MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);  in memblock_alloc()
    442  NUMA_NO_NODE);  in memblock_alloc_raw()
    450  MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);  in memblock_alloc_from()
    457  ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);  in memblock_alloc_low()
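Most of these hits are node-agnostic convenience wrappers: memblock_alloc() and friends simply forward NUMA_NO_NODE to the range/nid allocator. A sketch of that equivalence, assuming an early-boot (__init) caller; the function below is illustrative:

    #include <linux/memblock.h>

    static void __init example_early_buffers(void)
    {
        /* Wrapper form: no node preference ... */
        void *a = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);

        /* ... which forwards NUMA_NO_NODE to the explicit form. */
        void *b = memblock_alloc_try_nid(PAGE_SIZE, SMP_CACHE_BYTES,
                                         MEMBLOCK_LOW_LIMIT,
                                         MEMBLOCK_ALLOC_ACCESSIBLE,
                                         NUMA_NO_NODE);

        if (!a || !b)
            panic("%s: early allocation failed\n", __func__);
    }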
|
slab.h
     502  #define krealloc_noprof(_o, _s, _f) krealloc_node_align_noprof(_o, _s, 1, _f, NUMA_NO_NODE)
     505  #define krealloc(...) krealloc_node(__VA_ARGS__, NUMA_NO_NODE)
     969  alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
     972  …alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE…
    1061  #define kmalloc_track_caller(...) kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
    1064  kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
    1106  #define kvmalloc(...) kvmalloc_node(__VA_ARGS__, NUMA_NO_NODE)
    1112  alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), 1, _flags, NUMA_NO_NODE))
    1125  #define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
    1127  #define kvcalloc_noprof(...) kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
    [all …]
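Each hit here is the node-agnostic slab API spelled in terms of its _node variant: kmalloc(), krealloc(), kvmalloc() and friends are macros that pass NUMA_NO_NODE through. A small sketch of the equivalence (the struct and function are illustrative):

    #include <linux/slab.h>

    struct example { int a, b; };

    static struct example *example_alloc(void)
    {
        /* Same behaviour as kmalloc(sizeof(struct example), GFP_KERNEL):
         * NUMA_NO_NODE places no constraint on the backing node. */
        return kmalloc_node(sizeof(struct example), GFP_KERNEL, NUMA_NO_NODE);
    }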
|
async.h
     54  return async_schedule_node(func, data, NUMA_NO_NODE);  in async_schedule()
     72  return async_schedule_node_domain(func, data, NUMA_NO_NODE, domain);  in async_schedule_domain()
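async_schedule() and async_schedule_domain() are thin wrappers that pass NUMA_NO_NODE, meaning the async worker may run on any node. Sketch (the callback and caller are illustrative):

    #include <linux/async.h>

    static void example_async_fn(void *data, async_cookie_t cookie)
    {
        /* runs asynchronously from the shared worker pool */
    }

    static void example_kick(void *data)
    {
        /* Equivalent to async_schedule(example_async_fn, data). */
        async_schedule_node(example_async_fn, data, NUMA_NO_NODE);
    }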
|
/linux/arch/x86/kernel/cpu/
hygon.c
     34  if (node != NUMA_NO_NODE && node_online(node))  in nearby_node()
     39  if (node != NUMA_NO_NODE && node_online(node))  in nearby_node()
     54  if (node == NUMA_NO_NODE)  in srat_detect_node()
     86  if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)  in srat_detect_node()
|
/linux/kernel/sched/
ext_idle.c
     62  return node == NUMA_NO_NODE ? &scx_idle_global_masks : scx_idle_node_masks[node];  in idle_cpumask()
     72  return NUMA_NO_NODE;  in scx_cpu_node_if_enabled()
    218  if (node == NUMA_NO_NODE || flags & SCX_PICK_IDLE_IN_NODE)  in scx_pick_idle_cpu()
    784  cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask);  in reset_idle_masks()
    785  cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask);  in reset_idle_masks()
    830  if (node == NUMA_NO_NODE)  in validate_node()
    958  return NUMA_NO_NODE;  in scx_bpf_cpu_node()
   1114  return idle_cpumask(NUMA_NO_NODE)->cpu;  in scx_bpf_get_idle_cpumask()
   1174  return idle_cpumask(NUMA_NO_NODE)->smt;  in scx_bpf_get_idle_smtmask()
   1176  return idle_cpumask(NUMA_NO_NODE)->cpu;  in scx_bpf_get_idle_smtmask()
    [all …]
|
/linux/arch/arm64/kernel/
acpi_numa.c
     30  static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
    101  if (node == NUMA_NO_NODE) {  in acpi_numa_gicc_affinity_init()
|
/linux/drivers/hv/
mshv_root_hv_call.c
    126  ret = hv_call_deposit_pages(NUMA_NO_NODE,  in hv_call_create_partition()
    141  ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,  in hv_call_initialize_partition()
    154  ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);  in hv_call_initialize_partition()
    243  ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,  in hv_do_map_gpa_hcall()
    468  ret = hv_call_deposit_pages(NUMA_NO_NODE,  in hv_call_get_vp_state()
    528  ret = hv_call_deposit_pages(NUMA_NO_NODE,  in hv_call_set_vp_state()
    576  ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);  in hv_call_map_vp_state_page()
    725  ret = hv_call_deposit_pages(NUMA_NO_NODE, port_partition_id, 1);  in hv_call_create_port()
    779  ret = hv_call_deposit_pages(NUMA_NO_NODE,  in hv_call_connect_port()
    851  ret = hv_call_deposit_pages(NUMA_NO_NODE,  in hv_call_map_stats_page2()
    [all …]
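Every hit here is the same retry idiom: when the hypervisor rejects a call for lack of memory, the driver deposits pages from any node (NUMA_NO_NODE) into the partition's pool and retries. A hedged sketch of the shape, assuming the mshyperv helpers hv_do_hypercall(), hv_result(), hv_result_success() and hv_call_deposit_pages(); this is not the driver's exact code:

    #include <linux/errno.h>
    #include <linux/numa.h>
    #include <asm/mshyperv.h>

    /* Illustrative only: retry a hypercall, topping up the partition's
     * memory pool from any node when the hypervisor runs short. */
    static int example_hcall_with_deposit(u64 control, void *in, void *out,
                                          u64 partition_id)
    {
        u64 status;
        int ret;

        do {
            status = hv_do_hypercall(control, in, out);
            if (hv_result_success(status))
                return 0;
            if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY)
                return -EIO;
            ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
        } while (!ret);

        return ret;
    }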
|
/linux/arch/riscv/kernel/
acpi_numa.c
     31  static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
    124  if (node == NUMA_NO_NODE) {  in acpi_numa_rintc_affinity_init()
|
/linux/tools/testing/selftests/bpf/
bpf_arena_common.h
      9  #ifndef NUMA_NO_NODE
     10  #define NUMA_NO_NODE (-1)  macro
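This guard lets the same arena code build both as a BPF program and in userspace, where <linux/numa.h> may not be available; the fallback mirrors the kernel's value. In full:

    /* Fallback so userspace/BPF builds agree with the kernel constant. */
    #ifndef NUMA_NO_NODE
    #define NUMA_NO_NODE (-1)
    #endif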
|
/linux/arch/loongarch/kernel/
numa.c
     39  [0 ... CONFIG_NR_CPUS - 1] = NUMA_NO_NODE
    115  return NUMA_NO_NODE;  in early_cpu_to_node()
    232  set_cpuid_to_node(i, NUMA_NO_NODE);  in init_numa_memory()
|
acpi.c
    340  if (nid != NUMA_NO_NODE)  in acpi_map_cpu2node()
    343  if (nid != NUMA_NO_NODE) {  in acpi_map_cpu2node()
    376  set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);  in acpi_unmap_cpu()
|
/linux/tools/testing/memblock/tests/
alloc_nid_api.c
     81  NUMA_NO_NODE);  in alloc_nid_top_down_simple_check()
    134  NUMA_NO_NODE);  in alloc_nid_top_down_end_misaligned_check()
    184  NUMA_NO_NODE);  in alloc_nid_exact_address_generic_check()
    235  NUMA_NO_NODE);  in alloc_nid_top_down_narrow_range_check()
    286  NUMA_NO_NODE);  in alloc_nid_low_max_generic_check()
    331  NUMA_NO_NODE);  in alloc_nid_min_reserved_generic_check()
    381  NUMA_NO_NODE);  in alloc_nid_max_reserved_generic_check()
    444  NUMA_NO_NODE);  in alloc_nid_top_down_reserved_with_space_check()
    507  NUMA_NO_NODE);  in alloc_nid_reserved_full_merge_generic_check()
    571  NUMA_NO_NODE);  in alloc_nid_top_down_reserved_no_space_check()
    [all …]
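These tests pass NUMA_NO_NODE as the nid argument of memblock_alloc_try_nid(), so placement is constrained only by the address range, never by node. A sketch of the call shape they exercise (the wrapper function is illustrative):

    #include <linux/memblock.h>
    #include <linux/sizes.h>

    /* Illustrative: allocate 128 bytes anywhere in [min_addr, max_addr],
     * with no node restriction, matching the alloc_nid test pattern. */
    static void *example_bounded_alloc(phys_addr_t min_addr, phys_addr_t max_addr)
    {
        return memblock_alloc_try_nid(SZ_128, SMP_CACHE_BYTES,
                                      min_addr, max_addr, NUMA_NO_NODE);
    }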
|
/linux/drivers/base/test/
test_async_driver_probe.c
     89  if (nid != NUMA_NO_NODE)  in test_platform_device_register_node()
    180  NUMA_NO_NODE);  in test_async_probe_init()
    213  NUMA_NO_NODE);  in test_async_probe_init()
|
/linux/tools/testing/selftests/bpf/prog_tests/
bloom_filter_map.c
      9  #ifndef NUMA_NO_NODE
     10  #define NUMA_NO_NODE (-1)  macro
     76  opts.numa_node = NUMA_NO_NODE;  in test_success_cases()
|
/linux/arch/powerpc/platforms/pseries/
rtas-work-area.c
    140  const int nid = NUMA_NO_NODE;  in rtas_work_area_allocator_init()
    194  const int nid = NUMA_NO_NODE;  in rtas_work_area_reserve_arena()
|
hotplug-cpu.c
    175  if (assigned_node != NUMA_NO_NODE) {  in find_cpu_id_range()
    238  BUG_ON(node == NUMA_NO_NODE);  in pseries_add_processor()
    248  node = NUMA_NO_NODE;  in pseries_add_processor()
    249  rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask);  in pseries_add_processor()
    272  if (node == NUMA_NO_NODE) {  in pseries_add_processor()
|