| /linux/mm/ |
| hugetlb_cma.c |
    23   int nid = folio_nid(folio);  in hugetlb_cma_free_folio() local
    25   WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));  in hugetlb_cma_free_folio()
    30   int nid, nodemask_t *nodemask)  in hugetlb_cma_alloc_folio() argument
    35   if (hugetlb_cma[nid])  in hugetlb_cma_alloc_folio()
    36   folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);  in hugetlb_cma_alloc_folio()
    40   if (node == nid || !hugetlb_cma[node])  in hugetlb_cma_alloc_folio()
    56   hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, bool node_exact)  in hugetlb_cma_alloc_bootmem() argument
    60   int node = *nid;  in hugetlb_cma_alloc_bootmem()
    62   cma = hugetlb_cma[*nid];  in hugetlb_cma_alloc_bootmem()
    70   if (!cma || node == *nid)  in hugetlb_cma_alloc_bootmem()
    [all …]
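
The hugetlb_cma_alloc_folio() hits above (lines 30-40 of that file) show the per-node CMA allocation path: try the preferred node's CMA area first, then fall back to the other allowed nodes. Below is a minimal sketch of that fallback pattern, assuming a kernel build context; the helper name and the cma_areas[] array are made up for illustration, while cma_alloc_folio() and for_each_node_mask() are the kernel APIs visible in the hits.

#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/nodemask.h>

/* Illustrative only: mirrors the preferred-node-then-fallback loop above. */
static struct folio *alloc_folio_from_node_cma(struct cma **cma_areas, int order,
					       gfp_t gfp_mask, int nid,
					       nodemask_t *nodemask)
{
	struct folio *folio = NULL;
	int node;

	/* First try the CMA area of the requested node, if it has one. */
	if (cma_areas[nid])
		folio = cma_alloc_folio(cma_areas[nid], order, gfp_mask);
	if (folio)
		return folio;

	/* Otherwise walk the other allowed nodes that own a CMA area. */
	for_each_node_mask(node, *nodemask) {
		if (node == nid || !cma_areas[node])
			continue;
		folio = cma_alloc_folio(cma_areas[node], order, gfp_mask);
		if (folio)
			break;
	}
	return folio;
}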
|
| hugetlb_internal.h |
    31   static inline int next_node_allowed(int nid, nodemask_t *nodes_allowed)  in next_node_allowed() argument
    33   nid = next_node_in(nid, *nodes_allowed);  in next_node_allowed()
    34   VM_BUG_ON(nid >= MAX_NUMNODES);  in next_node_allowed()
    36   return nid;  in next_node_allowed()
    39   static inline int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)  in get_valid_node_allowed() argument
    41   if (!node_isset(nid, *nodes_allowed))  in get_valid_node_allowed()
    42   nid = next_node_allowed(nid, nodes_allowed);  in get_valid_node_allowed()
    43   return nid;  in get_valid_node_allowed()
    55   int nid;  in hstate_next_node_to_alloc() local
    59   nid = get_valid_node_allowed(*next_node, nodes_allowed);  in hstate_next_node_to_alloc()
    [all …]
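
The next_node_allowed()/get_valid_node_allowed() hits implement round-robin node selection over an allowed nodemask. Here is a rough sketch of the same idea, assuming kernel headers; round_robin_node() and the cursor argument are illustrative names, while node_isset() and next_node_in() are the helpers shown above (next_node_in() wraps around the mask).

#include <linux/nodemask.h>

static int round_robin_node(int *cursor, nodemask_t *allowed)
{
	int nid = *cursor;

	/* If the cached cursor fell out of the mask, skip to an allowed node. */
	if (!node_isset(nid, *allowed))
		nid = next_node_in(nid, *allowed);

	/* Advance the cursor so the next caller gets the following node. */
	*cursor = next_node_in(nid, *allowed);
	return nid;
}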
|
| mm_init.c |
    62   int nid;  in mminit_verify_zonelist() local
    67   for_each_online_node(nid) {  in mminit_verify_zonelist()
    68   pg_data_t *pgdat = NODE_DATA(nid);  in mminit_verify_zonelist()
    86   listid > 0 ? "thisnode" : "general", nid,  in mminit_verify_zonelist()
   318   int i, nid;  in early_calculate_totalpages() local
   320   for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {  in early_calculate_totalpages()
   325   node_set_state(nid, N_MEMORY);  in early_calculate_totalpages()
   359   int i, nid;  in find_zone_movable_pfns_for_nodes() local
   380   nid = memblock_get_region_node(r);  in find_zone_movable_pfns_for_nodes()
   383   zone_movable_pfn[nid] = zone_movable_pfn[nid] ?  in find_zone_movable_pfns_for_nodes()
    [all …]
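
The early_calculate_totalpages() hits walk every memblock range, attribute it to its owning node, and mark nodes that actually contain memory. A simplified sketch of that walk follows (the absent-page accounting of the real function is omitted and the function name is made up; for_each_mem_pfn_range() and node_set_state() are the kernel APIs shown above).

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nodemask.h>

static unsigned long __init count_present_pages(void)
{
	unsigned long start_pfn, end_pfn, total = 0;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		total += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);  /* node has usable memory */
	}
	return total;
}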
|
| shrinker.c |
    42   struct shrinker_info *old, int nid)  in shrinker_unit_alloc() argument
    50   unit = kzalloc_node(sizeof(*unit), GFP_KERNEL, nid);  in shrinker_unit_alloc()
    66   int nid;  in free_shrinker_info() local
    68   for_each_node(nid) {  in free_shrinker_info()
    69   pn = memcg->nodeinfo[nid];  in free_shrinker_info()
    79   int nid, ret = 0;  in alloc_shrinker_info() local
    84   for_each_node(nid) {  in alloc_shrinker_info()
    86   GFP_KERNEL, nid);  in alloc_shrinker_info()
    90   if (shrinker_unit_alloc(info, NULL, nid)) {  in alloc_shrinker_info()
    94   rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);  in alloc_shrinker_info()
    [all …]
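
alloc_shrinker_info() above allocates one structure per NUMA node, placing each on its own node with kzalloc_node() and unwinding on failure. A minimal sketch of that per-node allocation pattern, assuming kernel context; the struct and function names below are placeholders, not shrinker internals.

#include <linux/slab.h>
#include <linux/nodemask.h>

struct per_node_info {
	unsigned long counter;
};

static struct per_node_info *per_node_infos[MAX_NUMNODES];

static int alloc_per_node_infos(void)
{
	int nid;

	for_each_node(nid) {
		per_node_infos[nid] = kzalloc_node(sizeof(*per_node_infos[nid]),
						   GFP_KERNEL, nid);
		if (!per_node_infos[nid])
			goto fail;
	}
	return 0;

fail:
	for_each_node(nid)
		kfree(per_node_infos[nid]);  /* kfree(NULL) is a no-op */
	return -ENOMEM;
}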
|
| numa_memblks.c |
    28   mi->blk[i].nid != NUMA_NO_NODE)  in numa_nodemask_from_meminfo()
    29   node_set(mi->blk[i].nid, *nodemask);  in numa_nodemask_from_meminfo()
   133   static int __init numa_add_memblk_to(int nid, u64 start, u64 end,  in numa_add_memblk_to() argument
   141   if (start > end || nid < 0 || nid >= MAX_NUMNODES) {  in numa_add_memblk_to()
   143   nid, start, end - 1);  in numa_add_memblk_to()
   154   mi->blk[mi->nr_blks].nid = nid;  in numa_add_memblk_to()
   198   int __init numa_add_memblk(int nid, u64 start, u64 end)  in numa_add_memblk() argument
   200   return numa_add_memblk_to(nid, start, end, &numa_meminfo);  in numa_add_memblk()
   220   int __init numa_add_reserved_memblk(int nid, u64 start, u64 end)  in numa_add_reserved_memblk() argument
   222   return numa_add_memblk_to(nid, start, end, &numa_reserved_meminfo);  in numa_add_reserved_memblk()
    [all …]
|
| memory_hotplug.c |
   389   int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,  in __add_pages() argument
   423   err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,  in __add_pages()
   434   static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,  in find_smallest_section_pfn() argument
   442   if (unlikely(pfn_to_nid(start_pfn) != nid))  in find_smallest_section_pfn()
   455   static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,  in find_biggest_section_pfn() argument
   467   if (unlikely(pfn_to_nid(pfn) != nid))  in find_biggest_section_pfn()
   483   int nid = zone_to_nid(zone);  in shrink_zone_span() local
   492   pfn = find_smallest_section_pfn(nid, zone, end_pfn,  in shrink_zone_span()
   508   pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,  in shrink_zone_span()
   754   int nid = pgdat->node_id;  in move_pfn_range_to_zone() local
    [all …]
|
| numa_emulation.c |
    25   static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)  in emu_find_memblk_by_nid() argument
    30   if (mi->blk[i].nid == nid)  in emu_find_memblk_by_nid()
    51   int nid, int phys_blk, u64 size)  in emu_setup_memblk() argument
    64   eb->nid = nid;  in emu_setup_memblk()
    66   if (emu_nid_to_phys[nid] == NUMA_NO_NODE)  in emu_setup_memblk()
    67   emu_nid_to_phys[nid] = pb->nid;  in emu_setup_memblk()
    76   nid, eb->start, eb->end - 1, (eb->end - eb->start) / SZ_1M);  in emu_setup_memblk()
    93   int nid = 0;  in split_nodes_interleave() local
   144   if (nid < big)  in split_nodes_interleave()
   176   ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,  in split_nodes_interleave()
    [all …]
|
| hugetlb_sysfs.c |
    47   int nid;  in nr_hugepages_show_common() local
    49   h = kobj_to_hstate(kobj, &nid);  in nr_hugepages_show_common()
    50   if (nid == NUMA_NO_NODE)  in nr_hugepages_show_common()
    53   nr_huge_pages = h->nr_huge_pages_node[nid];  in nr_hugepages_show_common()
    64   int nid;  in nr_hugepages_store_common() local
    71   h = kobj_to_hstate(kobj, &nid);  in nr_hugepages_store_common()
    72   return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);  in nr_hugepages_store_common()
   144   int nid;  in free_hugepages_show() local
   146   h = kobj_to_hstate(kobj, &nid);  in free_hugepages_show()
   147   if (nid == NUMA_NO_NODE)  in free_hugepages_show()
    [all …]
|
| numa.c |
    12   void __init alloc_node_data(int nid)  in alloc_node_data() argument
    19   nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);  in alloc_node_data()
    22   nd_size, nid);  in alloc_node_data()
    25   pr_info("NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,  in alloc_node_data()
    28   if (tnid != nid)  in alloc_node_data()
    29   pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);  in alloc_node_data()
    31   node_data[nid] = __va(nd_pa);  in alloc_node_data()
    32   memset(NODE_DATA(nid), 0, sizeof(pg_data_t));  in alloc_node_data()
    35   void __init alloc_offline_node_data(int nid)  in alloc_offline_node_data() argument
    38   node_data[nid] = memblock_alloc_or_panic(sizeof(*pgdat), SMP_CACHE_BYTES);  in alloc_offline_node_data()
|
| /linux/sound/hda/core/ |
| sysfs.c |
    89   ssize_t (*show)(struct hdac_device *codec, hda_nid_t nid,
    91   ssize_t (*store)(struct hdac_device *codec, hda_nid_t nid,
    99   int nid;  in get_codec_nid() local
   102   ret = kstrtoint(kobj->name, 16, &nid);  in get_codec_nid()
   106   return nid;  in get_codec_nid()
   115   int nid;  in widget_attr_show() local
   119   nid = get_codec_nid(kobj, &codec);  in widget_attr_show()
   120   if (nid < 0)  in widget_attr_show()
   121   return nid;  in widget_attr_show()
   122   return wid_attr->show(codec, nid, wid_attr, buf);  in widget_attr_show()
    [all …]
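
get_codec_nid() above recovers the widget NID from the sysfs directory name, which holds the NID in hexadecimal. A small sketch of that parse, assuming kernel context; the function name here is illustrative, while kstrtoint() with base 16 is the call shown in the hit.

#include <linux/kernel.h>
#include <linux/kobject.h>

static int widget_nid_from_kobj(struct kobject *kobj)
{
	int nid, ret;

	/* The widget directory is named with the NID in hexadecimal. */
	ret = kstrtoint(kobj->name, 16, &nid);
	if (ret < 0)
		return ret;   /* negative errno on a malformed name */
	return nid;
}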
|
| /linux/drivers/base/ |
| node.c |
   216   void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,  in node_set_perf_attrs() argument
   223   if (WARN_ON_ONCE(!node_online(nid)))  in node_set_perf_attrs()
   226   node = node_devices[nid];  in node_set_perf_attrs()
   236   nid);  in node_set_perf_attrs()
   243   if (mempolicy_set_node_perf(nid, coord)) {  in node_set_perf_attrs()
   245   nid);  in node_set_perf_attrs()
   257   void node_update_perf_attrs(unsigned int nid, struct access_coordinate *coord,  in node_update_perf_attrs() argument
   264   if (WARN_ON_ONCE(!node_online(nid)))  in node_update_perf_attrs()
   267   node = node_devices[nid];  in node_update_perf_attrs()
   284   if (mempolicy_set_node_perf(nid, coord))  in node_update_perf_attrs()
    [all …]
|
| arch_numa.c |
    65   int nid = cpu_to_node(cpu);  in numa_update_cpu() local
    67   if (nid == NUMA_NO_NODE)  in numa_update_cpu()
    71   cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);  in numa_update_cpu()
    73   cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);  in numa_update_cpu()
   126   void __init early_map_cpu_to_node(unsigned int cpu, int nid)  in early_map_cpu_to_node() argument
   129   if (nid < 0 || nid >= MAX_NUMNODES || numa_off)  in early_map_cpu_to_node()
   130   nid = 0;  in early_map_cpu_to_node()
   132   cpu_to_node_map[cpu] = nid;  in early_map_cpu_to_node()
   140   set_cpu_numa_node(cpu, nid);  in early_map_cpu_to_node()
   195   static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)  in setup_node_data() argument
    [all …]
|
| /linux/sound/hda/common/ |
| hda_local.h |
    28   #define HDA_COMPOSE_AMP_VAL_OFS(nid,chs,idx,dir,ofs) \  argument
    29   ((nid) | ((chs)<<16) | ((dir)<<18) | ((idx)<<19) | ((ofs)<<23))
    31   #define HDA_COMPOSE_AMP_VAL(nid,chs,idx,dir) \  argument
    32   HDA_COMPOSE_AMP_VAL_OFS(nid, chs, idx, dir, 0)
    34   #define HDA_CODEC_VOLUME_MONO_IDX(xname, xcidx, nid, channel, xindex, dir, flags) \  argument
    44   .private_value = HDA_COMPOSE_AMP_VAL(nid, channel, xindex, dir) | flags }
    46   #define HDA_CODEC_VOLUME_IDX(xname, xcidx, nid, xindex, direction) \  argument
    47   HDA_CODEC_VOLUME_MONO_IDX(xname, xcidx, nid, 3, xindex, direction, 0)
    49   #define HDA_CODEC_VOLUME_MONO(xname, nid, channel, xindex, direction) \  argument
    50   HDA_CODEC_VOLUME_MONO_IDX(xname, 0, nid, channel, xindex, direction, 0)
    [all …]
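
The HDA_COMPOSE_AMP_VAL_OFS() hit shows how a volume/mute kcontrol packs the widget NID and the amp coordinates into one private_value. The standalone decoder below is derived only from the shift amounts visible in that macro; the concrete example values are made up.

#include <stdio.h>

/* Packing copied from the HDA_COMPOSE_AMP_VAL_OFS() hit above. */
#define COMPOSE_AMP_VAL_OFS(nid, chs, idx, dir, ofs) \
	((nid) | ((chs) << 16) | ((dir) << 18) | ((idx) << 19) | ((ofs) << 23))

int main(void)
{
	/* Example: widget NID 0x02, both channels (chs = 3), amp index 0,
	 * direction bit 1, no offset. */
	unsigned int val = COMPOSE_AMP_VAL_OFS(0x02, 3, 0, 1, 0);

	/* Field boundaries follow from the shifts: bits 0..15 NID,
	 * 16..17 channels, 18 direction, 19..22 index, 23.. offset. */
	printf("nid=0x%02x chs=%u dir=%u idx=%u ofs=%u (raw 0x%08x)\n",
	       val & 0xffff, (val >> 16) & 0x3, (val >> 18) & 0x1,
	       (val >> 19) & 0xf, val >> 23, val);
	return 0;
}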
|
| hda_jack.h |
    21   hda_nid_t nid;  member
    31   hda_nid_t nid;  member
    55   snd_hda_jack_tbl_get_mst(struct hda_codec *codec, hda_nid_t nid, int dev_id);
    63   snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid)  in snd_hda_jack_tbl_get() argument
    65   return snd_hda_jack_tbl_get_mst(codec, nid, 0);  in snd_hda_jack_tbl_get()
    77   int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid,
    81   snd_hda_jack_detect_enable_callback_mst(struct hda_codec *codec, hda_nid_t nid,
    95   snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,  in snd_hda_jack_detect_enable_callback() argument
    98   return snd_hda_jack_detect_enable_callback_mst(codec, nid, 0, cb);  in snd_hda_jack_detect_enable_callback()
   111   u32 snd_hda_jack_pin_sense(struct hda_codec *codec, hda_nid_t nid, int dev_id);
    [all …]
|
| jack.c |
    29   bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid)  in is_jack_detectable() argument
    33   if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT))  in is_jack_detectable()
    35   if (get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) &  in is_jack_detectable()
    38   if (!(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP) &&  in is_jack_detectable()
    46   static u32 read_pin_sense(struct hda_codec *codec, hda_nid_t nid, int dev_id)  in read_pin_sense() argument
    52   pincap = snd_hda_query_pin_caps(codec, nid);  in read_pin_sense()
    54   snd_hda_codec_read(codec, nid, 0,  in read_pin_sense()
    57   val = snd_hda_codec_read(codec, nid, 0,  in read_pin_sense()
    71   snd_hda_jack_tbl_get_mst(struct hda_codec *codec, hda_nid_t nid, int dev_id)  in snd_hda_jack_tbl_get_mst() argument
    76   if (!nid || !jack)  in snd_hda_jack_tbl_get_mst()
    [all …]
|
| auto_parser.c |
    20   static int is_in_nid_list(hda_nid_t nid, const hda_nid_t *list)  in is_in_nid_list() argument
    23   if (*list == nid)  in is_in_nid_list()
    57   hda_nid_t nid, int type)  in add_auto_cfg_input_pin() argument
    60   cfg->inputs[cfg->num_inputs].pin = nid;  in add_auto_cfg_input_pin()
    63   nid_has_volume(codec, nid, HDA_INPUT);  in add_auto_cfg_input_pin()
   177   hda_nid_t nid;  in snd_hda_parse_pin_defcfg() local
   194   for_each_hda_codec_node(nid, codec) {  in snd_hda_parse_pin_defcfg()
   195   unsigned int wid_caps = get_wcaps(codec, nid);  in snd_hda_parse_pin_defcfg()
   204   if (ignore_nids && is_in_nid_list(nid, ignore_nids))  in snd_hda_parse_pin_defcfg()
   207   def_conf = snd_hda_codec_get_pincfg(codec, nid);  in snd_hda_parse_pin_defcfg()
    [all …]
|
| /linux/include/sound/ |
| hda_regmap.h |
    38   #define snd_hdac_regmap_encode_verb(nid, verb) \  argument
    39   (((verb) << 8) | 0x80000 | ((unsigned int)(nid) << 20))
    50   #define snd_hdac_regmap_encode_amp(nid, ch, dir, idx) \  argument
    51   (snd_hdac_regmap_encode_verb(nid, AC_VERB_GET_AMP_GAIN_MUTE) | \
    64   #define snd_hdac_regmap_encode_amp_stereo(nid, dir, idx) \  argument
    65   (snd_hdac_regmap_encode_verb(nid, AC_VERB_GET_AMP_GAIN_MUTE) | \
    79   snd_hdac_regmap_write(struct hdac_device *codec, hda_nid_t nid,  in snd_hdac_regmap_write() argument
    82   unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);  in snd_hdac_regmap_write()
    97   snd_hdac_regmap_update(struct hdac_device *codec, hda_nid_t nid,  in snd_hdac_regmap_update() argument
   101   unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);  in snd_hdac_regmap_update()
    [all …]
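
snd_hdac_regmap_encode_verb() above builds the pseudo-register address used by the HD-audio regmap: the NID lands in bits 20 and up, the verb occupies bits 8..19 (with bit 19, 0x80000, always forced on), and the low 8 bits are left for the verb's parameter. A standalone round-trip of that encoding, copied from the macro in the hit; the example NID and verb values are arbitrary.

#include <stdio.h>

/* Encoding copied from the snd_hdac_regmap_encode_verb() hit above. */
#define ENCODE_VERB(nid, verb) \
	(((verb) << 8) | 0x80000 | ((unsigned int)(nid) << 20))

int main(void)
{
	unsigned int reg = ENCODE_VERB(0x01, 0xf00);   /* arbitrary example */

	printf("reg=0x%08x -> nid=0x%x verb=0x%03x\n",
	       reg, reg >> 20, (reg >> 8) & 0xfff);
	return 0;
}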
|
| /linux/sound/pci/lola/ |
| lola_proc.c |
    17   struct lola *chip, int nid, const char *name)  in print_audio_widget() argument
    21   lola_read_param(chip, nid, LOLA_PAR_AUDIO_WIDGET_CAP, &val);  in print_audio_widget()
    22   snd_iprintf(buffer, "Node 0x%02x %s wcaps 0x%x\n", nid, name, val);  in print_audio_widget()
    23   lola_read_param(chip, nid, LOLA_PAR_STREAM_FORMATS, &val);  in print_audio_widget()
    28   struct lola *chip, int nid, unsigned int ampcap,  in print_pin_widget() argument
    33   lola_read_param(chip, nid, LOLA_PAR_AUDIO_WIDGET_CAP, &val);  in print_pin_widget()
    34   snd_iprintf(buffer, "Node 0x%02x %s wcaps 0x%x\n", nid, name, val);  in print_pin_widget()
    37   lola_read_param(chip, nid, ampcap, &val);  in print_pin_widget()
    44   lola_codec_read(chip, nid, LOLA_VERB_GET_MAX_LEVEL, 0, 0, &val, NULL);  in print_pin_widget()
    49   struct lola *chip, int nid)  in print_clock_widget() argument
    [all …]
|
| /linux/include/linux/ |
| node.h |
    85   void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
    86   void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
    88   void node_update_perf_attrs(unsigned int nid, struct access_coordinate *coord,
    91   static inline void node_add_cache(unsigned int nid,  in node_add_cache() argument
    96   static inline void node_set_perf_attrs(unsigned int nid,  in node_set_perf_attrs() argument
   102   static inline void node_update_perf_attrs(unsigned int nid,  in node_update_perf_attrs() argument
   122   void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
   125   static inline void register_memory_blocks_under_node_hotplug(int nid,  in register_memory_blocks_under_node_hotplug() argument
   136   int nid;  member
   177   int register_node(int nid);
    [all …]
|
| gfp.h |
   213   static inline struct zonelist *node_zonelist(int nid, gfp_t flags)  in node_zonelist() argument
   215   return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);  in node_zonelist()
   249   alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,  in alloc_pages_bulk_node_noprof() argument
   252   if (nid == NUMA_NO_NODE)  in alloc_pages_bulk_node_noprof()
   253   nid = numa_mem_id();  in alloc_pages_bulk_node_noprof()
   255   return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);  in alloc_pages_bulk_node_noprof()
   280   __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)  in __alloc_pages_node_noprof() argument
   282   VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);  in __alloc_pages_node_noprof()
   283   warn_if_node_offline(nid, gfp_mask);  in __alloc_pages_node_noprof()
   285   return __alloc_pages_noprof(gfp_mask, order, nid, NULL);  in __alloc_pages_node_noprof()
    [all …]
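
The gfp.h hits show the convention around NUMA_NO_NODE: the bulk allocator resolves it to the local memory node, while __alloc_pages_node_noprof() insists on a valid explicit node. A tiny sketch of that node-picking convention, assuming kernel context; pick_alloc_node() is a made-up helper name.

#include <linux/gfp.h>
#include <linux/mmdebug.h>
#include <linux/topology.h>

static inline int pick_alloc_node(int nid)
{
	/* "No preference" resolves to the nearest node that has memory. */
	if (nid == NUMA_NO_NODE)
		return numa_mem_id();

	/* An explicit node id must at least be in range. */
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	return nid;
}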
|
| memblock.h |
    78   int nid;  member
   125   int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
   130   int __memblock_reserve(phys_addr_t base, phys_addr_t size, int nid,
   165   void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
   170   void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
   213   #define __for_each_mem_range(i, type_a, type_b, nid, flags, \  argument
   215   for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
   218   __next_mem_range(&i, nid, flags, type_a, type_b, \
   233   #define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \  argument
   236   __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
    [all …]
|
| /linux/drivers/of/ |
| of_numa.c |
    23   u32 nid;  in of_numa_parse_cpu_nodes() local
    28   r = of_property_read_u32(np, "numa-node-id", &nid);  in of_numa_parse_cpu_nodes()
    32   pr_debug("CPU on %u\n", nid);  in of_numa_parse_cpu_nodes()
    33   if (nid >= MAX_NUMNODES)  in of_numa_parse_cpu_nodes()
    34   pr_warn("Node id %u exceeds maximum value\n", nid);  in of_numa_parse_cpu_nodes()
    36   node_set(nid, numa_nodes_parsed);  in of_numa_parse_cpu_nodes()
    44   u32 nid;  in of_numa_parse_memory_nodes() local
    48   r = of_property_read_u32(np, "numa-node-id", &nid);  in of_numa_parse_memory_nodes()
    57   if (nid >= MAX_NUMNODES) {  in of_numa_parse_memory_nodes()
    58   pr_warn("Node id %u exceeds maximum value\n", nid);  in of_numa_parse_memory_nodes()
    [all …]
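
Both of_numa_parse_*_nodes() hits read the "numa-node-id" device-tree property and range-check it against MAX_NUMNODES before recording the node. A condensed sketch of that parse, assuming kernel context; the function name is illustrative, while of_property_read_u32(), node_set() and numa_nodes_parsed are the symbols shown above.

#include <linux/init.h>
#include <linux/of.h>
#include <linux/nodemask.h>

static int __init record_dt_numa_node(struct device_node *np)
{
	u32 nid;
	int r;

	r = of_property_read_u32(np, "numa-node-id", &nid);
	if (r)
		return r;   /* property absent or malformed */

	if (nid >= MAX_NUMNODES) {
		pr_warn("Node id %u exceeds maximum value\n", nid);
		return -EINVAL;
	}

	node_set(nid, numa_nodes_parsed);
	return 0;
}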
|
| /linux/arch/sh/include/asm/ |
| mmzone.h |
    10   int nid;  in pfn_to_nid() local
    12   for (nid = 0; nid < MAX_NUMNODES; nid++)  in pfn_to_nid()
    13   if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid))  in pfn_to_nid()
    16   return nid;  in pfn_to_nid()
    25   void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end);
    28   setup_bootmem_node(int nid, unsigned long start, unsigned long end)  in setup_bootmem_node() argument
    37   void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
    40   void __init allocate_pgdat(unsigned int nid);
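
The sh pfn_to_nid() hit maps a page frame number back to its node with a plain linear scan over each node's pfn span, which is adequate for the handful of nodes sh supports. A sketch of that scan, assuming kernel context; pfn_to_nid_scan() is an illustrative name and the bounds mirror the hit above.

#include <linux/mmzone.h>

static inline int pfn_to_nid_scan(unsigned long pfn)
{
	int nid;

	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid))
			break;

	return nid;   /* falls through to MAX_NUMNODES if nothing matches */
}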
|
| /linux/arch/sh/mm/ |
| numa.c |
    23   void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)  in setup_bootmem_node() argument
    28   BUG_ON(nid >= MAX_NUMNODES || nid <= 0);  in setup_bootmem_node()
    38   __add_active_range(nid, start_pfn, end_pfn);  in setup_bootmem_node()
    41   NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),  in setup_bootmem_node()
    42   SMP_CACHE_BYTES, nid);  in setup_bootmem_node()
    43   if (!NODE_DATA(nid))  in setup_bootmem_node()
    46   nid);  in setup_bootmem_node()
    48   NODE_DATA(nid)->node_start_pfn = start_pfn;  in setup_bootmem_node()
    49   NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in setup_bootmem_node()
    52   node_set_online(nid);  in setup_bootmem_node()
|
| /linux/fs/f2fs/ |
| node.c |
    30   static inline bool is_invalid_nid(struct f2fs_sb_info *sbi, nid_t nid)  in is_invalid_nid() argument
    32   return nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid;  in is_invalid_nid()
    38   int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)  in f2fs_check_nid_range() argument
    40   if (unlikely(is_invalid_nid(sbi, nid))) {  in f2fs_check_nid_range()
    43   __func__, nid);  in f2fs_check_nid_range()
   138   static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)  in get_current_nat_folio() argument
   140   return f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid));  in get_current_nat_folio()
   143   static struct folio *get_next_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)  in get_next_nat_folio() argument
   152   dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));  in get_next_nat_folio()
   155   src_folio = get_current_nat_folio(sbi, nid);  in get_next_nat_folio()
    [all …]
|