Lines Matching defs:nid (mm/sparse.c)

52 static void set_section_nid(unsigned long section_nr, int nid)
54 section_to_node_table[section_nr] = nid;
57 static inline void set_section_nid(unsigned long section_nr, int nid)
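The hits at 52 and 57 are the CONFIG_NUMA and !CONFIG_NUMA definitions of set_section_nid() in mm/sparse.c. A minimal sketch of that pair, assuming the usual section_to_node_table declaration (the NR_MEM_SECTIONS sizing and the u8/u16 element choice are not in the hits and are assumptions):

#ifdef CONFIG_NUMA
/*
 * Per-section node table so page_to_nid() can be answered from the
 * section number alone; the real file picks u8 or u16 from MAX_NUMNODES.
 */
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !CONFIG_NUMA */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif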
63 static noinline struct mem_section __ref *sparse_index_alloc(int nid)
70 section = kzalloc_node(array_size, GFP_KERNEL, nid);
73 nid);
75 panic("%s: Failed to allocate %lu bytes nid=%d\n",
76 __func__, array_size, nid);
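Hits 63-76 fall inside sparse_index_alloc(), which allocates one SPARSEMEM_EXTREME root's array of mem_section entries on the requested node. A sketch of the surrounding function, assuming a recent tree; the slab_is_available() split and memblock_alloc_node() fallback are assumptions consistent with the panic message shown above:

static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                section = kzalloc_node(array_size, GFP_KERNEL, nid);
        } else {
                section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
                                              nid);
                if (!section)
                        panic("%s: Failed to allocate %lu bytes nid=%d\n",
                              __func__, array_size, nid);
        }

        return section;
}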
82 static int __meminit sparse_index_init(unsigned long section_nr, int nid)
97 section = sparse_index_alloc(nid);
106 static inline int sparse_index_init(unsigned long section_nr, int nid)
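Hits 82-106 are sparse_index_init() (SPARSEMEM_EXTREME) and its inline stub. Roughly, assuming the usual mem_section[] root table and SECTION_NR_TO_ROOT() helper:

#ifdef CONFIG_SPARSEMEM_EXTREME
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;

        /* The root may already exist, e.g. after a prior sub-section hot-add. */
        if (mem_section[root])
                return 0;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;

        mem_section[root] = section;

        return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif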
118 static inline unsigned long sparse_encode_early_nid(int nid)
120 return ((unsigned long)nid << SECTION_NID_SHIFT);
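Lines 118-120 are the encoder that parks the node ID in section_mem_map during early boot, before the real mem_map pointer replaces it. The matching decoder, sparse_early_nid(), is shown alongside it here for context (its exact line is not in the hit list):

/*
 * Early in boot, section_mem_map temporarily holds the section's NUMA
 * node, shifted above the section flag bits.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return ((unsigned long)nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}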
222 static void __init memory_present(int nid, unsigned long start, unsigned long end)
232 sparse_index_init(section_nr, nid);
233 set_section_nid(section_nr, nid);
237 ms->section_mem_map = sparse_encode_early_nid(nid) |
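Hits 222-237 come from memory_present(), which walks [start, end) one section at a time, making sure the section's root exists, recording its node, and marking it present. A sketch assuming recent helpers; the flag OR'ed into section_mem_map (SECTION_IS_ONLINE here) and the mark-present helper differ between kernel versions:

static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section_nr, nid);
                set_section_nid(section_nr, nid);

                ms = __nr_to_section(section_nr);
                if (!ms->section_mem_map) {
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_IS_ONLINE;
                        __section_mark_present(ms, section_nr);
                }
        }
}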
252 int i, nid;
264 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
265 memory_present(nid, start, end);
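Hits 252-265 are the memblock walk that feeds memory_present() one node-contiguous PFN range at a time:

static void __init memblocks_present(void)
{
        unsigned long start, end;
        int i, nid;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
                memory_present(nid, start, end);
}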
331 int nid;
344 nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
346 usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
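Hits 331-346 sit in sparse_early_usemaps_alloc_pgdat_section(), which first tries to place a node's usemaps in the same section as its pgdat so neither allocation pins a foreign section during hot-remove. A sketch assuming the memblock-era version; the goal/limit setup is an assumption consistent with the early_pfn_to_nid() and memblock_alloc_try_nid() hits:

static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        struct mem_section_usage *usage;
        unsigned long goal, limit;
        int nid;

        /* Prefer the section that already holds the pgdat ... */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
        if (!usage && limit) {
                limit = 0;      /* ... but retry anywhere if that section is full. */
                goto again;
        }
        return usage;
}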
354 static void __init check_usemap_section_nr(int nid,
360 struct pglist_data *pgdat = NODE_DATA(nid);
382 if (usemap_nid != nid) {
384 nid, usemap_snr);
394 usemap_snr, pgdat_snr, nid);
404 static void __init check_usemap_section_nr(int nid,
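Hits 354-394 are the CONFIG_MEMORY_HOTREMOVE diagnostic that warns when a node's usemap and pgdat end up in different sections (each then pins the other against hot-remove); the hit at 404 is the empty stub used when hot-remove is disabled. An abbreviated sketch, with the duplicate-message suppression in the real function dropped:

static void __init check_usemap_section_nr(int nid,
                struct mem_section_usage *usage)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        unsigned long usemap_snr, pgdat_snr;
        int usemap_nid;

        usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        /* Usemap landed on a node other than the one it describes. */
        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                pr_info("node %d must be removed before remove section %ld\n",
                        nid, usemap_snr);
                return;
        }

        /* Same node, different sections: circular usemap/pgdat dependency. */
        pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
                usemap_snr, pgdat_snr, nid);
}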
423 unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
433 map = memmap_alloc(size, size, addr, nid, false);
435 panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
436 __func__, size, PAGE_SIZE, nid, &addr);
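Hits 423-436 are in the boot-time, non-vmemmap __populate_section_memmap(), which hands out a section's struct page array from the pre-allocated sparsemap buffer and falls back to memmap_alloc(). A sketch assuming that configuration; the sparse_buffer_alloc() fast path is an assumption consistent with the fallback shown above:

struct page __init *__populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
                struct dev_pagemap *pgmap)
{
        unsigned long size = section_map_size();
        struct page *map = sparse_buffer_alloc(size);
        phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

        if (map)
                return map;

        map = memmap_alloc(size, size, addr, nid, false);
        if (!map)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
                      __func__, size, PAGE_SIZE, nid, &addr);

        return map;
}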
451 static void __init sparse_buffer_init(unsigned long size, int nid)
460 sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
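Hits 451-460 come from sparse_buffer_init(), which pre-allocates one node's worth of memmap space, aligned to section_map_size(), so the per-section allocations above can be satisfied cheaply. Roughly, assuming the sparsemap_buf/sparsemap_buf_end globals:

static void __init sparse_buffer_init(unsigned long size, int nid)
{
        phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

        WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
        /*
         * Keep the buffer section-map aligned; the vmemmap case in
         * particular wants to map the memmap with PMDs.
         */
        sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
        sparsemap_buf_end = sparsemap_buf + size;
}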
502 static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
510 usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
513 pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
516 sparse_buffer_init(map_count * section_map_size(), nid);
524 nid, NULL, NULL);
527 __func__, nid);
532 check_usemap_section_nr(nid, usage);
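Hits 502-532 are sparse_init_nid(), which allocates this node's usemaps and memmap buffer and then wires up every present section in [pnum_begin, pnum_end). An abbreviated sketch; the real failure paths also mark the remaining sections not present, which is reduced to comments here:

static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
                                   unsigned long pnum_end,
                                   unsigned long map_count)
{
        struct mem_section_usage *usage;
        unsigned long pnum;
        struct page *map;

        usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
                        mem_section_usage_size() * map_count);
        if (!usage) {
                pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
                return; /* real code: mark the node's sections not present */
        }
        sparse_buffer_init(map_count * section_map_size(), nid);
        for_each_present_section_nr(pnum_begin, pnum) {
                unsigned long pfn = section_nr_to_pfn(pnum);

                if (pnum >= pnum_end)
                        break;

                map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
                                nid, NULL, NULL);
                if (!map) {
                        pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
                               __func__, nid);
                        break; /* real code: also marks remaining sections not present */
                }
                check_usemap_section_nr(nid, usage);
                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                        usage, SECTION_IS_EARLY);
                usage = (void *)usage + mem_section_usage_size();
        }
        sparse_buffer_fini();
}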
571 int nid = sparse_early_nid(__nr_to_section(pnum_end));
573 if (nid == nid_begin) {
579 nid_begin = nid;
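Hits 571-579 are inside sparse_init()'s loop that batches present sections by node: as long as the early nid matches nid_begin it only counts sections, and when the node changes it hands the run [pnum_begin, pnum_end) to sparse_init_nid(). A sketch of just that loop, assuming the surrounding sparse_init() has already initialised pnum_begin, nid_begin and map_count:

        for_each_present_section_nr(pnum_begin + 1, pnum_end) {
                int nid = sparse_early_nid(__nr_to_section(pnum_end));

                if (nid == nid_begin) {
                        map_count++;
                        continue;
                }
                /* Init node with sections in range [pnum_begin, pnum_end) */
                sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
                nid_begin = nid;
                pnum_begin = pnum_end;
                map_count = 1;
        }
        /* cover the last node */
        sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);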
631 unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
634 return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
704 unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
708 PAGES_PER_SECTION), GFP_KERNEL, nid);
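Hits 631-634 and 704-708 are the two hotplug-time populate_section_memmap() definitions: with CONFIG_SPARSEMEM_VMEMMAP it defers to __populate_section_memmap(), without it the section's struct page array is simply kvmalloc'ed on the target node. Roughly:

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
                struct dev_pagemap *pgmap)
{
        return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
}
#else
static struct page * __meminit populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
                struct dev_pagemap *pgmap)
{
        return kvmalloc_node(array_size(sizeof(struct page),
                                        PAGES_PER_SECTION), GFP_KERNEL, nid);
}
#endif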
828 static struct page * __meminit section_activate(int nid, unsigned long pfn,
862 memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
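Hits 828 and 862 bracket section_activate(), which allocates the section's usage structure if needed, fills the sub-section map, and then populates the memmap for the range being hot-added. An abbreviated sketch; the error unwinding of ms->usage in the real function is reduced to a comment:

static struct page * __meminit section_activate(int nid, unsigned long pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap,
                struct dev_pagemap *pgmap)
{
        struct mem_section *ms = __pfn_to_section(pfn);
        struct mem_section_usage *usage = NULL;
        struct page *memmap;
        int rc;

        if (!ms->usage) {
                usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
                if (!usage)
                        return ERR_PTR(-ENOMEM);
                ms->usage = usage;
        }

        rc = fill_subsection_map(pfn, nr_pages);
        if (rc)
                return ERR_PTR(rc); /* real code also undoes ms->usage here */

        /*
         * A partially populated early section already has a memmap;
         * reuse it instead of populating a new one.
         */
        if (nr_pages < PAGES_PER_SECTION && early_section(ms))
                return pfn_to_page(pfn);

        memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
        if (!memmap) {
                section_deactivate(pfn, nr_pages, altmap);
                return ERR_PTR(-ENOMEM);
        }

        return memmap;
}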
873 * @nid: The node to add section on
890 int __meminit sparse_add_section(int nid, unsigned long start_pfn,
899 ret = sparse_index_init(section_nr, nid);
903 memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
915 set_section_nid(section_nr, nid);
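Hits 873-915 cover sparse_add_section() and its kernel-doc: it makes sure the mem_section root exists, activates the section, records the node via set_section_nid(), and installs the new memmap. A sketch assuming a recent tree; the poisoning, present-marking and sub-section alignment details vary across versions:

int __meminit sparse_add_section(int nid, unsigned long start_pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap,
                struct dev_pagemap *pgmap)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct mem_section *ms;
        struct page *memmap;
        int ret;

        ret = sparse_index_init(section_nr, nid);
        if (ret < 0)
                return ret;

        memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
        if (IS_ERR(memmap))
                return PTR_ERR(memmap);

        /* Poison the new struct pages to catch use of uninitialised flags. */
        page_init_poison(memmap, sizeof(struct page) * nr_pages);

        ms = __nr_to_section(section_nr);
        set_section_nid(section_nr, nid);
        __section_mark_present(ms, section_nr);

        /* Align memmap to the section boundary in the sub-section case. */
        if (section_nr_to_pfn(section_nr) != start_pfn)
                memmap = pfn_to_page(section_nr_to_pfn(section_nr));
        sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

        return 0;
}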