Lines matching refs:usage
Every match below touches the usage field of struct mem_section (a struct mem_section_usage holding the subsection map and pageblock flags); judging by the function names, the file is the kernel's mm/sparse.c.
205 subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
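The match at :205 (apparently inside subsection_map_init()) marks which subsections of a section are present. A minimal sketch of the two helpers involved, modeled on the upstream definitions (subsection_map_index() lives in include/linux/mmzone.h); treat it as a sketch, not a verbatim quote:

/* Index of the subsection containing @pfn within its section. */
static inline int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

/* Set the bits for every subsection spanned by [pfn, pfn + nr_pages). */
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
				unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}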
299 struct mem_section_usage *usage, unsigned long flags)
304 ms->usage = usage;
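The matches at :299 and :304 belong to the one-shot section initializer: it encodes the memmap pointer together with presence/flag bits into section_mem_map, then publishes the usage struct. A sketch of the whole function, following the upstream shape these two lines come from:

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_MARKED_PRESENT | flags;
	ms->usage = usage;
}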
332 struct mem_section_usage *usage;
349 usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
350 if (!usage && limit) {
354 return usage;
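The matches at :349-:354 implement a two-attempt memblock allocation: first constrained near the node's pgdat (so the usemap and the pgdat can be hot-removed together), then, if that failed while a limit was in effect, retried with no address limit at all. A sketch of the surrounding function, with the goal/limit computation condensed (the exact expressions vary by kernel version):

static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;

	/* Aim at the section that already holds the pgdat. */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;	/* retry anywhere rather than fail outright */
		goto again;
	}
	return usage;
}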
358 struct mem_section_usage *usage)
372 usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
408 struct mem_section_usage *usage)
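The matches at :358-:372 are from check_usemap_section_nr(), which converts the usemap's own physical address back to a section number and complains when the usemap and the pgdat ended up in different sections, a cross-section dependency that gets in the way of memory hot-remove; :408 is the corresponding no-op stub for builds without that check. A heavily condensed sketch, with the upstream diagnostics reduced to a single message:

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;

	/* physical address of the usemap -> pfn -> section number */
	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(NODE_DATA(nid)) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	pr_info("node %d must be removed before remove section %ld\n",
		nid, usemap_snr);
}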
509 struct mem_section_usage *usage;
513 usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
515 if (!usage) {
535 check_usemap_section_nr(nid, usage);
536 sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
538 usage = (void *) usage + mem_section_usage_size();
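The matches at :509-:538 are the boot-time loop in sparse_init_nid(): one contiguous allocation at :513 provides the usage structs for every present section on the node, and the pointer bump at :538 walks through it one mem_section_usage_size() slice at a time. The flags argument truncated at :536 is SECTION_IS_EARLY upstream. Condensed sketch (memmap allocation and error handling elided):

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage)
		goto failed;

	for_each_present_section_nr(pnum_begin, pnum) {
		/* map = __populate_section_memmap(...); elided */
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
					usage, SECTION_IS_EARLY);
		/* hand the next section its slice of the shared allocation */
		usage = (void *)usage + mem_section_usage_size();
	}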
662 unsigned long *subsection_map = ms->usage
663 ? &ms->usage->subsection_map[0] : NULL;
680 return bitmap_empty(&ms->usage->subsection_map[0],
693 subsection_map = &ms->usage->subsection_map[0];
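The matches at :662-:693 are the subsection-map accessors used on the hotplug paths: :662-:663 take a NULL-safe pointer into the bitmap on the deactivate side (the section may never have received a usage struct), :680 tests whether any subsection is still active, and :693 grabs the bitmap for filling on activation. A sketch of the emptiness test plus a condensed fill, modeled on the upstream is_subsection_map_empty()/fill_subsection_map() pair:

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;

	subsection_mask_set(map, pfn, nr_pages);
	subsection_map = &ms->usage->subsection_map[0];

	/* refuse to re-activate a range that is already (partly) active */
	if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		return -EEXIST;

	bitmap_or(subsection_map, map, subsection_map,
		  SUBSECTIONS_PER_SECTION);
	return 0;
}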
778 * usage map, but still need to free the vmemmap range.
800 * ms->usage array.
805 * When removing an early section, the usage map is kept (as the
806 * usage maps of other sections fall into the same page). It
808 * longer an early section. If the usage map is PageReserved, it
811 if (!PageReserved(virt_to_page(ms->usage))) {
812 kfree_rcu(ms->usage, rcu);
813 WRITE_ONCE(ms->usage, NULL);
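The matches at :778-:813 are the teardown side, and the quoted comments already carry the rationale: a boot-allocated usage map is PageReserved and shared page-wise with neighboring sections, so it is kept; anything else is released with kfree_rcu() because lockless readers may still hold the pointer. The WRITE_ONCE() at :813 pairs with a READ_ONCE() on the reader side; sketched after pfn_section_valid() in include/linux/mmzone.h, as it looks in kernels that use this RCU scheme:

static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	int idx = subsection_map_index(pfn);
	/* Pairs with WRITE_ONCE(ms->usage, NULL) in section_deactivate(). */
	struct mem_section_usage *usage = READ_ONCE(ms->usage);

	return usage ? test_bit(idx, usage->subsection_map) : 0;
}

The RCU grace period guarantees that a reader which loaded a non-NULL usage pointer can finish its test_bit() before the memory is actually freed.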
836 struct mem_section_usage *usage = NULL;
840 if (!ms->usage) {
841 usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
842 if (!usage)
844 ms->usage = usage;
849 if (usage)
850 ms->usage = NULL;
851 kfree(usage);
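The matches at :836-:851 are the allocate-and-rollback pattern on the hot-add side: a usage struct is allocated only when the section has none yet, and the local usage variable doubles as an ownership marker, so the error path at :849-:851 only unwinds an allocation made by this very call. Condensed sketch of section_activate() under that reading (altmap/pgmap arguments and memmap population elided):

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	int rc;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)		/* ours: detach before freeing */
			ms->usage = NULL;
		kfree(usage);		/* kfree(NULL) is a no-op */
		return ERR_PTR(rc);
	}

	/* memmap population elided */
	return pfn_to_page(pfn);
}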
924 sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
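Finally, :924 is the hot-add counterpart of the boot-time call at :536: the same initializer, but with flags 0, so a hot-added section is never marked SECTION_IS_EARLY. That distinction is what the comments around :805-:808 rely on when a once-early section is removed and later re-added. Side by side (the SECTION_IS_EARLY argument at :536 is truncated in the listing; it is what upstream passes there):

	/* boot (sparse_init_nid): */
	sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
	/* hotplug (sparse_add_section): */
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);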