Lines matching references to "mem" (query: refs:mem)

Each numbered entry below is a source line number followed by the matching line; the matches appear to come from the Linux kernel's memory block device code (drivers/base/memory.c).
107 struct memory_block *mem = to_memory_block(dev);
109 WARN_ON(mem->altmap);
110 kfree(mem);
123 struct memory_block *mem = to_memory_block(dev);
125 return sysfs_emit(buf, "%08lx\n", memory_block_id(mem->start_section_nr));
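
The match at 125 is the phys_index sysfs show routine. In the kernel, memory_block_id() maps a section number to a block id by dividing by sections_per_block; the sketch below models that mapping in user space (the sections_per_block value is an arbitrary example):

    #include <stdio.h>

    /* Fixed at init time in the kernel; an arbitrary example here. */
    static unsigned long sections_per_block = 16;

    static unsigned long memory_block_id(unsigned long section_nr)
    {
        return section_nr / sections_per_block;
    }

    int main(void)
    {
        unsigned long start_section_nr = 0x120;

        /* phys_index is printed as a zero-padded hex block id. */
        printf("%08lx\n", memory_block_id(start_section_nr));
        return 0;
    }
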
144 struct memory_block *mem = to_memory_block(dev);
151 switch (mem->state) {
163 return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
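
The matches at 144-163 are the state attribute's show routine, which prints a name per block state and falls back to an ERROR-UNKNOWN string. A minimal user-space sketch of the same switch-on-state shape (print_state() is a stand-in, not a kernel function):

    #include <stdio.h>

    enum mem_state { MEM_ONLINE, MEM_OFFLINE, MEM_GOING_OFFLINE };

    struct memory_block { enum mem_state state; };

    static void print_state(const struct memory_block *mem)
    {
        switch (mem->state) {
        case MEM_ONLINE:
            puts("online");
            break;
        case MEM_OFFLINE:
            puts("offline");
            break;
        case MEM_GOING_OFFLINE:
            puts("going-offline");
            break;
        default:
            printf("ERROR-UNKNOWN-%ld\n", (long)mem->state);
        }
    }

    int main(void)
    {
        struct memory_block mem = { .state = MEM_OFFLINE };

        print_state(&mem);
        return 0;
    }
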
175 static unsigned long memblk_nr_poison(struct memory_block *mem);
177 static inline unsigned long memblk_nr_poison(struct memory_block *mem)
186 static int memory_block_online(struct memory_block *mem)
188 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
195 if (memblk_nr_poison(mem))
198 zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
208 if (mem->altmap)
209 nr_vmemmap_pages = mem->altmap->free;
223 zone, mem->altmap->inaccessible);
229 nr_pages - nr_vmemmap_pages, zone, mem->group);
241 adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
244 mem->zone = zone;
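
Taken together, the matches at 186-244 trace the online path: bail out if the block still holds hwpoisoned pages, pick a target zone, bring up altmap-backed vmemmap pages before the rest, adjust the present-page accounting, and cache the zone in mem->zone. A heavily simplified, self-contained sketch of that ordering, with all helpers as invented stand-ins rather than kernel APIs:

    #include <stdio.h>

    struct zone { const char *name; };

    struct memory_block {
        unsigned long nr_poison;   /* hwpoisoned pages in this block */
        unsigned long nr_vmemmap;  /* pages consumed by the altmap, if any */
        struct zone *zone;         /* cached zone once the block is online */
    };

    static struct zone zone_normal = { "Normal" };

    /* Stand-ins for zone_for_pfn_range() and online_pages(). */
    static struct zone *pick_zone(void) { return &zone_normal; }

    static int online_pages_into(struct zone *z, unsigned long nr)
    {
        printf("onlining %lu pages into %s\n", nr, z->name);
        return 0;
    }

    static int memory_block_online_model(struct memory_block *mem,
                                         unsigned long nr_pages)
    {
        struct zone *zone;
        int ret;

        /* Poisoned pages block onlining (listing line 195). */
        if (mem->nr_poison)
            return -1; /* the kernel returns -EHWPOISON */

        zone = pick_zone();

        /* vmemmap pages come up first, then the remainder is onlined
         * into the chosen zone (listing lines 208-229). */
        ret = online_pages_into(zone, nr_pages - mem->nr_vmemmap);
        if (ret)
            return ret;

        /* Cache the zone so offlining knows where the pages live (244). */
        mem->zone = zone;
        return 0;
    }

    int main(void)
    {
        struct memory_block mem = { .nr_poison = 0, .nr_vmemmap = 512 };

        return memory_block_online_model(&mem, 32768);
    }
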
257 static int memory_block_offline(struct memory_block *mem)
259 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
265 if (!mem->zone)
272 if (mem->altmap)
273 nr_vmemmap_pages = mem->altmap->free;
277 adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
281 nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
286 mem->group, nr_vmemmap_pages);
293 mem->zone = NULL;
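
The offline path at 257-293 is the mirror image, with one subtlety visible in the matches: a NULL mem->zone means the block straddles zones and cannot be offlined, and the present-page count is reduced for the vmemmap portion before the pages are offlined, then restored if that fails. A sketch of that unwind pattern (helper names are invented):

    #include <stdio.h>

    struct zone { const char *name; };

    struct memory_block {
        unsigned long nr_vmemmap;
        unsigned long present;
        struct zone *zone;
    };

    static int offline_pages_from(struct zone *z, unsigned long nr)
    {
        printf("offlining %lu pages from %s\n", nr, z->name);
        return 0; /* flip to nonzero to exercise the unwind */
    }

    static int memory_block_offline_model(struct memory_block *mem,
                                          unsigned long nr_pages)
    {
        int ret;

        /* NULL zone: block spans multiple zones (listing line 265). */
        if (!mem->zone)
            return -1; /* the kernel returns -EINVAL */

        /* Drop the vmemmap pages from the present count up front (277). */
        mem->present -= mem->nr_vmemmap;

        ret = offline_pages_from(mem->zone, nr_pages - mem->nr_vmemmap);
        if (ret) {
            /* Failure: account back what we removed above (286). */
            mem->present += mem->nr_vmemmap;
            return ret;
        }

        mem->zone = NULL; /* listing line 293 */
        return 0;
    }

    int main(void)
    {
        struct zone normal = { "Normal" };
        struct memory_block mem = {
            .nr_vmemmap = 512, .present = 32768, .zone = &normal,
        };

        return memory_block_offline_model(&mem, 32768);
    }
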
309 memory_block_action(struct memory_block *mem, unsigned long action)
315 ret = memory_block_online(mem);
318 ret = memory_block_offline(mem);
322 "%ld\n", __func__, mem->start_section_nr, action, action);
329 static int memory_block_change_state(struct memory_block *mem,
334 if (mem->state != from_state_req)
338 mem->state = MEM_GOING_OFFLINE;
340 ret = memory_block_action(mem, to_state);
341 mem->state = ret ? from_state_req : to_state;
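
memory_block_change_state() (matches at 329-341) is a small guarded transition: reject the request unless the block is in the expected source state, park it in MEM_GOING_OFFLINE while the action runs, then commit the new state or roll back depending on the result. A compact model:

    #include <stdio.h>

    enum mem_state { MEM_ONLINE, MEM_OFFLINE, MEM_GOING_OFFLINE };

    struct memory_block { enum mem_state state; };

    /* Stand-in for memory_block_action(); nonzero means failure. */
    static int do_action(struct memory_block *mem, enum mem_state to)
    {
        (void)mem;
        (void)to;
        return 0;
    }

    static int change_state(struct memory_block *mem,
                            enum mem_state to, enum mem_state from_req)
    {
        int ret;

        /* Guard: caller names the state we must be leaving (line 334). */
        if (mem->state != from_req)
            return -1; /* the kernel returns -EINVAL */

        if (to == MEM_OFFLINE)
            mem->state = MEM_GOING_OFFLINE; /* line 338 */

        ret = do_action(mem, to);
        /* Commit on success, roll back on failure (line 341). */
        mem->state = ret ? from_req : to;
        return ret;
    }

    int main(void)
    {
        struct memory_block mem = { .state = MEM_ONLINE };
        int ret = change_state(&mem, MEM_OFFLINE, MEM_ONLINE);

        printf("ret=%d state=%d\n", ret, mem.state);
        return 0;
    }
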
349 struct memory_block *mem = to_memory_block(dev);
352 if (mem->state == MEM_ONLINE)
359 if (mem->online_type == MMOP_OFFLINE)
360 mem->online_type = MMOP_ONLINE;
362 ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
363 mem->online_type = MMOP_OFFLINE;
370 struct memory_block *mem = to_memory_block(dev);
372 if (mem->state == MEM_OFFLINE)
375 return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
382 struct memory_block *mem = to_memory_block(dev);
396 /* mem->online_type is protected by device_hotplug_lock */
397 mem->online_type = online_type;
398 ret = device_online(&mem->dev);
401 ret = device_offline(&mem->dev);
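
The sysfs write path at 382-401 hands the requested online type to the driver core, and the comment at 396 records the locking rule: mem->online_type is only stable because the sequence runs under device_hotplug_lock, so the ->online() callback can read it back later. A user-space model of that convention, with a pthread mutex standing in for device_hotplug_lock:

    #include <pthread.h>
    #include <stdio.h>

    enum online_type { MMOP_OFFLINE, MMOP_ONLINE, MMOP_ONLINE_MOVABLE };

    struct memory_block { enum online_type online_type; };

    /* Stand-in for device_hotplug_lock. */
    static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for device_online(): the ->online() callback reads
     * online_type back, relying on the caller still holding the lock. */
    static int device_online_model(struct memory_block *mem)
    {
        printf("onlining with type %d\n", mem->online_type);
        return 0;
    }

    static int state_store_model(struct memory_block *mem,
                                 enum online_type requested)
    {
        int ret;

        pthread_mutex_lock(&hotplug_lock);
        /* online_type is protected by the lock (listing lines 396-397). */
        mem->online_type = requested;
        ret = device_online_model(mem);
        pthread_mutex_unlock(&hotplug_lock);
        return ret;
    }

    int main(void)
    {
        struct memory_block mem = { .online_type = MMOP_OFFLINE };

        return state_store_model(&mem, MMOP_ONLINE_MOVABLE);
    }
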
427 struct memory_block *mem = to_memory_block(dev);
428 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
452 struct memory_block *mem = to_memory_block(dev);
453 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
455 struct memory_group *group = mem->group;
457 int nid = mem->nid;
464 if (mem->state == MEM_ONLINE) {
466 * If !mem->zone, the memory block spans multiple zones and
469 default_zone = mem->zone;
641 struct memory_block *mem;
643 mem = xa_load(&memory_blocks, block_id);
644 if (mem)
645 get_device(&mem->dev);
646 return mem;
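
find_memory_block_by_id() (641-646) looks the block up in an XArray keyed by block id and takes a device reference before returning; every later caller in this listing drops it again with put_device(). A tiny refcount model of that get/put discipline, using a plain array in place of the XArray:

    #include <stdio.h>

    struct memory_block {
        unsigned long id;
        long refcount; /* models the embedded struct device refcount */
    };

    #define NR_BLOCKS 4
    static struct memory_block *blocks[NR_BLOCKS];

    static struct memory_block *find_block(unsigned long id)
    {
        struct memory_block *mem = id < NR_BLOCKS ? blocks[id] : NULL;

        /* Take a reference before handing the pointer out (line 645). */
        if (mem)
            mem->refcount++;
        return mem;
    }

    static void put_block(struct memory_block *mem)
    {
        mem->refcount--; /* models put_device(&mem->dev) */
    }

    int main(void)
    {
        static struct memory_block b1 = { .id = 1, .refcount = 1 };
        struct memory_block *mem;

        blocks[1] = &b1;
        mem = find_block(1);
        if (mem) {
            printf("found block %lu, refcount %ld\n", mem->id, mem->refcount);
            put_block(mem);
        }
        return 0;
    }
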
702 static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
705 const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
740 * @mem: The memory block device.
747 * set/adjust mem->zone based on the zone ranges of the given node.
749 void memory_block_add_nid(struct memory_block *mem, int nid,
752 if (context == MEMINIT_EARLY && mem->nid != nid) {
762 if (mem->nid == NUMA_NO_NODE)
763 mem->zone = early_node_zone_for_memory_block(mem, nid);
765 mem->zone = NULL;
774 mem->nid = nid;
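
memory_block_add_nid() (740-774) fixes up a block's node assignment during early init: a block with no node yet gets its zone derived from the new node, while a second, different node claiming the same block clears mem->zone, since the block then spans nodes. A condensed model of that decision (the kernel additionally gates the zone fixup on the MEMINIT_EARLY context, per the match at 752):

    #include <stdio.h>

    #define NUMA_NO_NODE (-1)

    struct zone { const char *name; };
    struct memory_block { int nid; struct zone *zone; };

    static struct zone zone_normal = { "Normal" };

    /* Stand-in for early_node_zone_for_memory_block(). */
    static struct zone *zone_for_node(int nid)
    {
        (void)nid;
        return &zone_normal;
    }

    static void memory_block_add_nid_model(struct memory_block *mem, int nid)
    {
        if (mem->nid == NUMA_NO_NODE)
            mem->zone = zone_for_node(nid); /* line 763 */
        else if (mem->nid != nid)
            mem->zone = NULL;               /* spans nodes, line 765 */
        mem->nid = nid;                     /* line 774 */
    }

    int main(void)
    {
        struct memory_block mem = { .nid = NUMA_NO_NODE, .zone = NULL };

        memory_block_add_nid_model(&mem, 0);
        printf("nid=%d zone=%s\n", mem.nid, mem.zone ? mem.zone->name : "-");
        memory_block_add_nid_model(&mem, 1); /* second node: zone cleared */
        printf("nid=%d zone=%s\n", mem.nid, mem.zone ? mem.zone->name : "-");
        return 0;
    }
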
782 struct memory_block *mem;
785 mem = find_memory_block_by_id(block_id);
786 if (mem) {
787 put_device(&mem->dev);
790 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
791 if (!mem)
794 mem->start_section_nr = block_id * sections_per_block;
795 mem->state = state;
796 mem->nid = NUMA_NO_NODE;
797 mem->altmap = altmap;
798 INIT_LIST_HEAD(&mem->group_next);
808 mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
811 ret = __add_memory_block(mem);
816 mem->group = group;
817 list_add(&mem->group_next, &group->memory_blocks);
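
The allocation path at 782-817 is a create-or-fail sequence: probe for an existing block (dropping the lookup reference and failing if found), kzalloc a new one, fill in the start section, state, node, and altmap, register the device, and link the block into its memory group. A skeleton of that sequence with stubbed lookup and registration:

    #include <stdlib.h>

    #define NUMA_NO_NODE (-1)

    enum mem_state { MEM_ONLINE, MEM_OFFLINE };

    struct list_head { struct list_head *next, *prev; };

    struct memory_block {
        unsigned long start_section_nr;
        enum mem_state state;
        int nid;
        void *altmap;
        struct list_head group_next;
    };

    static unsigned long sections_per_block = 16; /* example value */

    /* Stubs for the duplicate lookup and the device registration. */
    static struct memory_block *find_block(unsigned long id)
    {
        (void)id;
        return NULL;
    }

    static int register_block(struct memory_block *mem)
    {
        (void)mem;
        return 0;
    }

    static int init_memory_block_model(unsigned long block_id,
                                       enum mem_state state, void *altmap)
    {
        struct memory_block *mem;
        int ret;

        /* The real code put_device()s the lookup ref and fails (-EEXIST). */
        if (find_block(block_id))
            return -1;

        mem = calloc(1, sizeof(*mem)); /* models kzalloc(..., GFP_KERNEL) */
        if (!mem)
            return -1; /* the kernel returns -ENOMEM */

        mem->start_section_nr = block_id * sections_per_block; /* line 794 */
        mem->state = state;
        mem->nid = NUMA_NO_NODE;
        mem->altmap = altmap;
        /* INIT_LIST_HEAD(): an empty list points at itself. */
        mem->group_next.next = mem->group_next.prev = &mem->group_next;

        ret = register_block(mem); /* models __add_memory_block() */
        if (ret)
            free(mem);
        return ret;
    }

    int main(void)
    {
        return init_memory_block_model(3, MEM_OFFLINE, NULL);
    }
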
876 struct memory_block *mem;
893 mem = find_memory_block_by_id(block_id);
894 if (WARN_ON_ONCE(!mem))
896 remove_memory_block(mem);
913 struct memory_block *mem;
921 mem = find_memory_block_by_id(block_id);
922 if (WARN_ON_ONCE(!mem))
924 num_poisoned_pages_sub(-1UL, memblk_nr_poison(mem));
925 unregister_memory_block_under_nodes(mem);
926 remove_memory_block(mem);
1012 struct memory_block *mem;
1020 mem = find_memory_block_by_id(block_id);
1021 if (!mem)
1024 ret = func(mem, arg);
1025 put_device(&mem->dev);
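
The loop at 1012-1025 is the body of a walk helper: for each block id in a range, look the block up, skip holes, invoke the caller's callback, drop the device reference, and stop early on a nonzero return. The same shape in miniature:

    #include <stdio.h>

    struct memory_block { unsigned long id; };

    typedef int (*walk_fn)(struct memory_block *mem, void *arg);

    #define NR_BLOCKS 8
    static struct memory_block *blocks[NR_BLOCKS]; /* NULL entries are holes */

    static int walk_blocks(unsigned long start, unsigned long end,
                           void *arg, walk_fn func)
    {
        int ret = 0;

        for (unsigned long id = start; id < end && !ret; id++) {
            struct memory_block *mem = blocks[id]; /* models the lookup */

            if (!mem)
                continue; /* hole: no block registered at this id */
            ret = func(mem, arg);
            /* the kernel put_device()s the lookup reference here (1025) */
        }
        return ret;
    }

    static int print_block(struct memory_block *mem, void *arg)
    {
        (void)arg;
        printf("block %lu\n", mem->id);
        return 0; /* nonzero would stop the walk */
    }

    int main(void)
    {
        static struct memory_block b2 = { .id = 2 }, b5 = { .id = 5 };

        blocks[2] = &b2;
        blocks[5] = &b5;
        return walk_blocks(0, NR_BLOCKS, NULL, print_block);
    }
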
1039 struct memory_block *mem = to_memory_block(dev);
1042 return cb_data->func(mem, cb_data->arg);
1233 struct memory_block *mem = find_memory_block_by_id(block_id);
1235 if (mem)
1236 atomic_long_inc(&mem->nr_hwpoison);
1242 struct memory_block *mem = find_memory_block_by_id(block_id);
1244 if (mem)
1245 atomic_long_sub(i, &mem->nr_hwpoison);
1248 static unsigned long memblk_nr_poison(struct memory_block *mem)
1250 return atomic_long_read(&mem->nr_hwpoison);
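
The final matches (1233-1250) implement the per-block hwpoison counter as a plain atomic long: incremented as pages are poisoned, decremented in bulk when they are cleared, and read when onlining decides whether the block is usable (line 195). The same pattern with C11 atomics, minus the block lookup the kernel does first:

    #include <stdatomic.h>
    #include <stdio.h>

    struct memory_block { atomic_long nr_hwpoison; };

    static void poison_inc(struct memory_block *mem)
    {
        atomic_fetch_add(&mem->nr_hwpoison, 1); /* models line 1236 */
    }

    static void poison_sub(struct memory_block *mem, long i)
    {
        atomic_fetch_sub(&mem->nr_hwpoison, i); /* models line 1245 */
    }

    static unsigned long nr_poison(struct memory_block *mem)
    {
        return (unsigned long)atomic_load(&mem->nr_hwpoison); /* line 1250 */
    }

    int main(void)
    {
        struct memory_block mem = { .nr_hwpoison = 0 };

        poison_inc(&mem);
        poison_inc(&mem);
        poison_sub(&mem, 1);
        printf("%lu poisoned pages\n", nr_poison(&mem));
        return 0;
    }
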