/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/memory.h - generic memory definition
 *
 * This is mainly for topological representation. We define the
 * basic "struct memory_block" here, which can be embedded in per-arch
 * definitions or NUMA information.
 *
 * Basic handling of the devices is done in drivers/base/memory.c
 * and system devices are handled in drivers/base/sys.c.
 *
 * Memory blocks are exported via sysfs in the class/memory/devices/
 * directory.
 *
 */
#ifndef _LINUX_MEMORY_H_
#define _LINUX_MEMORY_H_

#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
#include <linux/memory_hotplug.h>

/* Smallest hot(un)pluggable granularity: one sparsemem section. */
#define MIN_MEMORY_BLOCK_SIZE	(1UL << SECTION_SIZE_BITS)

/**
 * struct memory_group - a logical group of memory blocks
 * @nid: The node id for all memory blocks inside the memory group.
 * @memory_blocks: List of all memory blocks belonging to this memory group.
 * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this
 *			  memory group.
 * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this
 *			   memory group.
 * @is_dynamic: The memory group type: static vs. dynamic
 * @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum
 *		 number of pages we'll have in this static memory group.
 * @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages
 *		  in which memory is added/removed in this dynamic memory group.
 *		  This granularity defines the alignment of a unit in physical
 *		  address space; it has to be at least as big as a single
 *		  memory block.
 *
 * A memory group logically groups memory blocks; each memory block
 * belongs to at most one memory group. A memory group corresponds to
 * a memory device, such as a DIMM or a NUMA node, which spans multiple
 * memory blocks and might even span multiple non-contiguous physical memory
 * ranges.
 *
 * Modification of members after registration is serialized by memory
 * hot(un)plug code.
 */
struct memory_group {
	int nid;
	struct list_head memory_blocks;
	unsigned long present_kernel_pages;
	unsigned long present_movable_pages;
	bool is_dynamic;
	/* Type-specific data; which member is valid depends on @is_dynamic. */
	union {
		struct {
			unsigned long max_pages;
		} s;
		struct {
			unsigned long unit_pages;
		} d;
	};
};

enum memory_block_state {
	/* These states are exposed to userspace as text strings in sysfs */
	MEM_ONLINE,		/* exposed to userspace */
	MEM_GOING_OFFLINE,	/* exposed to userspace */
	MEM_OFFLINE,		/* exposed to userspace */
	MEM_GOING_ONLINE,
	MEM_CANCEL_ONLINE,
	MEM_CANCEL_OFFLINE,
};

struct memory_block {
	unsigned long start_section_nr;	/* first memory section in this block */
	enum memory_block_state state;	/* serialized by the dev->lock */
	enum mmop online_type;		/* for passing data to online routine */
	int nid;			/* NID for this memory block */
	/*
	 * The single zone of this memory block if all PFNs of this memory block
	 * that are System RAM (not a memory hole, not ZONE_DEVICE ranges) are
	 * managed by a single zone. NULL if multiple zones (including nodes)
	 * apply.
	 */
	struct zone *zone;
	struct device dev;		/* embedded driver-model device */
	struct vmem_altmap *altmap;	/* altmap for this block, if any */
	struct memory_group *group;	/* group (if any) for this block */
	struct list_head group_next;	/* next block inside memory group */
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
	atomic_long_t nr_hwpoison;
#endif
};

int arch_get_memory_phys_device(unsigned long start_pfn);
unsigned long memory_block_size_bytes(void);
int set_memory_block_size_order(unsigned int order);

/* Range description passed to memory hotplug notifier callbacks. */
struct memory_notify {
	unsigned long start_pfn;	/* first PFN of the affected range */
	unsigned long nr_pages;		/* number of pages in the range */
};

struct notifier_block;
struct mem_section;

/*
 * Priorities for the hotplug memory callback routines. Invoked from
 * high to low. Higher priorities correspond to higher numbers.
 */
#define DEFAULT_CALLBACK_PRI	0
#define SLAB_CALLBACK_PRI	1
#define CXL_CALLBACK_PRI	5
#define HMAT_CALLBACK_PRI	6
#define MM_COMPUTE_BATCH_PRI	10
#define CPUSET_CALLBACK_PRI	10
#define MEMTIER_HOTPLUG_PRI	100
#define KSM_CALLBACK_PRI	100

#ifndef CONFIG_MEMORY_HOTPLUG
/*
 * Without CONFIG_MEMORY_HOTPLUG there is no memory-device or notifier
 * infrastructure; provide no-op stubs so callers compile unchanged.
 */
static inline void memory_dev_init(void)
{
	return;
}
static inline int register_memory_notifier(struct notifier_block *nb)
{
	return 0;
}
static inline void unregister_memory_notifier(struct notifier_block *nb)
{
}
static inline int memory_notify(enum memory_block_state state, void *v)
{
	return 0;
}
static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
{
	return 0;
}
/* No memory-block devices exist to advise; report "no such device". */
static inline int memory_block_advise_max_size(unsigned long size)
{
	return -ENODEV;
}
static inline unsigned long memory_block_advised_max_size(void)
{
	return 0;
}
#else /* CONFIG_MEMORY_HOTPLUG */
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size,
				int nid, struct vmem_altmap *altmap,
				struct memory_group *group);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
extern int memory_notify(enum memory_block_state state, void *v);
extern struct memory_block *find_memory_block(unsigned long section_nr);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
			      void *arg, walk_memory_blocks_func_t func);
extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);

extern int memory_group_register_static(int nid, unsigned long max_pages);
extern int memory_group_register_dynamic(int nid, unsigned long unit_pages);
extern int memory_group_unregister(int mgid);
struct memory_group *memory_group_find_by_id(int mgid);
typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
			       struct memory_group *excluded, void *arg);
struct memory_block *find_memory_block_by_id(unsigned long block_id);
/*
 * Define a static notifier_block for @fn with priority @pri and register
 * it; evaluates to the return value of register_memory_notifier().
 */
#define hotplug_memory_notifier(fn, pri) ({		\
	static __meminitdata struct notifier_block fn##_mem_nb =\
		{ .notifier_call = fn, .priority = pri };\
	register_memory_notifier(&fn##_mem_nb);		\
})

extern int sections_per_block;

/* Memory block id containing the given memory section. */
static inline unsigned long memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

/* Memory block id containing the given PFN. */
static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return memory_block_id(pfn_to_section_nr(pfn));
}

/* Memory block id containing the given physical address. */
static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}

#ifdef CONFIG_NUMA
void memory_block_add_nid_early(struct memory_block *mem, int nid);
#endif /* CONFIG_NUMA */
int memory_block_advise_max_size(unsigned long size);
unsigned long memory_block_advised_max_size(void);
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Kernel text modification mutex, used for code patching. Users of this lock
 * can sleep.
 */
extern struct mutex text_mutex;

#endif /* _LINUX_MEMORY_H_ */