/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 * This is only a hint, the core kernel can decide to not do this based on
 * different alignment checks.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))
/*
 * The hotplugged memory is completely inaccessible while the memory is
 * offline. The memory provider will handle MEM_PREPARE_ONLINE /
 * MEM_FINISH_OFFLINE notifications and make the memory accessible.
 *
 * This flag is only relevant when used along with MHP_MEMMAP_ON_MEMORY,
 * because the altmap cannot be written (e.g., poisoned) when adding
 * memory -- before it is set online.
 *
 * This allows for adding memory with an altmap that is not currently
 * made available by a hypervisor. When onlining that memory, the
 * hypervisor can be instructed to make that memory available, and
 * the onlining phase will not require any memory allocations, which is
 * helpful in low-memory situations.
 */
#define MHP_OFFLINE_INACCESSIBLE	((__force mhp_t)BIT(3))

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 * pgmap: device page map for ZONE_DEVICE mappings (optional)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
	struct dev_pagemap *pgmap;
};

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);
bool mhp_supports_memmap_on_memory(void);
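
/*
 * Illustrative sketch (not compiled; names are assumptions): how a driver
 * might combine the MHP_* flags above when hot-adding memory it manages,
 * similar in spirit to virtio-mem. add_memory_driver_managed() is declared
 * further down in this file; "mgid", "start", "size" and the resource name
 * are hypothetical caller-provided values.
 *
 *	mhp_t mhp_flags = MHP_NID_IS_MGID | MHP_MERGE_RESOURCE;
 *	int rc;
 *
 *	// Put the memmap into the hot-added range itself when supported,
 *	// so onlining needs no page allocations from the buddy.
 *	if (mhp_supports_memmap_on_memory())
 *		mhp_flags |= MHP_MEMMAP_ON_MEMORY;
 *
 *	rc = add_memory_driver_managed(mgid, start, size,
 *				       "System RAM (example)", mhp_flags);
 */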
/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the span of a zone cannot
 * change while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone, bool mhp_off_inaccessible);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
					      unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* If the movable_node boot option was specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
			    unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

/* See kswapd_is_running() */
static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
	mutex_lock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
	mutex_unlock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
	mutex_init(&pgdat->kswapd_lock);
}
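
/*
 * Illustrative sketch (not compiled): the read-side retry pattern for the
 * zone span seqlock above, mirroring page_outside_zone_boundaries() in
 * mm/page_alloc.c. "zone" and "pfn" are assumed to be provided by the
 * caller; readers retry if a concurrent resizer changed the span.
 *
 *	unsigned seq;
 *	bool contains;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		contains = pfn >= zone->zone_start_pfn &&
 *			   pfn < zone_end_pfn(zone);
 *	} while (zone_span_seqretry(zone, seq));
 */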
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}

static inline bool mhp_supports_memmap_on_memory(void)
{
	return false;
}

static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct zone *zone, struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
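
/*
 * Illustrative sketch (not compiled): growing a node's span under
 * pgdat_resize_lock(), mirroring resize_pgdat_range() as called by the
 * hotplug core when moving a pfn range into a zone. "pgdat", "start_pfn"
 * and "nr_pages" are hypothetical caller-provided values.
 *
 *	unsigned long flags, old_end_pfn;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	old_end_pfn = pgdat_end_pfn(pgdat);
 *	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
 *		pgdat->node_start_pfn = start_pfn;
 *	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) -
 *				    pgdat->node_start_pfn;
 *	pgdat_resize_unlock(pgdat, &flags);
 */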
#ifdef CONFIG_MEMORY_HOTPLUG
/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_get_default_online_type(void);
extern void mhp_set_default_online_type(int online_type);
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap);
extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
				  struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		struct memory_group *group, unsigned long start_pfn,
		unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */