// SPDX-License-Identifier: GPL-2.0
/*
 * Memory hotplug support via sclp
 *
 * Copyright IBM Corp. 2025
 */

#define KMSG_COMPONENT "sclp_mem"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cpufeature.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <asm/facility.h>
#include <asm/page.h>
#include <asm/page-states.h>
#include <asm/sclp.h>

#include "sclp.h"

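/*
 * SCLP command words for assigning and unassigning a single storage
 * increment; both take the increment number in an assign_storage_sccb.
 */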
#define SCLP_CMDW_ASSIGN_STORAGE	0x000d0001
#define SCLP_CMDW_UNASSIGN_STORAGE	0x000c0001

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);

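/*
 * One entry per storage increment: rn is the 1-based increment number,
 * standby flags increments that are not currently assigned to the
 * configuration.
 */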
struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[];
} __packed;

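/*
 * Map a start pfn to the number of the storage increment that backs it;
 * sclp.rzm is the increment size in bytes.
 */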
int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}

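/* Convert a 1-based increment number to its starting physical address. */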
static unsigned long rn2addr(u16 rn)
{
	return (unsigned long)(rn - 1) * sclp.rzm;
}

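/*
 * Issue an assign or unassign storage command for a single increment.
 * The SCCB is allocated with GFP_DMA since SCLP requires it to reside
 * below 2GB; response codes 0x0020 and 0x0120 both indicate success.
 */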
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long)sccb);
	return rc;
}

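/*
 * Assign an increment and initialize the storage keys of the newly
 * accessible memory range.
 */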
static int sclp_assign_storage(u16 rn)
{
	unsigned long start;
	int rc;

	rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
}

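/*
 * Attach a storage element and unassign all increments it reports as
 * assigned; increments are only (re)assigned once the corresponding
 * memory goes online.
 */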
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc, i;

	sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->header.function_code = 0x40;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long)sccb);
	return rc;
}

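/*
 * Assign or unassign all increments that overlap the given memory
 * range. The increment list is sorted by rn, so the walk can stop at
 * the first increment past the end of the range.
 */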
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}

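/* Check whether the given memory range contains a standby increment. */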
static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}

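/*
 * Memory hotplug notifier: assigns the backing increments when a memory
 * block goes online and unassigns them when it goes offline. Storage
 * elements that have not been attached yet are picked up first.
 */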
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Do not allow memory blocks that contain standby memory to
		 * be set offline. This keeps the "memory online" case simple.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_PREPARE_ONLINE:
		/*
		 * The altmap_start_pfn and altmap_nr_pages fields within
		 * struct memory_notify are only valid for the
		 * MEM_PREPARE_ONLINE and MEM_FINISH_OFFLINE notifications.
		 *
		 * When an altmap is in use, take the specified memory range
		 * online, including the altmap.
		 */
		if (arg->altmap_nr_pages) {
			start = PFN_PHYS(arg->altmap_start_pfn);
			size += PFN_PHYS(arg->altmap_nr_pages);
		}
		rc = sclp_mem_change_state(start, size, 1);
		if (rc || !arg->altmap_nr_pages)
			break;
		/*
		 * Set CMMA state to nodat here, since the struct page memory
		 * at the beginning of the memory block will not go through
		 * the buddy allocator later.
		 */
		__arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
		break;
	case MEM_FINISH_OFFLINE:
		/*
		 * When altmap is in use, take the specified memory range
		 * offline, which includes the altmap.
		 */
		if (arg->altmap_nr_pages) {
			start = PFN_PHYS(arg->altmap_start_pfn);
			size += PFN_PHYS(arg->altmap_nr_pages);
		}
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

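/*
 * Shrink [*start, *start + *size) to the largest sub-range aligned to
 * the memory block size; only whole memory blocks can be added.
 */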
static void __init align_to_block_size(unsigned long *start,
				       unsigned long *size,
				       unsigned long alignment)
{
	unsigned long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%lx (%luM of %luM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}

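/*
 * Merge contiguous standby increments and add them block by block.
 * Called with ascending increment numbers; a final call with rn == 0
 * flushes the last pending range.
 */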
static void __init add_memory_merged(u16 rn)
{
	unsigned long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long)num * sclp.rzm;
	if (start >= ident_map_size)
		goto skip_add;
	if (start + size > ident_map_size)
		size = ident_map_size - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size) {
		add_memory(0, addr, block_size,
			   cpu_has_edat1() ?
			   MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
	}
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (incr->standby)
			add_memory_merged(incr->rn);
	}
	add_memory_merged(0);
}

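/*
 * Insert an increment into the sorted increment list. Assigned
 * increments keep their reported rn; unassigned (standby) increments
 * get the first rn that is still free in the sequence.
 */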
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

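/*
 * Detect standby memory at boot: read the storage information for all
 * storage elements, build the increment list, and add the standby
 * increments. The facility check skips machines without SCLP storage
 * reconfiguration support.
 */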
static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	/* No standby memory in kdump mode */
	if (oldmem_data.start)
		return 0;
	if ((sclp.facilities & 0xe00000000000UL) != 0xe00000000000UL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
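		/*
		 * 0x0010: increments reported by this element are assigned;
		 * 0x0310: this element has nothing to report;
		 * 0x0410: increments reported by this element are standby.
		 */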
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long)sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);