xref: /linux/drivers/s390/char/sclp_cmd.c (revision e6a901a00822659181c93c86d8bbc2a17779fddc)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/module.h>
#include <asm/ctlreg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>
#include <asm/facility.h>
#include <asm/page-states.h>

#include "sclp.h"

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

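/*
 * Execute an SCLP command synchronously: the request is queued and the
 * caller sleeps until the SCLP interrupt handler signals completion.
 * A non-zero timeout bounds the time the request may spend on the SCLP
 * request queue.
 */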
int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}

int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

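/*
 * Read the list of cores known to the SCLP. When the extended-length SCCB
 * facility (bit 140) is installed, a larger SCCB is used so that all
 * entries fit.
 */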
int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;

	sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = length;
	sccb->header.control_mask[2] = 0x80;
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("readcpuinfo failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_pages((unsigned long) sccb, get_order(length));
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

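/*
 * Issue a configure or deconfigure CPU command. Response codes 0x0020 and
 * 0x0120 both indicate success.
 */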
static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

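/*
 * Configure or deconfigure a single core. The core number is encoded into
 * the command word (cmd | core << 8).
 */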
int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}

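/*
 * Convert a 1-based storage increment number to the start address of the
 * increment. Each increment covers sclp.rzm bytes.
 */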
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}

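/*
 * Issue an assign or unassign storage command for a single increment.
 * Response codes 0x0020 and 0x0120 indicate success.
 */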
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

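/*
 * Assign a storage increment and initialize the storage keys of the newly
 * accessible memory range.
 */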
static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[];
} __packed;

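/*
 * Attach the storage element with the given id. Increments that the command
 * reports as assigned are unassigned again; they are only (re-)assigned when
 * the corresponding memory is set online via the memory notifier.
 */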
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->header.function_code = 0x40;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

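/*
 * Assign (online) or unassign (offline) all storage increments that overlap
 * the range [start, start + size).
 */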
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}

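/* Check whether the range [start, end) intersects a standby increment. */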
static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}

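/*
 * Memory hotplug notifier: assign standby storage increments before a
 * memory block is set online and unassign them again after the block has
 * been set offline.
 */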
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Memory blocks that contain standby memory are not allowed
		 * to be set offline. This is done to simplify the "memory
		 * online" case.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_PREPARE_ONLINE:
		/*
		 * The altmap_start_pfn and altmap_nr_pages fields of struct
		 * memory_notify are only valid for the MEM_PREPARE_ONLINE
		 * and MEM_FINISH_OFFLINE notifications.
		 *
		 * When an altmap is in use, take the specified memory range
		 * online including the altmap.
		 */
		if (arg->altmap_nr_pages) {
			start = PFN_PHYS(arg->altmap_start_pfn);
			size += PFN_PHYS(arg->altmap_nr_pages);
		}
		rc = sclp_mem_change_state(start, size, 1);
		if (rc || !arg->altmap_nr_pages)
			break;
		/*
		 * Set CMMA state to nodat here, since the struct page memory
		 * at the beginning of the memory block will not go through the
		 * buddy allocator later.
		 */
		__arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
		break;
	case MEM_FINISH_OFFLINE:
		/*
		 * When an altmap is in use, take the specified memory range
		 * offline, which includes the altmap.
		 */
		if (arg->altmap_nr_pages) {
			start = PFN_PHYS(arg->altmap_start_pfn);
			size += PFN_PHYS(arg->altmap_nr_pages);
		}
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

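/*
 * Shrink [start, start + size) so that both boundaries are aligned to the
 * memory block size and report how much of the standby memory is usable.
 */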
static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}

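/*
 * Collect consecutive standby increments and add them as one memory region,
 * split into memory block sized pieces. Calling with rn == 0 flushes the
 * pending region.
 */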
static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= ident_map_size)
		goto skip_add;
	if (start + size > ident_map_size)
		size = ident_map_size - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(0, addr, block_size,
			   MACHINE_HAS_EDAT1 ?
			   MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

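/*
 * Insert a memory increment into the rn-sorted sclp_mem_list. Standby
 * (unassigned) increments get the first free increment number; increments
 * beyond sclp.rnmax are discarded.
 */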
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

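/*
 * Read the storage information for all storage elements, build the list of
 * memory increments, register the memory hotplug notifier and add the
 * detected standby memory.
 */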
static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (oldmem_data.start) /* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

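/*
 * Issue a configure or deconfigure channel-path command. Besides 0x0020 and
 * 0x0120, response codes 0x0440 and 0x0450 are also treated as success.
 */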
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path SCLP command for the specified chpid
 * and wait for completion. Return 0 if the command finished successfully,
 * non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path SCLP command for the specified chpid
 * and wait for completion. On success return 0, otherwise return non-zero.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read channel-path info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}
655