xref: /linux/arch/s390/kernel/early.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
1 /*
2  *    Copyright IBM Corp. 2007, 2009
3  *    Author(s): Hongjie Yang <hongjie@us.ibm.com>,
4  *		 Heiko Carstens <heiko.carstens@de.ibm.com>
5  */
6 
7 #define KMSG_COMPONENT "setup"
8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9 
10 #include <linux/compiler.h>
11 #include <linux/init.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/ctype.h>
15 #include <linux/lockdep.h>
16 #include <linux/extable.h>
17 #include <linux/pfn.h>
18 #include <linux/uaccess.h>
19 #include <linux/kernel.h>
20 #include <asm/diag.h>
21 #include <asm/ebcdic.h>
22 #include <asm/ipl.h>
23 #include <asm/lowcore.h>
24 #include <asm/processor.h>
25 #include <asm/sections.h>
26 #include <asm/setup.h>
27 #include <asm/sysinfo.h>
28 #include <asm/cpcmd.h>
29 #include <asm/sclp.h>
30 #include <asm/facility.h>
31 #include "entry.h"
32 
33 /*
34  * Create a Kernel NSS if the SAVESYS= parameter is defined
35  */
36 #define DEFSYS_CMD_SIZE		128
37 #define SAVESYS_CMD_SIZE	32
38 
39 char kernel_nss_name[NSS_NAME_SIZE + 1];
40 
41 static void __init setup_boot_command_line(void);
42 
/*
 * Get the TOD clock running.
 */
static void __init reset_tod_clock(void)
{
	u64 time;

	/* If a TOD value can be stored the clock is already running. */
	if (store_tod_clock(&time) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
		disabled_wait(0);	/* clock unusable: stop this CPU */

	sched_clock_base_cc = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = sched_clock_base_cc;
}
59 
#ifdef CONFIG_SHARED_KERNEL
int __init savesys_ipl_nss(char *cmd, const int cmdlen);

/*
 * Issue a CP command (here: SAVESYS/IPL) via DIAGNOSE X'8'.  Written in
 * assembly because the diagnose is executed in 31-bit addressing mode
 * (sam31/sam64 bracket the diag).  The EBCDIC command buffer address is
 * passed in %r2, its length in %r3; the diag's result is returned.
 */
asm(
	"	.section .init.text,\"ax\",@progbits\n"
	"	.align	4\n"
	"	.type	savesys_ipl_nss, @function\n"
	"savesys_ipl_nss:\n"
	"	stmg	6,15,48(15)\n"		/* save non-volatile registers */
	"	lgr	14,3\n"			/* cmdlen -> %r14 for the diag */
	"	sam31\n"			/* diag X'8' needs 31-bit mode */
	"	diag	2,14,0x8\n"		/* CP command: buf %r2, len %r14 */
	"	sam64\n"
	"	lgr	2,14\n"			/* diag result -> return value */
	"	lmg	6,15,48(15)\n"		/* restore regs (incl. ret addr) */
	"	br	14\n"
	"	.size	savesys_ipl_nss, .-savesys_ipl_nss\n"
	"	.previous\n");

/* Upper-cased copy of the boot command line, used to find "SAVESYS=". */
static __initdata char upper_command_line[COMMAND_LINE_SIZE];
80 
/*
 * Create a kernel NSS (z/VM named saved system) if the kernel was booted
 * with a SAVESYS=<name> parameter.  The DEFSYS command lays the NSS out to
 * mirror the kernel image: 0.._stext exclusive-write, _stext.._eshared
 * shared read-only, _eshared.._end exclusive-write, plus an optional
 * exclusive-write range for the initrd.  The SAVESYS is followed by an
 * IPL of the freshly saved system (see the combined command string).
 */
static noinline __init void create_kernel_nss(void)
{
	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned int sinitrd_pfn, einitrd_pfn;
#endif
	int response;
	int hlen;
	size_t len;
	char *savesys_ptr;
	char defsys_cmd[DEFSYS_CMD_SIZE];
	char savesys_cmd[SAVESYS_CMD_SIZE];

	/* Do nothing if we are not running under VM */
	if (!MACHINE_IS_VM)
		return;

	/* Convert COMMAND_LINE to upper case */
	for (i = 0; i < strlen(boot_command_line); i++)
		upper_command_line[i] = toupper(boot_command_line[i]);

	savesys_ptr = strstr(upper_command_line, "SAVESYS=");

	if (!savesys_ptr)
		return;

	savesys_ptr += 8;    /* Point to the beginning of the NSS name */
	for (i = 0; i < NSS_NAME_SIZE; i++) {
		if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
			break;
		kernel_nss_name[i] = savesys_ptr[i];
	}

	/* Segment boundaries as page frame numbers. */
	stext_pfn = PFN_DOWN(__pa(&_stext));
	eshared_pfn = PFN_DOWN(__pa(&_eshared));
	end_pfn = PFN_UP(__pa(&_end));
	min_size = end_pfn << 2;	/* pages -> KB (4K pages) */

	hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
			"DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
			kernel_nss_name, stext_pfn - 1, stext_pfn,
			eshared_pfn - 1, eshared_pfn, end_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		/* Additional exclusive-write range covering the initrd. */
		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
		min_size = einitrd_pfn << 2;
		hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
				 " EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
	}
#endif

	/* NOTE(review): min_size is unsigned int but printed with %i; %u
	 * would be strictly correct (harmless for realistic sizes). */
	snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
		 " EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
	defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
	snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
		 kernel_nss_name, kernel_nss_name);
	savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';

	/* Define the NSS skeleton with CP. */
	__cpcmd(defsys_cmd, NULL, 0, &response);

	if (response != 0) {
		pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
			response);
		kernel_nss_name[0] = '\0';	/* mark: no NSS available */
		return;
	}

	/* SAVESYS/IPL must be issued in EBCDIC via diagnose 8 (asm above). */
	len = strlen(savesys_cmd);
	ASCEBC(savesys_cmd, len);
	response = savesys_ipl_nss(savesys_cmd, len);

	/* On success: response is equal to the command size,
	 *	       max SAVESYS_CMD_SIZE
	 * On error: response contains the numeric portion of cp error message.
	 *	     for SAVESYS it will be >= 263
	 *	     for missing privilege class, it will be 1
	 */
	if (response > SAVESYS_CMD_SIZE || response == 1) {
		pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
			response);
		kernel_nss_name[0] = '\0';
		return;
	}

	/* re-initialize cputime accounting (we resumed from the saved
	 * image, so the previous timer/clock state is stale). */
	sched_clock_base_cc = get_tod_clock();
	S390_lowcore.last_update_clock = sched_clock_base_cc;
	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
	S390_lowcore.user_timer = 0;
	S390_lowcore.system_timer = 0;
	asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));

	/* re-setup boot command line with new ipl vm parms */
	ipl_update_parameters();
	setup_boot_command_line();

	ipl_flags = IPL_NSS_VALID;
}

#else /* CONFIG_SHARED_KERNEL */

/* Without CONFIG_SHARED_KERNEL there is no NSS support. */
static inline void create_kernel_nss(void) { }

#endif /* CONFIG_SHARED_KERNEL */
187 
/*
 * Clear bss memory
 */
static noinline __init void clear_bss_section(void)
{
	/* Zero everything between the __bss_start/__bss_stop linker symbols. */
	memset(__bss_start, 0, __bss_stop - __bss_start);
}
195 
/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(&_end));

	/* Apply the default storage key to every page from 0 up to _end. */
	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}
211 
212 static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
213 
/*
 * Determine whether we run in an LPAR, under z/VM or under KVM and set
 * the corresponding machine flag in lowcore.
 */
static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		/* Level <= 2: no hypervisor level below us, i.e. LPAR. */
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Running under KVM? If not we assume z/VM */
	/* "\xd2\xe5\xd4" is "KVM" in EBCDIC. */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
233 
/*
 * Build the architecture description string (shown e.g. in dump_stack()
 * output) from STSI 1.1.1 basic-machine data, converted from EBCDIC to
 * ASCII.
 */
static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;

	if (stsi(mach, 1, 1, 1))
		return;	/* no basic machine configuration available */
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	dump_stack_set_arch_desc("%-16.16s %-4.4s %-16.16s %-16.16s (%s)",
				 mach->manufacturer,
				 mach->type,
				 mach->model,
				 mach->model_capacity,
				 MACHINE_IS_LPAR ? "LPAR" :
				 MACHINE_IS_VM ? "z/VM" :
				 MACHINE_IS_KVM ? "KVM" : "unknown");
}
253 
254 static __init void setup_topology(void)
255 {
256 	int max_mnest;
257 
258 	if (!test_facility(11))
259 		return;
260 	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
261 	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
262 		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
263 			break;
264 	}
265 	topology_max_mnest = max_mnest;
266 }
267 
/*
 * Early program-check handler: look up the faulting address in the
 * exception table and redirect the old PSW to the fixup, so that the
 * facility/diagnose probes below survive an operation exception.  An
 * address without a fixup entry is fatal.
 */
static void early_pgm_check_handler(void)
{
	const struct exception_table_entry *fixup;
	unsigned long cr0, cr0_new;
	unsigned long addr;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr);
	if (!fixup)
		disabled_wait(0);	/* unexpected fault: stop the CPU */
	/* Disable low address protection before storing into lowcore. */
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~(1UL << 28);
	__ctl_load(cr0_new, 0, 0);
	S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
	__ctl_load(cr0, 0, 0);	/* restore the original CR0 */
}
285 
286 static noinline __init void setup_lowcore_early(void)
287 {
288 	psw_t psw;
289 
290 	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
291 	psw.addr = (unsigned long) s390_base_ext_handler;
292 	S390_lowcore.external_new_psw = psw;
293 	psw.addr = (unsigned long) s390_base_pgm_handler;
294 	S390_lowcore.program_new_psw = psw;
295 	s390_base_pgm_handler_fn = early_pgm_check_handler;
296 	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
297 }
298 
/* Cache the installed-facility bits in lowcore for test_facility(). */
static noinline __init void setup_facility_list(void)
{
	stfle(S390_lowcore.stfle_fac_list,
	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
}
304 
/*
 * Probe diagnose 0x9c using our own CPU address as target.  If the
 * diagnose is unavailable it raises a program check; the exception
 * table fixup then skips the "la" and rc keeps its preloaded
 * -EOPNOTSUPP value.
 */
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	asm volatile(
		"	diag	%2,0,0x9c\n"
		"0:	la	%0,0\n"	/* reached only on success: rc = 0 */
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}
321 
/*
 * Probe diagnose 0x44.  Same pattern as detect_diag9c(): a program
 * check leaves the preloaded -EOPNOTSUPP in rc via the extable fixup.
 */
static __init void detect_diag44(void)
{
	int rc;

	diag_stat_inc(DIAG_STAT_X044);
	asm volatile(
		"	diag	0,0,0x44\n"
		"0:	la	%0,0\n"	/* reached only on success: rc = 0 */
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
}
336 
/*
 * Translate installed facility bits into machine flags and enable the
 * facilities that additionally need a control-register bit.
 */
static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
		__ctl_set_bit(0, 23);	/* enable EDAT-1 in CR0 */
	}
	if (test_facility(78))
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(40))
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
	/* transactional execution needs both facility bits 50 and 73 */
	if (test_facility(50) && test_facility(73))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
	if (test_facility(51))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
		__ctl_set_bit(0, 17);	/* enable vector instructions */
	}
	if (test_facility(130)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
		__ctl_set_bit(0, 20);	/* enable no-execute protection */
	}
}
362 
/*
 * Save the boot CPU's vector registers for a possible crash dump;
 * facility bit 129 indicates the vector facility is installed.
 */
static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}
370 
371 static int __init topology_setup(char *str)
372 {
373 	bool enabled;
374 	int rc;
375 
376 	rc = kstrtobool(str, &enabled);
377 	if (!rc && !enabled)
378 		S390_lowcore.machine_flags &= ~MACHINE_HAS_TOPOLOGY;
379 	return rc;
380 }
381 early_param("topology", topology_setup);
382 
383 static int __init disable_vector_extension(char *str)
384 {
385 	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
386 	__ctl_clear_bit(0, 17);
387 	return 1;
388 }
389 early_param("novx", disable_vector_extension);
390 
391 static int __init noexec_setup(char *str)
392 {
393 	bool enabled;
394 	int rc;
395 
396 	rc = kstrtobool(str, &enabled);
397 	if (!rc && !enabled) {
398 		/* Disable no-execute support */
399 		S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
400 		__ctl_clear_bit(0, 20);
401 	}
402 	return rc;
403 }
404 early_param("noexec", noexec_setup);
405 
/*
 * "cad=<n>" early parameter: request the compare-and-delay facility
 * if a non-zero value was given and facility bit 128 is installed.
 * The facility is actually enabled later by cad_init().
 */
static int __init cad_setup(char *str)
{
	int val;

	get_option(&str, &val);
	if (val && test_facility(128))
		S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
	return 0;
}
early_param("cad", cad_setup);
416 
/* Enable compare-and-delay for problem state if it was requested. */
static int __init cad_init(void)
{
	if (MACHINE_HAS_CAD)
		/* Enable problem state CAD. */
		__ctl_set_bit(2, 3);
	return 0;
}
early_initcall(cad_init);
425 
/*
 * Early, fault-tolerant memmove(): copies n bytes one at a time,
 * descending when dst > src so that overlapping regions are handled
 * correctly.  The program-check new PSW is temporarily pointed at the
 * instruction behind the copy loop, so a fault during the copy simply
 * terminates it instead of taking the normal (not yet usable) program
 * check path; the previous PSW is restored afterwards.
 */
static __init void memmove_early(void *dst, const void *src, size_t n)
{
	unsigned long addr;
	long incr;
	psw_t old;

	if (!n)
		return;
	incr = 1;
	if (dst > src) {
		/* Copy backwards so an overlapping move stays correct. */
		incr = -incr;
		dst += n - 1;
		src += n - 1;
	}
	old = S390_lowcore.program_new_psw;
	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[addr],1f\n"
		"	stg	%[addr],%[psw_pgm_addr]\n"	/* fault -> label 1 */
		"0:     mvc	0(1,%[dst]),0(%[src])\n"	/* copy one byte */
		"	agr	%[dst],%[incr]\n"
		"	agr	%[src],%[incr]\n"
		"	brctg	%[n],0b\n"			/* loop n times */
		"1:\n"
		: [addr] "=&d" (addr),
		  [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
		  [dst] "+&a" (dst), [src] "+&a" (src),  [n] "+d" (n)
		: [incr] "d" (incr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
}
457 
/*
 * Copy the IPL parameter block, whose address the loader left in
 * lowcore, to its architecturally defined origin and update the lowcore
 * pointer.  memmove_early() is used since the regions may overlap and a
 * fault during the copy must not be fatal.
 */
static __init noinline void ipl_save_parameters(void)
{
	void *src, *dst;

	src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
	dst = (void *) IPL_PARMBLOCK_ORIGIN;
	memmove_early(dst, src, PAGE_SIZE);
	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}
467 
/*
 * Move the initrd up if it was loaded below the minimum distance behind
 * the kernel image (see comment below for why).
 */
static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
	/*
	 * Just like in case of IPL from VM reader we make sure there is a
	 * gap of 4MB between end of kernel and start of initrd.
	 * That way we can also be sure that saving an NSS will succeed,
	 * which however only requires different segments.
	 */
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (INITRD_START >= min_initrd_addr)
		return;
	/* Regions may overlap, hence the fault-tolerant memmove_early(). */
	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
	INITRD_START = min_initrd_addr;
#endif
}
486 
/* Set up boot command line */
/*
 * Append IPL parameter data to boot_command_line.  @ipl_data writes at
 * most the given number of bytes right behind the current terminating
 * '\0' and returns the number of bytes produced (0 = nothing to add).
 * If the produced data starts with '=' it replaces the entire command
 * line; otherwise the old '\0' becomes a space, splicing the data on.
 */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
	char *parm, *delim;
	size_t rc, len;

	len = strlen(boot_command_line);

	delim = boot_command_line + len;	/* '\0' character position */
	parm  = boot_command_line + len + 1;	/* append right after '\0' */

	rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
	if (rc) {
		if (*parm == '=')
			memmove(boot_command_line, parm + 1, rc);
		else
			*delim = ' ';		/* replace '\0' with space */
	}
}
506 
/*
 * Return 1 if any byte of the NUL-terminated string has the top bit
 * set (not 7-bit ASCII, presumably EBCDIC), 0 otherwise.
 */
static inline int has_ebcdic_char(const char *str)
{
	const char *p;

	for (p = str; *p; p++)
		if (*p & 0x80)
			return 1;
	return 0;
}
516 
/*
 * Build boot_command_line from the architecture command line: terminate
 * it, convert it from EBCDIC if the loader stored it that way, strip
 * surrounding whitespace, then append IPL parameter data (VM parm line
 * under z/VM, SCP data always).
 */
static void __init setup_boot_command_line(void)
{
	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
	/* convert arch command line to ascii if necessary */
	if (has_ebcdic_char(COMMAND_LINE))
		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
	/* copy arch command line */
	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
		ARCH_COMMAND_LINE_SIZE);

	/* append IPL PARM data to the boot command line */
	if (MACHINE_IS_VM)
		append_to_cmdline(append_ipl_vmparm);

	append_to_cmdline(append_ipl_scpdata);
}
533 
/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if the SAVESYS= parm is defined
 *
 * Called very early from the architecture startup code; the order of
 * the calls matters (e.g. .bss must be cleared before code relies on
 * zero-initialized statics, and the early program-check handler must
 * be installed before the facility/diagnose probes run).
 */
void __init startup_init(void)
{
	reset_tod_clock();
	ipl_save_parameters();
	rescue_initrd();
	clear_bss_section();
	ipl_verify_parameters();
	time_early_init();
	init_kernel_storage_key();
	lockdep_off();		/* lockdep is not usable this early */
	setup_lowcore_early();	/* installs early_pgm_check_handler */
	setup_facility_list();
	detect_machine_type();
	setup_arch_string();
	ipl_update_parameters();
	setup_boot_command_line();
	create_kernel_nss();
	detect_diag9c();
	detect_diag44();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	lockdep_on();
}
563