// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
#include <asm/switch_to.h>
#include "entry.h"

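/*
 * Make sure the TOD clock is running: if it cannot be stored, try to set
 * it to the Unix epoch; if that fails as well, stop in a disabled wait.
 */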
static void __init reset_tod_clock(void)
{
	union tod_clock clk;

	if (store_tod_clock_ext_cc(&clk) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
		disabled_wait();

	memset(&tod_clock_base, 0, sizeof(tod_clock_base));
	tod_clock_base.tod = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}

static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

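/*
 * Use STSI to figure out whether the kernel runs in an LPAR or under a
 * hypervisor; the CP identification bytes are EBCDIC ("KVM" and "z/VM").
 */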
static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Detect known hypervisors */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}

/* Remove leading, trailing and double whitespace. */
static inline void strim_all(char *str)
{
	char *s;

	s = strim(str);
	if (s != str)
		memmove(str, s, strlen(s) + 1);
	while (*str) {
		if (!isspace(*str++))
			continue;
		if (isspace(*str)) {
			s = skip_spaces(str);
			memmove(str, s, strlen(s) + 1);
		}
	}
}

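/*
 * Build the machine and hypervisor description string from STSI data
 * (converted from EBCDIC) and hand it to dump_stack_set_arch_desc().
 */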
static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
	struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
	char mstr[80], hvstr[17];

	if (stsi(mach, 1, 1, 1))
		return;
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
		mach->manufacturer, mach->type,
		mach->model, mach->model_capacity);
	strim_all(mstr);
	if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
		EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
		sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
		strim_all(hvstr);
	} else {
		sprintf(hvstr, "%s",
			MACHINE_IS_LPAR ? "LPAR" :
			MACHINE_IS_VM ? "z/VM" :
			MACHINE_IS_KVM ? "KVM" : "unknown");
	}
	dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}

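/*
 * With the topology facility installed, find the highest MNEST level
 * (at most 6) for which STSI 15.1.x returns data.
 */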
static __init void setup_topology(void)
{
	int max_mnest;

	if (!test_facility(11))
		return;
	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
			break;
	}
	topology_max_mnest = max_mnest;
}

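/*
 * Handler for early program checks: look up the failing address in the
 * exception tables and, with low address protection temporarily disabled,
 * rewind the old PSW in the lowcore to the fixup address. Without a
 * matching fixup the machine is stopped in a disabled wait.
 */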
static void early_pgm_check_handler(void)
{
	const struct exception_table_entry *fixup;
	unsigned long cr0, cr0_new;
	unsigned long addr;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = s390_search_extables(addr);
	if (!fixup)
		disabled_wait();
	/* Disable low address protection before storing into lowcore. */
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~(1UL << 28);
	__ctl_load(cr0_new, 0, 0);
	S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
	__ctl_load(cr0, 0, 0);
}

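/*
 * Point the program-check new PSW at s390_base_pgm_handler and register
 * early_pgm_check_handler as its callback, so that exception table fixups
 * already work during early boot.
 */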
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.addr = (unsigned long)s390_base_pgm_handler;
	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
	if (IS_ENABLED(CONFIG_KASAN))
		psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}

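/*
 * Copy the facility list to the alternative facility list and clear
 * facility bit 82 in the copy unless CONFIG_KERNEL_NOBP is enabled.
 */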
static noinline __init void setup_facility_list(void)
{
	memcpy(S390_lowcore.alt_stfle_fac_list,
	       S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.alt_stfle_fac_list));
	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}

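/*
 * Probe diagnose 0x9c by issuing it for our own CPU address; the
 * exception table entry catches the program check if it is unsupported.
 */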
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	asm volatile(
		"	diag	%2,0,0x9c\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}

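/*
 * Convert installed facility bits into machine flags and set the matching
 * control register bits (EDAT, IDTE, transactional execution, vector,
 * no-execute, guarded storage, signed clock comparator).
 */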
static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(50) && test_facility(73)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
		__ctl_set_bit(0, 55);
	}
	if (test_facility(51))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
		__ctl_set_bit(0, 17);
	}
	if (test_facility(130) && !noexec_disabled) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
		__ctl_set_bit(0, 20);
	}
	if (test_facility(133))
		S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
	if (test_facility(139) && (tod_clock_base.tod >> 63)) {
		/* Enable signed clock comparator comparisons */
		S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
		clock_comparator_max = -1ULL >> 1;
		__ctl_set_bit(0, 53);
	}
}

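/*
 * With CONFIG_CRASH_DUMP the vector registers of the boot CPU may still
 * contain state of the crashed system; save them for the dump.
 */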
static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}

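/*
 * Turn on low address protection and the external interrupt submasks for
 * emergency signal and external call in control register 0.
 */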
static inline void setup_control_registers(void)
{
	unsigned long reg;

	__ctl_store(reg, 0, 0);
	reg |= CR0_LOW_ADDRESS_PROTECTION;
	reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
	reg |= CR0_EXTERNAL_CALL_SUBMASK;
	__ctl_load(reg, 0, 0);
}

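/* Clear all access registers. */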
static inline void setup_access_registers(void)
{
	unsigned int acrs[NUM_ACRS] = { 0 };

	restore_access_regs(acrs);
}

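/* The "novx" command line parameter disables use of the vector facility. */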
static int __init disable_vector_extension(char *str)
{
	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
	__ctl_clear_bit(0, 17);
	return 0;
}
early_param("novx", disable_vector_extension);

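/*
 * early_command_line is handed over by the boot code via __bootdata;
 * copy it into the generic boot_command_line.
 */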
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
static void __init setup_boot_command_line(void)
{
	/* copy arch command line */
	strlcpy(boot_command_line, early_command_line, ARCH_COMMAND_LINE_SIZE);
}

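/*
 * Bootable images contain the EP_STRING marker at EP_OFFSET. If it is
 * missing (e.g. a raw vmlinux ELF was started), print an error via SCLP
 * and stop in a disabled wait.
 */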
static void __init check_image_bootable(void)
{
	if (!memcmp(EP_STRING, (void *)EP_OFFSET, strlen(EP_STRING)))
		return;

	sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
	sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
	sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
	disabled_wait();
}

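/*
 * Main early setup routine, run on the boot CPU: make sure the TOD clock
 * runs, detect the machine and its facilities, and set up the early
 * lowcore state before the generic start-up code takes over.
 */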
void __init startup_init(void)
{
	reset_tod_clock();
	check_image_bootable();
	time_early_init();
	init_kernel_storage_key();
	lockdep_off();
	setup_lowcore_early();
	setup_facility_list();
	detect_machine_type();
	setup_arch_string();
	setup_boot_command_line();
	detect_diag9c();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	setup_control_registers();
	setup_access_registers();
	lockdep_on();
}