xref: /linux/arch/s390/kernel/processor.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright IBM Corp. 2008
4  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
5  */
6 
7 #define pr_fmt(fmt) "cpu: " fmt
8 
9 #include <linux/stop_machine.h>
10 #include <linux/cpufeature.h>
11 #include <linux/bitops.h>
12 #include <linux/kernel.h>
13 #include <linux/random.h>
14 #include <linux/sched/mm.h>
15 #include <linux/init.h>
16 #include <linux/seq_file.h>
17 #include <linux/mm_types.h>
18 #include <linux/delay.h>
19 #include <linux/cpu.h>
20 #include <linux/smp.h>
21 #include <asm/text-patching.h>
22 #include <asm/machine.h>
23 #include <asm/diag.h>
24 #include <asm/facility.h>
25 #include <asm/elf.h>
26 #include <asm/lowcore.h>
27 #include <asm/param.h>
28 #include <asm/sclp.h>
29 #include <asm/smp.h>
30 
/* ELF hardware-capability bits advertised to user space via AT_HWCAP. */
unsigned long __read_mostly elf_hwcap;
/* Platform string advertised via AT_PLATFORM; set by setup_elf_platform(). */
char elf_platform[ELF_PLATFORM_SIZE];

struct cpu_info {
	unsigned int cpu_mhz_dynamic;	/* high 32 bits of the ECAG cpu-attribute query */
	unsigned int cpu_mhz_static;	/* low 32 bits of the ECAG cpu-attribute query */
	struct cpuid cpu_id;		/* cached result of get_cpu_id() for this CPU */
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
/* Per-CPU spin counter used by stop_machine_yield() to rate-limit yielding. */
static DEFINE_PER_CPU(int, cpu_relax_retry);

/* True when the ECAG cpu-attribute query works; see cpu_detect_mhz_feature(). */
static bool machine_has_cpu_mhz;
45 void __init cpu_detect_mhz_feature(void)
46 {
47 	if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
48 		machine_has_cpu_mhz = true;
49 }
50 
51 static void update_cpu_mhz(void *arg)
52 {
53 	unsigned long mhz;
54 	struct cpu_info *c;
55 
56 	mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
57 	c = this_cpu_ptr(&cpu_info);
58 	c->cpu_mhz_dynamic = mhz >> 32;
59 	c->cpu_mhz_static = mhz & 0xffffffff;
60 }
61 
/*
 * Re-adjust jiffies and refresh the per-CPU MHz values on all online
 * CPUs (asynchronously: wait == 0) after a CPU capability change.
 */
void s390_update_cpu_mhz(void)
{
	s390_adjust_jiffies();
	if (machine_has_cpu_mhz)
		on_each_cpu(update_cpu_mhz, NULL, 0);
}
68 
/*
 * Busy-wait hook used while stop_machine() synchronizes CPUs.  After
 * spin_retry iterations on this CPU, pick the next CPU in the mask
 * (wrapping around) and, if its vcpu is currently preempted by the
 * hypervisor, yield to it so it can reach the synchronization point.
 * notrace: may run in contexts where tracing is not safe.
 */
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
	int cpu, this_cpu;

	this_cpu = smp_processor_id();
	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
		__this_cpu_write(cpu_relax_retry, 0);
		cpu = cpumask_next_wrap(this_cpu, cpumask);
		/* No other CPU in the mask: nothing to yield to. */
		if (cpu >= nr_cpu_ids)
			return;
		if (arch_vcpu_is_preempted(cpu))
			smp_yield_cpu(cpu);
	}
}
83 
/* IPI callback: execute sync_core() on the local CPU. */
static void do_sync_core(void *info)
{
	sync_core();
}
88 
/*
 * Serialize all online CPUs after code patching by running sync_core()
 * everywhere (wait == 1: return only after all CPUs are done).
 */
void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}
93 
/* Like text_poke_sync(), but holds the hotplug read lock across the sync. */
void text_poke_sync_lock(void)
{
	cpus_read_lock();
	text_poke_sync();
	cpus_read_unlock();
}
100 
/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	/* Cache this CPU's identification for /proc/cpuinfo. */
	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	/* Adopt init_mm: a freshly initialized CPU must not have a user mm. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
116 
117 static void show_facilities(struct seq_file *m)
118 {
119 	unsigned int bit;
120 
121 	seq_puts(m, "facilities      :");
122 	for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
123 		seq_printf(m, " %d", bit);
124 	seq_putc(m, '\n');
125 }
126 
/*
 * Print the machine-wide header of /proc/cpuinfo: vendor, CPU count,
 * bogomips, thread id, hwcap feature names, facility bits, cache info
 * and one identification line per online CPU.
 */
static void show_cpu_summary(struct seq_file *m, void *v)
{
	/* Names for the HWCAP_NR_* bits, indexed by bit number. */
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3]	= "esan3",
		[HWCAP_NR_ZARCH]	= "zarch",
		[HWCAP_NR_STFLE]	= "stfle",
		[HWCAP_NR_MSA]		= "msa",
		[HWCAP_NR_LDISP]	= "ldisp",
		[HWCAP_NR_EIMM]		= "eimm",
		[HWCAP_NR_DFP]		= "dfp",
		[HWCAP_NR_HPAGE]	= "edat",
		[HWCAP_NR_ETF3EH]	= "etf3eh",
		[HWCAP_NR_HIGH_GPRS]	= "highgprs",
		[HWCAP_NR_TE]		= "te",
		[HWCAP_NR_VXRS]		= "vx",
		[HWCAP_NR_VXRS_BCD]	= "vxd",
		[HWCAP_NR_VXRS_EXT]	= "vxe",
		[HWCAP_NR_GS]		= "gs",
		[HWCAP_NR_VXRS_EXT2]	= "vxe2",
		[HWCAP_NR_VXRS_PDE]	= "vxp",
		[HWCAP_NR_SORT]		= "sort",
		[HWCAP_NR_DFLT]		= "dflt",
		[HWCAP_NR_VXRS_PDE2]	= "vxp2",
		[HWCAP_NR_NNPA]		= "nnpa",
		[HWCAP_NR_PCI_MIO]	= "pcimio",
		[HWCAP_NR_SIE]		= "sie",
	};
	int i, cpu;

	/* The table above must name every defined capability bit. */
	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	seq_printf(m, "vendor_id       : IBM/S390\n"
		   "# processors    : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id   : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	/* Print the name of every hwcap bit set in elf_hwcap. */
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	/* Per-CPU identification as cached by cpu_init(). */
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X,  "
			   "identification = %06X,  "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}
180 
/*
 * Populate elf_hwcap from the installed facilities and machine features.
 * The resulting bits are reported to user space via AT_HWCAP and shown
 * by show_cpu_summary().  Runs once at boot (arch_initcall).
 */
static int __init setup_hwcaps(void)
{
	/* instructions named N3, "backported" to esa-mode */
	elf_hwcap |= HWCAP_ESAN3;

	/* z/Architecture mode active */
	elf_hwcap |= HWCAP_ZARCH;

	/* store-facility-list-extended */
	if (test_facility(7))
		elf_hwcap |= HWCAP_STFLE;

	/* message-security assist */
	if (test_facility(17))
		elf_hwcap |= HWCAP_MSA;

	/* long-displacement */
	if (test_facility(19))
		elf_hwcap |= HWCAP_LDISP;

	/* extended-immediate */
	elf_hwcap |= HWCAP_EIMM;

	/* extended-translation facility 3 enhancement */
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_ETF3EH;

	/* decimal floating point & perform floating point operation */
	if (test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_DFP;

	/* huge page support */
	if (cpu_has_edat1())
		elf_hwcap |= HWCAP_HPAGE;

	/* 64-bit register support for 31-bit processes */
	elf_hwcap |= HWCAP_HIGH_GPRS;

	/* transactional execution */
	if (machine_has_tx())
		elf_hwcap |= HWCAP_TE;

	/* vector: sub-facilities are only advertised with the base facility */
	if (test_facility(129)) {
		elf_hwcap |= HWCAP_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_VXRS_PDE;
		if (test_facility(192))
			elf_hwcap |= HWCAP_VXRS_PDE2;
	}

	/* enhanced sort */
	if (test_facility(150))
		elf_hwcap |= HWCAP_SORT;

	/* deflate conversion */
	if (test_facility(151))
		elf_hwcap |= HWCAP_DFLT;

	/* neural network processing assist */
	if (test_facility(165))
		elf_hwcap |= HWCAP_NNPA;

	/* guarded storage */
	if (cpu_has_gs())
		elf_hwcap |= HWCAP_GS;

	/* PCI memory-I/O instructions */
	if (test_machine_feature(MFEATURE_PCI_MIO))
		elf_hwcap |= HWCAP_PCI_MIO;

	/* virtualization support */
	if (sclp.has_sief2)
		elf_hwcap |= HWCAP_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);
261 
262 static int __init setup_elf_platform(void)
263 {
264 	struct cpuid cpu_id;
265 
266 	get_cpu_id(&cpu_id);
267 	add_device_randomness(&cpu_id, sizeof(cpu_id));
268 	switch (cpu_id.machine) {
269 	default:	/* Use "z10" as default. */
270 		strscpy(elf_platform, "z10");
271 		break;
272 	case 0x2817:
273 	case 0x2818:
274 		strscpy(elf_platform, "z196");
275 		break;
276 	case 0x2827:
277 	case 0x2828:
278 		strscpy(elf_platform, "zEC12");
279 		break;
280 	case 0x2964:
281 	case 0x2965:
282 		strscpy(elf_platform, "z13");
283 		break;
284 	case 0x3906:
285 	case 0x3907:
286 		strscpy(elf_platform, "z14");
287 		break;
288 	case 0x8561:
289 	case 0x8562:
290 		strscpy(elf_platform, "z15");
291 		break;
292 	case 0x3931:
293 	case 0x3932:
294 		strscpy(elf_platform, "z16");
295 		break;
296 	case 0x9175:
297 	case 0x9176:
298 		strscpy(elf_platform, "z17");
299 		break;
300 	}
301 	return 0;
302 }
303 arch_initcall(setup_elf_platform);
304 
/*
 * Print the topology-related /proc/cpuinfo lines for cpu n.  Compiled
 * away (prints nothing) without CONFIG_SCHED_TOPOLOGY.
 */
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id     : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id         : %d\n", topology_core_id(n));
	seq_printf(m, "book id         : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id       : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated       : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address         : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings        : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores       : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}
318 
319 static void show_cpu_ids(struct seq_file *m, unsigned long n)
320 {
321 	struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);
322 
323 	seq_printf(m, "version         : %02X\n", id->version);
324 	seq_printf(m, "identification  : %06X\n", id->ident);
325 	seq_printf(m, "machine         : %04X\n", id->machine);
326 }
327 
328 static void show_cpu_mhz(struct seq_file *m, unsigned long n)
329 {
330 	struct cpu_info *c = per_cpu_ptr(&cpu_info, n);
331 
332 	if (!machine_has_cpu_mhz)
333 		return;
334 	seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
335 	seq_printf(m, "cpu MHz static  : %d\n", c->cpu_mhz_static);
336 }
337 
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	/* The seq iterator cookie encodes cpu number + 1 (see c_update()). */
	unsigned long n = (unsigned long) v - 1;
	unsigned long first = cpumask_first(cpu_online_mask);

	/* The machine-wide summary is printed once, for the first online CPU. */
	if (n == first)
		show_cpu_summary(m, v);
	seq_printf(m, "\ncpu number      : %ld\n", n);
	show_cpu_topology(m, n);
	show_cpu_ids(m, n);
	show_cpu_mhz(m, n);
	return 0;
}
354 
/*
 * Map the seq_file position onto the next online CPU and return the
 * iterator cookie: cpu number + 1, so that cpu 0 is distinguishable
 * from the NULL end-of-sequence marker.
 */
static inline void *c_update(loff_t *pos)
{
	if (*pos)
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	else
		*pos = cpumask_first(cpu_online_mask);
	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
363 
/* seq_file start: keep the online mask stable for the whole traversal. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	cpus_read_lock();
	return c_update(pos);
}
369 
/* seq_file next: advance the position and map it to the next online CPU. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}
375 
/* seq_file stop: drop the hotplug lock taken in c_start(). */
static void c_stop(struct seq_file *m, void *v)
{
	cpus_read_unlock();
}
380 
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
387