xref: /linux/arch/s390/kernel/processor.c (revision 4db102dcb0396a4ccf89b1eac0f4eb3fd167a080)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright IBM Corp. 2008
4  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
5  */
6 
7 #define KMSG_COMPONENT "cpu"
8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9 
10 #include <linux/stop_machine.h>
11 #include <linux/bitops.h>
12 #include <linux/kernel.h>
13 #include <linux/random.h>
14 #include <linux/sched/mm.h>
15 #include <linux/init.h>
16 #include <linux/seq_file.h>
17 #include <linux/mm_types.h>
18 #include <linux/delay.h>
19 #include <linux/cpu.h>
20 
21 #include <asm/diag.h>
22 #include <asm/facility.h>
23 #include <asm/elf.h>
24 #include <asm/lowcore.h>
25 #include <asm/param.h>
26 #include <asm/sclp.h>
27 #include <asm/smp.h>
28 
/* ELF hardware capability bits exposed to user space via AT_HWCAP. */
unsigned long __read_mostly elf_hwcap;
/* Platform string exposed via AT_PLATFORM, filled in by setup_elf_platform(). */
char elf_platform[ELF_PLATFORM_SIZE];

/* Per-CPU identification and cached ECAG frequency attributes. */
struct cpu_info {
	unsigned int cpu_mhz_dynamic;	/* high word of the ECAG CPU attribute */
	unsigned int cpu_mhz_static;	/* low word of the ECAG CPU attribute */
	struct cpuid cpu_id;		/* CPU id as returned by get_cpu_id() */
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
/* Per-CPU retry counter used by stop_machine_yield() to rate-limit yields. */
static DEFINE_PER_CPU(int, cpu_relax_retry);

/* True if ECAG can report CPU frequency attributes; set once at early boot. */
static bool machine_has_cpu_mhz;
42 
43 void __init cpu_detect_mhz_feature(void)
44 {
45 	if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
46 		machine_has_cpu_mhz = true;
47 }
48 
49 static void update_cpu_mhz(void *arg)
50 {
51 	unsigned long mhz;
52 	struct cpu_info *c;
53 
54 	mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
55 	c = this_cpu_ptr(&cpu_info);
56 	c->cpu_mhz_dynamic = mhz >> 32;
57 	c->cpu_mhz_static = mhz & 0xffffffff;
58 }
59 
60 void s390_update_cpu_mhz(void)
61 {
62 	s390_adjust_jiffies();
63 	if (machine_has_cpu_mhz)
64 		on_each_cpu(update_cpu_mhz, NULL, 0);
65 }
66 
67 void notrace stop_machine_yield(const struct cpumask *cpumask)
68 {
69 	int cpu, this_cpu;
70 
71 	this_cpu = smp_processor_id();
72 	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
73 		__this_cpu_write(cpu_relax_retry, 0);
74 		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
75 		if (cpu >= nr_cpu_ids)
76 			return;
77 		if (arch_vcpu_is_preempted(cpu))
78 			smp_yield_cpu(cpu);
79 	}
80 }
81 
/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	/* Record this CPU's identification for /proc/cpuinfo. */
	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	/* Take a reference on init_mm and adopt it as the active mm. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	/* A CPU being initialized must not yet have a user mm. */
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
97 
/*
 * Print the numbers of all installed facility bits as a single
 * "facilities      :" line of /proc/cpuinfo.
 */
static void show_facilities(struct seq_file *m)
{
	unsigned int bit;

	seq_puts(m, "facilities      :");
	/* Facility bits are numbered MSB-first, hence the _inv iterator. */
	for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
		seq_printf(m, " %d", bit);
	seq_putc(m, '\n');
}
107 
/*
 * Emit the machine-wide header of /proc/cpuinfo: vendor, number of
 * online CPUs, bogomips, hwcap feature names, facility bits, cache
 * information, and one id line per online CPU.
 */
static void show_cpu_summary(struct seq_file *m, void *v)
{
	/* Feature name for each HWCAP bit; index equals the HWCAP_NR_* value. */
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3]	= "esan3",
		[HWCAP_NR_ZARCH]	= "zarch",
		[HWCAP_NR_STFLE]	= "stfle",
		[HWCAP_NR_MSA]		= "msa",
		[HWCAP_NR_LDISP]	= "ldisp",
		[HWCAP_NR_EIMM]		= "eimm",
		[HWCAP_NR_DFP]		= "dfp",
		[HWCAP_NR_HPAGE]	= "edat",
		[HWCAP_NR_ETF3EH]	= "etf3eh",
		[HWCAP_NR_HIGH_GPRS]	= "highgprs",
		[HWCAP_NR_TE]		= "te",
		[HWCAP_NR_VXRS]		= "vx",
		[HWCAP_NR_VXRS_BCD]	= "vxd",
		[HWCAP_NR_VXRS_EXT]	= "vxe",
		[HWCAP_NR_GS]		= "gs",
		[HWCAP_NR_VXRS_EXT2]	= "vxe2",
		[HWCAP_NR_VXRS_PDE]	= "vxp",
		[HWCAP_NR_SORT]		= "sort",
		[HWCAP_NR_DFLT]		= "dflt",
		[HWCAP_NR_VXRS_PDE2]	= "vxp2",
		[HWCAP_NR_NNPA]		= "nnpa",
		[HWCAP_NR_PCI_MIO]	= "pcimio",
		[HWCAP_NR_SIE]		= "sie",
	};
	int i, cpu;

	/* Catch a table/enum mismatch at compile time. */
	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	seq_printf(m, "vendor_id       : IBM/S390\n"
		   "# processors    : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id   : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	/* Print the name of every hwcap bit that is set in elf_hwcap. */
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X,  "
			   "identification = %06X,  "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}
161 
/*
 * Derive the ELF hwcap bits (AT_HWCAP) from the installed facilities
 * and machine flags.  Runs once at boot via arch_initcall().
 */
static int __init setup_hwcaps(void)
{
	/* instructions named N3, "backported" to esa-mode */
	elf_hwcap |= HWCAP_ESAN3;

	/* z/Architecture mode active */
	elf_hwcap |= HWCAP_ZARCH;

	/* store-facility-list-extended */
	if (test_facility(7))
		elf_hwcap |= HWCAP_STFLE;

	/* message-security assist */
	if (test_facility(17))
		elf_hwcap |= HWCAP_MSA;

	/* long-displacement */
	if (test_facility(19))
		elf_hwcap |= HWCAP_LDISP;

	/* extended-immediate */
	elf_hwcap |= HWCAP_EIMM;

	/* extended-translation facility 3 enhancement */
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_ETF3EH;

	/* decimal floating point & perform floating point operation */
	if (test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_DFP;

	/* huge page support */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_HPAGE;

	/* 64-bit register support for 31-bit processes */
	elf_hwcap |= HWCAP_HIGH_GPRS;

	/* transactional execution */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_TE;

	/*
	 * vector: the sub-facility bits are only reported when the base
	 * vector facility (129) is installed.
	 */
	if (test_facility(129)) {
		elf_hwcap |= HWCAP_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_VXRS_PDE;
		if (test_facility(192))
			elf_hwcap |= HWCAP_VXRS_PDE2;
	}

	if (test_facility(150))
		elf_hwcap |= HWCAP_SORT;

	if (test_facility(151))
		elf_hwcap |= HWCAP_DFLT;

	if (test_facility(165))
		elf_hwcap |= HWCAP_NNPA;

	/* guarded storage */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_GS;

	if (MACHINE_HAS_PCI_MIO)
		elf_hwcap |= HWCAP_PCI_MIO;

	/* virtualization support */
	if (sclp.has_sief2)
		elf_hwcap |= HWCAP_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);
242 
243 static int __init setup_elf_platform(void)
244 {
245 	struct cpuid cpu_id;
246 
247 	get_cpu_id(&cpu_id);
248 	add_device_randomness(&cpu_id, sizeof(cpu_id));
249 	switch (cpu_id.machine) {
250 	default:	/* Use "z10" as default. */
251 		strcpy(elf_platform, "z10");
252 		break;
253 	case 0x2817:
254 	case 0x2818:
255 		strcpy(elf_platform, "z196");
256 		break;
257 	case 0x2827:
258 	case 0x2828:
259 		strcpy(elf_platform, "zEC12");
260 		break;
261 	case 0x2964:
262 	case 0x2965:
263 		strcpy(elf_platform, "z13");
264 		break;
265 	case 0x3906:
266 	case 0x3907:
267 		strcpy(elf_platform, "z14");
268 		break;
269 	case 0x8561:
270 	case 0x8562:
271 		strcpy(elf_platform, "z15");
272 		break;
273 	case 0x3931:
274 	case 0x3932:
275 		strcpy(elf_platform, "z16");
276 		break;
277 	}
278 	return 0;
279 }
280 arch_initcall(setup_elf_platform);
281 
/*
 * Emit the topology related lines of a /proc/cpuinfo entry for logical
 * CPU @n.  Without CONFIG_SCHED_TOPOLOGY this compiles to an empty
 * function and the lines are simply omitted.
 */
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id     : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id         : %d\n", topology_core_id(n));
	seq_printf(m, "book id         : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id       : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated       : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address         : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings        : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores       : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}
295 
296 static void show_cpu_ids(struct seq_file *m, unsigned long n)
297 {
298 	struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);
299 
300 	seq_printf(m, "version         : %02X\n", id->version);
301 	seq_printf(m, "identification  : %06X\n", id->ident);
302 	seq_printf(m, "machine         : %04X\n", id->machine);
303 }
304 
305 static void show_cpu_mhz(struct seq_file *m, unsigned long n)
306 {
307 	struct cpu_info *c = per_cpu_ptr(&cpu_info, n);
308 
309 	if (!machine_has_cpu_mhz)
310 		return;
311 	seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
312 	seq_printf(m, "cpu MHz static  : %d\n", c->cpu_mhz_static);
313 }
314 
315 /*
316  * show_cpuinfo - Get information on one CPU for use by procfs.
317  */
318 static int show_cpuinfo(struct seq_file *m, void *v)
319 {
320 	unsigned long n = (unsigned long) v - 1;
321 	unsigned long first = cpumask_first(cpu_online_mask);
322 
323 	if (n == first)
324 		show_cpu_summary(m, v);
325 	seq_printf(m, "\ncpu number      : %ld\n", n);
326 	show_cpu_topology(m, n);
327 	show_cpu_ids(m, n);
328 	show_cpu_mhz(m, n);
329 	return 0;
330 }
331 
332 static inline void *c_update(loff_t *pos)
333 {
334 	if (*pos)
335 		*pos = cpumask_next(*pos - 1, cpu_online_mask);
336 	else
337 		*pos = cpumask_first(cpu_online_mask);
338 	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
339 }
340 
/*
 * seq_file start op: block CPU hotplug for the duration of the
 * iteration (released in c_stop()) and return the first element.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	cpus_read_lock();
	return c_update(pos);
}
346 
/* seq_file next op: step past the current position and fetch the next CPU. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}
352 
/* seq_file stop op: drop the hotplug lock taken in c_start(). */
static void c_stop(struct seq_file *m, void *v)
{
	cpus_read_unlock();
}
357 
/* seq_file iterator operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
364