// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/stop_machine.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/mm_types.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/text-patching.h>
#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/sclp.h>
#include <asm/smp.h>

unsigned long __read_mostly elf_hwcap;
char elf_platform[ELF_PLATFORM_SIZE];

struct cpu_info {
	unsigned int cpu_mhz_dynamic;
	unsigned int cpu_mhz_static;
	struct cpuid cpu_id;
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
static DEFINE_PER_CPU(int, cpu_relax_retry);

static bool machine_has_cpu_mhz;

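/*
 * Detect at boot whether the CPU speed can be queried: facility 34 must
 * be installed and the CPU-attribute query via __ecag() must not fail.
 */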
void __init cpu_detect_mhz_feature(void)
{
	if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
		machine_has_cpu_mhz = true;
}

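/*
 * Cache the speed of the executing CPU: the ECAG query returns the
 * dynamic (current) speed in MHz in the upper 32 bits and the static
 * speed in the lower 32 bits.
 */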
static void update_cpu_mhz(void *arg)
{
	unsigned long mhz;
	struct cpu_info *c;

	mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
	c = this_cpu_ptr(&cpu_info);
	c->cpu_mhz_dynamic = mhz >> 32;
	c->cpu_mhz_static = mhz & 0xffffffff;
}

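/*
 * Re-adjust the jiffies-based delay calibration and, if the machine
 * reports CPU speeds, refresh the cached MHz values on all online CPUs.
 * The cross calls are fire-and-forget (wait == 0).
 */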
void s390_update_cpu_mhz(void)
{
	s390_adjust_jiffies();
	if (machine_has_cpu_mhz)
		on_each_cpu(update_cpu_mhz, NULL, 0);
}

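/*
 * Busy-wait hook for stop_machine(): every spin_retry iterations, instead
 * of just spinning, yield to the next CPU in the mask if its backing vCPU
 * has been preempted by the hypervisor, so the CPU everyone is waiting
 * for can make progress.
 */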
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
	int cpu, this_cpu;

	this_cpu = smp_processor_id();
	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
		__this_cpu_write(cpu_relax_retry, 0);
		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
		if (cpu >= nr_cpu_ids)
			return;
		if (arch_vcpu_is_preempted(cpu))
			smp_yield_cpu(cpu);
	}
}

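/* Cross-call handler: serialize instruction execution on the local CPU. */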
static void do_sync_core(void *info)
{
	sync_core();
}

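/*
 * Make all online CPUs serialize execution after instructions have been
 * patched, so none of them keeps executing stale prefetched code.
 */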
void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

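/*
 * As text_poke_sync(), but with CPU hotplug excluded, so no CPU can come
 * online in the middle of the operation and miss the serialization.
 */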
void text_poke_sync_lock(void)
{
	cpus_read_lock();
	text_poke_sync();
	cpus_read_unlock();
}

/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}

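/*
 * Print the bit numbers of all installed facilities. The facility list
 * uses s390 (most-significant-bit-first) bit numbering, hence the _inv
 * iterator variant.
 */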
static void show_facilities(struct seq_file *m)
{
	unsigned int bit;

	seq_puts(m, "facilities      :");
	for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
		seq_printf(m, " %d", bit);
	seq_putc(m, '\n');
}

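/*
 * Emit the machine-wide header of /proc/cpuinfo: vendor, number of online
 * CPUs, BogoMIPS, hwcap feature names, facility and cache information,
 * plus one version/identification/machine line per online CPU.
 */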
static void show_cpu_summary(struct seq_file *m, void *v)
{
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3]	= "esan3",
		[HWCAP_NR_ZARCH]	= "zarch",
		[HWCAP_NR_STFLE]	= "stfle",
		[HWCAP_NR_MSA]		= "msa",
		[HWCAP_NR_LDISP]	= "ldisp",
		[HWCAP_NR_EIMM]		= "eimm",
		[HWCAP_NR_DFP]		= "dfp",
		[HWCAP_NR_HPAGE]	= "edat",
		[HWCAP_NR_ETF3EH]	= "etf3eh",
		[HWCAP_NR_HIGH_GPRS]	= "highgprs",
		[HWCAP_NR_TE]		= "te",
		[HWCAP_NR_VXRS]		= "vx",
		[HWCAP_NR_VXRS_BCD]	= "vxd",
		[HWCAP_NR_VXRS_EXT]	= "vxe",
		[HWCAP_NR_GS]		= "gs",
		[HWCAP_NR_VXRS_EXT2]	= "vxe2",
		[HWCAP_NR_VXRS_PDE]	= "vxp",
		[HWCAP_NR_SORT]		= "sort",
		[HWCAP_NR_DFLT]		= "dflt",
		[HWCAP_NR_VXRS_PDE2]	= "vxp2",
		[HWCAP_NR_NNPA]		= "nnpa",
		[HWCAP_NR_PCI_MIO]	= "pcimio",
		[HWCAP_NR_SIE]		= "sie",
	};
	int i, cpu;

	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	seq_printf(m, "vendor_id       : IBM/S390\n"
		   "# processors    : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id   : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X,  "
			   "identification = %06X,  "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}

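/*
 * Probe the installed facilities and translate them into ELF hardware
 * capability bits; elf_hwcap is reported to user space through the
 * AT_HWCAP auxiliary vector entry.
 */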
static int __init setup_hwcaps(void)
{
	/* instructions named N3, "backported" to esa-mode */
	elf_hwcap |= HWCAP_ESAN3;

	/* z/Architecture mode active */
	elf_hwcap |= HWCAP_ZARCH;

	/* store-facility-list-extended */
	if (test_facility(7))
		elf_hwcap |= HWCAP_STFLE;

	/* message-security assist */
	if (test_facility(17))
		elf_hwcap |= HWCAP_MSA;

	/* long-displacement */
	if (test_facility(19))
		elf_hwcap |= HWCAP_LDISP;

	/* extended-immediate */
	elf_hwcap |= HWCAP_EIMM;

	/* extended-translation facility 3 enhancement */
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_ETF3EH;

	/* decimal floating point & perform floating point operation */
	if (test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_DFP;

	/* huge page support */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_HPAGE;

	/* 64-bit register support for 31-bit processes */
	elf_hwcap |= HWCAP_HIGH_GPRS;

	/* transactional execution */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_TE;

	/* vector */
	if (test_facility(129)) {
		elf_hwcap |= HWCAP_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_VXRS_PDE;
		if (test_facility(192))
			elf_hwcap |= HWCAP_VXRS_PDE2;
	}

	if (test_facility(150))
		elf_hwcap |= HWCAP_SORT;

	if (test_facility(151))
		elf_hwcap |= HWCAP_DFLT;

	if (test_facility(165))
		elf_hwcap |= HWCAP_NNPA;

	/* guarded storage */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_GS;

	if (MACHINE_HAS_PCI_MIO)
		elf_hwcap |= HWCAP_PCI_MIO;

	/* virtualization support */
	if (sclp.has_sief2)
		elf_hwcap |= HWCAP_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);

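/*
 * Derive the ELF platform string from the machine type so user space
 * (e.g. the dynamic linker, via AT_PLATFORM) can select a matching
 * optimized library; unrecognized machine types fall back to "z10".
 * The CPU id is also mixed into the random pool as device randomness.
 */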
static int __init setup_elf_platform(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	default:	/* Use "z10" as default. */
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	case 0x8561:
	case 0x8562:
		strcpy(elf_platform, "z15");
		break;
	case 0x3931:
	case 0x3932:
		strcpy(elf_platform, "z16");
		break;
	}
	return 0;
}
arch_initcall(setup_elf_platform);

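/*
 * Per-CPU topology lines of /proc/cpuinfo; compiled out without
 * CONFIG_SCHED_TOPOLOGY.
 */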
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id     : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id         : %d\n", topology_core_id(n));
	seq_printf(m, "book id         : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id       : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated       : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address         : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings        : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores       : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}

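/* Per-CPU id lines: version, identification and machine type. */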
static void show_cpu_ids(struct seq_file *m, unsigned long n)
{
	struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);

	seq_printf(m, "version         : %02X\n", id->version);
	seq_printf(m, "identification  : %06X\n", id->ident);
	seq_printf(m, "machine         : %04X\n", id->machine);
}

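/* Per-CPU speed lines; only shown if the machine reports CPU speeds. */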
static void show_cpu_mhz(struct seq_file *m, unsigned long n)
{
	struct cpu_info *c = per_cpu_ptr(&cpu_info, n);

	if (!machine_has_cpu_mhz)
		return;
	seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
	seq_printf(m, "cpu MHz static  : %d\n", c->cpu_mhz_static);
}

/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long n = (unsigned long) v - 1;
	unsigned long first = cpumask_first(cpu_online_mask);

	if (n == first)
		show_cpu_summary(m, v);
	seq_printf(m, "\ncpu number      : %ld\n", n);
	show_cpu_topology(m, n);
	show_cpu_ids(m, n);
	show_cpu_mhz(m, n);
	return 0;
}

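/*
 * seq_file iterator over the online CPU mask. The iterator value is the
 * CPU number plus one, so that CPU 0 is not mistaken for the NULL
 * end-of-sequence marker; show_cpuinfo() undoes the offset. CPU hotplug
 * is blocked between c_start() and c_stop().
 */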
static inline void *c_update(loff_t *pos)
{
	if (*pos)
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	else
		*pos = cpumask_first(cpu_online_mask);
	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	cpus_read_lock();
	return c_update(pos);
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}

static void c_stop(struct seq_file *m, void *v)
{
	cpus_read_unlock();
}

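/* Wired up by the generic /proc/cpuinfo code. */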
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};