xref: /linux/arch/mips/loongson64/smp.c (revision ca64d84e93762f4e587e040a44ad9f6089afc777)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2010, 2011, 2012, Lemote, Inc.
4  * Author: Chen Huacai, chenhc@lemote.com
5  */
6 
7 #include <irq.h>
8 #include <linux/init.h>
9 #include <linux/cpu.h>
10 #include <linux/sched.h>
11 #include <linux/sched/hotplug.h>
12 #include <linux/sched/task_stack.h>
13 #include <linux/smp.h>
14 #include <linux/cpufreq.h>
15 #include <linux/kexec.h>
16 #include <asm/processor.h>
17 #include <asm/time.h>
18 #include <asm/clock.h>
19 #include <asm/tlbflush.h>
20 #include <asm/cacheflush.h>
21 #include <loongson.h>
22 #include <loongson_regs.h>
23 #include <workarounds.h>
24 
25 #include "smp.h"
26 
DEFINE_PER_CPU(int, cpu_state);	/* per-CPU hotplug state (CPU_ONLINE / CPU_DEAD) */

/* IPIs arrive on CP0 interrupt line 6 (see imask setup in init_secondary) */
#define LS_IPI_IRQ (MIPS_CPU_IRQ_BASE + 6)

/*
 * Per-core IPI register windows, indexed by PHYSICAL core id:
 * 4 core groups x 4 cores each = 16 slots (see ipi_*_regs_init()).
 */
static void *ipi_set0_regs[16];
static void *ipi_clear0_regs[16];
static void *ipi_status0_regs[16];
static void *ipi_en0_regs[16];
static void *ipi_mailbox_buf[16];
/* Core 0's c0 count snapshot, published to each secondary for clock sync */
static uint32_t core0_c0count[NR_CPUS];
37 
/* read a 32bit value from ipi register */
#define loongson3_ipi_read32(addr) readl(addr)
/* read a 64bit value from ipi register */
#define loongson3_ipi_read64(addr) readq(addr)
/*
 * write a 32bit value to ipi register; __wbflush() drains the CPU write
 * buffer so the store is visible at the device before we continue
 */
#define loongson3_ipi_write32(action, addr)	\
	do {					\
		writel(action, addr);		\
		__wbflush();			\
	} while (0)
/* write a 64bit value to ipi register (same write-buffer flush as above) */
#define loongson3_ipi_write64(action, addr)	\
	do {					\
		writeq(action, addr);		\
		__wbflush();			\
	} while (0)
54 
/* Selected once at boot by csr_ipi_probe(): CSR-based or legacy MMIO IPI ops */
u32 (*ipi_read_clear)(int cpu);
void (*ipi_write_action)(int cpu, u32 action);
57 
/*
 * CSR flavour of IPI receive: fetch the pending action bits and ack them.
 * The cpu argument is unused here — CSR status/clear are per-CPU registers.
 */
static u32 csr_ipi_read_clear(int cpu)
{
	u32 action;

	/* Load the ipi register to figure out what we're supposed to do */
	action = csr_readl(LOONGSON_CSR_IPI_STATUS);
	/* Clear the ipi register to clear the interrupt */
	csr_writel(action, LOONGSON_CSR_IPI_CLEAR);

	return action;
}
69 
70 static void csr_ipi_write_action(int cpu, u32 action)
71 {
72 	unsigned int irq = 0;
73 
74 	while ((irq = ffs(action))) {
75 		uint32_t val = CSR_IPI_SEND_BLOCK;
76 		val |= (irq - 1);
77 		val |= (cpu << CSR_IPI_SEND_CPU_SHIFT);
78 		csr_writel(val, LOONGSON_CSR_IPI_SEND);
79 		action &= ~BIT(irq - 1);
80 	}
81 }
82 
/*
 * Legacy MMIO flavour of IPI receive: read this CPU's STATUS0 window and
 * ack by writing the same bits to CLEAR0. @cpu is a logical id and is
 * translated to the physical core id used to index the register tables.
 */
static u32 legacy_ipi_read_clear(int cpu)
{
	/* Load the ipi register to figure out what we're supposed to do */
	u32 action;

	action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]);
	/* Clear the ipi register to clear the interrupt */
	loongson3_ipi_write32(action, ipi_clear0_regs[cpu_logical_map(cpu)]);

	return action;
}
94 
/*
 * Legacy MMIO flavour of IPI send. Unlike legacy_ipi_read_clear(), @cpu
 * here is already a PHYSICAL core id — callers (loongson3_send_ipi_*)
 * apply cpu_logical_map() before calling through ipi_write_action.
 */
static void legacy_ipi_write_action(int cpu, u32 action)
{
	loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu]);
}
99 
100 static void csr_ipi_probe(void)
101 {
102 	if (cpu_has_csr() && csr_readl(LOONGSON_CSR_FEATURES) & LOONGSON_CSRF_IPI) {
103 		ipi_read_clear = csr_ipi_read_clear;
104 		ipi_write_action = csr_ipi_write_action;
105 	} else {
106 		ipi_read_clear = legacy_ipi_read_clear;
107 		ipi_write_action = legacy_ipi_write_action;
108 	}
109 }
110 
111 static void ipi_set0_regs_init(void)
112 {
113 	ipi_set0_regs[0] = (void *)
114 		(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + SET0);
115 	ipi_set0_regs[1] = (void *)
116 		(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + SET0);
117 	ipi_set0_regs[2] = (void *)
118 		(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + SET0);
119 	ipi_set0_regs[3] = (void *)
120 		(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + SET0);
121 	ipi_set0_regs[4] = (void *)
122 		(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + SET0);
123 	ipi_set0_regs[5] = (void *)
124 		(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + SET0);
125 	ipi_set0_regs[6] = (void *)
126 		(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + SET0);
127 	ipi_set0_regs[7] = (void *)
128 		(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + SET0);
129 	ipi_set0_regs[8] = (void *)
130 		(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + SET0);
131 	ipi_set0_regs[9] = (void *)
132 		(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + SET0);
133 	ipi_set0_regs[10] = (void *)
134 		(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + SET0);
135 	ipi_set0_regs[11] = (void *)
136 		(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + SET0);
137 	ipi_set0_regs[12] = (void *)
138 		(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + SET0);
139 	ipi_set0_regs[13] = (void *)
140 		(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + SET0);
141 	ipi_set0_regs[14] = (void *)
142 		(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + SET0);
143 	ipi_set0_regs[15] = (void *)
144 		(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + SET0);
145 }
146 
147 static void ipi_clear0_regs_init(void)
148 {
149 	ipi_clear0_regs[0] = (void *)
150 		(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + CLEAR0);
151 	ipi_clear0_regs[1] = (void *)
152 		(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + CLEAR0);
153 	ipi_clear0_regs[2] = (void *)
154 		(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + CLEAR0);
155 	ipi_clear0_regs[3] = (void *)
156 		(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + CLEAR0);
157 	ipi_clear0_regs[4] = (void *)
158 		(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + CLEAR0);
159 	ipi_clear0_regs[5] = (void *)
160 		(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + CLEAR0);
161 	ipi_clear0_regs[6] = (void *)
162 		(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + CLEAR0);
163 	ipi_clear0_regs[7] = (void *)
164 		(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + CLEAR0);
165 	ipi_clear0_regs[8] = (void *)
166 		(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + CLEAR0);
167 	ipi_clear0_regs[9] = (void *)
168 		(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + CLEAR0);
169 	ipi_clear0_regs[10] = (void *)
170 		(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + CLEAR0);
171 	ipi_clear0_regs[11] = (void *)
172 		(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + CLEAR0);
173 	ipi_clear0_regs[12] = (void *)
174 		(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + CLEAR0);
175 	ipi_clear0_regs[13] = (void *)
176 		(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + CLEAR0);
177 	ipi_clear0_regs[14] = (void *)
178 		(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + CLEAR0);
179 	ipi_clear0_regs[15] = (void *)
180 		(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + CLEAR0);
181 }
182 
183 static void ipi_status0_regs_init(void)
184 {
185 	ipi_status0_regs[0] = (void *)
186 		(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + STATUS0);
187 	ipi_status0_regs[1] = (void *)
188 		(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + STATUS0);
189 	ipi_status0_regs[2] = (void *)
190 		(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + STATUS0);
191 	ipi_status0_regs[3] = (void *)
192 		(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + STATUS0);
193 	ipi_status0_regs[4] = (void *)
194 		(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + STATUS0);
195 	ipi_status0_regs[5] = (void *)
196 		(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + STATUS0);
197 	ipi_status0_regs[6] = (void *)
198 		(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + STATUS0);
199 	ipi_status0_regs[7] = (void *)
200 		(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + STATUS0);
201 	ipi_status0_regs[8] = (void *)
202 		(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + STATUS0);
203 	ipi_status0_regs[9] = (void *)
204 		(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + STATUS0);
205 	ipi_status0_regs[10] = (void *)
206 		(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + STATUS0);
207 	ipi_status0_regs[11] = (void *)
208 		(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + STATUS0);
209 	ipi_status0_regs[12] = (void *)
210 		(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + STATUS0);
211 	ipi_status0_regs[13] = (void *)
212 		(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + STATUS0);
213 	ipi_status0_regs[14] = (void *)
214 		(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + STATUS0);
215 	ipi_status0_regs[15] = (void *)
216 		(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + STATUS0);
217 }
218 
219 static void ipi_en0_regs_init(void)
220 {
221 	ipi_en0_regs[0] = (void *)
222 		(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + EN0);
223 	ipi_en0_regs[1] = (void *)
224 		(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + EN0);
225 	ipi_en0_regs[2] = (void *)
226 		(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + EN0);
227 	ipi_en0_regs[3] = (void *)
228 		(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + EN0);
229 	ipi_en0_regs[4] = (void *)
230 		(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + EN0);
231 	ipi_en0_regs[5] = (void *)
232 		(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + EN0);
233 	ipi_en0_regs[6] = (void *)
234 		(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + EN0);
235 	ipi_en0_regs[7] = (void *)
236 		(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + EN0);
237 	ipi_en0_regs[8] = (void *)
238 		(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + EN0);
239 	ipi_en0_regs[9] = (void *)
240 		(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + EN0);
241 	ipi_en0_regs[10] = (void *)
242 		(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + EN0);
243 	ipi_en0_regs[11] = (void *)
244 		(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + EN0);
245 	ipi_en0_regs[12] = (void *)
246 		(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + EN0);
247 	ipi_en0_regs[13] = (void *)
248 		(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + EN0);
249 	ipi_en0_regs[14] = (void *)
250 		(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + EN0);
251 	ipi_en0_regs[15] = (void *)
252 		(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + EN0);
253 }
254 
255 static void ipi_mailbox_buf_init(void)
256 {
257 	ipi_mailbox_buf[0] = (void *)
258 		(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + BUF);
259 	ipi_mailbox_buf[1] = (void *)
260 		(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + BUF);
261 	ipi_mailbox_buf[2] = (void *)
262 		(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + BUF);
263 	ipi_mailbox_buf[3] = (void *)
264 		(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + BUF);
265 	ipi_mailbox_buf[4] = (void *)
266 		(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + BUF);
267 	ipi_mailbox_buf[5] = (void *)
268 		(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + BUF);
269 	ipi_mailbox_buf[6] = (void *)
270 		(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + BUF);
271 	ipi_mailbox_buf[7] = (void *)
272 		(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + BUF);
273 	ipi_mailbox_buf[8] = (void *)
274 		(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + BUF);
275 	ipi_mailbox_buf[9] = (void *)
276 		(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + BUF);
277 	ipi_mailbox_buf[10] = (void *)
278 		(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + BUF);
279 	ipi_mailbox_buf[11] = (void *)
280 		(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + BUF);
281 	ipi_mailbox_buf[12] = (void *)
282 		(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + BUF);
283 	ipi_mailbox_buf[13] = (void *)
284 		(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + BUF);
285 	ipi_mailbox_buf[14] = (void *)
286 		(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + BUF);
287 	ipi_mailbox_buf[15] = (void *)
288 		(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + BUF);
289 }
290 
/*
 * Simple enough, just poke the appropriate ipi register.
 * @cpu is a logical id; the write helper takes a physical core id.
 */
static void loongson3_send_ipi_single(int cpu, unsigned int action)
{
	ipi_write_action(cpu_logical_map(cpu), (u32)action);
}
298 
299 static void
300 loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
301 {
302 	unsigned int i;
303 
304 	for_each_cpu(i, mask)
305 		ipi_write_action(cpu_logical_map(i), (u32)action);
306 }
307 
308 
/*
 * IPI interrupt handler: read-and-ack this CPU's pending action bits,
 * then service each requested action.
 */
static irqreturn_t loongson3_ipi_interrupt(int irq, void *dev_id)
{
	int i, cpu = smp_processor_id();
	unsigned int action, c0count;

	action = ipi_read_clear(cpu);

	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	if (action & SMP_CALL_FUNCTION) {
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	}

	if (action & SMP_ASK_C0COUNT) {
		/* Only core 0 is ever asked for its count (see init_secondary) */
		BUG_ON(cpu != 0);
		c0count = read_c0_count();
		c0count = c0count ? c0count : 1; /* 0 means "not yet" to waiters */
		for (i = 1; i < nr_cpu_ids; i++)
			core0_c0count[i] = c0count;
		__wbflush(); /* Let others see the result ASAP */
	}

	return IRQ_HANDLED;
}
336 
#define MAX_LOOPS 800	/* cap on the busy-wait iteration count used below */
/*
 * SMP init and finish on secondary CPUs
 */
static void loongson3_init_secondary(void)
{
	int i;
	uint32_t initcount;
	unsigned int cpu = smp_processor_id();
	/* IP6 carries the IPI (LS_IPI_IRQ = MIPS_CPU_IRQ_BASE + 6) */
	unsigned int imask = STATUSF_IP7 | STATUSF_IP6 |
			     STATUSF_IP3 | STATUSF_IP2;

	/* Set interrupt mask, but don't enable */
	change_c0_status(ST0_IM, imask);

	/* Unmask all IPI sources on every possible core */
	for (i = 0; i < num_possible_cpus(); i++)
		loongson3_ipi_write32(0xffffffff, ipi_en0_regs[cpu_logical_map(i)]);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
	/* Derive core-within-package and package id from the physical id */
	cpu_set_core(&cpu_data[cpu],
		     cpu_logical_map(cpu) % loongson_sysconf.cores_per_package);
	cpu_data[cpu].package =
		cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;

	/*
	 * Sync this CPU's c0 count with core 0: ask core 0 via IPI to
	 * publish its count (core0_c0count[cpu] stays 0 until it does),
	 * then compensate for the iterations we spent waiting, clamped
	 * to MAX_LOOPS.
	 */
	i = 0;
	core0_c0count[cpu] = 0;
	loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
	while (!core0_c0count[cpu]) {
		i++;
		cpu_relax();
	}

	if (i > MAX_LOOPS)
		i = MAX_LOOPS;
	if (cpu_data[cpu].package)
		initcount = core0_c0count[cpu] + i;
	else /* Local access is faster for loops */
		initcount = core0_c0count[cpu] + i/2;

	write_c0_count(initcount);
}
378 
/*
 * Last step of secondary bringup: arm the first timer tick, enable
 * interrupts, and clear the mailbox PC slot so the core won't re-enter
 * the boot path from a stale entry.
 */
static void loongson3_smp_finish(void)
{
	int cpu = smp_processor_id();

	write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
	local_irq_enable();
	loongson3_ipi_write64(0,
			ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
	pr_info("CPU#%d finished, CP0_ST=%x\n",
			smp_processor_id(), read_c0_status());
}
390 
/*
 * Build the physical<->logical CPU maps (skipping firmware-reserved
 * cores), then initialize the per-core IPI register tables and core 0's
 * topology fields.
 */
static void __init loongson3_smp_setup(void)
{
	int i = 0, num = 0; /* i: physical id, num: logical id */

	init_cpu_possible(cpu_none_mask);

	/* For unified kernel, NR_CPUS is the maximum possible value,
	 * loongson_sysconf.nr_cpus is the really present value */
	while (i < loongson_sysconf.nr_cpus) {
		if (loongson_sysconf.reserved_cpus_mask & (1<<i)) {
			/* Reserved physical CPU cores */
			__cpu_number_map[i] = -1;
		} else {
			__cpu_number_map[i] = num;
			__cpu_logical_map[num] = i;
			set_cpu_possible(num, true);
			num++;
		}
		i++;
	}
	pr_info("Detected %i available CPU(s)\n", num);

	/* Mark the remaining logical slots (reserved cores) as unmapped */
	while (num < loongson_sysconf.nr_cpus) {
		__cpu_logical_map[num] = -1;
		num++;
	}

	csr_ipi_probe();
	ipi_set0_regs_init();
	ipi_clear0_regs_init();
	ipi_status0_regs_init();
	ipi_en0_regs_init();
	ipi_mailbox_buf_init();
	/* Fill in the boot CPU's topology from its physical id */
	cpu_set_core(&cpu_data[0],
		     cpu_logical_map(0) % loongson_sysconf.cores_per_package);
	cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
}
428 
/*
 * Pre-SMP setup on the boot CPU: install the shared IPI handler and
 * declare every possible CPU present.
 */
static void __init loongson3_prepare_cpus(unsigned int max_cpus)
{
	if (request_irq(LS_IPI_IRQ, loongson3_ipi_interrupt,
			IRQF_PERCPU | IRQF_NO_SUSPEND, "SMP_IPI", NULL))
		pr_err("Failed to request IPI IRQ\n");
	init_cpu_present(cpu_possible_mask);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; /* boot CPU */
}
437 
/*
 * Setup the PC, SP, and GP of a secondary processor and start it running!
 * The target core spins in its play_dead/firmware loop until the PC slot
 * of its mailbox becomes non-zero, so the PC (offset 0x0) is deliberately
 * written LAST, after SP/GP/arg are in place.
 */
static int loongson3_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long startargs[4];

	pr_info("Booting CPU#%d...\n", cpu);

	/* startargs[] are initial PC, SP and GP for secondary CPU */
	startargs[0] = (unsigned long)&smp_bootstrap;
	startargs[1] = (unsigned long)__KSTK_TOS(idle);
	startargs[2] = (unsigned long)task_thread_info(idle);
	startargs[3] = 0;

	pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n",
			cpu, startargs[0], startargs[1], startargs[2]);

	loongson3_ipi_write64(startargs[3],
			ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x18);
	loongson3_ipi_write64(startargs[2],
			ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x10);
	loongson3_ipi_write64(startargs[1],
			ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x8);
	loongson3_ipi_write64(startargs[0],
			ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
	return 0;
}
466 
467 #ifdef CONFIG_HOTPLUG_CPU
468 
/*
 * Take the current CPU offline: refuse for the boot CPU, otherwise drop
 * it from the online mask, migrate its interrupts away, mask local
 * interrupt lines and flush its TLB.
 * Returns 0 on success, -EBUSY for CPU 0.
 */
static int loongson3_cpu_disable(void)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	local_irq_save(flags);
	irq_cpu_offline();
	clear_c0_status(ST0_IM);	/* mask all CP0 interrupt lines */
	local_irq_restore(flags);
	local_flush_tlb_all();

	return 0;
}
487 
488 
/*
 * Called on a surviving CPU: spin until the dying CPU publishes CPU_DEAD
 * through its per-cpu cpu_state (done by the play_dead asm storing 0x7).
 */
static void loongson3_cpu_die(unsigned int cpu)
{
	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	mb();
}
496 
/* To shutdown a core in Loongson 3, the target core should go to CKSEG1 and
 * flush all L1 entries at first. Then, another core (usually Core 0) can
 * safely disable the clock of the target core. loongson3_play_dead() is
 * called via CKSEG1 (uncached and unmmaped) */
static void loongson3_type1_play_dead(int *state_addr)
{
	register int val;
	register long cpuid, core, node, count;
	register void *addr, *base, *initfunc;

	/*
	 * Flush L1 I/D caches set by set (0x20 line stride), then publish
	 * CPU_DEAD (0x7) through *state_addr and write that line back so
	 * other cores observe it.
	 * NOTE(review): %[sets] is declared as an input operand but is
	 * decremented by addiu — works because it is not read afterwards,
	 * but strictly it should be an in/out operand; confirm before
	 * touching the constraints.
	 */
	__asm__ __volatile__(
		"   .set push                     \n"
		"   .set noreorder                \n"
		"   li %[addr], 0x80000000        \n" /* KSEG0 */
		"1: cache 0, 0(%[addr])           \n" /* flush L1 ICache */
		"   cache 0, 1(%[addr])           \n"
		"   cache 0, 2(%[addr])           \n"
		"   cache 0, 3(%[addr])           \n"
		"   cache 1, 0(%[addr])           \n" /* flush L1 DCache */
		"   cache 1, 1(%[addr])           \n"
		"   cache 1, 2(%[addr])           \n"
		"   cache 1, 3(%[addr])           \n"
		"   addiu %[sets], %[sets], -1    \n"
		"   bnez  %[sets], 1b             \n"
		"   addiu %[addr], %[addr], 0x20  \n"
		"   li    %[val], 0x7             \n" /* *state_addr = CPU_DEAD; */
		"   sw    %[val], (%[state_addr]) \n"
		"   sync                          \n"
		"   cache 21, (%[state_addr])     \n" /* flush entry of *state_addr */
		"   .set pop                      \n"
		: [addr] "=&r" (addr), [val] "=&r" (val)
		: [state_addr] "r" (state_addr),
		  [sets] "r" (cpu_data[smp_processor_id()].dcache.sets));

	/*
	 * Compute this core's mailbox base from CP0 $15 sel 1, then park:
	 * poll the mailbox PC slot (rate-limited by a delay loop) until
	 * another core writes a non-zero entry point, load SP/GP/a1 from
	 * the mailbox and jump there. Never returns.
	 */
	__asm__ __volatile__(
		"   .set push                         \n"
		"   .set noreorder                    \n"
		"   .set mips64                       \n"
		"   mfc0  %[cpuid], $15, 1            \n"
		"   andi  %[cpuid], 0x3ff             \n"
		"   dli   %[base], 0x900000003ff01000 \n"
		"   andi  %[core], %[cpuid], 0x3      \n"
		"   sll   %[core], 8                  \n" /* get core id */
		"   or    %[base], %[base], %[core]   \n"
		"   andi  %[node], %[cpuid], 0xc      \n"
		"   dsll  %[node], 42                 \n" /* get node id */
		"   or    %[base], %[base], %[node]   \n"
		"1: li    %[count], 0x100             \n" /* wait for init loop */
		"2: bnez  %[count], 2b                \n" /* limit mailbox access */
		"   addiu %[count], -1                \n"
		"   ld    %[initfunc], 0x20(%[base])  \n" /* get PC via mailbox */
		"   beqz  %[initfunc], 1b             \n"
		"   nop                               \n"
		"   ld    $sp, 0x28(%[base])          \n" /* get SP via mailbox */
		"   ld    $gp, 0x30(%[base])          \n" /* get GP via mailbox */
		"   ld    $a1, 0x38(%[base])          \n"
		"   jr    %[initfunc]                 \n" /* jump to initial PC */
		"   nop                               \n"
		"   .set pop                          \n"
		: [core] "=&r" (core), [node] "=&r" (node),
		  [base] "=&r" (base), [cpuid] "=&r" (cpuid),
		  [count] "=&r" (count), [initfunc] "=&r" (initfunc)
		: /* No Input */
		: "a1");
}
562 
/*
 * Same flow as loongson3_type1_play_dead(), but the mailbox base also
 * folds node bits into address bits 15:14 (the extra dsrl/or pair below)
 * for the Loongson-3B register layout.
 */
static void loongson3_type2_play_dead(int *state_addr)
{
	register int val;
	register long cpuid, core, node, count;
	register void *addr, *base, *initfunc;

	/* Flush L1 I/D caches, then publish CPU_DEAD (0x7) via *state_addr */
	__asm__ __volatile__(
		"   .set push                     \n"
		"   .set noreorder                \n"
		"   li %[addr], 0x80000000        \n" /* KSEG0 */
		"1: cache 0, 0(%[addr])           \n" /* flush L1 ICache */
		"   cache 0, 1(%[addr])           \n"
		"   cache 0, 2(%[addr])           \n"
		"   cache 0, 3(%[addr])           \n"
		"   cache 1, 0(%[addr])           \n" /* flush L1 DCache */
		"   cache 1, 1(%[addr])           \n"
		"   cache 1, 2(%[addr])           \n"
		"   cache 1, 3(%[addr])           \n"
		"   addiu %[sets], %[sets], -1    \n"
		"   bnez  %[sets], 1b             \n"
		"   addiu %[addr], %[addr], 0x20  \n"
		"   li    %[val], 0x7             \n" /* *state_addr = CPU_DEAD; */
		"   sw    %[val], (%[state_addr]) \n"
		"   sync                          \n"
		"   cache 21, (%[state_addr])     \n" /* flush entry of *state_addr */
		"   .set pop                      \n"
		: [addr] "=&r" (addr), [val] "=&r" (val)
		: [state_addr] "r" (state_addr),
		  [sets] "r" (cpu_data[smp_processor_id()].dcache.sets));

	/* Locate the mailbox, poll for a PC, load SP/GP/a1 and jump away */
	__asm__ __volatile__(
		"   .set push                         \n"
		"   .set noreorder                    \n"
		"   .set mips64                       \n"
		"   mfc0  %[cpuid], $15, 1            \n"
		"   andi  %[cpuid], 0x3ff             \n"
		"   dli   %[base], 0x900000003ff01000 \n"
		"   andi  %[core], %[cpuid], 0x3      \n"
		"   sll   %[core], 8                  \n" /* get core id */
		"   or    %[base], %[base], %[core]   \n"
		"   andi  %[node], %[cpuid], 0xc      \n"
		"   dsll  %[node], 42                 \n" /* get node id */
		"   or    %[base], %[base], %[node]   \n"
		"   dsrl  %[node], 30                 \n" /* 15:14 */
		"   or    %[base], %[base], %[node]   \n"
		"1: li    %[count], 0x100             \n" /* wait for init loop */
		"2: bnez  %[count], 2b                \n" /* limit mailbox access */
		"   addiu %[count], -1                \n"
		"   ld    %[initfunc], 0x20(%[base])  \n" /* get PC via mailbox */
		"   beqz  %[initfunc], 1b             \n"
		"   nop                               \n"
		"   ld    $sp, 0x28(%[base])          \n" /* get SP via mailbox */
		"   ld    $gp, 0x30(%[base])          \n" /* get GP via mailbox */
		"   ld    $a1, 0x38(%[base])          \n"
		"   jr    %[initfunc]                 \n" /* jump to initial PC */
		"   nop                               \n"
		"   .set pop                          \n"
		: [core] "=&r" (core), [node] "=&r" (node),
		  [base] "=&r" (base), [cpuid] "=&r" (cpuid),
		  [count] "=&r" (count), [initfunc] "=&r" (initfunc)
		: /* No Input */
		: "a1");
}
626 
/*
 * Same flow as loongson3_type1_play_dead(), but with a 0x40 cache line
 * stride and an additional pass over the L1 VCache (cache op 2) before
 * publishing CPU_DEAD — used on newer 3A cores and Loongson-64G.
 */
static void loongson3_type3_play_dead(int *state_addr)
{
	register int val;
	register long cpuid, core, node, count;
	register void *addr, *base, *initfunc;

	/* Flush L1 I/D caches and L1 VCache, then publish CPU_DEAD (0x7) */
	__asm__ __volatile__(
		"   .set push                     \n"
		"   .set noreorder                \n"
		"   li %[addr], 0x80000000        \n" /* KSEG0 */
		"1: cache 0, 0(%[addr])           \n" /* flush L1 ICache */
		"   cache 0, 1(%[addr])           \n"
		"   cache 0, 2(%[addr])           \n"
		"   cache 0, 3(%[addr])           \n"
		"   cache 1, 0(%[addr])           \n" /* flush L1 DCache */
		"   cache 1, 1(%[addr])           \n"
		"   cache 1, 2(%[addr])           \n"
		"   cache 1, 3(%[addr])           \n"
		"   addiu %[sets], %[sets], -1    \n"
		"   bnez  %[sets], 1b             \n"
		"   addiu %[addr], %[addr], 0x40  \n"
		"   li %[addr], 0x80000000        \n" /* KSEG0 */
		"2: cache 2, 0(%[addr])           \n" /* flush L1 VCache */
		"   cache 2, 1(%[addr])           \n"
		"   cache 2, 2(%[addr])           \n"
		"   cache 2, 3(%[addr])           \n"
		"   cache 2, 4(%[addr])           \n"
		"   cache 2, 5(%[addr])           \n"
		"   cache 2, 6(%[addr])           \n"
		"   cache 2, 7(%[addr])           \n"
		"   cache 2, 8(%[addr])           \n"
		"   cache 2, 9(%[addr])           \n"
		"   cache 2, 10(%[addr])          \n"
		"   cache 2, 11(%[addr])          \n"
		"   cache 2, 12(%[addr])          \n"
		"   cache 2, 13(%[addr])          \n"
		"   cache 2, 14(%[addr])          \n"
		"   cache 2, 15(%[addr])          \n"
		"   addiu %[vsets], %[vsets], -1  \n"
		"   bnez  %[vsets], 2b            \n"
		"   addiu %[addr], %[addr], 0x40  \n"
		"   li    %[val], 0x7             \n" /* *state_addr = CPU_DEAD; */
		"   sw    %[val], (%[state_addr]) \n"
		"   sync                          \n"
		"   cache 21, (%[state_addr])     \n" /* flush entry of *state_addr */
		"   .set pop                      \n"
		: [addr] "=&r" (addr), [val] "=&r" (val)
		: [state_addr] "r" (state_addr),
		  [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
		  [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));

	/* Locate the mailbox, poll for a PC, load SP/GP/a1 and jump away */
	__asm__ __volatile__(
		"   .set push                         \n"
		"   .set noreorder                    \n"
		"   .set mips64                       \n"
		"   mfc0  %[cpuid], $15, 1            \n"
		"   andi  %[cpuid], 0x3ff             \n"
		"   dli   %[base], 0x900000003ff01000 \n"
		"   andi  %[core], %[cpuid], 0x3      \n"
		"   sll   %[core], 8                  \n" /* get core id */
		"   or    %[base], %[base], %[core]   \n"
		"   andi  %[node], %[cpuid], 0xc      \n"
		"   dsll  %[node], 42                 \n" /* get node id */
		"   or    %[base], %[base], %[node]   \n"
		"1: li    %[count], 0x100             \n" /* wait for init loop */
		"2: bnez  %[count], 2b                \n" /* limit mailbox access */
		"   addiu %[count], -1                \n"
		"   ld    %[initfunc], 0x20(%[base])  \n" /* get PC via mailbox */
		"   beqz  %[initfunc], 1b             \n"
		"   nop                               \n"
		"   ld    $sp, 0x28(%[base])          \n" /* get SP via mailbox */
		"   ld    $gp, 0x30(%[base])          \n" /* get GP via mailbox */
		"   ld    $a1, 0x38(%[base])          \n"
		"   jr    %[initfunc]                 \n" /* jump to initial PC */
		"   nop                               \n"
		"   .set pop                          \n"
		: [core] "=&r" (core), [node] "=&r" (node),
		  [base] "=&r" (base), [cpuid] "=&r" (cpuid),
		  [count] "=&r" (count), [initfunc] "=&r" (initfunc)
		: /* No Input */
		: "a1");
}
709 
/*
 * Final code run by a dying CPU: pick the play_dead variant that matches
 * this chip (by PRID implementation/revision) and call it through a
 * CKSEG1 (uncached) alias, since the routine flushes the local caches
 * and must not fetch through them. Does not return.
 */
void play_dead(void)
{
	int prid_imp, prid_rev, *state_addr;
	unsigned int cpu = smp_processor_id();
	void (*play_dead_at_ckseg1)(int *);

	idle_task_exit();

	prid_imp = read_c0_prid() & PRID_IMP_MASK;
	prid_rev = read_c0_prid() & PRID_REV_MASK;

	/* Loongson-64G always uses the type3 (VCache-flushing) variant */
	if (prid_imp == PRID_IMP_LOONGSON_64G) {
		play_dead_at_ckseg1 =
			(void *)CKSEG1ADDR((unsigned long)loongson3_type3_play_dead);
		goto out;
	}

	switch (prid_rev) {
	case PRID_REV_LOONGSON3A_R1:
	default:
		play_dead_at_ckseg1 =
			(void *)CKSEG1ADDR((unsigned long)loongson3_type1_play_dead);
		break;
	case PRID_REV_LOONGSON3B_R1:
	case PRID_REV_LOONGSON3B_R2:
		play_dead_at_ckseg1 =
			(void *)CKSEG1ADDR((unsigned long)loongson3_type2_play_dead);
		break;
	case PRID_REV_LOONGSON3A_R2_0:
	case PRID_REV_LOONGSON3A_R2_1:
	case PRID_REV_LOONGSON3A_R3_0:
	case PRID_REV_LOONGSON3A_R3_1:
		play_dead_at_ckseg1 =
			(void *)CKSEG1ADDR((unsigned long)loongson3_type3_play_dead);
		break;
	}

out:
	/* Hand the routine this CPU's cpu_state slot so it can post CPU_DEAD */
	state_addr = &per_cpu(cpu_state, cpu);
	mb();
	play_dead_at_ckseg1(state_addr);
}
752 
/*
 * CPU-hotplug teardown callback: gate off the target core's clock.
 * 3A R1 uses a per-core bit in CHIPCFG; later chips use FREQCTRL,
 * skipped entirely when the firmware declares the CPUHOTPLUG workaround.
 * Always returns 0 (cpuhp callback convention).
 */
static int loongson3_disable_clock(unsigned int cpu)
{
	uint64_t core_id = cpu_core(&cpu_data[cpu]);
	uint64_t package_id = cpu_data[cpu].package;

	if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
		LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id));
	} else {
		if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
			LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
	}
	return 0;
}
766 
/*
 * CPU-hotplug prepare callback: re-enable the target core's clock —
 * mirror image of loongson3_disable_clock(). Always returns 0.
 */
static int loongson3_enable_clock(unsigned int cpu)
{
	uint64_t core_id = cpu_core(&cpu_data[cpu]);
	uint64_t package_id = cpu_data[cpu].package;

	if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
		LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id);
	} else {
		if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
			LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
	}
	return 0;
}
780 
/*
 * Hook the clock gating into the cpuhp state machine: enable the clock
 * when a CPU is being prepared, disable it on teardown.
 */
static int register_loongson3_notifier(void)
{
	return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
					 "mips/loongson:prepare",
					 loongson3_enable_clock,
					 loongson3_disable_clock);
}
early_initcall(register_loongson3_notifier);
789 
790 #endif
791 
/* Loongson-3 SMP operations, registered with the generic MIPS SMP core */
const struct plat_smp_ops loongson3_smp_ops = {
	.send_ipi_single = loongson3_send_ipi_single,
	.send_ipi_mask = loongson3_send_ipi_mask,
	.init_secondary = loongson3_init_secondary,
	.smp_finish = loongson3_smp_finish,
	.boot_secondary = loongson3_boot_secondary,
	.smp_setup = loongson3_smp_setup,
	.prepare_cpus = loongson3_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = loongson3_cpu_disable,
	.cpu_die = loongson3_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
808