xref: /freebsd/sys/powerpc/powerpc/mp_machdep.c (revision b3aaa0cc21c63d388230c7ef2a80abd631ff20d5)
1 /*-
2  * Copyright (c) 2008 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/bus.h>
34 #include <sys/pcpu.h>
35 #include <sys/proc.h>
36 #include <sys/sched.h>
37 #include <sys/smp.h>
38 
39 #include <machine/bus.h>
40 #include <machine/cpu.h>
41 #include <machine/intr_machdep.h>
42 #include <machine/smp.h>
43 
44 #include "pic_if.h"
45 
46 extern struct pcpu __pcpu[MAXCPU];
47 
48 volatile static int ap_awake;
49 volatile static u_int ap_state;
50 volatile static uint32_t ap_decr;
51 volatile static uint32_t ap_tbl;
52 
/*
 * Entry point for application processors (APs) after low-level startup.
 * Spins until the BSP releases the APs (cpu_mp_unleash()), synchronizes
 * this CPU's timebase/decrementer with the values sampled by the BSP,
 * then enters the scheduler.  Never returns.
 */
void
machdep_ap_bootstrap(void)
{

	/* Tell the BSP this AP is alive; counted into smp_cpus. */
	pcpup->pc_awake = 1;

	/* Busy-wait for the BSP to bump ap_state in cpu_mp_unleash(). */
	while (ap_state == 0)
		;

	/*
	 * Load the timebase value the BSP sampled.  TBL is zeroed first
	 * so that no carry out of TBL can corrupt TBU between the two
	 * writes; only the low 32 bits (ap_tbl) are restored, TBU stays 0.
	 */
	mtspr(SPR_TBL, 0);
	mtspr(SPR_TBU, 0);
	mtspr(SPR_TBL, ap_tbl);
	/* Load the decrementer with the value sampled on the BSP. */
	__asm __volatile("mtdec %0" :: "r"(ap_decr));

	/* Count this AP in; the BSP waits for ap_awake == smp_cpus. */
	ap_awake++;

	/* Initialize curthread. */
	PCPU_SET(curthread, PCPU_GET(idlethread));
	PCPU_SET(curpcb, curthread->td_pcb);

	/* Enable external interrupts and hand off to the scheduler. */
	mtmsr(mfmsr() | PSL_EE);
	sched_throw(NULL);
}
76 
/*
 * Describe the CPU topology to the scheduler: no special grouping,
 * every CPU is a flat, equal peer.
 */
struct cpu_group *
cpu_topo(void)
{
	struct cpu_group *top;

	top = smp_topo_none();
	return (top);
}
83 
84 void
85 cpu_mp_setmaxid(void)
86 {
87 	struct cpuref cpuref;
88 	int error;
89 
90 	mp_ncpus = 0;
91 	error = powerpc_smp_first_cpu(&cpuref);
92 	while (!error) {
93 		mp_ncpus++;
94 		error = powerpc_smp_next_cpu(&cpuref);
95 	}
96 	/* Sanity. */
97 	if (mp_ncpus == 0)
98 		mp_ncpus = 1;
99 
100 	/*
101 	 * Set the largest cpuid we're going to use. This is necessary
102 	 * for VM initialization.
103 	 */
104 	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
105 }
106 
107 int
108 cpu_mp_probe(void)
109 {
110 
111 	/*
112 	 * We're not going to enable SMP if there's only 1 processor.
113 	 */
114 	return (mp_ncpus > 1);
115 }
116 
117 void
118 cpu_mp_start(void)
119 {
120 	struct cpuref bsp, cpu;
121 	struct pcpu *pc;
122 	int error;
123 
124 	error = powerpc_smp_get_bsp(&bsp);
125 	KASSERT(error == 0, ("Don't know BSP"));
126 	KASSERT(bsp.cr_cpuid == 0, ("%s: cpuid != 0", __func__));
127 
128 	error = powerpc_smp_first_cpu(&cpu);
129 	while (!error) {
130 		if (cpu.cr_cpuid >= MAXCPU) {
131 			printf("SMP: cpu%d: skipped -- ID out of range\n",
132 			    cpu.cr_cpuid);
133 			goto next;
134 		}
135 		if (all_cpus & (1 << cpu.cr_cpuid)) {
136 			printf("SMP: cpu%d: skipped - duplicate ID\n",
137 			    cpu.cr_cpuid);
138 			goto next;
139 		}
140 		if (cpu.cr_cpuid != bsp.cr_cpuid) {
141 			pc = &__pcpu[cpu.cr_cpuid];
142 			pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
143 		} else {
144 			pc = pcpup;
145 			pc->pc_cpuid = bsp.cr_cpuid;
146 			pc->pc_bsp = 1;
147 		}
148 		pc->pc_cpumask = 1 << pc->pc_cpuid;
149 		pc->pc_hwref = cpu.cr_hwref;
150 		all_cpus |= pc->pc_cpumask;
151 
152  next:
153 		error = powerpc_smp_next_cpu(&cpu);
154 	}
155 }
156 
157 void
158 cpu_mp_announce(void)
159 {
160 	struct pcpu *pc;
161 	int i;
162 
163 	for (i = 0; i <= mp_maxid; i++) {
164 		pc = pcpu_find(i);
165 		if (pc == NULL)
166 			continue;
167 		printf("cpu%d: dev=%x", i, pc->pc_hwref);
168 		if (pc->pc_bsp)
169 			printf(" (BSP)");
170 		printf("\n");
171 	}
172 }
173 
/*
 * SYSINIT hook (SI_SUB_SMP): wake the application processors, hand them
 * a common timebase/decrementer value, and declare SMP started.
 */
static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus;

	/* Nothing to do on a uniprocessor. */
	if (mp_ncpus <= 1)
		return;

	cpus = 0;
	smp_cpus = 0;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
		if (!pc->pc_bsp) {
			printf("Waking up CPU %d (dev=%x)\n", pc->pc_cpuid,
			    pc->pc_hwref);
			powerpc_smp_start_cpu(pc);
		} else {
			/* SPR 1023 is PIR, the processor ID register. */
			__asm __volatile("mfspr %0,1023" : "=r"(pc->pc_pir));
			pc->pc_awake = 1;
		}
		/* A started AP set pc_awake in machdep_ap_bootstrap(). */
		if (pc->pc_awake)
			smp_cpus++;
	}

	ap_awake = 1;		/* The BSP counts itself as awake. */

	/*
	 * Sample the BSP's timebase and decrementer for the APs to load
	 * (see machdep_ap_bootstrap()).  The +10 is a small fudge,
	 * presumably slack for the delay until the registers are written
	 * on each CPU — TODO confirm.
	 */
	__asm __volatile("mftb %0" : "=r"(ap_tbl));
	ap_tbl += 10;
	__asm __volatile("mfdec %0" : "=r"(ap_decr));
	/* Release the APs spinning on ap_state; sync makes it visible. */
	ap_state++;
	powerpc_sync();

	/* Reload the BSP's own timebase with the same value. */
	mtspr(SPR_TBL, 0);
	mtspr(SPR_TBU, 0);
	mtspr(SPR_TBL, ap_tbl);

	/* Wait for every awake AP to count itself into ap_awake. */
	while (ap_awake < smp_cpus)
		;

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
			mp_ncpus, cpus, smp_cpus);
	}

	smp_active = 1;
	smp_started = 1;
}
223 
224 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
225 
226 static u_int ipi_msg_cnt[32];
227 
228 int
229 powerpc_ipi_handler(void *arg)
230 {
231 	cpumask_t self;
232 	uint32_t ipimask;
233 	int msg;
234 
235 	ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
236 	if (ipimask == 0)
237 		return (FILTER_STRAY);
238 	while ((msg = ffs(ipimask) - 1) != -1) {
239 		ipimask &= ~(1u << msg);
240 		ipi_msg_cnt[msg]++;
241 		switch (msg) {
242 		case IPI_AST:
243 			break;
244 		case IPI_PREEMPT:
245 			sched_preempt(curthread);
246 			break;
247 		case IPI_RENDEZVOUS:
248 			smp_rendezvous_action();
249 			break;
250 		case IPI_STOP:
251 			self = PCPU_GET(cpumask);
252 			savectx(PCPU_GET(curpcb));
253 			atomic_set_int(&stopped_cpus, self);
254 			while ((started_cpus & self) == 0)
255 				cpu_spinwait();
256 			atomic_clear_int(&started_cpus, self);
257 			atomic_clear_int(&stopped_cpus, self);
258 			break;
259 		}
260 	}
261 
262 	return (FILTER_HANDLED);
263 }
264 
/*
 * Post a single IPI message to the given CPU: atomically set the
 * message bit in the target's pending mask, then trigger the interrupt
 * through the PIC.  The mask must be set before the interrupt fires so
 * powerpc_ipi_handler() finds a non-empty mask.
 */
static void
ipi_send(struct pcpu *pc, int ipi)
{

	atomic_set_32(&pc->pc_ipimask, (1 << ipi));
	PIC_IPI(pic, pc->pc_cpuid);
}
272 
273 /* Send an IPI to a set of cpus. */
274 void
275 ipi_selected(cpumask_t cpus, int ipi)
276 {
277 	struct pcpu *pc;
278 
279 	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
280 		if (cpus & pc->pc_cpumask)
281 			ipi_send(pc, ipi);
282 	}
283 }
284 
285 /* Send an IPI to all CPUs EXCEPT myself. */
286 void
287 ipi_all_but_self(int ipi)
288 {
289 	struct pcpu *pc;
290 
291 	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
292 		if (pc != pcpup)
293 			ipi_send(pc, ipi);
294 	}
295 }
296