xref: /titanic_52/usr/src/uts/sun4v/os/mach_startup.c (revision 5d54f3d8999eac1762fe0a8c7177d20f1f201fae)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/prom_plat.h>
#include <sys/promif.h>
#include <sys/vm.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/cpupart.h>
#include <sys/disp.h>
#include <sys/hypervisor_api.h>
#ifdef TRAPTRACE
#include <sys/traptrace.h>
#include <sys/hypervisor_api.h>
#endif /* TRAPTRACE */

caddr_t	mmu_fault_status_area;

extern void sfmmu_set_tsbs(void);
/*
 * CPU IDLE optimization variables/routines
 */
static int enable_halt_idle_cpus = 1;

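/*
 * Set up per-CPU trap handling state: initialize the interrupt request
 * free list, switch to the kernel window state, record this CPU's MMU
 * fault status area in the scratchpad, register the trap table and the
 * MMFSA's physical address with the PROM, and install the TSBs.
 */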
void
setup_trap_table(void)
{
	caddr_t mmfsa_va;
	extern caddr_t mmu_fault_status_area;

	mmfsa_va =
	    mmu_fault_status_area + (MMFSA_SIZE * CPU->cpu_id);

	intr_init(CPU);			/* init interrupt request free list */
	setwstate(WSTATE_KERN);
	set_mmfsa_scratchpad(mmfsa_va);
	prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
	sfmmu_set_tsbs();
}

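/*
 * Called when the list of installed physical memory changes;
 * currently a no-op on sun4v.
 */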
void
phys_install_has_changed(void)
{
}

/*
 * Halt the present CPU until awoken via an interrupt
 */
static void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpun = cpup->cpu_id;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
	uint_t s;

	/*
	 * If this CPU is online and there are multiple CPUs in the
	 * system, note that we are halting by adding ourselves to the
	 * partition's halted CPU bitmap. This allows other CPUs to
	 * find/awaken us when work becomes available.
	 */
	if (CPU->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		membar_producer();
		CPUSET_ATOMIC_ADD(cp->cp_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it has already been cleared, we'll return. If it is cleared
	 * after we check, the poke will pop us out of the halted state.
	 *
	 * The ordering of the poke and the clearing of the bit by
	 * cpu_wakeup() is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 */
	s = disable_vec_intr();

	if (hset_update && !CPU_IN_SET(cp->cp_haltset, cpun)) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		enable_vec_intr(s);
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
		}
		enable_vec_intr(s);
		return;
	}

	/*
	 * Halt the strand
	 */
	(void) hv_cpu_yield();

	/*
	 * We're no longer halted
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
	}
}

/*
 * If "cpu" is halted, wake it up, clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the CPU partition are halted and need
 * to be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	int		result;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourselves is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This CPU isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;

	/*
	 * See if there are any other halted CPUs. If there are, select
	 * one and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cp->cp_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}

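/*
 * If halting of idle CPUs is enabled, install cpu_halt() as the idle
 * loop routine and cpu_wakeup() as the dispatcher's enqueue-notification
 * routine.
 */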
void
mach_cpu_halt_idle()
{
	if (enable_halt_idle_cpus) {
		idle_cpu = cpu_halt;
		disp_enq_thread = cpu_wakeup;
	}
}

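/*
 * Reserve the MMU fault status areas (MMFSA_SIZE bytes per CPU, for up
 * to max_ncpus CPUs) from the nucleus data area. Returns 0 on success,
 * -1 if the allocation fails.
 */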
int
ndata_alloc_mmfsa(struct memlist *ndata)
{
	size_t	size;

	size = MMFSA_SIZE * max_ncpus;
	mmu_fault_status_area = ndata_alloc(ndata, size, ecache_alignsize);
	if (mmu_fault_status_area == NULL)
		return (-1);
	return (0);
}

void
mach_memscrub(void)
{
	/* no memscrub support for sun4v for now */
}

void
mach_fpras()
{
	/* no fpras support for sun4v for now */
}

void
mach_hw_copy_limit(void)
{
	/* HW copy limits set by individual CPU module */
}

#ifdef TRAPTRACE
/*
 * This function sets up the hypervisor traptrace buffer control
 * structure for the given CPU
 */
void
htrap_trace_setup(caddr_t buf, int cpuid)
{
	TRAP_TRACE_CTL	*ctlp;

	ctlp = &trap_trace_ctl[cpuid];
	ctlp->d.hvaddr_base = buf;
	ctlp->d.hlimit = HTRAP_TSIZE;
	ctlp->d.hpaddr_base = va_to_pa(buf);
}

/*
 * This function configures and enables the hypervisor traptrace buffer
 */
void
htrap_trace_register(int cpuid)
{
	uint64_t ret;
	uint64_t prev_buf, prev_bufsize;
	uint64_t prev_enable;
	uint64_t size;
	TRAP_TRACE_CTL	*ctlp;

	ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
	if ((ret == H_EOK) && (prev_bufsize != 0)) {
		cmn_err(CE_CONT,
		    "!cpu%d: previous HV traptrace buffer of size 0x%lx "
		    "at address 0x%lx", cpuid, prev_bufsize, prev_buf);
	}

	ctlp = &trap_trace_ctl[cpuid];
	ret = hv_ttrace_buf_conf(ctlp->d.hpaddr_base, HTRAP_TSIZE /
	    (sizeof (struct htrap_trace_record)), &size);
	if (ret == H_EOK) {
		ret = hv_ttrace_enable((uint64_t)TRAP_TENABLE_ALL,
		    &prev_enable);
		if (ret != H_EOK) {
			cmn_err(CE_WARN,
			    "!cpu%d: HV traptracing not enabled, "
			    "ta: 0x%x returned error: %d",
			    cpuid, TTRACE_ENABLE, ret);
		}
	} else {
		cmn_err(CE_WARN,
		    "!cpu%d: HV traptrace buffer not configured, "
		    "ta: 0x%x returned error: %d",
		    cpuid, TTRACE_BUF_CONF, ret);
	}
	/* set hvaddr_base to NULL when traptrace buffer registration fails */
	if (ret != H_EOK) {
		ctlp->d.hvaddr_base = NULL;
		ctlp->d.hlimit = 0;
		ctlp->d.hpaddr_base = NULL;
	}
}
#endif /* TRAPTRACE */