/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/prom_plat.h>
#include <sys/promif.h>
#include <sys/vm.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/cpupart.h>
#include <sys/disp.h>
#include <sys/hypervisor_api.h>
#include <sys/traptrace.h>
#include <sys/modctl.h>
#include <sys/ldoms.h>

#ifdef TRAPTRACE
int mach_htraptrace_enable = 1;
#else
int mach_htraptrace_enable = 0;
#endif
int htrap_tr0_inuse = 0;
extern char htrap_tr0[];	/* prealloc buf for boot cpu */

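/*
 * Base of the MMU fault status area (MMFSA); carved out of nucleus data
 * by ndata_alloc_mmfsa() and handed to the firmware on a per-CPU basis
 * in setup_trap_table().
 */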
caddr_t	mmu_fault_status_area;

extern void sfmmu_set_tsbs(void);
/*
 * CPU IDLE optimization variables/routines
 */
static int enable_halt_idle_cpus = 1;

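/*
 * Per-CPU trap setup: compute this CPU's slice of the MMU fault status
 * area, initialize the interrupt request free list, set the kernel
 * window state, record the MMFSA address in the scratchpad, register
 * the trap table and MMFSA with the firmware, and program the TSBs.
 */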
void
setup_trap_table(void)
{
	caddr_t mmfsa_va;

	mmfsa_va = mmu_fault_status_area + (MMFSA_SIZE * CPU->cpu_id);

	intr_init(CPU);		/* init interrupt request free list */
	setwstate(WSTATE_KERN);
	set_mmfsa_scratchpad(mmfsa_va);
	prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
	sfmmu_set_tsbs();
}

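/*
 * Platform hook invoked when the list of physically installed memory
 * changes; sun4v currently has nothing to do here.
 */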
void
phys_install_has_changed(void)
{
}

#ifdef N2_IDLE_WORKAROUND
/*
 * Tuneable to control enabling of IDLE loop workaround on Niagara2 1.x parts.
 * This workaround will be removed before the RR.
 */
int	n2_idle_workaround;
#endif

/*
 * Halt the present CPU until awoken via an interrupt
 */
static void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpun = cpup->cpu_id;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
	volatile int *p = &cpup->cpu_disp->disp_nrunnable;
	uint_t s;

	/*
	 * If this CPU is online and there are multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if ((cpup->cpu_flags & CPU_OFFLINE) || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		membar_producer();
		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		return;
	}

#ifdef N2_IDLE_WORKAROUND
	/*
	 * The following workaround for Niagara2, when enabled, forces the
	 * IDLE CPU to wait in a tight loop until something becomes runnable
	 * locally, minimizing the overall CPU usage on an IDLE CPU.
	 */
	if (n2_idle_workaround) {
		while (cpup->cpu_disp->disp_nrunnable == 0) {
			(void) hv_cpu_yield();
		}
	}
#endif

	/*
	 * We're on our way to being halted.  Wait until something becomes
	 * runnable locally or we are awakened (i.e. removed from the halt
	 * set).  Note that the call to hv_cpu_yield() can return even if
	 * we have nothing to do.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check, then the poke will pop us out of the halted state.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 *
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	s = disable_vec_intr();
	while (*p == 0 &&
	    (!hset_update || CPU_IN_SET(cp->cp_mach->mc_haltset, cpun))) {
		(void) hv_cpu_yield();
		enable_vec_intr(s);
		s = disable_vec_intr();
	}

	/*
	 * We're no longer halted
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
	}
}

/*
 * If "cpu" is halted, then wake it up, clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	int		result;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_mach->mc_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;

	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_mach->mc_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cp->cp_mach->mc_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}

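/*
 * Install cpu_halt() and cpu_wakeup() as the idle loop and dispatcher
 * enqueue hooks, unless halting idle CPUs has been disabled via
 * enable_halt_idle_cpus.
 */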
void
mach_cpu_halt_idle(void)
{
	if (enable_halt_idle_cpus) {
		idle_cpu = cpu_halt;
		disp_enq_thread = cpu_wakeup;
	}
}

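/*
 * Carve the MMU fault status area out of nucleus data: MMFSA_SIZE bytes
 * per possible CPU, aligned to ecache_alignsize.  Returns 0 on success,
 * -1 if the allocation fails.
 */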
int
ndata_alloc_mmfsa(struct memlist *ndata)
{
	size_t	size;

	size = MMFSA_SIZE * max_ncpus;
	mmu_fault_status_area = ndata_alloc(ndata, size, ecache_alignsize);
	if (mmu_fault_status_area == NULL)
		return (-1);
	return (0);
}

void
mach_memscrub(void)
{
	/* no memscrub support for sun4v for now */
}

void
mach_fpras(void)
{
	/* no fpras support for sun4v for now */
}

void
mach_hw_copy_limit(void)
{
	/* HW copy limits set by individual CPU module */
}

/*
 * We need to enable soft ring functionality on the Niagara platform since
 * one strand can't handle interrupts for a 1Gb NIC. Set the tunable
 * ip_squeue_soft_ring by default on this platform. We can also set
 * ip_threads_per_cpu to track the number of threads per core. The
 * variables themselves are defined in space.c and used by the IP module.
 */
extern uint_t ip_threads_per_cpu;
extern boolean_t ip_squeue_soft_ring;
void
startup_platform(void)
{
	ip_squeue_soft_ring = B_TRUE;
}

/*
 * This function sets up the hypervisor traptrace buffer.
 * It is called by the boot cpu only.
 */
void
mach_htraptrace_setup(int cpuid)
{
	TRAP_TRACE_CTL	*ctlp;
	int bootcpuid = getprocessorid(); /* invoked on boot cpu only */

	if (mach_htraptrace_enable && ((cpuid != bootcpuid) ||
	    !htrap_tr0_inuse)) {
		ctlp = &trap_trace_ctl[cpuid];
		ctlp->d.hvaddr_base = (cpuid == bootcpuid) ? htrap_tr0 :
		    contig_mem_alloc_align(HTRAP_TSIZE, HTRAP_TSIZE);
		if (ctlp->d.hvaddr_base == NULL) {
			ctlp->d.hlimit = 0;
			ctlp->d.hpaddr_base = NULL;
			cmn_err(CE_WARN, "!cpu%d: failed to allocate HV "
			    "traptrace buffer", cpuid);
		} else {
			ctlp->d.hlimit = HTRAP_TSIZE;
			ctlp->d.hpaddr_base = va_to_pa(ctlp->d.hvaddr_base);
		}
	}
}

/*
 * This function enables or disables hypervisor traptracing.
 */
void
mach_htraptrace_configure(int cpuid)
{
	uint64_t ret;
	uint64_t prev_buf, prev_bufsize;
	uint64_t prev_enable;
	uint64_t size;
	TRAP_TRACE_CTL	*ctlp;

	ctlp = &trap_trace_ctl[cpuid];
	if (mach_htraptrace_enable) {
		if ((ctlp->d.hvaddr_base != NULL) &&
		    ((ctlp->d.hvaddr_base != htrap_tr0) ||
		    (!htrap_tr0_inuse))) {
			ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
			if ((ret == H_EOK) && (prev_bufsize != 0)) {
				cmn_err(CE_CONT,
				    "!cpu%d: previous HV traptrace buffer of "
				    "size 0x%lx at address 0x%lx", cpuid,
				    prev_bufsize, prev_buf);
			}

			ret = hv_ttrace_buf_conf(ctlp->d.hpaddr_base,
			    ctlp->d.hlimit /
			    (sizeof (struct htrap_trace_record)), &size);
			if (ret == H_EOK) {
				ret = hv_ttrace_enable(
				    (uint64_t)TRAP_TENABLE_ALL, &prev_enable);
				if (ret != H_EOK) {
					cmn_err(CE_WARN,
					    "!cpu%d: HV traptracing not "
					    "enabled, ta: 0x%x returned error: "
					    "%ld", cpuid, TTRACE_ENABLE, ret);
				} else {
					if (ctlp->d.hvaddr_base == htrap_tr0)
						htrap_tr0_inuse = 1;
				}
			} else {
				cmn_err(CE_WARN,
				    "!cpu%d: HV traptrace buffer not "
				    "configured, ta: 0x%x returned error: %ld",
				    cpuid, TTRACE_BUF_CONF, ret);
			}
			/*
			 * Set hvaddr_base to NULL when traptrace buffer
			 * registration fails.
			 */
			if (ret != H_EOK) {
				ctlp->d.hvaddr_base = NULL;
				ctlp->d.hlimit = 0;
				ctlp->d.hpaddr_base = NULL;
			}
		}
	} else {
		ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
		if ((ret == H_EOK) && (prev_bufsize != 0)) {
			ret = hv_ttrace_enable((uint64_t)TRAP_TDISABLE_ALL,
			    &prev_enable);
			if (ret == H_EOK) {
				if (ctlp->d.hvaddr_base == htrap_tr0)
					htrap_tr0_inuse = 0;
				ctlp->d.hvaddr_base = NULL;
				ctlp->d.hlimit = 0;
				ctlp->d.hpaddr_base = NULL;
			} else
				cmn_err(CE_WARN,
				    "!cpu%d: HV traptracing not disabled, "
				    "ta: 0x%x returned error: %ld",
				    cpuid, TTRACE_ENABLE, ret);
		}
	}
}

/*
 * This function cleans up the hypervisor traptrace buffer.
 */
void
mach_htraptrace_cleanup(int cpuid)
{
	if (mach_htraptrace_enable) {
		TRAP_TRACE_CTL *ctlp;
		caddr_t httrace_buf_va;

		ASSERT(cpuid < max_ncpus);
		ctlp = &trap_trace_ctl[cpuid];
		httrace_buf_va = ctlp->d.hvaddr_base;
		if (httrace_buf_va == htrap_tr0) {
			bzero(httrace_buf_va, HTRAP_TSIZE);
		} else if (httrace_buf_va != NULL) {
			contig_mem_free(httrace_buf_va, HTRAP_TSIZE);
		}
		ctlp->d.hvaddr_base = NULL;
		ctlp->d.hlimit = 0;
		ctlp->d.hpaddr_base = NULL;
	}
}

/*
 * Load any required machine class (sun4v) specific drivers.
 */
void
load_mach_drivers(void)
{
	/*
	 * We don't want to load these LDOMs-specific
	 * modules if domaining is not supported.  Also,
	 * we must be able to run on non-LDOMs firmware.
	 */
	if (!(domaining_capabilities & DOMAINING_SUPPORTED))
		return;

	/*
	 * Load the core domain services module
	 */
	if (modload("misc", "ds") == -1)
		cmn_err(CE_NOTE, "!'ds' module failed to load");

	/*
	 * Load the rest of the domain services
	 */
	if (modload("misc", "fault_iso") == -1)
		cmn_err(CE_NOTE, "!'fault_iso' module failed to load");

	if (modload("misc", "platsvc") == -1)
		cmn_err(CE_NOTE, "!'platsvc' module failed to load");

	if ((domaining_capabilities & DOMAINING_ENABLED) &&
	    modload("misc", "dr_cpu") == -1)
		cmn_err(CE_NOTE, "!'dr_cpu' module failed to load");

	/*
	 * Attempt to attach any virtual device servers. These
	 * drivers must be loaded at start of day so that they
	 * can respond to any updates to the machine description.
	 *
	 * Since it is quite likely that a domain will not support
	 * one or more of these servers, failures are ignored.
	 */

	/* virtual disk server */
	(void) i_ddi_attach_hw_nodes("vds");

	/* virtual network switch */
	(void) i_ddi_attach_hw_nodes("vsw");

	/* virtual console concentrator */
	(void) i_ddi_attach_hw_nodes("vcc");
}
496