xref: /titanic_50/usr/src/uts/sun4v/os/mach_startup.c (revision e38a713ad4e0a9c42f8cccd9350412b2c6ccccdb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/machsystm.h>
30 #include <sys/archsystm.h>
31 #include <sys/prom_plat.h>
32 #include <sys/promif.h>
33 #include <sys/vm.h>
34 #include <sys/cpu.h>
35 #include <sys/atomic.h>
36 #include <sys/cpupart.h>
37 #include <sys/disp.h>
38 #include <sys/hypervisor_api.h>
39 #include <sys/traptrace.h>
40 #include <sys/modctl.h>
41 #include <sys/ldoms.h>
42 #include <vm/vm_dep.h>
43 
44 #ifdef TRAPTRACE
45 int mach_htraptrace_enable = 1;
46 #else
47 int mach_htraptrace_enable = 0;
48 #endif
49 int htrap_tr0_inuse = 0;
50 extern char htrap_tr0[];	/* prealloc buf for boot cpu */
51 
52 caddr_t	mmu_fault_status_area;
53 
54 extern void sfmmu_set_tsbs(void);
55 /*
56  * CPU IDLE optimization variables/routines
57  */
58 static int enable_halt_idle_cpus = 1;
59 
60 #define	SUN4V_CLOCK_TICK_THRESHOLD	64
61 #define	SUN4V_CLOCK_TICK_NCPUS		64
62 
63 extern int	clock_tick_threshold;
64 extern int	clock_tick_ncpus;
65 
66 void
67 setup_trap_table(void)
68 {
69 	caddr_t mmfsa_va;
70 	extern	 caddr_t mmu_fault_status_area;
71 	mmfsa_va =
72 	    mmu_fault_status_area + (MMFSA_SIZE * CPU->cpu_id);
73 
74 	intr_init(CPU);		/* init interrupt request free list */
75 	setwstate(WSTATE_KERN);
76 	set_mmfsa_scratchpad(mmfsa_va);
77 	prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
78 	sfmmu_set_tsbs();
79 }
80 
/*
 * Hook invoked when the physical memory installed in the system
 * changes; sun4v has nothing to do here.
 */
void
phys_install_has_changed(void)
{
}
86 
/*
 * Halt the present CPU until awoken via an interrupt.
 * Installed as the idle_cpu handler by mach_cpu_halt_idle(); runs on
 * the idle thread when this CPU has no work.  Halting is implemented
 * with hv_cpu_yield(), so the strand gives its cycles back to the
 * hypervisor until poked.
 */
static void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpun = cpup->cpu_id;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
	/* Local dispatch queue length; polled below to exit the halt loop. */
	volatile int *p = &cpup->cpu_disp->disp_nrunnable;
	uint_t s;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (CPU->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		/* Flag must be visible before our bit appears in the set. */
		membar_producer();
		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		return;
	}

	/*
	 * We're on our way to being halted.  Wait until something becomes
	 * runnable locally or we are awaken (i.e. removed from the halt set).
	 * Note that the call to hv_cpu_yield() can return even if we have
	 * nothing to do.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 * Also, if the offlined CPU has been brought back on-line, then
	 * we return as well.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 *
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 *
	 */
	s = disable_vec_intr();
	while (*p == 0 &&
	    ((hset_update && CPU_IN_SET(cp->cp_mach->mc_haltset, cpun)) ||
	    (!hset_update && (CPU->cpu_flags & CPU_OFFLINE)))) {
		(void) hv_cpu_yield();
		/* Briefly re-enable so a pending poke/interrupt can land. */
		enable_vec_intr(s);
		s = disable_vec_intr();
	}

	/*
	 * We're no longer halted
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
	}
}
185 
/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 *
 * Installed as the disp_enq_thread hook by mach_cpu_halt_idle();
 * called after a thread is enqueued on "cpu".  "bound" is nonzero when
 * the enqueued thread is bound to that CPU, in which case no other CPU
 * needs to be awoken to steal it.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	int		result;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_mach->mc_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 *
		 * NB: clear-then-poke ordering is required by cpu_halt(),
		 * which disables interrupts and then checks its bit.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;

	/*
	 * See if there's any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_mach->mc_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		/* XDEL reports via "result" whether we won the race. */
		CPUSET_ATOMIC_XDEL(cp->cp_mach->mc_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}
252 
253 void
254 mach_cpu_halt_idle()
255 {
256 	if (enable_halt_idle_cpus) {
257 		idle_cpu = cpu_halt;
258 		disp_enq_thread = cpu_wakeup;
259 	}
260 }
261 
262 int
263 ndata_alloc_mmfsa(struct memlist *ndata)
264 {
265 	size_t	size;
266 
267 	size = MMFSA_SIZE * max_ncpus;
268 	mmu_fault_status_area = ndata_alloc(ndata, size, ecache_alignsize);
269 	if (mmu_fault_status_area == NULL)
270 		return (-1);
271 	return (0);
272 }
273 
/*
 * Memory scrubber hook; sun4v does not implement one yet.
 */
void
mach_memscrub(void)
{
}
279 
/*
 * FP RAS (floating-point reliability/availability/serviceability)
 * hook; sun4v has no fpras support for now.
 * Note: definition now uses a proper (void) prototype instead of the
 * old-style empty parameter list, matching the rest of this file.
 */
void
mach_fpras(void)
{
	/* no fpras support for sun4v for now */
}
285 
/*
 * Nothing to do here: each sun4v CPU module establishes its own
 * hardware copy thresholds.
 */
void
mach_hw_copy_limit(void)
{
}
291 
292 /*
293  * We need to enable soft ring functionality on Niagara platform since
294  * one strand can't handle interrupts for a 1Gb NIC. Set the tunable
295  * ip_squeue_soft_ring by default on this platform. We can also set
296  * ip_threads_per_cpu to track number of threads per core. The variables
297  * themselves are defined in space.c and used by IP module
298  */
299 extern uint_t ip_threads_per_cpu;
300 extern boolean_t ip_squeue_soft_ring;
301 void
302 startup_platform(void)
303 {
304 	ip_squeue_soft_ring = B_TRUE;
305 	if (clock_tick_threshold == 0)
306 		clock_tick_threshold = SUN4V_CLOCK_TICK_THRESHOLD;
307 	if (clock_tick_ncpus == 0)
308 		clock_tick_ncpus = SUN4V_CLOCK_TICK_NCPUS;
309 }
310 
/*
 * This function sets up hypervisor traptrace buffer
 * This routine is called by the boot cpu only
 *
 * The boot CPU reuses the preallocated htrap_tr0 buffer (but only if
 * it is not already handed to the HV, per htrap_tr0_inuse); all other
 * CPUs get a freshly allocated, HTRAP_TSIZE-aligned contiguous buffer.
 */
void
mach_htraptrace_setup(int cpuid)
{
	TRAP_TRACE_CTL	*ctlp;
	int bootcpuid = getprocessorid(); /* invoked on boot cpu only */

	if (mach_htraptrace_enable && ((cpuid != bootcpuid) ||
	    !htrap_tr0_inuse)) {
		ctlp = &trap_trace_ctl[cpuid];
		/* boot cpu: static buffer; others: contig allocation */
		ctlp->d.hvaddr_base = (cpuid == bootcpuid) ? htrap_tr0 :
		    contig_mem_alloc_align(HTRAP_TSIZE, HTRAP_TSIZE);
		if (ctlp->d.hvaddr_base == NULL) {
			/* allocation failed: leave this cpu untraced */
			ctlp->d.hlimit = 0;
			ctlp->d.hpaddr_base = NULL;
			cmn_err(CE_WARN, "!cpu%d: failed to allocate HV "
			    "traptrace buffer", cpuid);
		} else {
			ctlp->d.hlimit = HTRAP_TSIZE;
			ctlp->d.hpaddr_base = va_to_pa(ctlp->d.hvaddr_base);
		}
	}
}
337 
338 /*
339  * This function enables or disables the hypervisor traptracing
340  */
341 void
342 mach_htraptrace_configure(int cpuid)
343 {
344 	uint64_t ret;
345 	uint64_t prev_buf, prev_bufsize;
346 	uint64_t prev_enable;
347 	uint64_t size;
348 	TRAP_TRACE_CTL	*ctlp;
349 
350 	ctlp = &trap_trace_ctl[cpuid];
351 	if (mach_htraptrace_enable) {
352 		if ((ctlp->d.hvaddr_base != NULL) &&
353 		    ((ctlp->d.hvaddr_base != htrap_tr0) ||
354 		    (!htrap_tr0_inuse))) {
355 			ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
356 			if ((ret == H_EOK) && (prev_bufsize != 0)) {
357 				cmn_err(CE_CONT,
358 				    "!cpu%d: previous HV traptrace buffer of "
359 				    "size 0x%lx at address 0x%lx", cpuid,
360 				    prev_bufsize, prev_buf);
361 			}
362 
363 			ret = hv_ttrace_buf_conf(ctlp->d.hpaddr_base,
364 			    ctlp->d.hlimit /
365 			    (sizeof (struct htrap_trace_record)), &size);
366 			if (ret == H_EOK) {
367 				ret = hv_ttrace_enable(\
368 				    (uint64_t)TRAP_TENABLE_ALL, &prev_enable);
369 				if (ret != H_EOK) {
370 					cmn_err(CE_WARN,
371 					    "!cpu%d: HV traptracing not "
372 					    "enabled, ta: 0x%x returned error: "
373 					    "%ld", cpuid, TTRACE_ENABLE, ret);
374 				} else {
375 					if (ctlp->d.hvaddr_base == htrap_tr0)
376 						htrap_tr0_inuse = 1;
377 				}
378 			} else {
379 				cmn_err(CE_WARN,
380 				    "!cpu%d: HV traptrace buffer not "
381 				    "configured, ta: 0x%x returned error: %ld",
382 				    cpuid, TTRACE_BUF_CONF, ret);
383 			}
384 			/*
385 			 * set hvaddr_base to NULL when traptrace buffer
386 			 * registration fails
387 			 */
388 			if (ret != H_EOK) {
389 				ctlp->d.hvaddr_base = NULL;
390 				ctlp->d.hlimit = 0;
391 				ctlp->d.hpaddr_base = NULL;
392 			}
393 		}
394 	} else {
395 		ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
396 		if ((ret == H_EOK) && (prev_bufsize != 0)) {
397 			ret = hv_ttrace_enable((uint64_t)TRAP_TDISABLE_ALL,
398 			    &prev_enable);
399 			if (ret == H_EOK) {
400 				if (ctlp->d.hvaddr_base == htrap_tr0)
401 					htrap_tr0_inuse = 0;
402 				ctlp->d.hvaddr_base = NULL;
403 				ctlp->d.hlimit = 0;
404 				ctlp->d.hpaddr_base = NULL;
405 			} else
406 				cmn_err(CE_WARN,
407 				    "!cpu%d: HV traptracing is not disabled, "
408 				    "ta: 0x%x returned error: %ld",
409 				    cpuid, TTRACE_ENABLE, ret);
410 		}
411 	}
412 }
413 
414 /*
415  * This function cleans up the hypervisor traptrace buffer
416  */
417 void
418 mach_htraptrace_cleanup(int cpuid)
419 {
420 	if (mach_htraptrace_enable) {
421 		TRAP_TRACE_CTL *ctlp;
422 		caddr_t httrace_buf_va;
423 
424 		ASSERT(cpuid < max_ncpus);
425 		ctlp = &trap_trace_ctl[cpuid];
426 		httrace_buf_va = ctlp->d.hvaddr_base;
427 		if (httrace_buf_va == htrap_tr0) {
428 			bzero(httrace_buf_va, HTRAP_TSIZE);
429 		} else if (httrace_buf_va != NULL) {
430 			contig_mem_free(httrace_buf_va, HTRAP_TSIZE);
431 		}
432 		ctlp->d.hvaddr_base = NULL;
433 		ctlp->d.hlimit = 0;
434 		ctlp->d.hpaddr_base = NULL;
435 	}
436 }
437 
438 /*
439  * Load any required machine class (sun4v) specific drivers.
440  */
441 void
442 load_mach_drivers(void)
443 {
444 	/*
445 	 * We don't want to load these LDOMs-specific
446 	 * modules if domaining is not supported.  Also,
447 	 * we must be able to run on non-LDOMs firmware.
448 	 */
449 	if (!domaining_supported())
450 		return;
451 
452 	/*
453 	 * Load the core domain services module
454 	 */
455 	if (modload("misc", "ds") == -1)
456 		cmn_err(CE_NOTE, "!'ds' module failed to load");
457 
458 	/*
459 	 * Load the rest of the domain services
460 	 */
461 	if (modload("misc", "fault_iso") == -1)
462 		cmn_err(CE_NOTE, "!'fault_iso' module failed to load");
463 
464 	if (modload("misc", "platsvc") == -1)
465 		cmn_err(CE_NOTE, "!'platsvc' module failed to load");
466 
467 	if (domaining_enabled() && modload("misc", "dr_cpu") == -1)
468 		cmn_err(CE_NOTE, "!'dr_cpu' module failed to load");
469 
470 	/*
471 	 * Attempt to attach any virtual device servers. These
472 	 * drivers must be loaded at start of day so that they
473 	 * can respond to any updates to the machine description.
474 	 *
475 	 * Since it is quite likely that a domain will not support
476 	 * one or more of these servers, failures are ignored.
477 	 */
478 
479 	/* virtual disk server */
480 	(void) i_ddi_attach_hw_nodes("vds");
481 
482 	/* virtual network switch */
483 	(void) i_ddi_attach_hw_nodes("vsw");
484 
485 	/* virtual console concentrator */
486 	(void) i_ddi_attach_hw_nodes("vcc");
487 }
488