/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/prom_plat.h>
#include <sys/promif.h>
#include <sys/vm.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/cpupart.h>
#include <sys/disp.h>
#include <sys/hypervisor_api.h>
#include <sys/traptrace.h>

#ifdef TRAPTRACE
int mach_htraptrace_enable = 1;
#else
int mach_htraptrace_enable = 0;
#endif
int htrap_tr0_inuse = 0;
extern char htrap_tr0[];	/* prealloc buf for boot cpu */

caddr_t	mmu_fault_status_area;

extern void sfmmu_set_tsbs(void);
/*
 * CPU IDLE optimization variables/routines
 */
static int enable_halt_idle_cpus = 1;

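/*
 * Per-CPU trap handling setup: point this CPU's scratchpad register at its
 * slice of the MMU fault status area, initialize its interrupt request free
 * list, set the kernel window state, and register the trap table, MMFSA,
 * and TSBs with the firmware/hypervisor.
 */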
void
setup_trap_table(void)
{
	caddr_t mmfsa_va;

	mmfsa_va = mmu_fault_status_area + (MMFSA_SIZE * CPU->cpu_id);

	intr_init(CPU);			/* init interrupt request free list */
	setwstate(WSTATE_KERN);
	set_mmfsa_scratchpad(mmfsa_va);
	prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
	sfmmu_set_tsbs();
}

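/*
 * Called when the phys_install memory list changes; sun4v currently
 * takes no action here.
 */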
void
phys_install_has_changed(void)
{
}

/*
 * Halt the present CPU until awoken via an interrupt
 */
static void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpun = cpup->cpu_id;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
	uint_t s;

	/*
	 * If this CPU is online and there are multiple CPUs in the
	 * system, record that we are halting by adding ourselves to
	 * the partition's halted CPU bitmap. This allows other CPUs
	 * to find/awaken us when work becomes available.
	 */
	if ((CPU->cpu_flags & CPU_OFFLINE) || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		membar_producer();
		CPUSET_ATOMIC_ADD(cp->cp_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's already cleared, we'll return. If the bit is cleared
	 * after the check, the poke will pop us out of the halted state.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 */
	s = disable_vec_intr();

	if (hset_update && !CPU_IN_SET(cp->cp_haltset, cpun)) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		enable_vec_intr(s);
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
		}
		enable_vec_intr(s);
		return;
	}

	/*
	 * Halt the strand.
	 */
	(void) hv_cpu_yield();

	/*
	 * We're no longer halted.
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
	}
}

/*
 * If "cpu" is halted, wake it up, clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	int		result;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;

	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cp->cp_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}

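/*
 * Install cpu_halt()/cpu_wakeup() as the idle-loop and dispatcher-enqueue
 * hooks when halting of idle CPUs is enabled.
 */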
void
mach_cpu_halt_idle()
{
	if (enable_halt_idle_cpus) {
		idle_cpu = cpu_halt;
		disp_enq_thread = cpu_wakeup;
	}
}

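/*
 * Reserve space in the nucleus data area for the MMU fault status area
 * (MMFSA_SIZE bytes per CPU, for up to max_ncpus CPUs).
 */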
int
ndata_alloc_mmfsa(struct memlist *ndata)
{
	size_t	size;

	size = MMFSA_SIZE * max_ncpus;
	mmu_fault_status_area = ndata_alloc(ndata, size, ecache_alignsize);
	if (mmu_fault_status_area == NULL)
		return (-1);
	return (0);
}

void
mach_memscrub(void)
{
	/* no memscrub support for sun4v for now */
}

void
mach_fpras()
{
	/* no fpras support for sun4v for now */
}

void
mach_hw_copy_limit(void)
{
	/* HW copy limits set by individual CPU module */
}

/*
 * We need to enable soft ring functionality on the Niagara platform since
 * one strand can't handle the interrupt load of a 1Gb NIC. Set the tunable
 * ip_squeue_soft_ring by default on this platform. We can also set
 * ip_threads_per_cpu to track the number of threads per core. The variables
 * themselves are defined in space.c and used by the IP module.
 */
extern uint_t ip_threads_per_cpu;
extern boolean_t ip_squeue_soft_ring;
void
startup_platform(void)
{
	ip_squeue_soft_ring = B_TRUE;
}

/*
 * This function sets up the hypervisor traptrace buffer.
 * It is called by the boot CPU only.
 */
void
mach_htraptrace_setup(int cpuid)
{
	TRAP_TRACE_CTL	*ctlp;
	int bootcpuid = getprocessorid(); /* invoked on boot cpu only */

	if (mach_htraptrace_enable && ((cpuid != bootcpuid) ||
	    !htrap_tr0_inuse)) {
		ctlp = &trap_trace_ctl[cpuid];
		ctlp->d.hvaddr_base = (cpuid == bootcpuid) ? htrap_tr0 :
		    contig_mem_alloc_align(HTRAP_TSIZE, HTRAP_TSIZE);
		if (ctlp->d.hvaddr_base == NULL) {
			ctlp->d.hlimit = 0;
			ctlp->d.hpaddr_base = NULL;
			cmn_err(CE_WARN, "!cpu%d: failed to allocate HV "
			    "traptrace buffer", cpuid);
		} else {
			ctlp->d.hlimit = HTRAP_TSIZE;
			ctlp->d.hpaddr_base = va_to_pa(ctlp->d.hvaddr_base);
		}
	}
}

/*
 * This function enables or disables hypervisor traptracing.
 */
void
mach_htraptrace_configure(int cpuid)
{
	uint64_t ret;
	uint64_t prev_buf, prev_bufsize;
	uint64_t prev_enable;
	uint64_t size;
	TRAP_TRACE_CTL	*ctlp;

	ctlp = &trap_trace_ctl[cpuid];
	if (mach_htraptrace_enable) {
		if ((ctlp->d.hvaddr_base != NULL) &&
		    ((ctlp->d.hvaddr_base != htrap_tr0) ||
		    (!htrap_tr0_inuse))) {
			ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
			if ((ret == H_EOK) && (prev_bufsize != 0)) {
				cmn_err(CE_CONT,
				    "!cpu%d: previous HV traptrace buffer of "
				    "size 0x%lx at address 0x%lx", cpuid,
				    prev_bufsize, prev_buf);
			}

			ret = hv_ttrace_buf_conf(ctlp->d.hpaddr_base,
			    ctlp->d.hlimit /
			    (sizeof (struct htrap_trace_record)), &size);
			if (ret == H_EOK) {
				ret = hv_ttrace_enable(
				    (uint64_t)TRAP_TENABLE_ALL, &prev_enable);
				if (ret != H_EOK) {
					cmn_err(CE_WARN,
					    "!cpu%d: HV traptracing not "
					    "enabled, ta: 0x%x returned error: "
					    "%ld", cpuid, TTRACE_ENABLE, ret);
				} else {
					if (ctlp->d.hvaddr_base == htrap_tr0)
						htrap_tr0_inuse = 1;
				}
			} else {
				cmn_err(CE_WARN,
				    "!cpu%d: HV traptrace buffer not "
				    "configured, ta: 0x%x returned error: %ld",
				    cpuid, TTRACE_BUF_CONF, ret);
			}
			/*
			 * set hvaddr_base to NULL when traptrace buffer
			 * registration fails
			 */
			if (ret != H_EOK) {
				ctlp->d.hvaddr_base = NULL;
				ctlp->d.hlimit = 0;
				ctlp->d.hpaddr_base = NULL;
			}
		}
	} else {
		ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
		if ((ret == H_EOK) && (prev_bufsize != 0)) {
			ret = hv_ttrace_enable((uint64_t)TRAP_TDISABLE_ALL,
			    &prev_enable);
			if (ret == H_EOK) {
				if (ctlp->d.hvaddr_base == htrap_tr0)
					htrap_tr0_inuse = 0;
				ctlp->d.hvaddr_base = NULL;
				ctlp->d.hlimit = 0;
				ctlp->d.hpaddr_base = NULL;
			} else
				cmn_err(CE_WARN,
				    "!cpu%d: HV traptracing is not disabled, "
				    "ta: 0x%x returned error: %ld",
				    cpuid, TTRACE_ENABLE, ret);
		}
	}
}

/*
 * This function cleans up the hypervisor traptrace buffer.
 */
void
mach_htraptrace_cleanup(int cpuid)
{
	if (mach_htraptrace_enable) {
		TRAP_TRACE_CTL *ctlp;
		caddr_t httrace_buf_va;

		ASSERT(cpuid < max_ncpus);
		ctlp = &trap_trace_ctl[cpuid];
		httrace_buf_va = ctlp->d.hvaddr_base;
		if (httrace_buf_va == htrap_tr0) {
			bzero(httrace_buf_va, HTRAP_TSIZE);
		} else if (httrace_buf_va != NULL) {
			contig_mem_free(httrace_buf_va, HTRAP_TSIZE);
		}
		ctlp->d.hvaddr_base = NULL;
		ctlp->d.hlimit = 0;
		ctlp->d.hpaddr_base = NULL;
	}
}