/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/vm.h>
#include <sys/cpu.h>
#include <sys/cpupart.h>
#include <sys/cmt.h>
#include <sys/bitset.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/bootconf.h>
#include <sys/memlist_plat.h>
#include <sys/memlist_impl.h>
#include <sys/prom_plat.h>
#include <sys/prom_isa.h>
#include <sys/autoconf.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>
#include <sys/fpu/fpusystm.h>
#include <sys/iommutsb.h>
#include <vm/vm_dep.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/seg_map.h>
#include <vm/seg_kp.h>
#include <sys/sysconf.h>
#include <vm/hat_sfmmu.h>
#include <sys/kobj.h>
#include <sys/sun4asi.h>
#include <sys/clconf.h>
#include <sys/platform_module.h>
#include <sys/panic.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/clock.h>
#include <sys/fpras_impl.h>
#include <sys/prom_debug.h>
#include <sys/traptrace.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>

/*
 * fpRAS implementation structures.
 */
struct fpras_chkfn *fpras_chkfnaddrs[FPRAS_NCOPYOPS];
struct fpras_chkfngrp *fpras_chkfngrps;
struct fpras_chkfngrp *fpras_chkfngrps_base;
int fpras_frequency = -1;
int64_t fpras_interval = -1;

/*
 * Halt idling cpus optimization
 *
 * This optimization is only enabled on platforms that have
 * CPU halt support.  The cpu_halt_cpu() support is provided
 * in the cpu module and is referenced here with a pragma weak.
 * The presence of this routine automatically enables the halt
 * idling cpus functionality if the global switch
 * enable_halt_idle_cpus is set (the default).
 */
#pragma weak	cpu_halt_cpu
extern void	cpu_halt_cpu();

/*
 * Defines for the idle_state_transition DTrace probe
 *
 * The probe fires when the CPU undergoes an idle state change (e.g. halting).
 * The argument passed is the state to which the CPU is transitioning.
 *
 * The states are defined here.
 */
#define	IDLE_STATE_NORMAL 0
#define	IDLE_STATE_HALTED 1
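
/*
 * As an illustrative sketch (not part of the original source), the
 * transitions can be observed via the SDT provider, which renders the
 * double underscores of the probe name below as dashes:
 *
 *	dtrace -n 'sdt:::idle-state-transition { trace(arg0); }'
 */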

int		enable_halt_idle_cpus = 1; /* global switch */

void
setup_trap_table(void)
{
	intr_init(CPU);			/* init interrupt request free list */
	setwstate(WSTATE_KERN);
	prom_set_traptable(&trap_table);
}

void
mach_fpras()
{
	if (fpras_implemented && !fpras_disable) {
		int i;
		struct fpras_chkfngrp *fcgp;
		size_t chkfngrpsallocsz;

		/*
		 * Note that we size off of NCPU and set up for
		 * all those possibilities regardless of whether
		 * the cpu id is present or not.  We do this so that
		 * we don't have any construction or destruction
		 * activity to perform at DR time, and it's not
		 * costly in memory.  We require block alignment.
		 */
		chkfngrpsallocsz = NCPU * sizeof (struct fpras_chkfngrp);
		fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz, KM_SLEEP);
		if (IS_P2ALIGNED((uintptr_t)fpras_chkfngrps_base, 64)) {
			fpras_chkfngrps = fpras_chkfngrps_base;
		} else {
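			/*
			 * Not block aligned: over-allocate by the 64-byte
			 * block size and round up to an aligned address
			 * within the buffer; the raw base is preserved in
			 * fpras_chkfngrps_base.
			 */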
			kmem_free(fpras_chkfngrps_base, chkfngrpsallocsz);
			chkfngrpsallocsz += 64;
			fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz,
			    KM_SLEEP);
			fpras_chkfngrps = (struct fpras_chkfngrp *)
			    P2ROUNDUP((uintptr_t)fpras_chkfngrps_base, 64);
		}

		/*
		 * Copy our check function into place for each copy operation
		 * and each cpu id.
		 */
		fcgp = &fpras_chkfngrps[0];
		for (i = 0; i < FPRAS_NCOPYOPS; ++i)
			bcopy((void *)fpras_chkfn_type1, &fcgp->fpras_fn[i],
			    sizeof (struct fpras_chkfn));
		for (i = 1; i < NCPU; ++i)
			*(&fpras_chkfngrps[i]) = *fcgp;

		/*
		 * At definition fpras_frequency is set to -1, and it will
		 * still have that value unless changed in /etc/system (not
		 * strictly supported, but not preventable).  The following
		 * both sets the default and sanity checks anything from
		 * /etc/system.
		 */
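		/*
		 * As an illustrative (untested) example, the default could
		 * be overridden with an /etc/system line such as:
		 *	set fpras_frequency = 16
		 */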
		if (fpras_frequency < 0)
			fpras_frequency = FPRAS_DEFAULT_FREQUENCY;

		/*
		 * Now calculate fpras_interval.  When fpras_interval
		 * becomes non-negative fpras checks will commence
		 * (copies before this point in boot will bypass fpras).
		 * Our stores of instructions must be visible; no need
		 * to flush as they've never been executed before.
		 */
		membar_producer();
		fpras_interval = (fpras_frequency == 0) ?
		    0 : sys_tick_freq / fpras_frequency;
	}
}

void
mach_hw_copy_limit(void)
{
	if (!fpu_exists) {
		use_hw_bcopy = 0;
		hw_copy_limit_1 = 0;
		hw_copy_limit_2 = 0;
		hw_copy_limit_4 = 0;
		hw_copy_limit_8 = 0;
		use_hw_bzero = 0;
	}
}

void
load_tod_module()
{
	/*
	 * Load the tod driver module for the tod part found on this
	 * system.  Recompute the cpu frequency/delays based on tod, as
	 * the tod part tends to keep time more accurately.
	 */
	if (tod_module_name == NULL || modload("tod", tod_module_name) == -1)
		halt("Can't load tod module");
}

void
mach_memscrub(void)
{
	/*
	 * Start up the memory scrubber, if we are not running fpu
	 * emulation code.
	 */

#ifndef _HW_MEMSCRUB_SUPPORT
	if (fpu_exists) {
		if (memscrub_init()) {
			cmn_err(CE_WARN,
			    "Memory scrubber failed to initialize");
		}
	}
#endif /* _HW_MEMSCRUB_SUPPORT */
}

/*
 * Halt the calling CPU until awoken via an interrupt.
 * This routine should only be invoked if cpu_halt_cpu()
 * exists and is supported; see mach_cpu_halt_idle().
 */
static void
cpu_halt(void)
{
	cpu_t		*cpup = CPU;
	processorid_t	cpu_sid = cpup->cpu_seqid;
	cpupart_t	*cp = cpup->cpu_part;
	int		hset_update = 1;
	uint_t		pstate;
	extern uint_t	getpstate(void);
	extern void	setpstate(uint_t);

	/*
	 * If this CPU is online and there are multiple CPUs in the
	 * system, then we should note that we are halting by adding
	 * ourselves to the partition's halted CPU bitset.  This allows
	 * other CPUs to find/awaken us when work becomes available.
	 */
	if (CPU->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPU bitset
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpu bitset is checked to determine who
	 * (if anyone) should be awoken.  We therefore need to first
	 * add ourselves to the halted cpu bitset, and then check if
	 * there is any work available.  The order is important to
	 * prevent a race that can lead to work languishing on a run
	 * queue somewhere while this CPU remains halted.
	 *
	 * Either the producing CPU will see we're halted and will
	 * awaken us, or this CPU will see the work available in
	 * disp_anywork().
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
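		/*
		 * Ensure the CPU_DISP_HALTED store is globally visible
		 * before our bit appears in the haltset.
		 */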
		membar_producer();
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check.  We'll be notified through the clearing of our
	 * bit in the halted CPU bitset, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return.  If the bit is cleared after
	 * we check, then the poke will pop us out of the halted state.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 */
	pstate = getpstate();
	setpstate(pstate & ~PSTATE_IE);

	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		setpstate(pstate);
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness.  disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		setpstate(pstate);
		return;
	}

	/*
	 * Halt the strand.
	 */
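	/* &cpu_halt_cpu is NULL if the weak symbol is unresolved */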
	if (&cpu_halt_cpu) {
		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_HALTED);

		cpu_halt_cpu();

		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_NORMAL);
	}

	/*
	 * We're no longer halted.
	 */
	setpstate(pstate);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}

/*
 * If "cpu" is halted, then wake it up, clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 * This function should only be invoked if cpu_halt_cpu()
 * exists and is supported; see mach_cpu_halt_idle().
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	processorid_t	cpu_sid;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	cpu_sid = cpu->cpu_seqid;
	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU, since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpu bitset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourselves is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch.  No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if this is for a bound thread.
	 */
	if (bound)
		return;

	/*
	 * The CPU specified for wakeup isn't currently halted, so check
	 * to see if there are any other halted CPUs in the partition,
	 * and if there are, then awaken one.
	 *
	 * If possible, try to select a CPU close to the target, since this
	 * will likely trigger a migration.
	 */
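	/*
	 * bitset_atomic_test_and_del() fails if another CPU claims the
	 * candidate first; retry until we claim a halted CPU or find
	 * the haltset empty.
	 */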
	do {
		cpu_found = bitset_find(&cp->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);

	if (cpu_found != CPU->cpu_seqid)
		poke_cpu(cpu_seq[cpu_found]->cpu_id);
}

void
mach_cpu_halt_idle()
{
	if (enable_halt_idle_cpus) {
		if (&cpu_halt_cpu) {
			idle_cpu = cpu_halt;
			disp_enq_thread = cpu_wakeup;
		}
	}
}

/*ARGSUSED*/
int
cpu_intrq_setup(struct cpu *cp)
{
	/* Interrupt mondo queues not applicable to sun4u */
	return (0);
}

/*ARGSUSED*/
void
cpu_intrq_cleanup(struct cpu *cp)
{
	/* Interrupt mondo queues not applicable to sun4u */
}

/*ARGSUSED*/
void
cpu_intrq_register(struct cpu *cp)
{
	/* Interrupt/error queues not applicable to sun4u */
}

/*ARGSUSED*/
void
mach_htraptrace_setup(int cpuid)
{
	/* Set up the hypervisor traptrace buffer, not applicable to sun4u */
}

/*ARGSUSED*/
void
mach_htraptrace_configure(int cpuid)
{
	/* Enable/disable hypervisor trap tracing, not applicable to sun4u */
}

/*ARGSUSED*/
void
mach_htraptrace_cleanup(int cpuid)
{
	/* Clean up the hypervisor traptrace buffer, not applicable to sun4u */
}

void
mach_descrip_startup_init(void)
{
	/*
	 * Only for sun4v.
	 * Initialize the Machine Description framework during startup.
	 */
}

void
mach_descrip_startup_fini(void)
{
	/*
	 * Only for sun4v.
	 * Clean up the Machine Description framework during startup.
	 */
}

void
mach_descrip_init(void)
{
	/*
	 * Only for sun4v.
	 * Initialize the Machine Description framework.
	 */
}

void
hsvc_setup(void)
{
	/* Set up hypervisor services, not applicable to sun4u */
}

void
load_mach_drivers(void)
{
	/* Currently no machine class (sun4u) specific drivers to load */
}

/*
 * Return true if the machine we're running on is a Positron.
 * (Positron is an unsupported developer platform.)
 */
int
iam_positron(void)
{
	char model[32];
	const char proto_model[] = "SUNW,501-2732";
	pnode_t root = prom_rootnode();

	if (prom_getproplen(root, "model") != sizeof (proto_model))
		return (0);

	(void) prom_getprop(root, "model", model);
	if (strcmp(model, proto_model) == 0)
		return (1);
	return (0);
}

/*
 * Find a physically contiguous area of twice the largest ecache size
 * to be used while doing displacement flush of ecaches.
 */
uint64_t
ecache_flush_address(void)
{
	struct memlist *pmem;
	uint64_t flush_size;
	uint64_t ret_val;

	flush_size = ecache_size * 2;
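	/*
	 * Scan installed physical memory for a segment that still holds
	 * flush_size bytes once its base is rounded up to an ecache_size
	 * boundary.
	 */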
	for (pmem = phys_install; pmem; pmem = pmem->next) {
		ret_val = P2ROUNDUP(pmem->address, ecache_size);
		if (ret_val + flush_size <= pmem->address + pmem->size)
			return (ret_val);
	}
	return ((uint64_t)-1);
}

/*
 * Called with the memlist lock held to say that phys_install has
 * changed.
 */
void
phys_install_has_changed(void)
{
	/*
	 * Get the new address into a temporary just in case panicking
	 * involves use of ecache_flushaddr.
	 */
	uint64_t new_addr;

	new_addr = ecache_flush_address();
	if (new_addr == (uint64_t)-1) {
		cmn_err(CE_PANIC,
		    "ecache_flush_address(): failed, ecache_size=%x",
		    ecache_size);
		/*NOTREACHED*/
	}
	ecache_flushaddr = new_addr;
	membar_producer();
}
560