17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate * CDDL HEADER START
37c478bd9Sstevel@tonic-gate *
47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the
50400e0b7Sha137994 * Common Development and Distribution License (the "License").
60400e0b7Sha137994 * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate *
87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate * and limitations under the License.
127c478bd9Sstevel@tonic-gate *
137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate *
197c478bd9Sstevel@tonic-gate * CDDL HEADER END
207c478bd9Sstevel@tonic-gate */
211ae08745Sheppo
227c478bd9Sstevel@tonic-gate /*
23*0542eecfSRafael Vanoni * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
247c478bd9Sstevel@tonic-gate */
257c478bd9Sstevel@tonic-gate
267c478bd9Sstevel@tonic-gate #include <sys/machsystm.h>
277c478bd9Sstevel@tonic-gate #include <sys/archsystm.h>
287c478bd9Sstevel@tonic-gate #include <sys/prom_plat.h>
297c478bd9Sstevel@tonic-gate #include <sys/promif.h>
307c478bd9Sstevel@tonic-gate #include <sys/vm.h>
317c478bd9Sstevel@tonic-gate #include <sys/cpu.h>
326890d023SEric Saxe #include <sys/bitset.h>
337c478bd9Sstevel@tonic-gate #include <sys/cpupart.h>
347c478bd9Sstevel@tonic-gate #include <sys/disp.h>
357c478bd9Sstevel@tonic-gate #include <sys/hypervisor_api.h>
367c478bd9Sstevel@tonic-gate #include <sys/traptrace.h>
371ae08745Sheppo #include <sys/modctl.h>
381ae08745Sheppo #include <sys/ldoms.h>
39575a7426Spt157919 #include <sys/cpu_module.h>
40575a7426Spt157919 #include <sys/mutex_impl.h>
41374ae87fSsvemuri #include <sys/rwlock.h>
42c210ded4Sesaxe #include <sys/sdt.h>
436890d023SEric Saxe #include <sys/cmt.h>
446890d023SEric Saxe #include <vm/vm_dep.h>
45db6d2ee3Ssvemuri
46db6d2ee3Ssvemuri #ifdef TRAPTRACE
47db6d2ee3Ssvemuri int mach_htraptrace_enable = 1;
48db6d2ee3Ssvemuri #else
49db6d2ee3Ssvemuri int mach_htraptrace_enable = 0;
50db6d2ee3Ssvemuri #endif
51db6d2ee3Ssvemuri int htrap_tr0_inuse = 0;
52db6d2ee3Ssvemuri extern char htrap_tr0[]; /* prealloc buf for boot cpu */
537c478bd9Sstevel@tonic-gate
547c478bd9Sstevel@tonic-gate caddr_t mmu_fault_status_area;
557c478bd9Sstevel@tonic-gate
567c478bd9Sstevel@tonic-gate extern void sfmmu_set_tsbs(void);
577c478bd9Sstevel@tonic-gate /*
587c478bd9Sstevel@tonic-gate * CPU IDLE optimization variables/routines
597c478bd9Sstevel@tonic-gate */
607c478bd9Sstevel@tonic-gate static int enable_halt_idle_cpus = 1;
617c478bd9Sstevel@tonic-gate
62c210ded4Sesaxe /*
63c210ded4Sesaxe * Defines for the idle_state_transition DTrace probe
64c210ded4Sesaxe *
65c210ded4Sesaxe * The probe fires when the CPU undergoes an idle state change (e.g. hv yield)
66c210ded4Sesaxe * The agument passed is the state to which the CPU is transitioning.
67c210ded4Sesaxe *
68c210ded4Sesaxe * The states are defined here.
69c210ded4Sesaxe */
70c210ded4Sesaxe #define IDLE_STATE_NORMAL 0
71c210ded4Sesaxe #define IDLE_STATE_YIELDED 1
72c210ded4Sesaxe
732850d85bSmv143129 #define SUN4V_CLOCK_TICK_THRESHOLD 64
742850d85bSmv143129 #define SUN4V_CLOCK_TICK_NCPUS 64
752850d85bSmv143129
762850d85bSmv143129 extern int clock_tick_threshold;
772850d85bSmv143129 extern int clock_tick_ncpus;
782850d85bSmv143129
79*0542eecfSRafael Vanoni uint_t cp_haltset_fanout = 3;
80*0542eecfSRafael Vanoni
817c478bd9Sstevel@tonic-gate void
setup_trap_table(void)827c478bd9Sstevel@tonic-gate setup_trap_table(void)
837c478bd9Sstevel@tonic-gate {
847c478bd9Sstevel@tonic-gate caddr_t mmfsa_va;
857c478bd9Sstevel@tonic-gate extern caddr_t mmu_fault_status_area;
867c478bd9Sstevel@tonic-gate mmfsa_va =
877c478bd9Sstevel@tonic-gate mmu_fault_status_area + (MMFSA_SIZE * CPU->cpu_id);
887c478bd9Sstevel@tonic-gate
897c478bd9Sstevel@tonic-gate intr_init(CPU); /* init interrupt request free list */
907c478bd9Sstevel@tonic-gate setwstate(WSTATE_KERN);
917c478bd9Sstevel@tonic-gate set_mmfsa_scratchpad(mmfsa_va);
927c478bd9Sstevel@tonic-gate prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
937c478bd9Sstevel@tonic-gate sfmmu_set_tsbs();
947c478bd9Sstevel@tonic-gate }
957c478bd9Sstevel@tonic-gate
/*
 * Hook invoked when the physical memory configuration changes.
 * No action is required on this platform.
 */
void
phys_install_has_changed(void)
{
}
1017c478bd9Sstevel@tonic-gate
/*
 * Halt the present CPU until awoken via an interrupt.
 *
 * Called as the idle_cpu hook (see mach_cpu_halt_idle()).  Yields the
 * strand to the hypervisor until work arrives or we are poked.  The
 * publish-then-check ordering against cpu_wakeup() is what makes the
 * handshake race-free; do not reorder statements here.
 */
static void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
	/* Local run queue length; polled to exit the yield loop early. */
	volatile int *p = &cpup->cpu_disp->disp_nrunnable;
	uint_t s;		/* saved interrupt state */

	/*
	 * If this CPU is online then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitset. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (CPU->cpu_flags & CPU_OFFLINE)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitset
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpu bitset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted bitset, and then check if there
	 * is any work available. The order is important to prevent a race
	 * that can lead to work languishing on a run queue somewhere while
	 * this CPU remains halted.
	 *
	 * Either the producing CPU will see we're halted and will awaken us,
	 * or this CPU will see the work available in disp_anywork()
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		/* Make the HALTED flag visible before joining the haltset. */
		membar_producer();
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitset, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted. Wait until something becomes
	 * runnable locally or we are awaken (i.e. removed from the halt set).
	 * Note that the call to hv_cpu_yield() can return even if we have
	 * nothing to do.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 * Also, if the offlined CPU has been brought back on-line, then
	 * we return as well.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 *
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 *
	 * Any interrupt will awaken the cpu from halt. Looping here
	 * will filter spurious interrupts that wake us up, but don't
	 * represent a need for us to head back out to idle().  This
	 * will enable the idle loop to be more efficient and sleep in
	 * the processor pipeline for a larger percent of the time,
	 * which returns useful cycles to the peer hardware strand
	 * that shares the pipeline.
	 */
	s = disable_vec_intr();
	while (*p == 0 &&
	    ((hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid)) ||
	    (!hset_update && (CPU->cpu_flags & CPU_OFFLINE)))) {

		/* DTrace probes bracket the hv yield (idle state change). */
		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_YIELDED);
		(void) hv_cpu_yield();
		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_NORMAL);

		/* Briefly re-enable interrupts to let a pending poke land. */
		enable_vec_intr(s);
		s = disable_vec_intr();
	}

	/*
	 * We're no longer halted
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
2177c478bd9Sstevel@tonic-gate
/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 *
 * Installed as the disp_enq_thread hook by mach_cpu_halt_idle(); "bound"
 * is nonzero when the enqueued thread is bound to "cpu".
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t cpu_found;
	processorid_t cpu_sid;
	cpupart_t *cp;

	cp = cpu->cpu_part;
	cpu_sid = cpu->cpu_seqid;
	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.  (cpu_halt() relies on this
		 * clear-then-poke ordering.)
		 */
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpu bitset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if this is for a bound thread.
	 */
	if (bound)
		return;

	/*
	 * The CPU specified for wakeup isn't currently halted, so check
	 * to see if there are any other halted CPUs in the partition,
	 * and if there are then awaken one.
	 *
	 * Retry if another CPU raced us and removed the candidate from
	 * the haltset first; give up when the haltset is empty (-1).
	 */
	do {
		cpu_found = bitset_find(&cp->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);

	/* cpu_found is a sequential id; map it back to a cpu_t for poking. */
	if (cpu_found != CPU->cpu_seqid)
		poke_cpu(cpu_seq[cpu_found]->cpu_id);
}
2797c478bd9Sstevel@tonic-gate
2807c478bd9Sstevel@tonic-gate void
mach_cpu_halt_idle(void)2810e751525SEric Saxe mach_cpu_halt_idle(void)
2827c478bd9Sstevel@tonic-gate {
2837c478bd9Sstevel@tonic-gate if (enable_halt_idle_cpus) {
2847c478bd9Sstevel@tonic-gate idle_cpu = cpu_halt;
2857c478bd9Sstevel@tonic-gate disp_enq_thread = cpu_wakeup;
2867c478bd9Sstevel@tonic-gate }
2877c478bd9Sstevel@tonic-gate }
2887c478bd9Sstevel@tonic-gate
2897c478bd9Sstevel@tonic-gate int
ndata_alloc_mmfsa(struct memlist * ndata)2907c478bd9Sstevel@tonic-gate ndata_alloc_mmfsa(struct memlist *ndata)
2917c478bd9Sstevel@tonic-gate {
2927c478bd9Sstevel@tonic-gate size_t size;
2937c478bd9Sstevel@tonic-gate
2947c478bd9Sstevel@tonic-gate size = MMFSA_SIZE * max_ncpus;
2957c478bd9Sstevel@tonic-gate mmu_fault_status_area = ndata_alloc(ndata, size, ecache_alignsize);
2967c478bd9Sstevel@tonic-gate if (mmu_fault_status_area == NULL)
2977c478bd9Sstevel@tonic-gate return (-1);
2987c478bd9Sstevel@tonic-gate return (0);
2997c478bd9Sstevel@tonic-gate }
3007c478bd9Sstevel@tonic-gate
/*
 * Memory scrubber hook; sun4v currently provides no memscrub support,
 * so this is a no-op.
 */
void
mach_memscrub(void)
{
}
3067c478bd9Sstevel@tonic-gate
/*
 * FP-RAS (floating point fault recovery) hook; sun4v currently provides
 * no fpras support, so this is a no-op.
 *
 * Fixed: use an explicit (void) prototype-style parameter list instead of
 * the old-style empty list, matching every other stub in this file and
 * avoiding the non-prototype form removed in C23.
 */
void
mach_fpras(void)
{
	/* no fpras support for sun4v for now */
}
3127c478bd9Sstevel@tonic-gate
/*
 * Hardware copy limits are established by the individual CPU module,
 * so nothing is done at the machine-class level.
 */
void
mach_hw_copy_limit(void)
{
}
3187c478bd9Sstevel@tonic-gate
3197c478bd9Sstevel@tonic-gate /*
320da14cebeSEric Cheng * We need to enable soft ring functionality on Niagara platforms since
321da14cebeSEric Cheng * one strand can't handle interrupts for a 1Gb NIC. So set the tunable
322da14cebeSEric Cheng * mac_soft_ring_enable by default on this platform.
323da14cebeSEric Cheng * mac_soft_ring_enable variable is defined in space.c and used by MAC
324da14cebeSEric Cheng * module. This tunable in concert with mac_soft_ring_count (declared
325da14cebeSEric Cheng * in mac.h) will configure the number of fanout soft rings for a link.
3264b46d1efSkrgopi */
327da14cebeSEric Cheng extern boolean_t mac_soft_ring_enable;
3284b46d1efSkrgopi void
startup_platform(void)3294b46d1efSkrgopi startup_platform(void)
3304b46d1efSkrgopi {
331da14cebeSEric Cheng mac_soft_ring_enable = B_TRUE;
3322850d85bSmv143129 if (clock_tick_threshold == 0)
3332850d85bSmv143129 clock_tick_threshold = SUN4V_CLOCK_TICK_THRESHOLD;
3342850d85bSmv143129 if (clock_tick_ncpus == 0)
3352850d85bSmv143129 clock_tick_ncpus = SUN4V_CLOCK_TICK_NCPUS;
336575a7426Spt157919 /* set per-platform constants for mutex_backoff */
337575a7426Spt157919 mutex_backoff_base = 1;
338575a7426Spt157919 mutex_cap_factor = 4;
339575a7426Spt157919 if (l2_cache_node_count() > 1) {
340575a7426Spt157919 /* VF for example */
341575a7426Spt157919 mutex_backoff_base = 2;
342374ae87fSsvemuri mutex_cap_factor = 64;
343575a7426Spt157919 }
344374ae87fSsvemuri rw_lock_backoff = default_lock_backoff;
345374ae87fSsvemuri rw_lock_delay = default_lock_delay;
3464b46d1efSkrgopi }
3474b46d1efSkrgopi
/*
 * This function sets up hypervisor traptrace buffer
 * This routine is called by the boot cpu only
 *
 * The boot CPU reuses the statically preallocated htrap_tr0 buffer
 * (when still free); every other CPU gets a freshly allocated,
 * HTRAP_TSIZE-aligned contiguous buffer.  On allocation failure the
 * control block is zeroed and a warning is issued; tracing for that
 * CPU is simply skipped.
 */
void
mach_htraptrace_setup(int cpuid)
{
	TRAP_TRACE_CTL *ctlp;
	int bootcpuid = getprocessorid();	/* invoked on boot cpu only */

	if (mach_htraptrace_enable && ((cpuid != bootcpuid) ||
	    !htrap_tr0_inuse)) {
		ctlp = &trap_trace_ctl[cpuid];
		/* Boot CPU: reuse prealloc buffer; others: allocate. */
		ctlp->d.hvaddr_base = (cpuid == bootcpuid) ? htrap_tr0 :
		    contig_mem_alloc_align(HTRAP_TSIZE, HTRAP_TSIZE);
		if (ctlp->d.hvaddr_base == NULL) {
			ctlp->d.hlimit = 0;
			ctlp->d.hpaddr_base = NULL;
			cmn_err(CE_WARN, "!cpu%d: failed to allocate HV "
			    "traptrace buffer", cpuid);
		} else {
			ctlp->d.hlimit = HTRAP_TSIZE;
			ctlp->d.hpaddr_base = va_to_pa(ctlp->d.hvaddr_base);
		}
	}
}
3747c478bd9Sstevel@tonic-gate
/*
 * This function enables or disables the hypervisor traptracing
 *
 * When mach_htraptrace_enable is set, the buffer prepared by
 * mach_htraptrace_setup() is registered with the hypervisor and
 * tracing is enabled; on any hypervisor failure the control block is
 * reset so later code sees no buffer.  When tracing is disabled, the
 * hypervisor tracing is turned off and the control block is cleared.
 */
void
mach_htraptrace_configure(int cpuid)
{
	uint64_t ret;			/* hypervisor call status */
	uint64_t prev_buf, prev_bufsize;	/* previously registered buffer */
	uint64_t prev_enable;
	uint64_t size;
	TRAP_TRACE_CTL *ctlp;

	ctlp = &trap_trace_ctl[cpuid];
	if (mach_htraptrace_enable) {
		/*
		 * Only configure if this CPU has a buffer and it isn't
		 * the boot buffer already registered by another pass.
		 */
		if ((ctlp->d.hvaddr_base != NULL) &&
		    ((ctlp->d.hvaddr_base != htrap_tr0) ||
		    (!htrap_tr0_inuse))) {
			/* Report any buffer the HV already has configured. */
			ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
			if ((ret == H_EOK) && (prev_bufsize != 0)) {
				cmn_err(CE_CONT,
				    "!cpu%d: previous HV traptrace buffer of "
				    "size 0x%lx at address 0x%lx", cpuid,
				    prev_bufsize, prev_buf);
			}

			/* Register our buffer (size given in records). */
			ret = hv_ttrace_buf_conf(ctlp->d.hpaddr_base,
			    ctlp->d.hlimit /
			    (sizeof (struct htrap_trace_record)), &size);
			if (ret == H_EOK) {
				ret = hv_ttrace_enable(\
				    (uint64_t)TRAP_TENABLE_ALL, &prev_enable);
				if (ret != H_EOK) {
					cmn_err(CE_WARN,
					    "!cpu%d: HV traptracing not "
					    "enabled, ta: 0x%x returned error: "
					    "%ld", cpuid, TTRACE_ENABLE, ret);
				} else {
					/* Boot buffer is now claimed. */
					if (ctlp->d.hvaddr_base == htrap_tr0)
						htrap_tr0_inuse = 1;
				}
			} else {
				cmn_err(CE_WARN,
				    "!cpu%d: HV traptrace buffer not "
				    "configured, ta: 0x%x returned error: %ld",
				    cpuid, TTRACE_BUF_CONF, ret);
			}
			/*
			 * set hvaddr_base to NULL when traptrace buffer
			 * registration fails
			 */
			if (ret != H_EOK) {
				ctlp->d.hvaddr_base = NULL;
				ctlp->d.hlimit = 0;
				ctlp->d.hpaddr_base = NULL;
			}
		}
	} else {
		/* Tracing disabled: tear down any active HV configuration. */
		ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
		if ((ret == H_EOK) && (prev_bufsize != 0)) {
			ret = hv_ttrace_enable((uint64_t)TRAP_TDISABLE_ALL,
			    &prev_enable);
			if (ret == H_EOK) {
				/* Release the boot buffer reservation. */
				if (ctlp->d.hvaddr_base == htrap_tr0)
					htrap_tr0_inuse = 0;
				ctlp->d.hvaddr_base = NULL;
				ctlp->d.hlimit = 0;
				ctlp->d.hpaddr_base = NULL;
			} else
				cmn_err(CE_WARN,
				    "!cpu%d: HV traptracing is not disabled, "
				    "ta: 0x%x returned error: %ld",
				    cpuid, TTRACE_ENABLE, ret);
		}
	}
}
450db6d2ee3Ssvemuri
451db6d2ee3Ssvemuri /*
452db6d2ee3Ssvemuri * This function cleans up the hypervisor traptrace buffer
453db6d2ee3Ssvemuri */
454db6d2ee3Ssvemuri void
mach_htraptrace_cleanup(int cpuid)455db6d2ee3Ssvemuri mach_htraptrace_cleanup(int cpuid)
456db6d2ee3Ssvemuri {
457db6d2ee3Ssvemuri if (mach_htraptrace_enable) {
4580400e0b7Sha137994 TRAP_TRACE_CTL *ctlp;
4590400e0b7Sha137994 caddr_t httrace_buf_va;
4600400e0b7Sha137994
4610400e0b7Sha137994 ASSERT(cpuid < max_ncpus);
462db6d2ee3Ssvemuri ctlp = &trap_trace_ctl[cpuid];
4630400e0b7Sha137994 httrace_buf_va = ctlp->d.hvaddr_base;
4640400e0b7Sha137994 if (httrace_buf_va == htrap_tr0) {
4650400e0b7Sha137994 bzero(httrace_buf_va, HTRAP_TSIZE);
4660400e0b7Sha137994 } else if (httrace_buf_va != NULL) {
4670400e0b7Sha137994 contig_mem_free(httrace_buf_va, HTRAP_TSIZE);
468db6d2ee3Ssvemuri }
469db6d2ee3Ssvemuri ctlp->d.hvaddr_base = NULL;
470db6d2ee3Ssvemuri ctlp->d.hlimit = 0;
471db6d2ee3Ssvemuri ctlp->d.hpaddr_base = NULL;
472db6d2ee3Ssvemuri }
473db6d2ee3Ssvemuri }
4741ae08745Sheppo
/*
 * Load any required machine class (sun4v) specific drivers.
 *
 * Loads the LDOMs domain-services modules and attaches the virtual
 * device servers.  Module load failures are reported as notices only;
 * virtual-device attach failures are ignored entirely, since a domain
 * may legitimately lack any of those services.
 */
void
load_mach_drivers(void)
{
	/*
	 * We don't want to load these LDOMs-specific
	 * modules if domaining is not supported.  Also,
	 * we must be able to run on non-LDOMs firmware.
	 */
	if (!domaining_supported())
		return;

	/*
	 * Load the core domain services module
	 */
	if (modload("misc", "ds") == -1)
		cmn_err(CE_NOTE, "!'ds' module failed to load");

	/*
	 * Load the rest of the domain services
	 */
	if (modload("misc", "fault_iso") == -1)
		cmn_err(CE_NOTE, "!'fault_iso' module failed to load");

	if (modload("misc", "platsvc") == -1)
		cmn_err(CE_NOTE, "!'platsvc' module failed to load");

	/* CPU dynamic reconfiguration requires domaining to be active. */
	if (domaining_enabled() && modload("misc", "dr_cpu") == -1)
		cmn_err(CE_NOTE, "!'dr_cpu' module failed to load");

	if (modload("misc", "dr_io") == -1)
		cmn_err(CE_NOTE, "!'dr_io' module failed to load");

	if (modload("misc", "dr_mem") == -1)
		cmn_err(CE_NOTE, "!'dr_mem' module failed to load");

	/*
	 * Attempt to attach any virtual device servers. These
	 * drivers must be loaded at start of day so that they
	 * can respond to any updates to the machine description.
	 *
	 * Since it is quite likely that a domain will not support
	 * one or more of these servers, failures are ignored.
	 */

	/* virtual disk server */
	(void) i_ddi_attach_hw_nodes("vds");

	/* virtual network switch */
	(void) i_ddi_attach_hw_nodes("vsw");

	/* virtual console concentrator */
	(void) i_ddi_attach_hw_nodes("vcc");
}
531d2365b01SPavel Tatashin
532d2365b01SPavel Tatashin void
set_platform_defaults(void)533d2365b01SPavel Tatashin set_platform_defaults(void)
534d2365b01SPavel Tatashin {
535d2365b01SPavel Tatashin /*
536d2365b01SPavel Tatashin * Allow at most one context domain per 8 CPUs, which is ample for
537d2365b01SPavel Tatashin * good performance. Do not make this too large, because it
538d2365b01SPavel Tatashin * increases the space consumed in the per-process sfmmu structure.
539d2365b01SPavel Tatashin */
540d2365b01SPavel Tatashin if (max_mmu_ctxdoms == 0)
541d2365b01SPavel Tatashin max_mmu_ctxdoms = (NCPU + 7) / 8;
542d2365b01SPavel Tatashin }
543