/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/prom_plat.h>
#include <sys/promif.h>
#include <sys/vm.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/cpupart.h>
#include <sys/disp.h>
#include <sys/hypervisor_api.h>
#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

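/*
 * Base of the MMU fault status area handed to the hypervisor;
 * each CPU owns one MMFSA_SIZE slice of it, indexed by cpu_id.
 */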
caddr_t	mmu_fault_status_area;

extern void sfmmu_set_tsbs(void);

/*
 * CPU IDLE optimization variables/routines
 */
static int enable_halt_idle_cpus = 1;

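/*
 * Register this CPU's trap table and MMU fault status area with the
 * hypervisor: carve out this CPU's MMFSA_SIZE slice of the fault
 * status area, record its address in the scratchpad for the trap
 * handlers, and hand the trap table and the MMFSA's physical address
 * to the hypervisor via the PROM interface.
 */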
void
setup_trap_table(void)
{
	caddr_t mmfsa_va;

	mmfsa_va = mmu_fault_status_area + (MMFSA_SIZE * CPU->cpu_id);

	intr_init(CPU);			/* init interrupt request free list */
	setwstate(WSTATE_KERN);
	set_mmfsa_scratchpad(mmfsa_va);
	prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
	sfmmu_set_tsbs();
}

void
phys_install_has_changed(void)
{
	/* nothing to do for sun4v */
}

/*
 * Halt the calling CPU until awoken via an interrupt
 */
static void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpun = cpup->cpu_id;
	cpupart_t *cp;
	int hset_update = 1;
	uint_t s;

	/*
	 * If this CPU is online and there are multiple CPUs in the
	 * system, then we should note our halting by adding ourselves
	 * to the partition's halted CPU bitmap. This allows other CPUs
	 * to find/awaken us when work becomes available.
	 */
	if ((cpup->cpu_flags & CPU_OFFLINE) || ncpus == 1)
		hset_update = 0;

	/*
	 * We're on our way to being halted.
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 */
	s = disable_vec_intr();

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * Note that the memory barrier after updating the HALTED flag
	 * is needed to ensure that the HALTED flag has reached global
	 * visibility before we scan the run queue for the last time
	 * (via disp_anywork) and halt ourselves.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		membar_producer();
		cp = cpup->cpu_part;
		CPUSET_ATOMIC_ADD(cp->cp_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * If work becomes available *after* we do this check
	 * and it's determined that the work should be ours,
	 * we won't miss it since we'll be notified with a "poke"
	 * ...which will pop us right back out of the halted state.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
		}
		enable_vec_intr(s);
		return;
	}

	/*
	 * Halt the strand: yield this virtual CPU to the hypervisor
	 * until an interrupt (e.g. a poke) brings it back.
	 */
	(void) hv_cpu_yield();

	/*
	 * We're no longer halted
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
	}
}

/*
 * If "cpu" is halted, then wake it up, clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	int		result;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourselves is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This CPU isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    (cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL))
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;

	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
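		/*
		 * Atomically test and clear the halted bit; if another
		 * CPU cleared it first (result < 0), go pick a
		 * different candidate.
		 */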
		CPUSET_ATOMIC_XDEL(cp->cp_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}

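/*
 * Install the halt-based idle loop: idle CPUs park in cpu_halt() and
 * the dispatcher wakes them via cpu_wakeup() when it enqueues work.
 */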
void
mach_cpu_halt_idle(void)
{
	if (enable_halt_idle_cpus) {
		idle_cpu = cpu_halt;
		disp_enq_thread = cpu_wakeup;
	}
}

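/*
 * Carve the MMU fault status area out of nucleus data: one MMFSA_SIZE
 * slice per possible CPU, aligned to the ecache line size.
 */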
int
ndata_alloc_mmfsa(struct memlist *ndata)
{
	size_t	size;

	size = MMFSA_SIZE * max_ncpus;
	mmu_fault_status_area = ndata_alloc(ndata, size, ecache_alignsize);
	if (mmu_fault_status_area == NULL)
		return (-1);
	return (0);
}

void
mach_memscrub(void)
{
	/* no memscrub support for sun4v for now */
}

void
mach_fpras(void)
{
	/* no fpras support for sun4v for now */
}

void
mach_hw_copy_limit(void)
{
	/* HW copy limits set by individual CPU module */
}

#ifdef TRAPTRACE
/*
 * Set up the hypervisor traptrace buffer control fields for this CPU.
 */
void
htrap_trace_setup(caddr_t buf, int cpuid)
{
	TRAP_TRACE_CTL	*ctlp;

	ctlp = &trap_trace_ctl[cpuid];
	ctlp->d.hvaddr_base = buf;
	ctlp->d.hlimit = HTRAP_TSIZE;
	ctlp->d.hpaddr_base = va_to_pa(buf);
}

/*
 * Configure and enable the hypervisor traptrace buffer for this CPU.
 */
void
htrap_trace_register(int cpuid)
{
	uint64_t ret;
	uint64_t prev_buf, prev_bufsize;
	uint64_t prev_enable;
	uint64_t size;
	TRAP_TRACE_CTL	*ctlp;

	ret = hv_ttrace_buf_info(&prev_buf, &prev_bufsize);
	if ((ret == H_EOK) && (prev_bufsize != 0)) {
		cmn_err(CE_CONT,
		    "!cpu%d: previous HV traptrace buffer of size 0x%lx "
		    "at address 0x%lx", cpuid, prev_bufsize, prev_buf);
	}

	ctlp = &trap_trace_ctl[cpuid];
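
	/*
	 * The buffer is configured in units of trace entries; the
	 * hypervisor hands back the number of entries actually granted.
	 */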
	ret = hv_ttrace_buf_conf(ctlp->d.hpaddr_base,
	    HTRAP_TSIZE / (sizeof (struct htrap_trace_record)), &size);
	if (ret == H_EOK) {
		ret = hv_ttrace_enable((uint64_t)TRAP_TENABLE_ALL,
		    &prev_enable);
		if (ret != H_EOK) {
			cmn_err(CE_WARN,
			    "!cpu%d: HV traptracing not enabled, "
			    "ta: 0x%x returned error: %lu",
			    cpuid, TTRACE_ENABLE, ret);
		}
	} else {
		cmn_err(CE_WARN,
		    "!cpu%d: HV traptrace buffer not configured, "
		    "ta: 0x%x returned error: %lu",
		    cpuid, TTRACE_BUF_CONF, ret);
	}

	/* set hvaddr_base to NULL when traptrace buffer registration fails */
	if (ret != H_EOK) {
		ctlp->d.hvaddr_base = NULL;
		ctlp->d.hlimit = 0;
		ctlp->d.hpaddr_base = NULL;
	}
}
#endif /* TRAPTRACE */