xref: /titanic_44/usr/src/uts/i86xpv/os/xen_machdep.c (revision 23c57df72989c916b3e98084eb88d48777999691)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /* derived from netbsd's xen_machdep.c 1.1.2.1 */
30 
31 /*
32  *
33  * Copyright (c) 2004 Christian Limpach.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. This section intentionally left blank.
45  * 4. The name of the author may not be used to endorse or promote products
46  *    derived from this software without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
49  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
50  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
51  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
52  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
53  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
54  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
55  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
56  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
57  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58  */
59 /*
60  * Section 3 of the above license was updated in response to bug 6379571.
61  */
62 
63 #include <sys/ctype.h>
64 #include <sys/types.h>
65 #include <sys/cmn_err.h>
66 #include <sys/trap.h>
67 #include <sys/segments.h>
68 #include <sys/hypervisor.h>
69 #include <sys/xen_mmu.h>
70 #include <sys/machsystm.h>
71 #include <sys/promif.h>
72 #include <sys/bootconf.h>
73 #include <sys/bootinfo.h>
74 #include <sys/cpr.h>
75 #include <sys/taskq.h>
76 #include <sys/uadmin.h>
77 #include <sys/evtchn_impl.h>
78 #include <sys/archsystm.h>
79 #include <xen/sys/xenbus_impl.h>
80 #include <sys/mach_mmu.h>
81 #include <vm/hat_i86.h>
82 #include <sys/gnttab.h>
83 #include <sys/reboot.h>
84 #include <sys/stack.h>
85 #include <sys/clock.h>
86 #include <sys/bitmap.h>
87 #include <sys/processor.h>
88 #include <sys/xen_errno.h>
89 #include <sys/xpv_panic.h>
90 #include <sys/smp_impldefs.h>
91 #include <sys/cpu.h>
92 #include <sys/balloon_impl.h>
93 #include <sys/ddi.h>
94 
95 #ifdef DEBUG
96 #define	SUSPEND_DEBUG if (xen_suspend_debug) xen_printf
97 #else
98 #define	SUSPEND_DEBUG(...)
99 #endif
100 
int cpr_debug;				/* presumably consumed by common cpr code; unused in this file */
cpuset_t cpu_suspend_lost_set;		/* vcpus found powered off across a suspend (see xen_suspend_domain()) */
static int xen_suspend_debug;		/* non-zero: verbose SUSPEND_DEBUG tracing and version checks skipped */
104 
105 /*
106  * Determine helpful version information.
107  *
108  * (And leave copies in the data segment so we can look at them later
109  * with e.g. kmdb.)
110  */
111 
/*
 * Index into the xenver[] array below: the version observed at boot
 * vs. the version currently running (these differ after a migration).
 */
typedef enum xen_version {
	XENVER_BOOT_IDX,
	XENVER_CURRENT_IDX
} xen_version_t;

/* Cached results of the XENVER_* hypercalls; filled by xen_set_version(). */
struct xenver {
	ulong_t xv_major;		/* e.g. 3 in "3.0.4" */
	ulong_t xv_minor;		/* e.g. 0 in "3.0.4" */
	ulong_t xv_revision;		/* parsed from xv_ver; 0 if unparseable */
	xen_extraversion_t xv_ver;	/* raw extraversion string, e.g. ".4-xvm" */
	ulong_t xv_is_xvm;		/* 1 if extraversion ends in "-xvm" */
	xen_changeset_info_t xv_chgset;	/* hypervisor changeset string */
	xen_compile_info_t xv_build;	/* hypervisor compile information */
	xen_capabilities_info_t xv_caps;	/* space-separated capability strings */
} xenver[2];

/* Accessors for the boot-time and currently-running version data. */
#define	XENVER_BOOT(m)	(xenver[XENVER_BOOT_IDX].m)
#define	XENVER_CURRENT(m)	(xenver[XENVER_CURRENT_IDX].m)
130 
131 /*
132  * Update the xenver data. We maintain two copies, boot and
133  * current. If we are setting the boot, then also set current.
134  */
/*
 * Query the hypervisor for its version data and cache it in xenver[idx].
 * Setting the boot copy also overwrites the current copy; after a
 * migration only the current copy is refreshed (see xen_suspend_domain()).
 */
static void
xen_set_version(xen_version_t idx)
{
	ulong_t ver;

	bzero(&xenver[idx], sizeof (xenver[idx]));

	/* XENVER_version packs major/minor into a single word. */
	ver = HYPERVISOR_xen_version(XENVER_version, 0);

	xenver[idx].xv_major = BITX(ver, 31, 16);
	xenver[idx].xv_minor = BITX(ver, 15, 0);

	(void) HYPERVISOR_xen_version(XENVER_extraversion, &xenver[idx].xv_ver);

	/*
	 * The revision is buried in the extraversion information that is
	 * maintained by the hypervisor. For our purposes we expect that
	 * the revision number is:
	 *	- the second character in the extraversion information
	 *	- one character long
	 *	- numeric digit
	 * If it isn't then we can't extract the revision and we leave it
	 * set to 0.
	 */
	if (strlen(xenver[idx].xv_ver) > 1 && isdigit(xenver[idx].xv_ver[1]))
		xenver[idx].xv_revision = xenver[idx].xv_ver[1] - '0';
	else
		cmn_err(CE_WARN, "Cannot extract revision on this hypervisor "
		    "version: v%s, unexpected version format",
		    xenver[idx].xv_ver);

	/* A "-xvm" suffix on the extraversion marks a Solaris xVM build. */
	xenver[idx].xv_is_xvm = 0;

	if (strlen(xenver[idx].xv_ver) >= 4 &&
	    strncmp(xenver[idx].xv_ver + strlen(xenver[idx].xv_ver) - 4,
	    "-xvm", 4) == 0)
		xenver[idx].xv_is_xvm = 1;

	(void) HYPERVISOR_xen_version(XENVER_changeset,
	    &xenver[idx].xv_chgset);

	(void) HYPERVISOR_xen_version(XENVER_compile_info,
	    &xenver[idx].xv_build);
	/*
	 * Capabilities are a set of space separated ascii strings
	 * e.g. 'xen-3.1-x86_32p' or 'hvm-3.2-x86_64'
	 */
	(void) HYPERVISOR_xen_version(XENVER_capabilities,
	    &xenver[idx].xv_caps);

	cmn_err(CE_CONT, "?v%lu.%lu%s chgset '%s'\n", xenver[idx].xv_major,
	    xenver[idx].xv_minor, xenver[idx].xv_ver, xenver[idx].xv_chgset);

	/* Setting the boot version also (re)initializes the current one. */
	if (idx == XENVER_BOOT_IDX)
		bcopy(&xenver[XENVER_BOOT_IDX], &xenver[XENVER_CURRENT_IDX],
		    sizeof (xenver[XENVER_BOOT_IDX]));
}
192 
/* Which compatibility question is being asked of the cached version data. */
typedef enum xen_hypervisor_check {
	XEN_RUN_CHECK,			/* OK to run at all? */
	XEN_SUSPEND_CHECK		/* OK to suspend/resume? */
} xen_hypervisor_check_t;
197 
198 /*
199  * To run the hypervisor must be 3.0.4 or better. To suspend/resume
200  * we need 3.0.4 or better and if it is 3.0.4. then it must be provided
201  * by the Solaris xVM project.
202  * Checking can be disabled for testing purposes by setting the
203  * xen_suspend_debug variable.
204  */
205 static int
206 xen_hypervisor_supports_solaris(xen_hypervisor_check_t check)
207 {
208 	if (xen_suspend_debug == 1)
209 		return (1);
210 	if (XENVER_CURRENT(xv_major) < 3)
211 		return (0);
212 	if (XENVER_CURRENT(xv_major) > 3)
213 		return (1);
214 	if (XENVER_CURRENT(xv_minor) > 0)
215 		return (1);
216 	if (XENVER_CURRENT(xv_revision) < 4)
217 		return (0);
218 	if (check == XEN_SUSPEND_CHECK && XENVER_CURRENT(xv_revision) == 4 &&
219 	    !XENVER_CURRENT(xv_is_xvm))
220 		return (0);
221 
222 	return (1);
223 }
224 
225 /*
226  * If the hypervisor is -xvm, or 3.1.2 or higher, we don't need the
227  * workaround.
228  */
229 static void
230 xen_pte_workaround(void)
231 {
232 #if defined(__amd64)
233 	extern int pt_kern;
234 
235 	if (XENVER_CURRENT(xv_major) != 3)
236 		return;
237 	if (XENVER_CURRENT(xv_minor) > 1)
238 		return;
239 	if (XENVER_CURRENT(xv_minor) == 1 &&
240 	    XENVER_CURRENT(xv_revision) > 1)
241 		return;
242 	if (XENVER_CURRENT(xv_is_xvm))
243 		return;
244 
245 	pt_kern = PT_USER;
246 #endif
247 }
248 
/*
 * Register a hypervisor callback entry point.
 *
 *	func	kernel entry point for the callback
 *	type	CALLBACKTYPE_* code
 *	flags	CALLBACKF_* flags (e.g. mask events on entry)
 *
 * Panics on failure, except that failures registering the NMI callback
 * are ignored (see the XXPV note below).
 */
void
xen_set_callback(void (*func)(void), uint_t type, uint_t flags)
{
	struct callback_register cb;

	bzero(&cb, sizeof (cb));
#if defined(__amd64)
	cb.address = (ulong_t)func;
#elif defined(__i386)
	/* 32-bit callbacks carry an explicit code selector as well. */
	cb.address.cs = KCS_SEL;
	cb.address.eip = (ulong_t)func;
#endif
	cb.type = type;
	cb.flags = flags;

	/*
	 * XXPV always ignore return value for NMI
	 */
	if (HYPERVISOR_callback_op(CALLBACKOP_register, &cb) != 0 &&
	    type != CALLBACKTYPE_nmi)
		panic("HYPERVISOR_callback_op failed");
}
271 
/*
 * Register all of the hypervisor callbacks this kernel uses: event
 * delivery, failsafe, NMI and (amd64 only) the system call entry point.
 */
void
xen_init_callbacks(void)
{
	/*
	 * register event (interrupt) handler.
	 */
	xen_set_callback(xen_callback, CALLBACKTYPE_event, 0);

	/*
	 * failsafe handler.
	 */
	xen_set_callback(xen_failsafe_callback, CALLBACKTYPE_failsafe,
	    CALLBACKF_mask_events);

	/*
	 * NMI handler.
	 */
	xen_set_callback(nmiint, CALLBACKTYPE_nmi, 0);

	/*
	 * system call handler
	 * XXPV move to init_cpu_syscall?
	 */
#if defined(__amd64)
	xen_set_callback(sys_syscall, CALLBACKTYPE_syscall,
	    CALLBACKF_mask_events);
#endif	/* __amd64 */
}
300 
301 
302 /*
303  * cmn_err() followed by a 1/4 second delay; this gives the
304  * logging service a chance to flush messages and helps avoid
305  * intermixing output from prom_printf().
306  * XXPV: doesn't exactly help us on UP though.
307  */
/*
 * cmn_err() wrapper used by the suspend/resume code; see the comment
 * above for why it delays after logging.
 *
 *	ce	cmn_err() severity code (CE_WARN, CE_NOTE, ...)
 *	fmt	printf-style format string plus arguments
 */
/*PRINTFLIKE2*/
void
cpr_err(int ce, const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(ce, fmt, adx);
	va_end(adx);
	drv_usecwait(MICROSEC >> 2);	/* busy-wait for 1/4 second */
}
319 
/*
 * Suspend the entire device tree ahead of a domain suspend; any
 * failure is fatal since we cannot safely continue half-suspended.
 */
void
xen_suspend_devices(void)
{
	int err;

	SUSPEND_DEBUG("xen_suspend_devices\n");

	err = cpr_suspend_devices(ddi_root_node());
	if (err != 0)
		panic("failed to suspend devices: %d", err);
}
330 
/*
 * Resume the entire device tree after a domain restore/migration;
 * any failure is fatal.
 */
void
xen_resume_devices(void)
{
	int err;

	SUSPEND_DEBUG("xen_resume_devices\n");

	err = cpr_resume_devices(ddi_root_node(), 0);
	if (err != 0)
		panic("failed to resume devices: %d", err);
}
341 
342 /*
343  * The list of mfn pages is out of date.  Recompute it.
344  */
static void
rebuild_mfn_list(void)
{
	int i = 0;
	size_t sz;
	size_t off;
	pfn_t pfn;

	SUSPEND_DEBUG("rebuild_mfn_list\n");

	/* Size of the mfn list, rounded up to a whole number of pages. */
	sz = ((mfn_count * sizeof (mfn_t)) + MMU_PAGEOFFSET) & MMU_PAGEMASK;

	for (off = 0; off < sz; off += MMU_PAGESIZE) {
		size_t j = mmu_btop(off);
		/*
		 * On each crossing into a new page of mfn_list_pages,
		 * record that page's (new) MFN in mfn_list_pages_page.
		 */
		if (((j * sizeof (mfn_t)) & MMU_PAGEOFFSET) == 0) {
			pfn = hat_getpfnum(kas.a_hat,
			    (caddr_t)&mfn_list_pages[j]);
			mfn_list_pages_page[i++] = pfn_to_mfn(pfn);
		}

		/* Record the new MFN of this page of mfn_list itself. */
		pfn = hat_getpfnum(kas.a_hat, (caddr_t)mfn_list + off);
		mfn_list_pages[j] = pfn_to_mfn(pfn);
	}

	/* Hand the top-level frame list to the hypervisor. */
	pfn = hat_getpfnum(kas.a_hat, (caddr_t)mfn_list_pages_page);
	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list
	    = pfn_to_mfn(pfn);
}
373 
374 static void
375 suspend_cpus(void)
376 {
377 	int i;
378 
379 	SUSPEND_DEBUG("suspend_cpus\n");
380 
381 	mp_enter_barrier();
382 
383 	for (i = 1; i < ncpus; i++) {
384 		if (!CPU_IN_SET(cpu_suspend_lost_set, i)) {
385 			SUSPEND_DEBUG("xen_vcpu_down %d\n", i);
386 			(void) xen_vcpu_down(i);
387 		}
388 
389 		mach_cpucontext_reset(cpu[i]);
390 	}
391 }
392 
393 static void
394 resume_cpus(void)
395 {
396 	int i;
397 
398 	for (i = 1; i < ncpus; i++) {
399 		if (cpu[i] == NULL)
400 			continue;
401 
402 		if (!CPU_IN_SET(cpu_suspend_lost_set, i)) {
403 			SUSPEND_DEBUG("xen_vcpu_up %d\n", i);
404 			mach_cpucontext_restore(cpu[i]);
405 			(void) xen_vcpu_up(i);
406 		}
407 	}
408 
409 	mp_leave_barrier();
410 }
411 
412 /*
413  * Top level routine to direct suspend/resume of a domain.
414  */
void
xen_suspend_domain(void)
{
	extern void rtcsync(void);
	extern hrtime_t hres_last_tick;
	mfn_t start_info_mfn;
	ulong_t flags;
	pfn_t pfn;
	int i;

	/*
	 * Check that we are happy to suspend on this hypervisor.
	 */
	if (xen_hypervisor_supports_solaris(XEN_SUSPEND_CHECK) == 0) {
		cpr_err(CE_WARN, "Cannot suspend on this hypervisor "
		    "version: v%lu.%lu%s, need at least version v3.0.4 or "
		    "-xvm based hypervisor", XENVER_CURRENT(xv_major),
		    XENVER_CURRENT(xv_minor), XENVER_CURRENT(xv_ver));
		return;
	}

	/*
	 * XXPV - Are we definitely OK to suspend by the time we've connected
	 * the handler?
	 */

	cpr_err(CE_NOTE, "Domain suspending for save/migrate");

	SUSPEND_DEBUG("xen_suspend_domain\n");

	/*
	 * suspend interrupts and devices
	 * XXPV - we use suspend/resume for both save/restore domains (like sun
	 * cpr) and for migration.  Would be nice to know the difference if
	 * possible.  For save/restore where down time may be a long time, we
	 * may want to do more of the things that cpr does.  (i.e. notify user
	 * processes, shrink memory footprint for faster restore, etc.)
	 */
	xen_suspend_devices();
	SUSPEND_DEBUG("xenbus_suspend\n");
	xenbus_suspend();

	/* HYPERVISOR_suspend() below is handed the MFN of the start_info page. */
	pfn = hat_getpfnum(kas.a_hat, (caddr_t)xen_info);
	start_info_mfn = pfn_to_mfn(pfn);

	/*
	 * XXPV: cpu hotplug can hold this under a xenbus watch. Are we safe
	 * wrt xenbus being suspended here?
	 */
	mutex_enter(&cpu_lock);

	/*
	 * Suspend must be done on vcpu 0, as no context for other CPUs is
	 * saved.
	 *
	 * XXPV - add to taskq API ?
	 */
	thread_affinity_set(curthread, 0);
	kpreempt_disable();

	SUSPEND_DEBUG("xen_start_migrate\n");
	xen_start_migrate();
	if (ncpus > 1)
		suspend_cpus();

	/*
	 * We can grab the ec_lock as it's a spinlock with a high SPL. Hence
	 * any holder would have dropped it to get through suspend_cpus().
	 */
	mutex_enter(&ec_lock);

	/*
	 * From here on in, we can't take locks.
	 */
	SUSPEND_DEBUG("ec_suspend\n");
	ec_suspend();
	SUSPEND_DEBUG("gnttab_suspend\n");
	gnttab_suspend();

	flags = intr_clear();

	xpv_time_suspend();

	/*
	 * Currently, the hypervisor incorrectly fails to bring back
	 * powered-down VCPUs.  Thus we need to record any powered-down VCPUs
	 * to prevent any attempts to operate on them.  But we have to do this
	 * *after* the very first time we do ec_suspend().
	 */
	for (i = 1; i < ncpus; i++) {
		if (cpu[i] == NULL)
			continue;

		if (cpu_get_state(cpu[i]) == P_POWEROFF)
			CPUSET_ATOMIC_ADD(cpu_suspend_lost_set, i);
	}

	/*
	 * The dom0 save/migrate code doesn't automatically translate
	 * these into PFNs, but expects them to be, so we do it here.
	 * We don't use mfn_to_pfn() because so many OS services have
	 * been disabled at this point.
	 */
	xen_info->store_mfn = mfn_to_pfn_mapping[xen_info->store_mfn];
	xen_info->console.domU.mfn =
	    mfn_to_pfn_mapping[xen_info->console.domU.mfn];

	/* Event upcalls must be masked before suspending; crash if not. */
	if (CPU->cpu_m.mcpu_vcpu_info->evtchn_upcall_mask == 0) {
		prom_printf("xen_suspend_domain(): "
		    "CPU->cpu_m.mcpu_vcpu_info->evtchn_upcall_mask not set\n");
		(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
	}

	/* Unmap the shared info page; it is remapped again after resume. */
	if (HYPERVISOR_update_va_mapping((uintptr_t)HYPERVISOR_shared_info,
	    0, UVMF_INVLPG)) {
		prom_printf("xen_suspend_domain(): "
		    "HYPERVISOR_update_va_mapping() failed\n");
		(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
	}

	SUSPEND_DEBUG("HYPERVISOR_suspend\n");

	/*
	 * At this point we suspend and sometime later resume.
	 */
	if (HYPERVISOR_suspend(start_info_mfn)) {
		prom_printf("xen_suspend_domain(): "
		    "HYPERVISOR_suspend() failed\n");
		(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
	}

	/*
	 * Point HYPERVISOR_shared_info to its new value.
	 */
	if (HYPERVISOR_update_va_mapping((uintptr_t)HYPERVISOR_shared_info,
	    xen_info->shared_info | PT_NOCONSIST | PT_VALID | PT_WRITABLE,
	    UVMF_INVLPG))
		(void) HYPERVISOR_shutdown(SHUTDOWN_crash);

	if (xen_info->nr_pages != mfn_count) {
		prom_printf("xen_suspend_domain(): number of pages"
		    " changed, was 0x%lx, now 0x%lx\n", mfn_count,
		    xen_info->nr_pages);
		(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
	}

	xpv_time_resume();

	/* Invalidate the cached maximum MFN; the machine may have changed. */
	cached_max_mfn = 0;

	SUSPEND_DEBUG("gnttab_resume\n");
	gnttab_resume();

	/* XXPV: add a note that this must be lockless. */
	SUSPEND_DEBUG("ec_resume\n");
	ec_resume();

	intr_restore(flags);

	if (ncpus > 1)
		resume_cpus();

	mutex_exit(&ec_lock);
	xen_end_migrate();
	mutex_exit(&cpu_lock);

	/*
	 * Now we can take locks again.
	 */

	/*
	 * Force the tick value used for tv_nsec in hres_tick() to be up to
	 * date. rtcsync() will reset the hrestime value appropriately.
	 */
	hres_last_tick = xpv_gethrtime();

	/*
	 * XXPV: we need to have resumed the CPUs since this takes locks, but
	 * can remote CPUs see bad state? Presumably yes. Should probably nest
	 * taking of todlock inside of cpu_lock, or vice versa, then provide an
	 * unlocked version.  Probably need to call clkinitf to reset cpu freq
	 * and re-calibrate if we migrated to a different speed cpu.  Also need
	 * to make a (re)init_cpu_info call to update processor info structs
	 * and device tree info.  That remains to be written at the moment.
	 */
	rtcsync();

	rebuild_mfn_list();

	SUSPEND_DEBUG("xenbus_resume\n");
	xenbus_resume();
	SUSPEND_DEBUG("xenbus_resume_devices\n");
	xen_resume_devices();

	thread_affinity_clear(curthread);
	kpreempt_enable();

	SUSPEND_DEBUG("finished xen_suspend_domain\n");

	/*
	 * We have restarted our suspended domain, update the hypervisor
	 * details. NB: This must be done at the end of this function,
	 * since we need the domain to be completely resumed before
	 * these functions will work correctly.
	 */
	xen_set_version(XENVER_CURRENT_IDX);

	/*
	 * We can check and report a warning, but we don't stop the
	 * process.
	 */
	if (xen_hypervisor_supports_solaris(XEN_SUSPEND_CHECK) == 0)
		cmn_err(CE_WARN, "Found hypervisor version: v%lu.%lu%s "
		    "but need at least version v3.0.4",
		    XENVER_CURRENT(xv_major), XENVER_CURRENT(xv_minor),
		    XENVER_CURRENT(xv_ver));

	cmn_err(CE_NOTE, "domain restore/migrate completed");
}
634 
/*
 * Handler for an external debug event: enter the debugger if one is
 * loaded, otherwise dump event channel state to the console.
 * Always returns 0.
 */
/*ARGSUSED*/
int
xen_debug_handler(void *arg)
{
	debug_enter("External debug event received");

	/*
	 * If we've not got KMDB loaded, output some stuff difficult to capture
	 * from a domain core.
	 */
	if (!(boothowto & RB_DEBUG)) {
		shared_info_t *si = HYPERVISOR_shared_info;
		int i;

		/*
		 * NOTE(review): only the first 8 words of the pending/mask
		 * bitmaps are dumped; the arrays may be larger -- confirm
		 * this covers all channels of interest.
		 */
		prom_printf("evtchn_pending [ ");
		for (i = 0; i < 8; i++)
			prom_printf("%lx ", si->evtchn_pending[i]);
		prom_printf("]\nevtchn_mask [ ");
		for (i = 0; i < 8; i++)
			prom_printf("%lx ", si->evtchn_mask[i]);
		prom_printf("]\n");

		/* Per-vcpu upcall state for each configured CPU. */
		for (i = 0; i < ncpus; i++) {
			vcpu_info_t *vcpu = &si->vcpu_info[i];
			if (cpu[i] == NULL)
				continue;
			prom_printf("CPU%d pending %d mask %d sel %lx\n",
			    i, vcpu->evtchn_upcall_pending,
			    vcpu->evtchn_upcall_mask,
			    vcpu->evtchn_pending_sel);
		}
	}

	return (0);
}
670 
/*
 * xenstore watch callback for control/sysrq: read the requested key,
 * clear the node, and act on the key ('b' enters the debug handler).
 */
/*ARGSUSED*/
static void
xen_sysrq_handler(struct xenbus_watch *watch, const char **vec,
    unsigned int len)
{
	xenbus_transaction_t xbt;
	char key = '\0';
	int ret;

retry:
	if (xenbus_transaction_start(&xbt)) {
		cmn_err(CE_WARN, "failed to start sysrq transaction");
		return;
	}

	if ((ret = xenbus_scanf(xbt, "control", "sysrq", "%c", &key)) != 0) {
		/*
		 * ENOENT happens in response to our own xenbus_rm.
		 * XXPV - this happens spuriously on boot?
		 */
		if (ret != ENOENT)
			cmn_err(CE_WARN, "failed to read sysrq: %d", ret);
		goto out;
	}

	/* Remove the node so we don't re-fire on our own read. */
	if ((ret = xenbus_rm(xbt, "control", "sysrq")) != 0) {
		cmn_err(CE_WARN, "failed to reset sysrq: %d", ret);
		goto out;
	}

	/* EAGAIN: a concurrent xenstore update aborted us; redo it all. */
	if (xenbus_transaction_end(xbt, 0) == EAGAIN)
		goto retry;

	/*
	 * Somewhat arbitrary - on Linux this means 'reboot'. We could just
	 * accept any key, but this might increase the risk of sending a
	 * harmless sysrq to the wrong domain...
	 */
	if (key == 'b')
		(void) xen_debug_handler(NULL);
	else
		cmn_err(CE_WARN, "Ignored sysrq %c", key);
	return;

out:
	/* Abort the transaction on any failure path. */
	(void) xenbus_transaction_end(xbt, 1);
}
718 
/* taskq on which externally requested shutdowns are processed */
taskq_t *xen_shutdown_tq;

/* Codes for the shutdown requests that arrive via control/shutdown. */
#define	SHUTDOWN_INVALID	-1
#define	SHUTDOWN_POWEROFF	0
#define	SHUTDOWN_REBOOT		1
#define	SHUTDOWN_SUSPEND	2
#define	SHUTDOWN_HALT		3
#define	SHUTDOWN_MAX		4

/* How long to give init(1) before forcing the shutdown through. */
#define	SHUTDOWN_TIMEOUT_SECS (60 * 5)

/*
 * Request strings as written to the xenstore, indexed by the SHUTDOWN_*
 * codes above.  NOTE(review): not referenced anywhere in this file --
 * confirm external use before removing.
 */
static const char *cmd_strings[SHUTDOWN_MAX] = {
	"poweroff",
	"reboot",
	"suspend",
	"halt"
};
736 
737 static void
738 xen_dirty_shutdown(void *arg)
739 {
740 	int cmd = (uintptr_t)arg;
741 
742 	cmn_err(CE_WARN, "Externally requested shutdown failed or "
743 	    "timed out.\nShutting down.\n");
744 
745 	switch (cmd) {
746 	case SHUTDOWN_HALT:
747 	case SHUTDOWN_POWEROFF:
748 		(void) kadmin(A_SHUTDOWN, AD_POWEROFF, NULL, kcred);
749 		break;
750 	case SHUTDOWN_REBOOT:
751 		(void) kadmin(A_REBOOT, AD_BOOT, NULL, kcred);
752 		break;
753 	}
754 }
755 
/*
 * taskq callback: carry out a shutdown request read from the xenstore.
 * arg encodes one of the SHUTDOWN_* codes.
 */
static void
xen_shutdown(void *arg)
{
	int cmd = (uintptr_t)arg;
	proc_t *initpp;

	ASSERT(cmd > SHUTDOWN_INVALID && cmd < SHUTDOWN_MAX);

	/* Suspend is handled entirely in-kernel; no init(1) involvement. */
	if (cmd == SHUTDOWN_SUSPEND) {
		xen_suspend_domain();
		return;
	}

	switch (cmd) {
	case SHUTDOWN_POWEROFF:
		force_shutdown_method = AD_POWEROFF;
		break;
	case SHUTDOWN_HALT:
		force_shutdown_method = AD_HALT;
		break;
	case SHUTDOWN_REBOOT:
		force_shutdown_method = AD_BOOT;
		break;
	}

	/*
	 * If we're still booting and init(1) isn't set up yet, simply halt.
	 */
	mutex_enter(&pidlock);
	initpp = prfind(P_INITPID);
	mutex_exit(&pidlock);
	if (initpp == NULL) {
		extern void halt(char *);
		halt("Power off the System");   /* just in case */
	}

	/*
	 * else, graceful shutdown with inittab and all getting involved
	 */
	psignal(initpp, SIGPWR);

	/* Fall back to a forced shutdown if init doesn't finish in time. */
	(void) timeout(xen_dirty_shutdown, arg,
	    SHUTDOWN_TIMEOUT_SECS * drv_usectohz(MICROSEC));
}
800 
/*
 * xenstore watch callback for control/shutdown: parse the request
 * string, clear the node (which re-fires this watch with ""), and
 * dispatch the decoded command to xen_shutdown() via the taskq.
 */
/*ARGSUSED*/
static void
xen_shutdown_handler(struct xenbus_watch *watch, const char **vec,
	unsigned int len)
{
	char *str;
	xenbus_transaction_t xbt;
	int err, shutdown_code = SHUTDOWN_INVALID;
	unsigned int slen;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return;
	/* str is allocated by xenbus_read(); freed on every path below. */
	if (xenbus_read(xbt, "control", "shutdown", (void *)&str, &slen)) {
		(void) xenbus_transaction_end(xbt, 1);
		return;
	}

	SUSPEND_DEBUG("%d: xen_shutdown_handler: \"%s\"\n", CPU->cpu_id, str);

	/*
	 * If this is a watch fired from our write below, check out early to
	 * avoid an infinite loop.
	 */
	if (strcmp(str, "") == 0) {
		(void) xenbus_transaction_end(xbt, 0);
		kmem_free(str, slen);
		return;
	} else if (strcmp(str, "poweroff") == 0) {
		shutdown_code = SHUTDOWN_POWEROFF;
	} else if (strcmp(str, "reboot") == 0) {
		shutdown_code = SHUTDOWN_REBOOT;
	} else if (strcmp(str, "suspend") == 0) {
		shutdown_code = SHUTDOWN_SUSPEND;
	} else if (strcmp(str, "halt") == 0) {
		shutdown_code = SHUTDOWN_HALT;
	} else {
		printf("Ignoring shutdown request: %s\n", str);
	}

	/*
	 * XXPV	Should we check the value of xenbus_write() too, or are all
	 *	errors automatically folded into xenbus_transaction_end() ??
	 */
	(void) xenbus_write(xbt, "control", "shutdown", "");
	err = xenbus_transaction_end(xbt, 0);
	if (err == EAGAIN) {
		/* A concurrent xenstore update aborted us; redo it all. */
		SUSPEND_DEBUG("%d: trying again\n", CPU->cpu_id);
		kmem_free(str, slen);
		goto again;
	}

	kmem_free(str, slen);
	if (shutdown_code != SHUTDOWN_INVALID) {
		(void) taskq_dispatch(xen_shutdown_tq, xen_shutdown,
		    (void *)(intptr_t)shutdown_code, 0);
	}
}
860 
/* xenstore watches for externally requested shutdown and sysrq events */
static struct xenbus_watch shutdown_watch;
static struct xenbus_watch sysrq_watch;
863 
/*
 * Late-boot initialization: for a domU, create the shutdown taskq and
 * register the control/shutdown and control/sysrq xenstore watches;
 * then initialize the balloon driver for all domain types.
 */
void
xen_late_startup(void)
{
	if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
		xen_shutdown_tq = taskq_create("shutdown_taskq", 1,
		    maxclsyspri - 1, 1, 1, TASKQ_PREPOPULATE);
		shutdown_watch.node = "control/shutdown";
		shutdown_watch.callback = xen_shutdown_handler;
		if (register_xenbus_watch(&shutdown_watch))
			cmn_err(CE_WARN, "Failed to set shutdown watcher");

		sysrq_watch.node = "control/sysrq";
		sysrq_watch.callback = xen_sysrq_handler;
		if (register_xenbus_watch(&sysrq_watch))
			cmn_err(CE_WARN, "Failed to set sysrq watcher");
	}
	balloon_init(xen_info->nr_pages);
}
882 
#ifdef DEBUG
#define	XEN_PRINTF_BUFSIZE	1024

/*
 * Static format buffer.  NOTE(review): no locking here, so concurrent
 * callers can interleave output -- acceptable for a debug aid.
 */
char xen_printf_buffer[XEN_PRINTF_BUFSIZE];

/*
 * Printf function that calls hypervisor directly.  For DomU it only
 * works when running on a xen hypervisor built with debug on.  Works
 * always since no I/O ring interaction is needed.
 */
/*PRINTFLIKE1*/
void
xen_printf(const char *fmt, ...)
{
	va_list	ap;

	va_start(ap, fmt);
	(void) vsnprintf(xen_printf_buffer, XEN_PRINTF_BUFSIZE, fmt, ap);
	va_end(ap);

	(void) HYPERVISOR_console_io(CONSOLEIO_write,
	    strlen(xen_printf_buffer), xen_printf_buffer);
}
#else
/* Non-DEBUG stub: keeps external callers linking; does nothing. */
/*ARGSUSED*/
void
xen_printf(const char *fmt, ...)
{
}
#endif	/* DEBUG */
912 
/*
 * Record the boot-time hypervisor version (which also primes the
 * "current" copy), warn if it is too old to run Solaris, and apply
 * the PTE workaround where the hypervisor needs it.
 */
void
startup_xen_version(void)
{
	xen_set_version(XENVER_BOOT_IDX);
	if (xen_hypervisor_supports_solaris(XEN_RUN_CHECK) == 0)
		cmn_err(CE_WARN, "Found hypervisor version: v%lu.%lu%s "
		    "but need at least version v3.0.4",
		    XENVER_CURRENT(xv_major), XENVER_CURRENT(xv_minor),
		    XENVER_CURRENT(xv_ver));
	xen_pte_workaround();
}
924 
925 /*
926  * Miscellaneous hypercall wrappers with slightly more verbose diagnostics.
927  */
928 
929 void
930 xen_set_gdt(ulong_t *frame_list, int entries)
931 {
932 	int err;
933 	if ((err = HYPERVISOR_set_gdt(frame_list, entries)) != 0) {
934 		/*
935 		 * X_EINVAL:	reserved entry or bad frames
936 		 * X_EFAULT:	bad address
937 		 */
938 		panic("xen_set_gdt(%p, %d): error %d",
939 		    (void *)frame_list, entries, -(int)err);
940 	}
941 }
942 
943 void
944 xen_set_ldt(user_desc_t *ldt, uint_t nsels)
945 {
946 	struct mmuext_op	op;
947 	long			err;
948 
949 	op.cmd = MMUEXT_SET_LDT;
950 	op.arg1.linear_addr = (uintptr_t)ldt;
951 	op.arg2.nr_ents = nsels;
952 
953 	if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) != 0) {
954 		panic("xen_set_ldt(%p, %d): error %d",
955 		    (void *)ldt, nsels, -(int)err);
956 	}
957 }
958 
959 void
960 xen_stack_switch(ulong_t ss, ulong_t esp)
961 {
962 	long err;
963 
964 	if ((err = HYPERVISOR_stack_switch(ss, esp)) != 0) {
965 		/*
966 		 * X_EPERM:	bad selector
967 		 */
968 		panic("xen_stack_switch(%lx, %lx): error %d", ss, esp,
969 		    -(int)err);
970 	}
971 }
972 
973 long
974 xen_set_trap_table(trap_info_t *table)
975 {
976 	long err;
977 
978 	if ((err = HYPERVISOR_set_trap_table(table)) != 0) {
979 		/*
980 		 * X_EFAULT:	bad address
981 		 * X_EPERM:	bad selector
982 		 */
983 		panic("xen_set_trap_table(%p): error %d", (void *)table,
984 		    -(int)err);
985 	}
986 	return (err);
987 }
988 
989 #if defined(__amd64)
990 void
991 xen_set_segment_base(int reg, ulong_t value)
992 {
993 	long err;
994 
995 	if ((err = HYPERVISOR_set_segment_base(reg, value)) != 0) {
996 		/*
997 		 * X_EFAULT:	bad address
998 		 * X_EINVAL:	bad type
999 		 */
1000 		panic("xen_set_segment_base(%d, %lx): error %d",
1001 		    reg, value, -(int)err);
1002 	}
1003 }
1004 #endif	/* __amd64 */
1005 
1006 /*
1007  * Translate a hypervisor errcode to a Solaris error code.
1008  */
int
xen_xlate_errcode(int error)
{
	/* Hypervisor errors arrive negated; switch on the positive X_* code. */
	switch (-error) {

	/*
	 * Translate hypervisor errno's into native errno's
	 */

#define	CASE(num)	case X_##num: error = num; break

	CASE(EPERM);	CASE(ENOENT);	CASE(ESRCH);
	CASE(EINTR);	CASE(EIO);	CASE(ENXIO);
	CASE(E2BIG);	CASE(ENOMEM);	CASE(EACCES);
	CASE(EFAULT);	CASE(EBUSY);	CASE(EEXIST);
	CASE(ENODEV);	CASE(EISDIR);	CASE(EINVAL);
	CASE(ENOSPC);	CASE(ESPIPE);	CASE(EROFS);
	CASE(ENOSYS);	CASE(ENOTEMPTY); CASE(EISCONN);
	CASE(ENODATA);

#undef CASE

	/* An unrecognized code is a programming error. */
	default:
		panic("xen_xlate_errcode: unknown error %d", error);
	}

	return (error);
}
1037 
1038 /*
1039  * Raise PS_IOPL on current vcpu to user level.
1040  * Caller responsible for preventing kernel preemption.
1041  */
1042 void
1043 xen_enable_user_iopl(void)
1044 {
1045 	physdev_set_iopl_t set_iopl;
1046 	set_iopl.iopl = 3;		/* user ring 3 */
1047 	(void) HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
1048 }
1049 
1050 /*
1051  * Drop PS_IOPL on current vcpu to kernel level
1052  */
1053 void
1054 xen_disable_user_iopl(void)
1055 {
1056 	physdev_set_iopl_t set_iopl;
1057 	set_iopl.iopl = 1;		/* kernel pseudo ring 1 */
1058 	(void) HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
1059 }
1060 
/*
 * Change protections on a cpu's GDT mapping (one page).  On amd64 the
 * kpm mapping of the GDT's physical page is kept in sync as well.
 * Returns 0 on success, else the as_setprot()/xen_kpm_page() error
 * (a warning is logged on failure).
 */
int
xen_gdt_setprot(cpu_t *cp, uint_t prot)
{
	int err;
#if defined(__amd64)
	int pt_bits = PT_VALID;
	if (prot & PROT_WRITE)
		pt_bits |= PT_WRITABLE;
#endif

	if ((err = as_setprot(&kas, (caddr_t)cp->cpu_gdt,
	    MMU_PAGESIZE, prot)) != 0)
		goto done;

#if defined(__amd64)
	err = xen_kpm_page(mmu_btop(cp->cpu_m.mcpu_gdtpa), pt_bits);
#endif

done:
	if (err) {
		cmn_err(CE_WARN, "cpu%d: xen_gdt_setprot(%s) failed: error %d",
		    cp->cpu_id, (prot & PROT_WRITE) ? "writable" : "read-only",
		    err);
	}

	return (err);
}
1088 
/*
 * Change protections on an LDT mapping of lsize bytes (lsize must be
 * page-aligned).  On amd64 the kpm mapping of each underlying page is
 * updated as well.  Returns 0 on success, else the first
 * as_setprot()/xen_kpm_page() error (a warning is logged on failure).
 */
int
xen_ldt_setprot(user_desc_t *ldt, size_t lsize, uint_t prot)
{
	int err;
	caddr_t	lva = (caddr_t)ldt;
#if defined(__amd64)
	int pt_bits = PT_VALID;
	pgcnt_t npgs;
	if (prot & PROT_WRITE)
		pt_bits |= PT_WRITABLE;
#endif	/* __amd64 */

	if ((err = as_setprot(&kas, (caddr_t)ldt, lsize, prot)) != 0)
		goto done;

#if defined(__amd64)

	ASSERT(IS_P2ALIGNED(lsize, PAGESIZE));
	npgs = mmu_btop(lsize);
	while (npgs--) {
		if ((err = xen_kpm_page(hat_getpfnum(kas.a_hat, lva),
		    pt_bits)) != 0)
			break;
		lva += PAGESIZE;
	}
#endif	/* __amd64 */

done:
	if (err) {
		/*
		 * NOTE(review): on amd64, lva has been advanced page by
		 * page above, so this reports the failing page rather
		 * than the start of the LDT.
		 */
		cmn_err(CE_WARN, "xen_ldt_setprot(%p, %s) failed: error %d",
		    (void *)lva,
		    (prot & PROT_WRITE) ? "writable" : "read-only", err);
	}

	return (err);
}
1125