1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* derived from netbsd's xen_machdep.c 1.1.2.1 */ 28 29 /* 30 * 31 * Copyright (c) 2004 Christian Limpach. 32 * All rights reserved. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 3. This section intentionally left blank. 43 * 4. The name of the author may not be used to endorse or promote products 44 * derived from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 47 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 48 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 49 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 50 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 51 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 52 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 53 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 54 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 55 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 56 */ 57 /* 58 * Section 3 of the above license was updated in response to bug 6379571. 59 */ 60 61 #include <sys/xpv_user.h> 62 63 /* XXX 3.3. TODO remove this include */ 64 #include <xen/public/arch-x86/xen-mca.h> 65 66 #include <sys/ctype.h> 67 #include <sys/types.h> 68 #include <sys/cmn_err.h> 69 #include <sys/trap.h> 70 #include <sys/segments.h> 71 #include <sys/hypervisor.h> 72 #include <sys/xen_mmu.h> 73 #include <sys/machsystm.h> 74 #include <sys/promif.h> 75 #include <sys/bootconf.h> 76 #include <sys/bootinfo.h> 77 #include <sys/cpr.h> 78 #include <sys/taskq.h> 79 #include <sys/uadmin.h> 80 #include <sys/evtchn_impl.h> 81 #include <sys/archsystm.h> 82 #include <xen/sys/xenbus_impl.h> 83 #include <sys/mach_mmu.h> 84 #include <vm/hat_i86.h> 85 #include <sys/gnttab.h> 86 #include <sys/reboot.h> 87 #include <sys/stack.h> 88 #include <sys/clock.h> 89 #include <sys/bitmap.h> 90 #include <sys/processor.h> 91 #include <sys/xen_errno.h> 92 #include <sys/xpv_panic.h> 93 #include <sys/smp_impldefs.h> 94 #include <sys/cpu.h> 95 #include <sys/balloon_impl.h> 96 #include <sys/ddi.h> 97 98 #ifdef DEBUG 99 #define SUSPEND_DEBUG if (xen_suspend_debug) xen_printf 100 #else 101 #define SUSPEND_DEBUG(...) 102 #endif 103 104 int cpr_debug; 105 cpuset_t cpu_suspend_lost_set; 106 static int xen_suspend_debug; 107 108 uint_t xen_phys_ncpus; 109 xen_mc_logical_cpu_t *xen_phys_cpus; 110 int xen_physinfo_debug = 0; 111 112 /* 113 * Determine helpful version information. 114 * 115 * (And leave copies in the data segment so we can look at them later 116 * with e.g. kmdb.) 117 */ 118 119 typedef enum xen_version { 120 XENVER_BOOT_IDX, 121 XENVER_CURRENT_IDX 122 } xen_version_t; 123 124 struct xenver { 125 ulong_t xv_major; 126 ulong_t xv_minor; 127 ulong_t xv_revision; 128 xen_extraversion_t xv_ver; 129 ulong_t xv_is_xvm; 130 xen_changeset_info_t xv_chgset; 131 xen_compile_info_t xv_build; 132 xen_capabilities_info_t xv_caps; 133 } xenver[2]; 134 135 #define XENVER_BOOT(m) (xenver[XENVER_BOOT_IDX].m) 136 #define XENVER_CURRENT(m) (xenver[XENVER_CURRENT_IDX].m) 137 138 /* 139 * Update the xenver data. We maintain two copies, boot and 140 * current. If we are setting the boot, then also set current. 141 */ 142 static void 143 xen_set_version(xen_version_t idx) 144 { 145 ulong_t ver; 146 147 bzero(&xenver[idx], sizeof (xenver[idx])); 148 149 ver = HYPERVISOR_xen_version(XENVER_version, 0); 150 151 xenver[idx].xv_major = BITX(ver, 31, 16); 152 xenver[idx].xv_minor = BITX(ver, 15, 0); 153 154 (void) HYPERVISOR_xen_version(XENVER_extraversion, &xenver[idx].xv_ver); 155 156 /* 157 * The revision is buried in the extraversion information that is 158 * maintained by the hypervisor. For our purposes we expect that 159 * the revision number is: 160 * - the second character in the extraversion information 161 * - one character long 162 * - numeric digit 163 * If it isn't then we can't extract the revision and we leave it 164 * set to 0. 165 */ 166 if (strlen(xenver[idx].xv_ver) > 1 && isdigit(xenver[idx].xv_ver[1])) 167 xenver[idx].xv_revision = xenver[idx].xv_ver[1] - '0'; 168 else 169 cmn_err(CE_WARN, "Cannot extract revision on this hypervisor " 170 "version: v%s, unexpected version format", 171 xenver[idx].xv_ver); 172 173 xenver[idx].xv_is_xvm = 0; 174 175 if (strstr(xenver[idx].xv_ver, "-xvm") != NULL) 176 xenver[idx].xv_is_xvm = 1; 177 178 (void) HYPERVISOR_xen_version(XENVER_changeset, 179 &xenver[idx].xv_chgset); 180 181 (void) HYPERVISOR_xen_version(XENVER_compile_info, 182 &xenver[idx].xv_build); 183 /* 184 * Capabilities are a set of space separated ascii strings 185 * e.g. 'xen-3.1-x86_32p' or 'hvm-3.2-x86_64' 186 */ 187 (void) HYPERVISOR_xen_version(XENVER_capabilities, 188 &xenver[idx].xv_caps); 189 190 cmn_err(CE_CONT, "?v%lu.%lu%s chgset '%s'\n", xenver[idx].xv_major, 191 xenver[idx].xv_minor, xenver[idx].xv_ver, xenver[idx].xv_chgset); 192 193 if (idx == XENVER_BOOT_IDX) 194 bcopy(&xenver[XENVER_BOOT_IDX], &xenver[XENVER_CURRENT_IDX], 195 sizeof (xenver[XENVER_BOOT_IDX])); 196 } 197 198 typedef enum xen_hypervisor_check { 199 XEN_RUN_CHECK, 200 XEN_SUSPEND_CHECK 201 } xen_hypervisor_check_t; 202 203 /* 204 * To run the hypervisor must be 3.0.4 or better. To suspend/resume 205 * we need 3.0.4 or better and if it is 3.0.4. then it must be provided 206 * by the Solaris xVM project. 207 * Checking can be disabled for testing purposes by setting the 208 * xen_suspend_debug variable. 209 */ 210 static int 211 xen_hypervisor_supports_solaris(xen_hypervisor_check_t check) 212 { 213 if (xen_suspend_debug == 1) 214 return (1); 215 if (XENVER_CURRENT(xv_major) < 3) 216 return (0); 217 if (XENVER_CURRENT(xv_major) > 3) 218 return (1); 219 if (XENVER_CURRENT(xv_minor) > 0) 220 return (1); 221 if (XENVER_CURRENT(xv_revision) < 4) 222 return (0); 223 if (check == XEN_SUSPEND_CHECK && XENVER_CURRENT(xv_revision) == 4 && 224 !XENVER_CURRENT(xv_is_xvm)) 225 return (0); 226 227 return (1); 228 } 229 230 /* 231 * If the hypervisor is -xvm, or 3.1.2 or higher, we don't need the 232 * workaround. 233 */ 234 static void 235 xen_pte_workaround(void) 236 { 237 #if defined(__amd64) 238 extern int pt_kern; 239 240 if (XENVER_CURRENT(xv_major) != 3) 241 return; 242 if (XENVER_CURRENT(xv_minor) > 1) 243 return; 244 if (XENVER_CURRENT(xv_minor) == 1 && 245 XENVER_CURRENT(xv_revision) > 1) 246 return; 247 if (XENVER_CURRENT(xv_is_xvm)) 248 return; 249 250 pt_kern = PT_USER; 251 #endif 252 } 253 254 void 255 xen_set_callback(void (*func)(void), uint_t type, uint_t flags) 256 { 257 struct callback_register cb; 258 259 bzero(&cb, sizeof (cb)); 260 #if defined(__amd64) 261 cb.address = (ulong_t)func; 262 #elif defined(__i386) 263 cb.address.cs = KCS_SEL; 264 cb.address.eip = (ulong_t)func; 265 #endif 266 cb.type = type; 267 cb.flags = flags; 268 269 /* 270 * XXPV always ignore return value for NMI 271 */ 272 if (HYPERVISOR_callback_op(CALLBACKOP_register, &cb) != 0 && 273 type != CALLBACKTYPE_nmi) 274 panic("HYPERVISOR_callback_op failed"); 275 } 276 277 void 278 xen_init_callbacks(void) 279 { 280 /* 281 * register event (interrupt) handler. 282 */ 283 xen_set_callback(xen_callback, CALLBACKTYPE_event, 0); 284 285 /* 286 * failsafe handler. 287 */ 288 xen_set_callback(xen_failsafe_callback, CALLBACKTYPE_failsafe, 289 CALLBACKF_mask_events); 290 291 /* 292 * NMI handler. 293 */ 294 xen_set_callback(nmiint, CALLBACKTYPE_nmi, 0); 295 296 /* 297 * system call handler 298 * XXPV move to init_cpu_syscall? 299 */ 300 #if defined(__amd64) 301 xen_set_callback(sys_syscall, CALLBACKTYPE_syscall, 302 CALLBACKF_mask_events); 303 #endif /* __amd64 */ 304 } 305 306 307 /* 308 * cmn_err() followed by a 1/4 second delay; this gives the 309 * logging service a chance to flush messages and helps avoid 310 * intermixing output from prom_printf(). 311 * XXPV: doesn't exactly help us on UP though. 312 */ 313 /*PRINTFLIKE2*/ 314 void 315 cpr_err(int ce, const char *fmt, ...) 316 { 317 va_list adx; 318 319 va_start(adx, fmt); 320 vcmn_err(ce, fmt, adx); 321 va_end(adx); 322 drv_usecwait(MICROSEC >> 2); 323 } 324 325 void 326 xen_suspend_devices(void) 327 { 328 int rc; 329 330 SUSPEND_DEBUG("xen_suspend_devices\n"); 331 332 if ((rc = cpr_suspend_devices(ddi_root_node())) != 0) 333 panic("failed to suspend devices: %d", rc); 334 } 335 336 void 337 xen_resume_devices(void) 338 { 339 int rc; 340 341 SUSPEND_DEBUG("xen_resume_devices\n"); 342 343 if ((rc = cpr_resume_devices(ddi_root_node(), 0)) != 0) 344 panic("failed to resume devices: %d", rc); 345 } 346 347 /* 348 * The list of mfn pages is out of date. Recompute it. 349 */ 350 static void 351 rebuild_mfn_list(void) 352 { 353 int i = 0; 354 size_t sz; 355 size_t off; 356 pfn_t pfn; 357 358 SUSPEND_DEBUG("rebuild_mfn_list\n"); 359 360 sz = ((mfn_count * sizeof (mfn_t)) + MMU_PAGEOFFSET) & MMU_PAGEMASK; 361 362 for (off = 0; off < sz; off += MMU_PAGESIZE) { 363 size_t j = mmu_btop(off); 364 if (((j * sizeof (mfn_t)) & MMU_PAGEOFFSET) == 0) { 365 pfn = hat_getpfnum(kas.a_hat, 366 (caddr_t)&mfn_list_pages[j]); 367 mfn_list_pages_page[i++] = pfn_to_mfn(pfn); 368 } 369 370 pfn = hat_getpfnum(kas.a_hat, (caddr_t)mfn_list + off); 371 mfn_list_pages[j] = pfn_to_mfn(pfn); 372 } 373 374 pfn = hat_getpfnum(kas.a_hat, (caddr_t)mfn_list_pages_page); 375 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list 376 = pfn_to_mfn(pfn); 377 } 378 379 static void 380 suspend_cpus(void) 381 { 382 int i; 383 384 SUSPEND_DEBUG("suspend_cpus\n"); 385 386 mp_enter_barrier(); 387 388 for (i = 1; i < ncpus; i++) { 389 if (!CPU_IN_SET(cpu_suspend_lost_set, i)) { 390 SUSPEND_DEBUG("xen_vcpu_down %d\n", i); 391 (void) xen_vcpu_down(i); 392 } 393 394 mach_cpucontext_reset(cpu[i]); 395 } 396 } 397 398 static void 399 resume_cpus(void) 400 { 401 int i; 402 403 for (i = 1; i < ncpus; i++) { 404 if (cpu[i] == NULL) 405 continue; 406 407 if (!CPU_IN_SET(cpu_suspend_lost_set, i)) { 408 SUSPEND_DEBUG("xen_vcpu_up %d\n", i); 409 mach_cpucontext_restore(cpu[i]); 410 (void) xen_vcpu_up(i); 411 } 412 } 413 414 mp_leave_barrier(); 415 } 416 417 /* 418 * Top level routine to direct suspend/resume of a domain. 419 */ 420 void 421 xen_suspend_domain(void) 422 { 423 extern void rtcsync(void); 424 extern hrtime_t hres_last_tick; 425 mfn_t start_info_mfn; 426 ulong_t flags; 427 pfn_t pfn; 428 int i; 429 430 /* 431 * Check that we are happy to suspend on this hypervisor. 432 */ 433 if (xen_hypervisor_supports_solaris(XEN_SUSPEND_CHECK) == 0) { 434 cpr_err(CE_WARN, "Cannot suspend on this hypervisor " 435 "version: v%lu.%lu%s, need at least version v3.0.4 or " 436 "-xvm based hypervisor", XENVER_CURRENT(xv_major), 437 XENVER_CURRENT(xv_minor), XENVER_CURRENT(xv_ver)); 438 return; 439 } 440 441 /* 442 * XXPV - Are we definitely OK to suspend by the time we've connected 443 * the handler? 444 */ 445 446 cpr_err(CE_NOTE, "Domain suspending for save/migrate"); 447 448 SUSPEND_DEBUG("xen_suspend_domain\n"); 449 450 /* 451 * suspend interrupts and devices 452 * XXPV - we use suspend/resume for both save/restore domains (like sun 453 * cpr) and for migration. Would be nice to know the difference if 454 * possible. For save/restore where down time may be a long time, we 455 * may want to do more of the things that cpr does. (i.e. notify user 456 * processes, shrink memory footprint for faster restore, etc.) 457 */ 458 xen_suspend_devices(); 459 SUSPEND_DEBUG("xenbus_suspend\n"); 460 xenbus_suspend(); 461 462 pfn = hat_getpfnum(kas.a_hat, (caddr_t)xen_info); 463 start_info_mfn = pfn_to_mfn(pfn); 464 465 /* 466 * XXPV: cpu hotplug can hold this under a xenbus watch. Are we safe 467 * wrt xenbus being suspended here? 468 */ 469 mutex_enter(&cpu_lock); 470 471 /* 472 * Suspend must be done on vcpu 0, as no context for other CPUs is 473 * saved. 474 * 475 * XXPV - add to taskq API ? 476 */ 477 thread_affinity_set(curthread, 0); 478 kpreempt_disable(); 479 480 SUSPEND_DEBUG("xen_start_migrate\n"); 481 xen_start_migrate(); 482 if (ncpus > 1) 483 suspend_cpus(); 484 485 /* 486 * We can grab the ec_lock as it's a spinlock with a high SPL. Hence 487 * any holder would have dropped it to get through suspend_cpus(). 488 */ 489 mutex_enter(&ec_lock); 490 491 /* 492 * From here on in, we can't take locks. 493 */ 494 SUSPEND_DEBUG("ec_suspend\n"); 495 ec_suspend(); 496 SUSPEND_DEBUG("gnttab_suspend\n"); 497 gnttab_suspend(); 498 499 flags = intr_clear(); 500 501 xpv_time_suspend(); 502 503 /* 504 * Currently, the hypervisor incorrectly fails to bring back 505 * powered-down VCPUs. Thus we need to record any powered-down VCPUs 506 * to prevent any attempts to operate on them. But we have to do this 507 * *after* the very first time we do ec_suspend(). 508 */ 509 for (i = 1; i < ncpus; i++) { 510 if (cpu[i] == NULL) 511 continue; 512 513 if (cpu_get_state(cpu[i]) == P_POWEROFF) 514 CPUSET_ATOMIC_ADD(cpu_suspend_lost_set, i); 515 } 516 517 /* 518 * The dom0 save/migrate code doesn't automatically translate 519 * these into PFNs, but expects them to be, so we do it here. 520 * We don't use mfn_to_pfn() because so many OS services have 521 * been disabled at this point. 522 */ 523 xen_info->store_mfn = mfn_to_pfn_mapping[xen_info->store_mfn]; 524 xen_info->console.domU.mfn = 525 mfn_to_pfn_mapping[xen_info->console.domU.mfn]; 526 527 if (CPU->cpu_m.mcpu_vcpu_info->evtchn_upcall_mask == 0) { 528 prom_printf("xen_suspend_domain(): " 529 "CPU->cpu_m.mcpu_vcpu_info->evtchn_upcall_mask not set\n"); 530 (void) HYPERVISOR_shutdown(SHUTDOWN_crash); 531 } 532 533 if (HYPERVISOR_update_va_mapping((uintptr_t)HYPERVISOR_shared_info, 534 0, UVMF_INVLPG)) { 535 prom_printf("xen_suspend_domain(): " 536 "HYPERVISOR_update_va_mapping() failed\n"); 537 (void) HYPERVISOR_shutdown(SHUTDOWN_crash); 538 } 539 540 SUSPEND_DEBUG("HYPERVISOR_suspend\n"); 541 542 /* 543 * At this point we suspend and sometime later resume. 544 */ 545 if (HYPERVISOR_suspend(start_info_mfn)) { 546 prom_printf("xen_suspend_domain(): " 547 "HYPERVISOR_suspend() failed\n"); 548 (void) HYPERVISOR_shutdown(SHUTDOWN_crash); 549 } 550 551 /* 552 * Point HYPERVISOR_shared_info to its new value. 553 */ 554 if (HYPERVISOR_update_va_mapping((uintptr_t)HYPERVISOR_shared_info, 555 xen_info->shared_info | PT_NOCONSIST | PT_VALID | PT_WRITABLE, 556 UVMF_INVLPG)) 557 (void) HYPERVISOR_shutdown(SHUTDOWN_crash); 558 559 if (xen_info->nr_pages != mfn_count) { 560 prom_printf("xen_suspend_domain(): number of pages" 561 " changed, was 0x%lx, now 0x%lx\n", mfn_count, 562 xen_info->nr_pages); 563 (void) HYPERVISOR_shutdown(SHUTDOWN_crash); 564 } 565 566 xpv_time_resume(); 567 568 cached_max_mfn = 0; 569 570 SUSPEND_DEBUG("gnttab_resume\n"); 571 gnttab_resume(); 572 573 /* XXPV: add a note that this must be lockless. */ 574 SUSPEND_DEBUG("ec_resume\n"); 575 ec_resume(); 576 577 intr_restore(flags); 578 579 if (ncpus > 1) 580 resume_cpus(); 581 582 mutex_exit(&ec_lock); 583 xen_end_migrate(); 584 mutex_exit(&cpu_lock); 585 586 /* 587 * Now we can take locks again. 588 */ 589 590 /* 591 * Force the tick value used for tv_nsec in hres_tick() to be up to 592 * date. rtcsync() will reset the hrestime value appropriately. 593 */ 594 hres_last_tick = xpv_gethrtime(); 595 596 /* 597 * XXPV: we need to have resumed the CPUs since this takes locks, but 598 * can remote CPUs see bad state? Presumably yes. Should probably nest 599 * taking of todlock inside of cpu_lock, or vice versa, then provide an 600 * unlocked version. Probably need to call clkinitf to reset cpu freq 601 * and re-calibrate if we migrated to a different speed cpu. Also need 602 * to make a (re)init_cpu_info call to update processor info structs 603 * and device tree info. That remains to be written at the moment. 604 */ 605 rtcsync(); 606 607 rebuild_mfn_list(); 608 609 SUSPEND_DEBUG("xenbus_resume\n"); 610 xenbus_resume(); 611 SUSPEND_DEBUG("xenbus_resume_devices\n"); 612 xen_resume_devices(); 613 614 thread_affinity_clear(curthread); 615 kpreempt_enable(); 616 617 SUSPEND_DEBUG("finished xen_suspend_domain\n"); 618 619 /* 620 * We have restarted our suspended domain, update the hypervisor 621 * details. NB: This must be done at the end of this function, 622 * since we need the domain to be completely resumed before 623 * these functions will work correctly. 624 */ 625 xen_set_version(XENVER_CURRENT_IDX); 626 627 /* 628 * We can check and report a warning, but we don't stop the 629 * process. 630 */ 631 if (xen_hypervisor_supports_solaris(XEN_SUSPEND_CHECK) == 0) 632 cmn_err(CE_WARN, "Found hypervisor version: v%lu.%lu%s " 633 "but need at least version v3.0.4", 634 XENVER_CURRENT(xv_major), XENVER_CURRENT(xv_minor), 635 XENVER_CURRENT(xv_ver)); 636 637 cmn_err(CE_NOTE, "domain restore/migrate completed"); 638 } 639 640 uint_t 641 xen_debug_handler(caddr_t arg __unused, caddr_t arg1 __unused) 642 { 643 debug_enter("External debug event received"); 644 645 /* 646 * If we've not got KMDB loaded, output some stuff difficult to capture 647 * from a domain core. 648 */ 649 if (!(boothowto & RB_DEBUG)) { 650 shared_info_t *si = HYPERVISOR_shared_info; 651 int i; 652 653 prom_printf("evtchn_pending [ "); 654 for (i = 0; i < 8; i++) 655 prom_printf("%lx ", si->evtchn_pending[i]); 656 prom_printf("]\nevtchn_mask [ "); 657 for (i = 0; i < 8; i++) 658 prom_printf("%lx ", si->evtchn_mask[i]); 659 prom_printf("]\n"); 660 661 for (i = 0; i < ncpus; i++) { 662 vcpu_info_t *vcpu = &si->vcpu_info[i]; 663 if (cpu[i] == NULL) 664 continue; 665 prom_printf("CPU%d pending %d mask %d sel %lx\n", 666 i, vcpu->evtchn_upcall_pending, 667 vcpu->evtchn_upcall_mask, 668 vcpu->evtchn_pending_sel); 669 } 670 } 671 672 return (0); 673 } 674 675 /*ARGSUSED*/ 676 static void 677 xen_sysrq_handler(struct xenbus_watch *watch, const char **vec, 678 unsigned int len) 679 { 680 xenbus_transaction_t xbt; 681 char key = '\0'; 682 int ret; 683 684 retry: 685 if (xenbus_transaction_start(&xbt)) { 686 cmn_err(CE_WARN, "failed to start sysrq transaction"); 687 return; 688 } 689 690 if ((ret = xenbus_scanf(xbt, "control", "sysrq", "%c", &key)) != 0) { 691 /* 692 * ENOENT happens in response to our own xenbus_rm. 693 * XXPV - this happens spuriously on boot? 694 */ 695 if (ret != ENOENT) 696 cmn_err(CE_WARN, "failed to read sysrq: %d", ret); 697 goto out; 698 } 699 700 if ((ret = xenbus_rm(xbt, "control", "sysrq")) != 0) { 701 cmn_err(CE_WARN, "failed to reset sysrq: %d", ret); 702 goto out; 703 } 704 705 if (xenbus_transaction_end(xbt, 0) == EAGAIN) 706 goto retry; 707 708 /* 709 * Somewhat arbitrary - on Linux this means 'reboot'. We could just 710 * accept any key, but this might increase the risk of sending a 711 * harmless sysrq to the wrong domain... 712 */ 713 if (key == 'b') 714 (void) xen_debug_handler(NULL, NULL); 715 else 716 cmn_err(CE_WARN, "Ignored sysrq %c", key); 717 return; 718 719 out: 720 (void) xenbus_transaction_end(xbt, 1); 721 } 722 723 taskq_t *xen_shutdown_tq; 724 725 #define SHUTDOWN_INVALID -1 726 #define SHUTDOWN_POWEROFF 0 727 #define SHUTDOWN_REBOOT 1 728 #define SHUTDOWN_SUSPEND 2 729 #define SHUTDOWN_HALT 3 730 #define SHUTDOWN_MAX 4 731 732 #define SHUTDOWN_TIMEOUT_SECS (60 * 5) 733 734 static const char *cmd_strings[SHUTDOWN_MAX] = { 735 "poweroff", 736 "reboot", 737 "suspend", 738 "halt" 739 }; 740 741 static void 742 xen_dirty_shutdown(void *arg) 743 { 744 int cmd = (uintptr_t)arg; 745 746 cmn_err(CE_WARN, "Externally requested shutdown failed or " 747 "timed out.\nShutting down.\n"); 748 749 switch (cmd) { 750 case SHUTDOWN_HALT: 751 case SHUTDOWN_POWEROFF: 752 (void) kadmin(A_SHUTDOWN, AD_POWEROFF, NULL, kcred); 753 break; 754 case SHUTDOWN_REBOOT: 755 (void) kadmin(A_REBOOT, AD_BOOT, NULL, kcred); 756 break; 757 } 758 } 759 760 static void 761 xen_shutdown(void *arg) 762 { 763 int cmd = (uintptr_t)arg; 764 proc_t *initpp; 765 766 ASSERT(cmd > SHUTDOWN_INVALID && cmd < SHUTDOWN_MAX); 767 768 if (cmd == SHUTDOWN_SUSPEND) { 769 xen_suspend_domain(); 770 return; 771 } 772 773 switch (cmd) { 774 case SHUTDOWN_POWEROFF: 775 force_shutdown_method = AD_POWEROFF; 776 break; 777 case SHUTDOWN_HALT: 778 force_shutdown_method = AD_HALT; 779 break; 780 case SHUTDOWN_REBOOT: 781 force_shutdown_method = AD_BOOT; 782 break; 783 } 784 785 /* 786 * If we're still booting and init(1) isn't set up yet, simply halt. 787 */ 788 mutex_enter(&pidlock); 789 initpp = prfind(P_INITPID); 790 mutex_exit(&pidlock); 791 if (initpp == NULL) { 792 extern void halt(char *); 793 halt("Power off the System"); /* just in case */ 794 } 795 796 /* 797 * else, graceful shutdown with inittab and all getting involved 798 */ 799 psignal(initpp, SIGPWR); 800 801 (void) timeout(xen_dirty_shutdown, arg, 802 SHUTDOWN_TIMEOUT_SECS * drv_usectohz(MICROSEC)); 803 } 804 805 /*ARGSUSED*/ 806 static void 807 xen_shutdown_handler(struct xenbus_watch *watch, const char **vec, 808 unsigned int len) 809 { 810 char *str; 811 xenbus_transaction_t xbt; 812 int err, shutdown_code = SHUTDOWN_INVALID; 813 unsigned int slen; 814 815 again: 816 err = xenbus_transaction_start(&xbt); 817 if (err) 818 return; 819 if (xenbus_read(xbt, "control", "shutdown", (void *)&str, &slen)) { 820 (void) xenbus_transaction_end(xbt, 1); 821 return; 822 } 823 824 SUSPEND_DEBUG("%d: xen_shutdown_handler: \"%s\"\n", CPU->cpu_id, str); 825 826 /* 827 * If this is a watch fired from our write below, check out early to 828 * avoid an infinite loop. 829 */ 830 if (strcmp(str, "") == 0) { 831 (void) xenbus_transaction_end(xbt, 0); 832 kmem_free(str, slen); 833 return; 834 } else if (strcmp(str, "poweroff") == 0) { 835 shutdown_code = SHUTDOWN_POWEROFF; 836 } else if (strcmp(str, "reboot") == 0) { 837 shutdown_code = SHUTDOWN_REBOOT; 838 } else if (strcmp(str, "suspend") == 0) { 839 shutdown_code = SHUTDOWN_SUSPEND; 840 } else if (strcmp(str, "halt") == 0) { 841 shutdown_code = SHUTDOWN_HALT; 842 } else { 843 printf("Ignoring shutdown request: %s\n", str); 844 } 845 846 /* 847 * XXPV Should we check the value of xenbus_write() too, or are all 848 * errors automatically folded into xenbus_transaction_end() ?? 849 */ 850 (void) xenbus_write(xbt, "control", "shutdown", ""); 851 err = xenbus_transaction_end(xbt, 0); 852 if (err == EAGAIN) { 853 SUSPEND_DEBUG("%d: trying again\n", CPU->cpu_id); 854 kmem_free(str, slen); 855 goto again; 856 } 857 858 kmem_free(str, slen); 859 if (shutdown_code != SHUTDOWN_INVALID) { 860 (void) taskq_dispatch(xen_shutdown_tq, xen_shutdown, 861 (void *)(intptr_t)shutdown_code, 0); 862 } 863 } 864 865 static struct xenbus_watch shutdown_watch; 866 static struct xenbus_watch sysrq_watch; 867 868 void 869 xen_late_startup(void) 870 { 871 if (!DOMAIN_IS_INITDOMAIN(xen_info)) { 872 xen_shutdown_tq = taskq_create("shutdown_taskq", 1, 873 maxclsyspri - 1, 1, 1, TASKQ_PREPOPULATE); 874 shutdown_watch.node = "control/shutdown"; 875 shutdown_watch.callback = xen_shutdown_handler; 876 if (register_xenbus_watch(&shutdown_watch)) 877 cmn_err(CE_WARN, "Failed to set shutdown watcher"); 878 879 sysrq_watch.node = "control/sysrq"; 880 sysrq_watch.callback = xen_sysrq_handler; 881 if (register_xenbus_watch(&sysrq_watch)) 882 cmn_err(CE_WARN, "Failed to set sysrq watcher"); 883 } 884 balloon_init(xen_info->nr_pages); 885 } 886 887 #ifdef DEBUG 888 #define XEN_PRINTF_BUFSIZE 1024 889 890 char xen_printf_buffer[XEN_PRINTF_BUFSIZE]; 891 892 /* 893 * Printf function that calls hypervisor directly. For DomU it only 894 * works when running on a xen hypervisor built with debug on. Works 895 * always since no I/O ring interaction is needed. 896 */ 897 /*PRINTFLIKE1*/ 898 void 899 xen_printf(const char *fmt, ...) 900 { 901 va_list ap; 902 903 va_start(ap, fmt); 904 (void) vsnprintf(xen_printf_buffer, XEN_PRINTF_BUFSIZE, fmt, ap); 905 va_end(ap); 906 907 (void) HYPERVISOR_console_io(CONSOLEIO_write, 908 strlen(xen_printf_buffer), xen_printf_buffer); 909 } 910 #else 911 void 912 xen_printf(const char *fmt, ...) 913 { 914 } 915 #endif /* DEBUG */ 916 917 void 918 startup_xen_version(void) 919 { 920 xen_set_version(XENVER_BOOT_IDX); 921 if (xen_hypervisor_supports_solaris(XEN_RUN_CHECK) == 0) 922 cmn_err(CE_WARN, "Found hypervisor version: v%lu.%lu%s " 923 "but need at least version v3.0.4", 924 XENVER_CURRENT(xv_major), XENVER_CURRENT(xv_minor), 925 XENVER_CURRENT(xv_ver)); 926 xen_pte_workaround(); 927 } 928 929 int xen_mca_simulate_mc_physinfo_failure = 0; 930 931 void 932 startup_xen_mca(void) 933 { 934 if (!DOMAIN_IS_INITDOMAIN(xen_info)) 935 return; 936 937 xen_phys_ncpus = 0; 938 xen_phys_cpus = NULL; 939 940 if (xen_mca_simulate_mc_physinfo_failure || 941 xen_get_mc_physcpuinfo(NULL, &xen_phys_ncpus) != 0) { 942 cmn_err(CE_WARN, 943 "%sxen_get_mc_physinfo failure during xen MCA startup: " 944 "there will be no machine check support", 945 xen_mca_simulate_mc_physinfo_failure ? "(simulated) " : ""); 946 return; 947 } 948 949 xen_phys_cpus = kmem_alloc(xen_phys_ncpus * 950 sizeof (xen_mc_logical_cpu_t), KM_NOSLEEP); 951 952 if (xen_phys_cpus == NULL) { 953 cmn_err(CE_WARN, 954 "xen_get_mc_physinfo failure: can't allocate CPU array"); 955 return; 956 } 957 958 if (xen_get_mc_physcpuinfo(xen_phys_cpus, &xen_phys_ncpus) != 0) { 959 cmn_err(CE_WARN, "xen_get_mc_physinfo failure: no " 960 "physical CPU info"); 961 kmem_free(xen_phys_cpus, 962 xen_phys_ncpus * sizeof (xen_mc_logical_cpu_t)); 963 xen_phys_ncpus = 0; 964 xen_phys_cpus = NULL; 965 } 966 967 if (xen_physinfo_debug) { 968 xen_mc_logical_cpu_t *xcp; 969 unsigned i; 970 971 cmn_err(CE_NOTE, "xvm mca: %u physical cpus:\n", 972 xen_phys_ncpus); 973 for (i = 0; i < xen_phys_ncpus; i++) { 974 xcp = &xen_phys_cpus[i]; 975 cmn_err(CE_NOTE, "cpu%u: (%u, %u, %u) apid %u", 976 xcp->mc_cpunr, xcp->mc_chipid, xcp->mc_coreid, 977 xcp->mc_threadid, xcp->mc_apicid); 978 } 979 } 980 } 981 982 /* 983 * Miscellaneous hypercall wrappers with slightly more verbose diagnostics. 984 */ 985 986 void 987 xen_set_gdt(ulong_t *frame_list, int entries) 988 { 989 int err; 990 if ((err = HYPERVISOR_set_gdt(frame_list, entries)) != 0) { 991 /* 992 * X_EINVAL: reserved entry or bad frames 993 * X_EFAULT: bad address 994 */ 995 panic("xen_set_gdt(%p, %d): error %d", 996 (void *)frame_list, entries, -(int)err); 997 } 998 } 999 1000 void 1001 xen_set_ldt(user_desc_t *ldt, uint_t nsels) 1002 { 1003 struct mmuext_op op; 1004 long err; 1005 1006 op.cmd = MMUEXT_SET_LDT; 1007 op.arg1.linear_addr = (uintptr_t)ldt; 1008 op.arg2.nr_ents = nsels; 1009 1010 if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) != 0) { 1011 panic("xen_set_ldt(%p, %d): error %d", 1012 (void *)ldt, nsels, -(int)err); 1013 } 1014 } 1015 1016 void 1017 xen_stack_switch(ulong_t ss, ulong_t esp) 1018 { 1019 long err; 1020 1021 if ((err = HYPERVISOR_stack_switch(ss, esp)) != 0) { 1022 /* 1023 * X_EPERM: bad selector 1024 */ 1025 panic("xen_stack_switch(%lx, %lx): error %d", ss, esp, 1026 -(int)err); 1027 } 1028 } 1029 1030 long 1031 xen_set_trap_table(trap_info_t *table) 1032 { 1033 long err; 1034 1035 if ((err = HYPERVISOR_set_trap_table(table)) != 0) { 1036 /* 1037 * X_EFAULT: bad address 1038 * X_EPERM: bad selector 1039 */ 1040 panic("xen_set_trap_table(%p): error %d", (void *)table, 1041 -(int)err); 1042 } 1043 return (err); 1044 } 1045 1046 #if defined(__amd64) 1047 void 1048 xen_set_segment_base(int reg, ulong_t value) 1049 { 1050 long err; 1051 1052 if ((err = HYPERVISOR_set_segment_base(reg, value)) != 0) { 1053 /* 1054 * X_EFAULT: bad address 1055 * X_EINVAL: bad type 1056 */ 1057 panic("xen_set_segment_base(%d, %lx): error %d", 1058 reg, value, -(int)err); 1059 } 1060 } 1061 #endif /* __amd64 */ 1062 1063 /* 1064 * Translate a hypervisor errcode to a Solaris error code. 1065 */ 1066 int 1067 xen_xlate_errcode(int error) 1068 { 1069 switch (-error) { 1070 1071 /* 1072 * Translate hypervisor errno's into native errno's 1073 */ 1074 1075 #define CASE(num) case X_##num: error = num; break 1076 1077 CASE(EPERM); CASE(ENOENT); CASE(ESRCH); 1078 CASE(EINTR); CASE(EIO); CASE(ENXIO); 1079 CASE(E2BIG); CASE(ENOMEM); CASE(EACCES); 1080 CASE(EFAULT); CASE(EBUSY); CASE(EEXIST); 1081 CASE(ENODEV); CASE(EISDIR); CASE(EINVAL); 1082 CASE(ENOSPC); CASE(ESPIPE); CASE(EROFS); 1083 CASE(ENOSYS); CASE(ENOTEMPTY); CASE(EISCONN); 1084 CASE(ENODATA); CASE(EAGAIN); 1085 1086 #undef CASE 1087 1088 default: 1089 panic("xen_xlate_errcode: unknown error %d", error); 1090 } 1091 1092 return (error); 1093 } 1094 1095 /* 1096 * Raise PS_IOPL on current vcpu to user level. 1097 * Caller responsible for preventing kernel preemption. 1098 */ 1099 void 1100 xen_enable_user_iopl(void) 1101 { 1102 physdev_set_iopl_t set_iopl; 1103 set_iopl.iopl = 3; /* user ring 3 */ 1104 (void) HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); 1105 } 1106 1107 /* 1108 * Drop PS_IOPL on current vcpu to kernel level 1109 */ 1110 void 1111 xen_disable_user_iopl(void) 1112 { 1113 physdev_set_iopl_t set_iopl; 1114 set_iopl.iopl = 1; /* kernel pseudo ring 1 */ 1115 (void) HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); 1116 } 1117 1118 int 1119 xen_gdt_setprot(cpu_t *cp, uint_t prot) 1120 { 1121 int err; 1122 #if defined(__amd64) 1123 int pt_bits = PT_VALID; 1124 if (prot & PROT_WRITE) 1125 pt_bits |= PT_WRITABLE; 1126 #endif 1127 1128 if ((err = as_setprot(&kas, (caddr_t)cp->cpu_gdt, 1129 MMU_PAGESIZE, prot)) != 0) 1130 goto done; 1131 1132 #if defined(__amd64) 1133 err = xen_kpm_page(mmu_btop(cp->cpu_m.mcpu_gdtpa), pt_bits); 1134 #endif 1135 1136 done: 1137 if (err) { 1138 cmn_err(CE_WARN, "cpu%d: xen_gdt_setprot(%s) failed: error %d", 1139 cp->cpu_id, (prot & PROT_WRITE) ? "writable" : "read-only", 1140 err); 1141 } 1142 1143 return (err); 1144 } 1145 1146 int 1147 xen_ldt_setprot(user_desc_t *ldt, size_t lsize, uint_t prot) 1148 { 1149 int err; 1150 caddr_t lva = (caddr_t)ldt; 1151 #if defined(__amd64) 1152 int pt_bits = PT_VALID; 1153 pgcnt_t npgs; 1154 if (prot & PROT_WRITE) 1155 pt_bits |= PT_WRITABLE; 1156 #endif /* __amd64 */ 1157 1158 if ((err = as_setprot(&kas, (caddr_t)ldt, lsize, prot)) != 0) 1159 goto done; 1160 1161 #if defined(__amd64) 1162 1163 ASSERT(IS_P2ALIGNED(lsize, PAGESIZE)); 1164 npgs = mmu_btop(lsize); 1165 while (npgs--) { 1166 if ((err = xen_kpm_page(hat_getpfnum(kas.a_hat, lva), 1167 pt_bits)) != 0) 1168 break; 1169 lva += PAGESIZE; 1170 } 1171 #endif /* __amd64 */ 1172 1173 done: 1174 if (err) { 1175 cmn_err(CE_WARN, "xen_ldt_setprot(%p, %s) failed: error %d", 1176 (void *)lva, 1177 (prot & PROT_WRITE) ? "writable" : "read-only", err); 1178 } 1179 1180 return (err); 1181 } 1182 1183 int 1184 xen_get_mc_physcpuinfo(xen_mc_logical_cpu_t *log_cpus, uint_t *ncpus) 1185 { 1186 xen_mc_t xmc; 1187 struct xen_mc_physcpuinfo *cpi = &xmc.u.mc_physcpuinfo; 1188 1189 cpi->ncpus = *ncpus; 1190 /*LINTED: constant in conditional context*/ 1191 set_xen_guest_handle(cpi->info, log_cpus); 1192 1193 if (HYPERVISOR_mca(XEN_MC_physcpuinfo, &xmc) != 0) 1194 return (-1); 1195 1196 *ncpus = cpi->ncpus; 1197 return (0); 1198 } 1199 1200 void 1201 print_panic(const char *str) 1202 { 1203 xen_printf(str); 1204 } 1205 1206 /* 1207 * Interfaces to iterate over real cpu information, but only that info 1208 * which we choose to expose here. These are of interest to dom0 1209 * only (and the backing hypercall should not work for domu). 1210 */ 1211 1212 xen_mc_lcpu_cookie_t 1213 xen_physcpu_next(xen_mc_lcpu_cookie_t cookie) 1214 { 1215 xen_mc_logical_cpu_t *xcp = (xen_mc_logical_cpu_t *)cookie; 1216 1217 if (!DOMAIN_IS_INITDOMAIN(xen_info)) 1218 return (NULL); 1219 1220 if (cookie == NULL) 1221 return ((xen_mc_lcpu_cookie_t)xen_phys_cpus); 1222 1223 if (xcp == xen_phys_cpus + xen_phys_ncpus - 1) 1224 return (NULL); 1225 else 1226 return ((xen_mc_lcpu_cookie_t)++xcp); 1227 } 1228 1229 #define COOKIE2XCP(c) ((xen_mc_logical_cpu_t *)(c)) 1230 1231 const char * 1232 xen_physcpu_vendorstr(xen_mc_lcpu_cookie_t cookie) 1233 { 1234 xen_mc_logical_cpu_t *xcp = COOKIE2XCP(cookie); 1235 1236 return ((const char *)&xcp->mc_vendorid[0]); 1237 } 1238 1239 int 1240 xen_physcpu_family(xen_mc_lcpu_cookie_t cookie) 1241 { 1242 return (COOKIE2XCP(cookie)->mc_family); 1243 } 1244 1245 int 1246 xen_physcpu_model(xen_mc_lcpu_cookie_t cookie) 1247 { 1248 return (COOKIE2XCP(cookie)->mc_model); 1249 } 1250 1251 int 1252 xen_physcpu_stepping(xen_mc_lcpu_cookie_t cookie) 1253 { 1254 return (COOKIE2XCP(cookie)->mc_step); 1255 } 1256 1257 id_t 1258 xen_physcpu_chipid(xen_mc_lcpu_cookie_t cookie) 1259 { 1260 return (COOKIE2XCP(cookie)->mc_chipid); 1261 } 1262 1263 id_t 1264 xen_physcpu_coreid(xen_mc_lcpu_cookie_t cookie) 1265 { 1266 return (COOKIE2XCP(cookie)->mc_coreid); 1267 } 1268 1269 id_t 1270 xen_physcpu_strandid(xen_mc_lcpu_cookie_t cookie) 1271 { 1272 return (COOKIE2XCP(cookie)->mc_threadid); 1273 } 1274 1275 id_t 1276 xen_physcpu_initial_apicid(xen_mc_lcpu_cookie_t cookie) 1277 { 1278 return (COOKIE2XCP(cookie)->mc_clusterid); 1279 } 1280 1281 id_t 1282 xen_physcpu_logical_id(xen_mc_lcpu_cookie_t cookie) 1283 { 1284 return (COOKIE2XCP(cookie)->mc_cpunr); 1285 } 1286 1287 boolean_t 1288 xen_physcpu_is_cmt(xen_mc_lcpu_cookie_t cookie) 1289 { 1290 return (COOKIE2XCP(cookie)->mc_nthreads > 1); 1291 } 1292 1293 uint64_t 1294 xen_physcpu_mcg_cap(xen_mc_lcpu_cookie_t cookie) 1295 { 1296 xen_mc_logical_cpu_t *xcp = COOKIE2XCP(cookie); 1297 1298 /* 1299 * Need to #define the indices, or search through the array. 1300 */ 1301 return (xcp->mc_msrvalues[0].value); 1302 } 1303 1304 int 1305 xen_map_gref(uint_t cmd, gnttab_map_grant_ref_t *mapop, uint_t count, 1306 boolean_t uvaddr) 1307 { 1308 long rc; 1309 uint_t i; 1310 1311 ASSERT(cmd == GNTTABOP_map_grant_ref); 1312 1313 #if !defined(_BOOT) 1314 if (uvaddr == B_FALSE) { 1315 for (i = 0; i < count; ++i) { 1316 mapop[i].flags |= (PT_FOREIGN <<_GNTMAP_guest_avail0); 1317 } 1318 } 1319 #endif 1320 1321 rc = HYPERVISOR_grant_table_op(cmd, mapop, count); 1322 1323 return (rc); 1324 } 1325 1326 static int 1327 xpv_get_physinfo(xen_sysctl_physinfo_t *pi) 1328 { 1329 xen_sysctl_t op; 1330 struct sp { void *p; } *sp = (struct sp *)&op.u.physinfo.cpu_to_node; 1331 int ret; 1332 1333 bzero(&op, sizeof (op)); 1334 op.cmd = XEN_SYSCTL_physinfo; 1335 op.interface_version = XEN_SYSCTL_INTERFACE_VERSION; 1336 /*LINTED: constant in conditional context*/ 1337 set_xen_guest_handle(*sp, NULL); 1338 1339 ret = HYPERVISOR_sysctl(&op); 1340 1341 if (ret != 0) 1342 return (xen_xlate_errcode(ret)); 1343 1344 bcopy(&op.u.physinfo, pi, sizeof (op.u.physinfo)); 1345 return (0); 1346 } 1347 1348 /* 1349 * On dom0, we can determine the number of physical cpus on the machine. 1350 * This number is important when figuring out what workarounds are 1351 * appropriate, so compute it now. 1352 */ 1353 uint_t 1354 xpv_nr_phys_cpus(void) 1355 { 1356 static uint_t nphyscpus = 0; 1357 1358 ASSERT(DOMAIN_IS_INITDOMAIN(xen_info)); 1359 1360 if (nphyscpus == 0) { 1361 xen_sysctl_physinfo_t pi; 1362 int ret; 1363 1364 if ((ret = xpv_get_physinfo(&pi)) != 0) 1365 panic("xpv_get_physinfo() failed: %d\n", ret); 1366 nphyscpus = pi.nr_cpus; 1367 } 1368 return (nphyscpus); 1369 } 1370 1371 pgcnt_t 1372 xpv_nr_phys_pages(void) 1373 { 1374 xen_sysctl_physinfo_t pi; 1375 int ret; 1376 1377 ASSERT(DOMAIN_IS_INITDOMAIN(xen_info)); 1378 1379 if ((ret = xpv_get_physinfo(&pi)) != 0) 1380 panic("xpv_get_physinfo() failed: %d\n", ret); 1381 1382 return ((pgcnt_t)pi.total_pages); 1383 } 1384 1385 uint64_t 1386 xpv_cpu_khz(void) 1387 { 1388 xen_sysctl_physinfo_t pi; 1389 int ret; 1390 1391 ASSERT(DOMAIN_IS_INITDOMAIN(xen_info)); 1392 1393 if ((ret = xpv_get_physinfo(&pi)) != 0) 1394 panic("xpv_get_physinfo() failed: %d\n", ret); 1395 return ((uint64_t)pi.cpu_khz); 1396 } 1397