/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Provides basic C wrappers around hypervisor invocation.
 *
 * i386:  eax = vector; ebx, ecx, edx, esi, edi = args 1-5
 *        eax = return value
 *        (argument registers may be clobbered on return)
 *
 * amd64: rax = vector; rdi, rsi, rdx, r10, r8, r9 = args 1-6
 *        rax = return value
 *        (argument registers not clobbered on return; rcx, r11 are)
 */

#include <sys/types.h>
#ifdef XPV_HVM_DRIVER
#include <sys/xpv_support.h>
#endif

#include <sys/hypervisor.h>
#include <xen/public/sched.h>
#include <sys/debug.h>
#include <sys/archsystm.h>

long
HYPERVISOR_set_trap_table(trap_info_t *table)
{
        return (__hypercall1(__HYPERVISOR_set_trap_table, (ulong_t)table));
}

int
HYPERVISOR_mmu_update(mmu_update_t *req, int count, int *success_count,
    domid_t domain_id)
{
        return (__hypercall4_int(__HYPERVISOR_mmu_update,
            (ulong_t)req, (long)count, (ulong_t)success_count,
            (ulong_t)domain_id));
}

long
HYPERVISOR_set_gdt(ulong_t *frame_list, int entries)
{
        return (__hypercall2(
            __HYPERVISOR_set_gdt, (ulong_t)frame_list, (long)entries));
}
/*
 * XXPV Seems like "sp" would be a better name for both amd64 and i386?
 * For now stay consistent with xen project source.
 */
long
HYPERVISOR_stack_switch(ulong_t ss, ulong_t esp)
{
        return (__hypercall2(__HYPERVISOR_stack_switch, ss, esp));
}

#if defined(__amd64)

long
HYPERVISOR_set_callbacks(ulong_t event_address, ulong_t failsafe_address,
    ulong_t syscall_address)
{
        return (__hypercall3(__HYPERVISOR_set_callbacks,
            event_address, failsafe_address, syscall_address));
}

#elif defined(__i386)

long
HYPERVISOR_set_callbacks(
    ulong_t event_selector, ulong_t event_address,
    ulong_t failsafe_selector, ulong_t failsafe_address)
{
        return (__hypercall4(__HYPERVISOR_set_callbacks,
            event_selector, event_address,
            failsafe_selector, failsafe_address));
}

#endif  /* __amd64 */

long
HYPERVISOR_fpu_taskswitch(int set)
{
        return (__hypercall1(__HYPERVISOR_fpu_taskswitch, (long)set));
}

/* *** __HYPERVISOR_sched_op_compat *** OBSOLETED */

long
HYPERVISOR_platform_op(xen_platform_op_t *platform_op)
{
        return (__hypercall1(__HYPERVISOR_platform_op, (ulong_t)platform_op));
}

/* *** __HYPERVISOR_set_debugreg *** NOT IMPLEMENTED */

/* *** __HYPERVISOR_get_debugreg *** NOT IMPLEMENTED */

long
HYPERVISOR_update_descriptor(maddr_t ma, uint64_t desc)
{
#if defined(__amd64)

        return (__hypercall2(__HYPERVISOR_update_descriptor, ma, desc));

#elif defined(__i386)

        return (__hypercall4(__HYPERVISOR_update_descriptor,
            (ulong_t)ma, (ulong_t)(ma >> 32),
            (ulong_t)desc, (ulong_t)(desc >> 32)));

#endif
}

long
HYPERVISOR_memory_op(int cmd, void *arg)
{
        return (__hypercall2(__HYPERVISOR_memory_op, (long)cmd,
            (ulong_t)arg));
}

long
HYPERVISOR_multicall(void *call_list, uint_t nr_calls)
{
        return (__hypercall2(__HYPERVISOR_multicall,
            (ulong_t)call_list, (ulong_t)nr_calls));
}
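
/*
 * Illustrative sketch, not part of the original source: one way a caller
 * could batch requests through HYPERVISOR_multicall() above.  The function
 * name and parameters are hypothetical, and the entry layout assumes the
 * amd64 argument order of HYPERVISOR_update_va_mapping() below
 * (va, new_pte, flags).  Each entry's result field is filled in by the
 * hypervisor when the multicall completes.
 */
long
example_batched_va_updates(ulong_t va1, uint64_t pte1, ulong_t va2,
    uint64_t pte2, ulong_t flags)
{
        multicall_entry_t mc[2];

        /* first update_va_mapping request */
        mc[0].op = __HYPERVISOR_update_va_mapping;
        mc[0].args[0] = va1;
        mc[0].args[1] = pte1;
        mc[0].args[2] = flags;

        /* second update_va_mapping request */
        mc[1].op = __HYPERVISOR_update_va_mapping;
        mc[1].args[0] = va2;
        mc[1].args[1] = pte2;
        mc[1].args[2] = flags;

        return (HYPERVISOR_multicall(mc, 2));
}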
int
HYPERVISOR_update_va_mapping(ulong_t va, uint64_t new_pte, ulong_t flags)
{
#if !defined(_BOOT)
        if (IN_XPV_PANIC())
                return (0);
#endif
#if defined(__amd64)

        return (__hypercall3_int(__HYPERVISOR_update_va_mapping, va,
            new_pte, flags));

#elif defined(__i386)

        return (__hypercall4_int(__HYPERVISOR_update_va_mapping, va,
            (ulong_t)new_pte, (ulong_t)(new_pte >> 32), flags));

#endif  /* __i386 */
}

/*
 * Note: this timeout must be the Xen system time, not hrtime (see
 * xpv_timestamp.c).
 */
long
HYPERVISOR_set_timer_op(uint64_t timeout)
{
#if defined(__amd64)

        return (__hypercall1(__HYPERVISOR_set_timer_op, timeout));

#elif defined(__i386)

        uint32_t timeout_hi = (uint32_t)(timeout >> 32);
        uint32_t timeout_lo = (uint32_t)timeout;

        return (__hypercall2(__HYPERVISOR_set_timer_op,
            (ulong_t)timeout_lo, (ulong_t)timeout_hi));

#endif  /* __i386 */
}

/* *** __HYPERVISOR_event_channel_op_compat *** OBSOLETED */

long
HYPERVISOR_xen_version(int cmd, void *arg)
{
        return (__hypercall2(__HYPERVISOR_xen_version, (long)cmd,
            (ulong_t)arg));
}

long
HYPERVISOR_console_io(int cmd, int count, char *str)
{
        return (__hypercall3(__HYPERVISOR_console_io, (long)cmd, (long)count,
            (ulong_t)str));
}

/* *** __HYPERVISOR_physdev_op_compat *** OBSOLETED */

long
HYPERVISOR_grant_table_op(uint_t cmd, void *uop, uint_t count)
{
        int ret_val;

        ret_val = __hypercall3(__HYPERVISOR_grant_table_op,
            (long)cmd, (ulong_t)uop, (ulong_t)count);

#if !defined(_BOOT) && !defined(XPV_HVM_DRIVER)
        /*
         * XXPV --
         * The map_grant_ref call suffers a poor design flaw.
         * It's the only hypervisor interface that creates page table
         * mappings yet doesn't take an entire PTE, so we can't create the
         * mapping with a particular setting of the software PTE bits,
         * NX, etc.
         *
         * Until the interface is fixed, we need to minimize the
         * possibility of dtrace or kmdb blowing up on a foreign mapping
         * that doesn't have a correct setting for the soft bits.  We'll
         * force them here.
         */
        if (ret_val == 0 && cmd == GNTTABOP_map_grant_ref) {
                extern void xen_fix_foreign(uint64_t);
                gnttab_map_grant_ref_t *mapops = (gnttab_map_grant_ref_t *)uop;
                uint_t i;

                for (i = 0; i < count; ++i) {
                        if (mapops[i].status == GNTST_okay)
                                xen_fix_foreign(mapops[i].host_addr);
                }
        }
#endif
        return (ret_val);
}
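
/*
 * Illustrative sketch, not part of the original source: issuing a single
 * GNTTABOP_map_grant_ref request through HYPERVISOR_grant_table_op() above.
 * The function name and parameters are hypothetical; the fields follow the
 * public Xen grant-table interface.  On return the caller should check
 * op->status for GNTST_okay before using the mapping.
 */
long
example_map_foreign_grant(domid_t dom, grant_ref_t ref, uint64_t host_addr,
    gnttab_map_grant_ref_t *op)
{
        op->host_addr = host_addr;      /* where to establish the mapping */
        op->flags = GNTMAP_host_map;    /* map into this domain's space */
        op->ref = ref;                  /* grant reference from domain 'dom' */
        op->dom = dom;                  /* granting domain */

        return (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op, 1));
}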
long
HYPERVISOR_vm_assist(uint_t cmd, uint_t type)
{
        return (__hypercall2(__HYPERVISOR_vm_assist,
            (ulong_t)cmd, (ulong_t)type));
}

int
HYPERVISOR_update_va_mapping_otherdomain(ulong_t va,
    uint64_t new_pte, ulong_t flags, domid_t domain_id)
{
#if defined(__amd64)

        return (__hypercall4_int(__HYPERVISOR_update_va_mapping_otherdomain,
            va, new_pte, flags, (ulong_t)domain_id));

#elif defined(__i386)

        return (__hypercall5_int(__HYPERVISOR_update_va_mapping_otherdomain,
            va, (ulong_t)new_pte, (ulong_t)(new_pte >> 32), flags,
            (ulong_t)domain_id));

#endif  /* __i386 */
}

/*
 * *** __HYPERVISOR_iret ***
 * see HYPERVISOR_IRET() macro in i86xpv/sys/machprivregs.h
 */

long
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
        return (__hypercall3(__HYPERVISOR_vcpu_op, (long)cmd, (long)vcpuid,
            (ulong_t)extra_args));
}

#if defined(__amd64)

long
HYPERVISOR_set_segment_base(int reg, ulong_t value)
{
        return (__hypercall2(__HYPERVISOR_set_segment_base, (long)reg, value));
}

#endif  /* __amd64 */

int
HYPERVISOR_mmuext_op(struct mmuext_op *req, int count, uint_t *success_count,
    domid_t domain_id)
{
        return (__hypercall4_int(__HYPERVISOR_mmuext_op,
            (ulong_t)req, (long)count, (ulong_t)success_count,
            (ulong_t)domain_id));
}

long
HYPERVISOR_acm_op(struct xen_acmctl *arg)
{
        return (__hypercall1(__HYPERVISOR_acm_op, (ulong_t)arg));
}

long
HYPERVISOR_nmi_op(int cmd, void *arg)
{
        return (__hypercall2(__HYPERVISOR_nmi_op, (long)cmd, (ulong_t)arg));
}

long
HYPERVISOR_sched_op(int cmd, void *arg)
{
        return (__hypercall2(__HYPERVISOR_sched_op,
            (ulong_t)cmd, (ulong_t)arg));
}

long
HYPERVISOR_callback_op(int cmd, void *arg)
{
        return (__hypercall2(__HYPERVISOR_callback_op,
            (ulong_t)cmd, (ulong_t)arg));
}

/* *** __HYPERVISOR_xenoprof_op *** NOT IMPLEMENTED */

long
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
        return (__hypercall2(__HYPERVISOR_event_channel_op, (long)cmd,
            (ulong_t)arg));
}

long
HYPERVISOR_physdev_op(int cmd, void *arg)
{
        return (__hypercall2(__HYPERVISOR_physdev_op, (long)cmd,
            (ulong_t)arg));
}

long
HYPERVISOR_hvm_op(int cmd, void *arg)
{
        return (__hypercall2(__HYPERVISOR_hvm_op, (long)cmd, (ulong_t)arg));
}

long
HYPERVISOR_sysctl(xen_sysctl_t *sysctl)
{
        return (__hypercall1(__HYPERVISOR_sysctl, (ulong_t)sysctl));
}

long
HYPERVISOR_domctl(xen_domctl_t *domctl)
{
        return (__hypercall1(__HYPERVISOR_domctl, (ulong_t)domctl));
}

/* *** __HYPERVISOR_kexec_op *** NOT IMPLEMENTED */

/*
 * HYPERCALL HELPER ROUTINES
 *    These don't have their own unique hypercalls.
 */

long
HYPERVISOR_yield(void)
{
        return (HYPERVISOR_sched_op(SCHEDOP_yield, NULL));
}

long
HYPERVISOR_block(void)
{
        return (HYPERVISOR_sched_op(SCHEDOP_block, NULL));
}

long
HYPERVISOR_shutdown(uint_t reason)
{
        struct sched_shutdown sched_shutdown;

        sched_shutdown.reason = reason;

        return (HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown));
}

/*
 * Poll one or more event-channel ports, and return when pending.
 * An optional timeout (in nanoseconds, absolute time since boot) may be
 * specified.  Note: this timeout must be the Xen system time, not hrtime
 * (see xpv_timestamp.c).
 */
long
HYPERVISOR_poll(evtchn_port_t *ports, uint_t nr_ports, uint64_t timeout)
{
        struct sched_poll sched_poll;

        /*LINTED: constant in conditional context*/
        set_xen_guest_handle(sched_poll.ports, ports);
        sched_poll.nr_ports = nr_ports;
        sched_poll.timeout = timeout;

        return (HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll));
}

long
HYPERVISOR_suspend(ulong_t start_info_mfn)
{
        struct sched_shutdown sched_shutdown;

        sched_shutdown.reason = SHUTDOWN_suspend;

        return (__hypercall3(__HYPERVISOR_sched_op, SCHEDOP_shutdown,
            (ulong_t)&sched_shutdown, start_info_mfn));
}

long
HYPERVISOR_mca(uint32_t cmd, xen_mc_arg_t *arg)
{
        xen_mc_t xmc;
        long rv;

        switch (cmd) {
        case XEN_MC_CMD_fetch:
        case XEN_MC_CMD_physcpuinfo:
        case XEN_MC_CMD_msrinject:
        case XEN_MC_CMD_mceinject:
        case XEN_MC_CMD_offlinecpu:
                if (arg == NULL)
                        return (EINVAL);
                break;

        case XEN_MC_CMD_notifydomain:
                return (ENOTSUP);

        default:
                return (EINVAL);
        }

        xmc.interface_version = XEN_MCA_INTERFACE_VERSION;
        xmc.cmd = cmd;
        if (arg != NULL)
                xmc.u = *arg;

        rv = __hypercall1(__HYPERVISOR_mca, (ulong_t)&xmc);

        if (rv == XEN_MC_HCALL_SUCCESS && arg != NULL)
                *arg = xmc.u;

        return (rv);
}
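
/*
 * Illustrative sketch, not part of the original source: waiting on a single
 * event-channel port via the HYPERVISOR_poll() helper above.  The function
 * name is hypothetical; a timeout of zero is assumed to mean "no timeout",
 * and any non-zero timeout must be expressed in Xen system time as noted
 * in the comment above HYPERVISOR_poll().
 */
long
example_wait_for_port(evtchn_port_t port)
{
        return (HYPERVISOR_poll(&port, 1, 0));
}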