xref: /illumos-gate/usr/src/uts/common/xen/os/hypercall.c (revision 257873cfc1dd3337766407f80397db60a56f2f5a)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Provides basic C wrappers around hypervisor invocation.
 *
 * i386:  eax = vector; ebx, ecx, edx, esi, edi = args 1-5
 *	  eax = return value
 *	  (argument registers may be clobbered on return)
 *
 * amd64: rax = vector; rdi, rsi, rdx, r10, r8, r9 = args 1-6
 *	  rax = return value
 *	  (argument registers are not clobbered on return; rcx and r11 are)
 *
 * An illustrative, non-authoritative sketch of the amd64 convention
 * follows the includes below.
 */

#include <sys/types.h>
#ifdef XPV_HVM_DRIVER
#include <sys/xpv_support.h>
#endif

#include <sys/hypervisor.h>
#include <xen/public/sched.h>
#include <sys/debug.h>
#include <sys/archsystm.h>

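/*
 * Illustrative sketch only, not part of the original implementation:
 * roughly how a two-argument hypercall lines up with the amd64
 * convention described in the block comment above.  The real
 * __hypercall* routines live in assembly elsewhere in the tree;
 * "hypercall_pg" is an assumed stand-in for the hypercall entry page,
 * and the 32-byte stride per entry is an assumption of this sketch.
 */
#if 0	/* example only; never compiled */
extern char hypercall_pg[];

static inline long
__hypercall2_sketch(ulong_t vector, ulong_t arg1, ulong_t arg2)
{
	long rv;

	__asm__ __volatile__(
	    "call *%[entry]"
	    : "=a" (rv), "+D" (arg1), "+S" (arg2)	/* rax; rdi, rsi */
	    : [entry] "r" (&hypercall_pg[vector * 32])
	    : "memory", "rcx", "r11");			/* rcx, r11 clobbered */
	return (rv);
}
#endif
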
long
HYPERVISOR_set_trap_table(trap_info_t *table)
{
	return (__hypercall1(__HYPERVISOR_set_trap_table, (ulong_t)table));
}

int
HYPERVISOR_mmu_update(mmu_update_t *req, int count, int *success_count,
    domid_t domain_id)
{
	return (__hypercall4_int(__HYPERVISOR_mmu_update,
	    (ulong_t)req, (long)count, (ulong_t)success_count,
	    (ulong_t)domain_id));
}

long
HYPERVISOR_set_gdt(ulong_t *frame_list, int entries)
{
	return (__hypercall2(
	    __HYPERVISOR_set_gdt, (ulong_t)frame_list, (long)entries));
}

/*
 * XXPV Seems like "sp" would be a better name for both amd64 and i386?
 * For now, stay consistent with the Xen project source.
 */
long
HYPERVISOR_stack_switch(ulong_t ss, ulong_t esp)
{
	return (__hypercall2(__HYPERVISOR_stack_switch, ss, esp));
}

#if defined(__amd64)

long
HYPERVISOR_set_callbacks(ulong_t event_address, ulong_t failsafe_address,
    ulong_t syscall_address)
{
	return (__hypercall3(__HYPERVISOR_set_callbacks,
	    event_address, failsafe_address, syscall_address));
}

#elif defined(__i386)

long
HYPERVISOR_set_callbacks(
    ulong_t event_selector, ulong_t event_address,
    ulong_t failsafe_selector, ulong_t failsafe_address)
{
	return (__hypercall4(__HYPERVISOR_set_callbacks,
	    event_selector, event_address,
	    failsafe_selector, failsafe_address));
}

#endif	/* __amd64 */

long
HYPERVISOR_fpu_taskswitch(int set)
{
	return (__hypercall1(__HYPERVISOR_fpu_taskswitch, (long)set));
}

/* *** __HYPERVISOR_sched_op_compat *** OBSOLETED */

long
HYPERVISOR_platform_op(xen_platform_op_t *platform_op)
{
	return (__hypercall1(__HYPERVISOR_platform_op, (ulong_t)platform_op));
}

/* *** __HYPERVISOR_set_debugreg *** NOT IMPLEMENTED */

/* *** __HYPERVISOR_get_debugreg *** NOT IMPLEMENTED */

long
HYPERVISOR_update_descriptor(maddr_t ma, uint64_t desc)
{
#if defined(__amd64)

	return (__hypercall2(__HYPERVISOR_update_descriptor, ma, desc));

#elif defined(__i386)

	return (__hypercall4(__HYPERVISOR_update_descriptor,
	    (ulong_t)ma, (ulong_t)(ma >> 32),
	    (ulong_t)desc, (ulong_t)(desc >> 32)));

#endif
}

long
HYPERVISOR_memory_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_memory_op, (long)cmd,
	    (ulong_t)arg));
}

long
HYPERVISOR_multicall(void *call_list, uint_t nr_calls)
{
	return (__hypercall2(__HYPERVISOR_multicall,
	    (ulong_t)call_list, (ulong_t)nr_calls));
}

int
HYPERVISOR_update_va_mapping(ulong_t va, uint64_t new_pte, ulong_t flags)
{
#if !defined(_BOOT)
	if (IN_XPV_PANIC())
		return (0);
#endif
#if defined(__amd64)

	return (__hypercall3_int(__HYPERVISOR_update_va_mapping, va,
	    new_pte, flags));

#elif defined(__i386)

	return (__hypercall4_int(__HYPERVISOR_update_va_mapping, va,
	    (ulong_t)new_pte, (ulong_t)(new_pte >> 32), flags));

#endif	/* __i386 */
}

/*
 * Note: this timeout must be expressed in Xen system time, not hrtime (see
 * xpv_timestamp.c).
 */
long
HYPERVISOR_set_timer_op(uint64_t timeout)
{
#if defined(__amd64)

	return (__hypercall1(__HYPERVISOR_set_timer_op, timeout));

#elif defined(__i386)

	uint32_t timeout_hi = (uint32_t)(timeout >> 32);
	uint32_t timeout_lo = (uint32_t)timeout;
	return (__hypercall2(__HYPERVISOR_set_timer_op,
	    (ulong_t)timeout_lo, (ulong_t)timeout_hi));

#endif	/* __i386 */
}
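
/*
 * Minimal usage sketch, not part of the original file: request a timer
 * event at an absolute deadline.  The caller must supply the deadline in
 * Xen system time (see the note above); passing an hrtime_t value here
 * would be a bug.
 */
#if 0	/* example only; never compiled */
static long
example_arm_timer(uint64_t xen_deadline_ns)
{
	return (HYPERVISOR_set_timer_op(xen_deadline_ns));
}
#endif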

/* *** __HYPERVISOR_event_channel_op_compat *** OBSOLETED */

long
HYPERVISOR_xen_version(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_xen_version, (long)cmd,
	    (ulong_t)arg));
}

long
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	return (__hypercall3(__HYPERVISOR_console_io, (long)cmd, (long)count,
	    (ulong_t)str));
}

/* *** __HYPERVISOR_physdev_op_compat *** OBSOLETED */

/*
 * ****
 * NOTE: this hypercall should not be called directly for
 * GNTTABOP_map_grant_ref. Instead, xen_map_gref() should be called.
 * ****
 */
long
HYPERVISOR_grant_table_op(uint_t cmd, void *uop, uint_t count)
{
	int ret_val;
	ret_val = __hypercall3(__HYPERVISOR_grant_table_op,
	    (long)cmd, (ulong_t)uop, (ulong_t)count);
	return (ret_val);
}
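
/*
 * Minimal usage sketch, not part of the original file: a direct call is
 * fine for operations other than GNTTABOP_map_grant_ref, e.g. asking how
 * many grant-table frames our own domain currently has.  Assumes the
 * GNTTABOP_query_size definitions from xen/public/grant_table.h are
 * visible here.
 */
#if 0	/* example only; never compiled */
static void
example_grant_table_size(void)
{
	gnttab_query_size_t qs;

	qs.dom = DOMID_SELF;
	if (HYPERVISOR_grant_table_op(GNTTABOP_query_size, &qs, 1) == 0 &&
	    qs.status == GNTST_okay) {
		cmn_err(CE_CONT, "grant table: %u of %u frames in use\n",
		    qs.nr_frames, qs.max_nr_frames);
	}
}
#endif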

long
HYPERVISOR_vm_assist(uint_t cmd, uint_t type)
{
	return (__hypercall2(__HYPERVISOR_vm_assist,
	    (ulong_t)cmd, (ulong_t)type));
}

int
HYPERVISOR_update_va_mapping_otherdomain(ulong_t va,
    uint64_t new_pte, ulong_t flags, domid_t domain_id)
{
#if defined(__amd64)

	return (__hypercall4_int(__HYPERVISOR_update_va_mapping_otherdomain,
	    va, new_pte, flags, (ulong_t)domain_id));

#elif defined(__i386)

	return (__hypercall5_int(__HYPERVISOR_update_va_mapping_otherdomain,
	    va, (ulong_t)new_pte, (ulong_t)(new_pte >> 32), flags,
	    (ulong_t)domain_id));

#endif	/* __i386 */
}

/*
 * *** __HYPERVISOR_iret ***
 *   see HYPERVISOR_IRET() macro in i86xpv/sys/machprivregs.h
 */

long
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
	return (__hypercall3(__HYPERVISOR_vcpu_op, (long)cmd, (long)vcpuid,
	    (ulong_t)extra_args));
}

#if defined(__amd64)

long
HYPERVISOR_set_segment_base(int reg, ulong_t value)
{
	return (__hypercall2(__HYPERVISOR_set_segment_base, (long)reg, value));
}

#endif	/* __amd64 */

int
HYPERVISOR_mmuext_op(struct mmuext_op *req, int count, uint_t *success_count,
    domid_t domain_id)
{
	return (__hypercall4_int(__HYPERVISOR_mmuext_op,
	    (ulong_t)req, (long)count, (ulong_t)success_count,
	    (ulong_t)domain_id));
}

long
HYPERVISOR_acm_op(struct xen_acmctl *arg)
{
	return (__hypercall1(__HYPERVISOR_acm_op, (ulong_t)arg));
}

long
HYPERVISOR_nmi_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_nmi_op, (long)cmd, (ulong_t)arg));
}

long
HYPERVISOR_sched_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_sched_op,
	    (ulong_t)cmd, (ulong_t)arg));
}

long
HYPERVISOR_callback_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_callback_op,
	    (ulong_t)cmd, (ulong_t)arg));
}

/* *** __HYPERVISOR_xenoprof_op *** NOT IMPLEMENTED */

long
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_event_channel_op, (long)cmd,
	    (ulong_t)arg));
}

long
HYPERVISOR_physdev_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_physdev_op, (long)cmd,
	    (ulong_t)arg));
}

long
HYPERVISOR_hvm_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_hvm_op, (long)cmd, (ulong_t)arg));
}

long
HYPERVISOR_sysctl(xen_sysctl_t *sysctl)
{
	return (__hypercall1(__HYPERVISOR_sysctl, (ulong_t)sysctl));
}

long
HYPERVISOR_domctl(xen_domctl_t *domctl)
{
	return (__hypercall1(__HYPERVISOR_domctl, (ulong_t)domctl));
}

/* *** __HYPERVISOR_kexec_op *** NOT IMPLEMENTED */

/*
 * HYPERCALL HELPER ROUTINES
 *    These don't have their own unique hypercalls.
 */

long
HYPERVISOR_yield(void)
{
	return (HYPERVISOR_sched_op(SCHEDOP_yield, NULL));
}

long
HYPERVISOR_block(void)
{
	return (HYPERVISOR_sched_op(SCHEDOP_block, NULL));
}

long
HYPERVISOR_shutdown(uint_t reason)
{
	struct sched_shutdown sched_shutdown;

	sched_shutdown.reason = reason;

	return (HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown));
}

/*
 * Poll one or more event-channel ports, and return when one is pending.
 * An optional timeout (in nanoseconds, absolute time since boot) may be
 * specified. Note: this timeout must be expressed in Xen system time, not
 * hrtime (see xpv_timestamp.c).
 */
long
HYPERVISOR_poll(evtchn_port_t *ports, uint_t nr_ports, uint64_t timeout)
{
	struct sched_poll sched_poll;

	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(sched_poll.ports, ports);
	sched_poll.nr_ports = nr_ports;
	sched_poll.timeout = timeout;

	return (HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll));
}
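
/*
 * Minimal usage sketch, not part of the original file: wait for a single
 * event-channel port to become pending, giving up at "deadline", which
 * the caller must express in Xen system time (see the note above).
 */
#if 0	/* example only; never compiled */
static long
example_poll_one_port(evtchn_port_t port, uint64_t deadline)
{
	return (HYPERVISOR_poll(&port, 1, deadline));
}
#endif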

/*
 * Suspend this domain.  The shutdown reason is SHUTDOWN_suspend, and the
 * MFN of the start_info/suspend record is passed as an extra argument,
 * which is why this uses __hypercall3() directly rather than the
 * two-argument HYPERVISOR_sched_op() wrapper above.
 */
long
HYPERVISOR_suspend(ulong_t start_info_mfn)
{
	struct sched_shutdown sched_shutdown;

	sched_shutdown.reason = SHUTDOWN_suspend;

	return (__hypercall3(__HYPERVISOR_sched_op, SCHEDOP_shutdown,
	    (ulong_t)&sched_shutdown, start_info_mfn));
}

long
HYPERVISOR_mca(uint32_t cmd, xen_mc_arg_t *arg)
{
	xen_mc_t xmc;
	long rv;

	/*
	 * Commands that carry an argument payload require a non-NULL arg;
	 * XEN_MC_CMD_notifydomain is not supported by this wrapper.
	 */
	switch (cmd) {
	case XEN_MC_CMD_fetch:
	case XEN_MC_CMD_physcpuinfo:
	case XEN_MC_CMD_msrinject:
	case XEN_MC_CMD_mceinject:
	case XEN_MC_CMD_offlinecpu:
		if (arg == NULL)
			return (EINVAL);
		break;

	case XEN_MC_CMD_notifydomain:
		return (ENOTSUP);

	default:
		return (EINVAL);
	}

	xmc.interface_version = XEN_MCA_INTERFACE_VERSION;
	xmc.cmd = cmd;
	if (arg != NULL)
		xmc.u = *arg;

	rv = __hypercall1(__HYPERVISOR_mca, (ulong_t)&xmc);

	if (rv == XEN_MC_HCALL_SUCCESS && arg != NULL)
		*arg = xmc.u;

	return (rv);
}
439