/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Provides basic C wrappers around hypervisor invocation.
 *
 * i386:  eax = vector: ebx, ecx, edx, esi, edi = args 1-5
 *	  eax = return value
 *	  (argument registers may be clobbered on return)
 *
 * amd64: rax = vector: rdi, rsi, rdx, r10, r8, r9 = args 1-6
 *	  rax = return value
 *	  (argument registers not clobbered on return; rcx, r11 are)
 */
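
/*
 * Illustrative example (not an additional interface): given the register
 * convention above, an amd64 two-argument call such as
 * __hypercall2(__HYPERVISOR_sched_op, cmd, arg) is issued with the
 * hypercall number in %rax and the two arguments in %rdi and %rsi, and
 * the hypervisor's return value comes back in %rax.
 */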

#include <sys/types.h>
#ifndef __xpv
#include <sys/xpv_support.h>
#else
#include <sys/xpv_user.h>
#endif

#include <sys/hypervisor.h>
#include <xen/public/sched.h>
#include <sys/debug.h>
#include <sys/archsystm.h>

long
HYPERVISOR_set_trap_table(trap_info_t *table)
{
	return (__hypercall1(__HYPERVISOR_set_trap_table, (ulong_t)table));
}

int
HYPERVISOR_mmu_update(mmu_update_t *req, int count, int *success_count,
    domid_t domain_id)
{
	return (__hypercall4_int(__HYPERVISOR_mmu_update,
	    (ulong_t)req, (long)count, (ulong_t)success_count,
	    (ulong_t)domain_id));
}

long
HYPERVISOR_set_gdt(ulong_t *frame_list, int entries)
{
	return (__hypercall2(
	    __HYPERVISOR_set_gdt, (ulong_t)frame_list, (long)entries));
}

/*
 * XXPV Seems like "sp" would be a better name for both amd64 and i386?
 * For now stay consistent with xen project source.
 */
long
HYPERVISOR_stack_switch(ulong_t ss, ulong_t esp)
{
	return (__hypercall2(__HYPERVISOR_stack_switch, ss, esp));
}

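/*
 * Note on the two variants below: on amd64 the hypervisor callbacks are
 * registered by address alone, while on i386 each callback address is
 * accompanied by a code segment selector.
 */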
#if defined(__amd64)

long
HYPERVISOR_set_callbacks(ulong_t event_address, ulong_t failsafe_address,
    ulong_t syscall_address)
{
	return (__hypercall3(__HYPERVISOR_set_callbacks,
	    event_address, failsafe_address, syscall_address));
}

#elif defined(__i386)

long
HYPERVISOR_set_callbacks(
    ulong_t event_selector, ulong_t event_address,
    ulong_t failsafe_selector, ulong_t failsafe_address)
{
	return (__hypercall4(__HYPERVISOR_set_callbacks,
	    event_selector, event_address,
	    failsafe_selector, failsafe_address));
}

#endif	/* __amd64 */

long
HYPERVISOR_fpu_taskswitch(int set)
{
	return (__hypercall1(__HYPERVISOR_fpu_taskswitch, (long)set));
}

/* *** __HYPERVISOR_sched_op_compat *** OBSOLETED */

long
HYPERVISOR_platform_op(xen_platform_op_t *platform_op)
{
	return (__hypercall1(__HYPERVISOR_platform_op, (ulong_t)platform_op));
}

/* *** __HYPERVISOR_set_debugreg *** NOT IMPLEMENTED */

/* *** __HYPERVISOR_get_debugreg *** NOT IMPLEMENTED */

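/*
 * On i386 a 64-bit hypercall argument (machine address, PTE, timeout)
 * does not fit in a single 32-bit register, so the wrappers below pass
 * such values as separate low/high halves; on amd64 they are passed whole.
 */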
long
HYPERVISOR_update_descriptor(maddr_t ma, uint64_t desc)
{
#if defined(__amd64)

	return (__hypercall2(__HYPERVISOR_update_descriptor, ma, desc));

#elif defined(__i386)

	return (__hypercall4(__HYPERVISOR_update_descriptor,
	    (ulong_t)ma, (ulong_t)(ma >> 32),
	    (ulong_t)desc, (ulong_t)(desc >> 32)));

#endif
}

long
HYPERVISOR_memory_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_memory_op, (long)cmd,
	    (ulong_t)arg));
}

long
HYPERVISOR_multicall(void *call_list, uint_t nr_calls)
{
	return (__hypercall2(__HYPERVISOR_multicall,
	    (ulong_t)call_list, (ulong_t)nr_calls));
}

int
HYPERVISOR_update_va_mapping(ulong_t va, uint64_t new_pte, ulong_t flags)
{
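	/*
	 * When the kernel is running in the xpv panic environment
	 * (IN_XPV_PANIC()) the update is skipped and success is reported;
	 * the check is compiled out of boot code (_BOOT).
	 */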
#if !defined(_BOOT)
	if (IN_XPV_PANIC())
		return (0);
#endif
#if defined(__amd64)

	return (__hypercall3_int(__HYPERVISOR_update_va_mapping, va,
	    new_pte, flags));

#elif defined(__i386)

	return (__hypercall4_int(__HYPERVISOR_update_va_mapping, va,
	    (ulong_t)new_pte, (ulong_t)(new_pte >> 32), flags));

#endif	/* __i386 */
}

/*
 * Note: this timeout must be the Xen system time not hrtime (see
 * xpv_timestamp.c).
 */
long
HYPERVISOR_set_timer_op(uint64_t timeout)
{
#if defined(__amd64)

	return (__hypercall1(__HYPERVISOR_set_timer_op, timeout));

#elif defined(__i386)

	uint32_t timeout_hi = (uint32_t)(timeout >> 32);
	uint32_t timeout_lo = (uint32_t)timeout;
	return (__hypercall2(__HYPERVISOR_set_timer_op,
	    (ulong_t)timeout_lo, (ulong_t)timeout_hi));

#endif	/* __i386 */
}

/* *** __HYPERVISOR_event_channel_op_compat *** OBSOLETED */

long
HYPERVISOR_xen_version(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_xen_version, (long)cmd,
	    (ulong_t)arg));
}

long
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	return (__hypercall3(__HYPERVISOR_console_io, (long)cmd, (long)count,
	    (ulong_t)str));
}

/* *** __HYPERVISOR_physdev_op_compat *** OBSOLETED */

/*
 * ****
 * NOTE: this hypercall should not be called directly for a
 * GNTTABOP_map_grant_ref. Instead xen_map_gref() should be called.
 * ****
 */
long
HYPERVISOR_grant_table_op(uint_t cmd, void *uop, uint_t count)
{
	int ret_val;
	ret_val = __hypercall3(__HYPERVISOR_grant_table_op,
	    (long)cmd, (ulong_t)uop, (ulong_t)count);
	return (ret_val);
}

long
HYPERVISOR_vm_assist(uint_t cmd, uint_t type)
{
	return (__hypercall2(__HYPERVISOR_vm_assist,
	    (ulong_t)cmd, (ulong_t)type));
}

int
HYPERVISOR_update_va_mapping_otherdomain(ulong_t va,
    uint64_t new_pte, ulong_t flags, domid_t domain_id)
{
#if defined(__amd64)

	return (__hypercall4_int(__HYPERVISOR_update_va_mapping_otherdomain,
	    va, new_pte, flags, (ulong_t)domain_id));

#elif defined(__i386)

	return (__hypercall5_int(__HYPERVISOR_update_va_mapping_otherdomain,
	    va, (ulong_t)new_pte, (ulong_t)(new_pte >> 32), flags,
	    (ulong_t)domain_id));

#endif	/* __i386 */
}

/*
 * *** __HYPERVISOR_iret ***
 *   see HYPERVISOR_IRET() macro in i86xpv/sys/machprivregs.h
 */

long
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
	return (__hypercall3(__HYPERVISOR_vcpu_op, (long)cmd, (long)vcpuid,
	    (ulong_t)extra_args));
}

#if defined(__amd64)

long
HYPERVISOR_set_segment_base(int reg, ulong_t value)
{
	return (__hypercall2(__HYPERVISOR_set_segment_base, (long)reg, value));
}

#endif	/* __amd64 */

int
HYPERVISOR_mmuext_op(struct mmuext_op *req, int count, uint_t *success_count,
    domid_t domain_id)
{
	return (__hypercall4_int(__HYPERVISOR_mmuext_op,
	    (ulong_t)req, (long)count, (ulong_t)success_count,
	    (ulong_t)domain_id));
}

long
HYPERVISOR_nmi_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_nmi_op, (long)cmd, (ulong_t)arg));
}

long
HYPERVISOR_sched_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_sched_op,
	    (ulong_t)cmd, (ulong_t)arg));
}

long
HYPERVISOR_callback_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_callback_op,
	    (ulong_t)cmd, (ulong_t)arg));
}

/* *** __HYPERVISOR_xenoprof_op *** NOT IMPLEMENTED */

long
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_event_channel_op, (long)cmd,
	    (ulong_t)arg));
}

long
HYPERVISOR_physdev_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_physdev_op, (long)cmd,
	    (ulong_t)arg));
}

long
HYPERVISOR_hvm_op(int cmd, void *arg)
{
	return (__hypercall2(__HYPERVISOR_hvm_op, (long)cmd, (ulong_t)arg));
}

#if defined(__xpv)
long
HYPERVISOR_xsm_op(struct xen_acmctl *arg)
{
	return (__hypercall1(__HYPERVISOR_xsm_op, (ulong_t)arg));
}

long
HYPERVISOR_sysctl(xen_sysctl_t *sysctl)
{
	return (__hypercall1(__HYPERVISOR_sysctl, (ulong_t)sysctl));
}

long
HYPERVISOR_domctl(xen_domctl_t *domctl)
{
	return (__hypercall1(__HYPERVISOR_domctl, (ulong_t)domctl));
}
#endif /* __xpv */

/* *** __HYPERVISOR_kexec_op *** NOT IMPLEMENTED */

/*
 *
 * HYPERCALL HELPER ROUTINES
 *    These don't have their own unique hypercalls.
 *
 */

long
HYPERVISOR_yield(void)
{
	return (HYPERVISOR_sched_op(SCHEDOP_yield, NULL));
}

long
HYPERVISOR_block(void)
{
	return (HYPERVISOR_sched_op(SCHEDOP_block, NULL));
}

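/*
 * Ask the hypervisor to shut this domain down; "reason" is one of the
 * SHUTDOWN_* codes from xen/public/sched.h (e.g. SHUTDOWN_poweroff or
 * SHUTDOWN_reboot).
 */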
long
HYPERVISOR_shutdown(uint_t reason)
{
	struct sched_shutdown sched_shutdown;

	sched_shutdown.reason = reason;

	return (HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown));
}

/*
 * Poll one or more event-channel ports, and return when pending.
 * An optional timeout (in nanoseconds, absolute time since boot) may be
 * specified. Note: this timeout must be the Xen system time not hrtime (see
 * xpv_timestamp.c).
 */
long
HYPERVISOR_poll(evtchn_port_t *ports, uint_t nr_ports, uint64_t timeout)
{
	struct sched_poll sched_poll;

	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(sched_poll.ports, ports);
	sched_poll.nr_ports = nr_ports;
	sched_poll.timeout = timeout;

	return (HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll));
}

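/*
 * Suspend is a SCHEDOP_shutdown with an extra argument: the MFN of the
 * start_info page is passed directly as a third hypercall argument, so
 * __hypercall3() is used here rather than HYPERVISOR_sched_op().
 */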
long
HYPERVISOR_suspend(ulong_t start_info_mfn)
{
	struct sched_shutdown sched_shutdown;

	sched_shutdown.reason = SHUTDOWN_suspend;

	return (__hypercall3(__HYPERVISOR_sched_op, SCHEDOP_shutdown,
	    (ulong_t)&sched_shutdown, start_info_mfn));
}

long
HYPERVISOR_mca(uint32_t cmd, xen_mc_t *xmcp)
{
	long rv;

	switch (cmd) {
	case XEN_MC_fetch:
	case XEN_MC_physcpuinfo:
	case XEN_MC_msrinject:
	case XEN_MC_mceinject:
		break;

	case XEN_MC_notifydomain:
		return (ENOTSUP);

	default:
		return (EINVAL);
	}

	xmcp->interface_version = XEN_MCA_INTERFACE_VERSION;
	xmcp->cmd = cmd;

	rv = __hypercall1(__HYPERVISOR_mca, (ulong_t)xmcp);

	return (rv);
}
434