/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 *
 * xenbus_comms.c
 *
 * Low level code to talk to Xen Store: ringbuffer and event channel.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <sys/bootconf.h>
#include <vm/seg_kmem.h>
#ifdef XPV_HVM_DRIVER
#include <sys/pc_mmu.h>
#include <sys/xpv_support.h>
#include <sys/hypervisor.h>
#else
#include <vm/kboot_mmu.h>
#include <sys/bootinfo.h>
#include <sys/hypervisor.h>
#include <sys/evtchn_impl.h>
#endif
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/avintr.h>
#include <xen/sys/xenbus_comms.h>
#include <xen/public/io/xs_wire.h>

#ifndef XPV_HVM_DRIVER
static int xenbus_irq;
#endif
static ddi_umem_cookie_t xb_cookie;	/* cookie for xenbus comm page */
extern caddr_t xb_addr;			/* va of xenbus comm page */

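/*
 * xenbus_intr() broadcasts xb_wait_cv to wake threads sleeping in
 * xb_read() and xb_write() below.
 */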
static kcondvar_t xb_wait_cv;
static kmutex_t xb_wait_lock;

#define	xs_domain_interface(ra) ((struct xenstore_domain_interface *)(ra))

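/*
 * Interrupt handler for the xenstore event channel: wake up any threads
 * waiting for ring space or ring data.
 */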
/*ARGSUSED*/
static uint_t
xenbus_intr(void *unused)
{
	mutex_enter(&xb_wait_lock);
	cv_broadcast(&xb_wait_cv);
	mutex_exit(&xb_wait_lock);
	return (DDI_INTR_CLAIMED);
}

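/*
 * Sanity check on a ring's indexes: the producer may lead the consumer
 * by at most one full ring.
 */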
static int
check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
{
	return ((prod - cons) <= XENSTORE_RING_SIZE);
}

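/*
 * Return a pointer to the next contiguous free region of the request ring
 * and, via *len, its size (bounded by the wrap point and the unconsumed
 * data already in the ring).
 */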
static void *
get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod,
    char *buf, uint32_t *len)
{
	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
	if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
		*len = XENSTORE_RING_SIZE - (prod - cons);
	return ((void *)(buf + MASK_XENSTORE_IDX(prod)));
}

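/*
 * Return a pointer to the next contiguous unread region of the response
 * ring and, via *len, its size (bounded by the wrap point and the amount
 * of data the peer has produced).
 */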
static const void *
get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod,
    const char *buf, uint32_t *len)
{
	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
	if ((prod - cons) < *len)
		*len = prod - cons;
	return ((void *)(buf + MASK_XENSTORE_IDX(cons)));
}

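/*
 * Copy len bytes from data onto the xenstore request ring, notifying the
 * peer after each chunk.  Sleeps (or spins when polled I/O is required)
 * while the ring is full.  Returns 0 on success, EINTR if interrupted by
 * a signal, or EIO if the ring indexes are inconsistent.
 */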
int
xb_write(const void *data, unsigned len)
{
	volatile struct xenstore_domain_interface *intf =
	    xs_domain_interface(xb_addr);
	XENSTORE_RING_IDX cons, prod;
	extern int do_polled_io;

	while (len != 0) {
		void *dst;
		unsigned int avail;

		mutex_enter(&xb_wait_lock);
		while ((intf->req_prod - intf->req_cons) ==
		    XENSTORE_RING_SIZE) {
			if (interrupts_unleashed && !do_polled_io) {
				if (cv_wait_sig(&xb_wait_cv,
				    &xb_wait_lock) == 0) {
					mutex_exit(&xb_wait_lock);
					return (EINTR);
				}
			} else { /* polled mode needed for early probes */
				(void) HYPERVISOR_yield();
			}
		}
		mutex_exit(&xb_wait_lock);
		/* Read indexes, then verify. */
		cons = intf->req_cons;
		prod = intf->req_prod;
		membar_enter();
		if (!check_indexes(cons, prod))
			return (EIO);

		dst = get_output_chunk(cons, prod, (char *)intf->req, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		(void) memcpy(dst, data, avail);
		data = (void *)((uintptr_t)data + avail);
		len -= avail;

		/* Other side must not see new header until data is there. */
		membar_producer();
		intf->req_prod += avail;

		/* This implies mb() before other side sees interrupt. */
		ec_notify_via_evtchn(xen_info->store_evtchn);
	}

	return (0);
}

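/*
 * Copy len bytes from the xenstore response ring into data, notifying the
 * peer as space is freed.  Sleeps (or spins when polled I/O is required)
 * while the ring is empty.  Returns 0 on success, EINTR if interrupted by
 * a signal, or EIO if the ring indexes are inconsistent.
 */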
int
xb_read(void *data, unsigned len)
{
	volatile struct xenstore_domain_interface *intf =
	    xs_domain_interface(xb_addr);
	XENSTORE_RING_IDX cons, prod;
	extern int do_polled_io;

	while (len != 0) {
		unsigned int avail;
		const char *src;

		mutex_enter(&xb_wait_lock);
		while (intf->rsp_cons == intf->rsp_prod) {
			if (interrupts_unleashed && !do_polled_io) {
				if (cv_wait_sig(&xb_wait_cv,
				    &xb_wait_lock) == 0) {
					mutex_exit(&xb_wait_lock);
					return (EINTR);
				}
			} else { /* polled mode needed for early probes */
				(void) HYPERVISOR_yield();
			}
		}
		mutex_exit(&xb_wait_lock);
		/* Read indexes, then verify. */
		cons = intf->rsp_cons;
		prod = intf->rsp_prod;
		membar_enter();
		if (!check_indexes(cons, prod))
			return (EIO);

		src = get_input_chunk(cons, prod, (char *)intf->rsp, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* We must read header before we read data. */
		membar_consumer();

		(void) memcpy(data, src, avail);
		data = (void *)((uintptr_t)data + avail);
		len -= avail;

		/* Other side must not see free space until we've copied out */
		membar_enter();
		intf->rsp_cons += avail;

		/* Implies mb(): they will see new header. */
		ec_notify_via_evtchn(xen_info->store_evtchn);
	}

	return (0);
}

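/*
 * Detach from the xenstore event channel (HVM) or remove the xenbus
 * interrupt handler (PV) in preparation for suspend.
 */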
void
xb_suspend(void)
{
#ifdef XPV_HVM_DRIVER
	ec_unbind_evtchn(xen_info->store_evtchn);
#else
	rem_avintr(NULL, IPL_XENBUS, (avfunc)xenbus_intr, xenbus_irq);
#endif
}

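/*
 * Bind the xenstore event channel to xenbus_intr() so ring activity wakes
 * blocked readers and writers.
 */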
void
xb_setup_intr(void)
{
#ifdef XPV_HVM_DRIVER
	ec_bind_evtchn_to_handler(xen_info->store_evtchn, IPL_XENBUS,
	    xenbus_intr, NULL);
#else
	xenbus_irq = ec_bind_evtchn_to_irq(xen_info->store_evtchn);
	if (xenbus_irq < 0) {
		cmn_err(CE_WARN, "Couldn't bind xenbus event channel");
		return;
	}
	if (!add_avintr(NULL, IPL_XENBUS, (avfunc)xenbus_intr, "xenbus",
	    xenbus_irq, NULL, NULL, NULL, NULL))
		cmn_err(CE_WARN, "XENBUS add intr failed\n");
#endif
}

/*
 * Set up our xenstore page and event channel. Domain 0 needs to allocate a
 * page and event channel; other domains use what we are told.
 */
void
xb_init(void)
{
	int err;

	if (DOMAIN_IS_INITDOMAIN(xen_info)) {

		if (xb_addr != NULL)
			return;

		xb_addr = ddi_umem_alloc(PAGESIZE, DDI_UMEM_SLEEP,
		    &xb_cookie);
		xen_info->store_mfn = pfn_to_mfn(hat_getpfnum(kas.a_hat,
		    xb_addr));

		err = xen_alloc_unbound_evtchn(0,
		    (int *)&xen_info->store_evtchn);
		ASSERT(err == 0);
	} else {
		/*
		 * This is harmless on first boot, but needed for resume and
		 * migrate. We use kbm_map_ma() as a shortcut instead of
		 * directly using HYPERVISOR_update_va_mapping().
		 */
		ASSERT(xb_addr != NULL);
		kbm_map_ma(mfn_to_ma(xen_info->store_mfn),
		    (uintptr_t)xb_addr, 0);
	}

	ASSERT(xen_info->store_evtchn);
}

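/*
 * Return the umem cookie for the xenstore comm page; valid only in the
 * initial domain, which allocated the page in xb_init().
 */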
void *
xb_xenstore_cookie(void)
{
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	return (xb_cookie);
}