/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 *
 * xenbus_comms.c
 *
 * Low level code to talk to the Xen Store: ring buffer and event channel.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/types.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <sys/bootconf.h>
#include <vm/seg_kmem.h>
#ifdef XPV_HVM_DRIVER
#include <sys/pc_mmu.h>
#include <sys/xpv_support.h>
#include <sys/hypervisor.h>
#else
#include <vm/kboot_mmu.h>
#include <sys/bootinfo.h>
#include <sys/hypervisor.h>
#include <sys/evtchn_impl.h>
#endif
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/avintr.h>
#include <xen/sys/xenbus_comms.h>
#include <xen/public/io/xs_wire.h>

#ifndef XPV_HVM_DRIVER
static int xenbus_irq;
#endif
static ddi_umem_cookie_t xb_cookie;	/* cookie for xenbus comm page */
extern caddr_t xb_addr;			/* va of xenbus comm page */

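/*
 * The interrupt handler below broadcasts on this condition variable;
 * xb_write() and xb_read() wait on it when the shared ring is full or empty.
 */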
static kcondvar_t xb_wait_cv;
static kmutex_t xb_wait_lock;

#define	xs_domain_interface(ra) ((struct xenstore_domain_interface *)(ra))

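/*
 * Interrupt handler for the xenstore event channel: wake up any thread
 * blocked in xb_read() or xb_write() waiting for ring data or space.
 */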
static uint_t
xenbus_intr(caddr_t arg __unused, caddr_t arg1 __unused)
{
	mutex_enter(&xb_wait_lock);
	cv_broadcast(&xb_wait_cv);
	mutex_exit(&xb_wait_lock);
	return (DDI_INTR_CLAIMED);
}

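/*
 * Sanity check the ring indexes: the producer may be at most one full
 * ring ahead of the consumer.
 */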
static int
check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
{
	return ((prod - cons) <= XENSTORE_RING_SIZE);
}

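/*
 * Return a pointer into the request ring where the next chunk of output
 * may be written, and set *len to the number of contiguous bytes usable
 * there (bounded by the ring wrap point and the free space).
 */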
static void *
get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod,
    char *buf, uint32_t *len)
{
	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
	if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
		*len = XENSTORE_RING_SIZE - (prod - cons);
	return ((void *)(buf + MASK_XENSTORE_IDX(prod)));
}

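/*
 * Return a pointer into the response ring where the next chunk of input
 * starts, and set *len to the number of contiguous bytes available to
 * read (bounded by the ring wrap point and the unread data).
 */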
static const void *
get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod,
    const char *buf, uint32_t *len)
{
	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
	if ((prod - cons) < *len)
		*len = prod - cons;
	return ((void *)(buf + MASK_XENSTORE_IDX(cons)));
}


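/*
 * Write len bytes to the xenstore request ring, blocking (or spinning in
 * polled mode) while the ring is full, and notifying the other end via
 * the store event channel after each chunk is made visible.
 */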
int
xb_write(const void *data, unsigned len)
{
	volatile struct xenstore_domain_interface *intf =
	    xs_domain_interface(xb_addr);
	XENSTORE_RING_IDX cons, prod;
	extern int do_polled_io;

	while (len != 0) {
		void *dst;
		unsigned int avail;

		mutex_enter(&xb_wait_lock);
		while ((intf->req_prod - intf->req_cons) ==
		    XENSTORE_RING_SIZE) {
			if (interrupts_unleashed && !do_polled_io) {
				if (cv_wait_sig(&xb_wait_cv,
				    &xb_wait_lock) == 0) {
					mutex_exit(&xb_wait_lock);
					return (EINTR);
				}
			} else { /* polled mode needed for early probes */
				(void) HYPERVISOR_yield();
			}
		}
		mutex_exit(&xb_wait_lock);
		/* Read indexes, then verify. */
		cons = intf->req_cons;
		prod = intf->req_prod;
		membar_enter();
		if (!check_indexes(cons, prod))
			return (EIO);

		dst = get_output_chunk(cons, prod, (char *)intf->req, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		(void) memcpy(dst, data, avail);
		data = (void *)((uintptr_t)data + avail);
		len -= avail;

		/* Other side must not see new header until data is there. */
		membar_producer();
		intf->req_prod += avail;

		/* This implies mb() before other side sees interrupt. */
		ec_notify_via_evtchn(xen_info->store_evtchn);
	}

	return (0);
}

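/*
 * Read len bytes from the xenstore response ring, blocking (or spinning in
 * polled mode) while the ring is empty, and notifying the other end via
 * the store event channel after each chunk has been consumed.
 */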
int
xb_read(void *data, unsigned len)
{
	volatile struct xenstore_domain_interface *intf =
	    xs_domain_interface(xb_addr);
	XENSTORE_RING_IDX cons, prod;
	extern int do_polled_io;

	while (len != 0) {
		unsigned int avail;
		const char *src;

		mutex_enter(&xb_wait_lock);
		while (intf->rsp_cons == intf->rsp_prod) {
			if (interrupts_unleashed && !do_polled_io) {
				if (cv_wait_sig(&xb_wait_cv,
				    &xb_wait_lock) == 0) {
					mutex_exit(&xb_wait_lock);
					return (EINTR);
				}
			} else { /* polled mode needed for early probes */
				(void) HYPERVISOR_yield();
			}
		}
		mutex_exit(&xb_wait_lock);
		/* Read indexes, then verify. */
		cons = intf->rsp_cons;
		prod = intf->rsp_prod;
		membar_enter();
		if (!check_indexes(cons, prod))
			return (EIO);

		src = get_input_chunk(cons, prod, (char *)intf->rsp, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* We must read header before we read data. */
		membar_consumer();

		(void) memcpy(data, src, avail);
		data = (void *)((uintptr_t)data + avail);
		len -= avail;

		/* Other side must not see free space until we've copied out */
		membar_enter();
		intf->rsp_cons += avail;

		/* Implies mb(): they will see new header. */
		ec_notify_via_evtchn(xen_info->store_evtchn);
	}

	return (0);
}

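/*
 * Unbind the xenstore event channel interrupt, e.g. ahead of a domain
 * suspend; xb_setup_intr() establishes it again.
 */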
void
xb_suspend(void)
{
#ifdef XPV_HVM_DRIVER
	ec_unbind_evtchn(xen_info->store_evtchn);
#else
	rem_avintr(NULL, IPL_XENBUS, xenbus_intr, xenbus_irq);
#endif
}

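/*
 * Bind the xenstore event channel and hook up xenbus_intr() as its
 * interrupt handler.
 */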
void
xb_setup_intr(void)
{
#ifdef XPV_HVM_DRIVER
	ec_bind_evtchn_to_handler(xen_info->store_evtchn, IPL_XENBUS,
	    xenbus_intr, NULL);
#else
	xenbus_irq = ec_bind_evtchn_to_irq(xen_info->store_evtchn);
	if (xenbus_irq < 0) {
		cmn_err(CE_WARN, "Couldn't bind xenbus event channel");
		return;
	}
	if (!add_avintr(NULL, IPL_XENBUS, xenbus_intr, "xenbus",
	    xenbus_irq, NULL, NULL, NULL, NULL))
		cmn_err(CE_WARN, "XENBUS add intr failed\n");
#endif
}

/*
 * Set up our xenstore page and event channel. Domain 0 needs to allocate a
 * page and event channel; other domains use what we are told.
 */
void
xb_init(void)
{
	int err;

	if (DOMAIN_IS_INITDOMAIN(xen_info)) {

		if (xb_addr != NULL)
			return;

		xb_addr = ddi_umem_alloc(PAGESIZE, DDI_UMEM_SLEEP,
		    &xb_cookie);
		xen_info->store_mfn = pfn_to_mfn(hat_getpfnum(kas.a_hat,
		    xb_addr));

		err = xen_alloc_unbound_evtchn(0,
		    (int *)&xen_info->store_evtchn);
		ASSERT(err == 0);
	} else {
		/*
		 * This is harmless on first boot, but needed for resume and
		 * migrate. We use kbm_map_ma() as a shortcut instead of
		 * directly using HYPERVISOR_update_va_mapping().
		 */
		ASSERT(xb_addr != NULL);
		kbm_map_ma(mfn_to_ma(xen_info->store_mfn),
		    (uintptr_t)xb_addr, 0);
	}

	ASSERT(xen_info->store_evtchn);
}

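/*
 * Return the umem cookie for the xenstore page; only meaningful in the
 * initial domain, which allocated the page in xb_init().
 */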
void *
xb_xenstore_cookie(void)
{
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	return (xb_cookie);
}