xref: /linux/drivers/xen/xenbus/xenbus_client.c (revision f01387d2693813eb5271a3448e6a082322c7d75d)
/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus_probe.h"

struct xenbus_map_node {
	struct list_head next;
	union {
		struct vm_struct *area; /* PV */
		struct page *page;      /* HVM */
	};
	grant_handle_t handle;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown       ] = "Unknown",
		[ XenbusStateInitialising  ] = "Initialising",
		[ XenbusStateInitWait      ] = "InitWait",
		[ XenbusStateInitialised   ] = "Initialised",
		[ XenbusStateConnected     ] = "Connected",
		[ XenbusStateClosing       ] = "Closing",
		[ XenbusStateClosed        ] = "Closed",
		[ XenbusStateReconfiguring ] = "Reconfiguring",
		[ XenbusStateReconfigured  ] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
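
/*
 * Illustrative sketch only (not part of this driver): a typical caller
 * watches its peer's state directory.  The callback name below is
 * hypothetical; the callback signature matches the one expected above.
 *
 *	static void otherend_changed(struct xenbus_watch *watch,
 *				     const char **vec, unsigned int len)
 *	{
 *		... react to the change under the watched node ...
 *	}
 *
 *	err = xenbus_watch_path(dev, dev->otherend,
 *				&dev->otherend_watch, otherend_changed);
 */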


/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt and the following
 * arguments, using the given xenbus_watch structure for storage, and the
 * given @callback function as the callback.  Return 0 on success, or -errno
 * on error.  On success, the watched path will be saved as @watch->node, and
 * becomes the caller's to kfree().  On error, @watch->node will be NULL, so
 * the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
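
/*
 * Illustrative sketch only: how a driver might watch the peer's "state"
 * node using a format string; "otherend_changed" is the hypothetical
 * callback from the sketch above.
 *
 *	err = xenbus_watch_pathfmt(dev, &dev->otherend_watch,
 *				   otherend_changed, "%s/%s",
 *				   dev->otherend, "state");
 */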

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it attempted to do in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
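
/*
 * Illustrative sketch only: a frontend that has finished setting up its
 * shared ring and event channel would typically advertise readiness with:
 *
 *	xenbus_switch_state(dev, XenbusStateConnected);
 */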

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/**
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}


static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
		       dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
		       dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}


/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
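
/*
 * Illustrative sketch only: reporting a fatal setup error; "info" and its
 * ring_ref field are hypothetical driver state.
 *
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%u",
 *			    info->ring_ref);
 *	if (err) {
 *		xenbus_dev_fatal(dev, err, "writing ring-ref");
 *		goto fail;
 *	}
 */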

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Return a grant reference (a non-negative value) on success, or -errno on
 * error.  On error, the device will switch to XenbusStateClosing, and the
 * error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
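
/*
 * Illustrative sketch only: granting a shared ring page to the peer, as a
 * frontend typically does; "ring", "err" and "info" are hypothetical.
 *
 *	ring = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 *	if (!ring)
 *		return -ENOMEM;
 *	err = xenbus_grant_ring(dev, virt_to_mfn(ring));
 *	if (err < 0)
 *		goto fail;
 *	info->ring_ref = err;
 */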


/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
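
/*
 * Illustrative sketch only: a driver usually binds the freshly allocated
 * port to an interrupt handler; "my_interrupt" and "info" are hypothetical.
 *
 *	err = xenbus_alloc_evtchn(dev, &info->evtchn);
 *	if (err)
 *		goto fail;
 *	err = bind_evtchn_to_irqhandler(info->evtchn, my_interrupt,
 *					0, "mydevice", info);
 *	if (err < 0)
 *		goto fail;
 *	info->irq = err;
 */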


/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);


/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	return ring_ops->map(dev, gnt_ref, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
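
/*
 * Illustrative sketch only: a backend mapping the ring page granted by its
 * frontend; "ring_ref" would have been read from the frontend's store area,
 * and "sring" is a hypothetical shared-ring pointer.
 *
 *	void *addr;
 *
 *	err = xenbus_map_ring_valloc(dev, ring_ref, &addr);
 *	if (err)
 *		return err;
 *	sring = addr;
 */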

static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *pte;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(PAGE_SIZE, &pte);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	/*
	 * GNTMAP_contains_pte: host_addr is the machine address of the PTE
	 * backing the reserved area, so the hypervisor fills that PTE in to
	 * map the granted page at area->addr.
	 */
	op.host_addr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_batch_map(&op, 1);

	if (op.status != GNTST_okay) {
		free_vm_area(area);
		kfree(node);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	node->handle = op.handle;
	node->area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      int gnt_ref, void **vaddr)
{
	struct xenbus_map_node *node;
	int err;
	void *addr;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
	if (err)
		goto out_err;

	addr = pfn_to_kaddr(page_to_pfn(node->page));

	err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
	if (err)
		goto out_err_free_ballooned_pages;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_err_free_ballooned_pages:
	free_xenballooned_pages(1, &node->page);
 out_err:
	kfree(node);
	return err;
}


/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
			  dev->otherend_id);

	gnttab_batch_map(&op, 1);

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
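
/*
 * Illustrative sketch only, mirroring the HVM path above: the caller
 * provides the address, here the kernel address of a ballooned page;
 * "page", "addr" and "handle" are hypothetical locals.
 *
 *	err = alloc_xenballooned_pages(1, &page, false);
 *	if (err)
 *		return err;
 *	addr = pfn_to_kaddr(page_to_pfn(page));
 *	err = xenbus_map_ring(dev, gnt_ref, &handle, addr);
 */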


/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
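
/*
 * Illustrative sketch only: tearing down a mapping created with
 * xenbus_map_ring_valloc(), e.g. in a backend's disconnect path;
 * "info" and its ring_vaddr field are hypothetical.
 *
 *	if (info->ring_vaddr)
 *		xenbus_unmap_ring_vfree(dev, info->ring_vaddr);
 */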

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};
	unsigned int level;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = node->handle;
	op.host_addr = arbitrary_virt_to_machine(
		lookup_address((unsigned long)vaddr, &level)).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		free_vm_area(node->area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 node->handle, op.status);

	kfree(node);
	return op.status;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = pfn_to_kaddr(page_to_pfn(node->page));
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	rv = xenbus_unmap_ring(dev, node->handle, addr);

	if (!rv)
		free_xenballooned_pages(1, &node->page);
	else
		WARN(1, "Leaking %p\n", vaddr);

	kfree(node);
	return rv;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);


/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
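
/*
 * Illustrative sketch only: polling the peer's state, as a frontend's
 * connect or resume path might do.
 *
 *	if (xenbus_read_driver_state(dev->otherend) != XenbusStateConnected)
 *		return;
 */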

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

/*
 * Select the grant-mapping implementation: PV guests map grants by PTE
 * manipulation, while auto-translated guests (HVM) map them into ballooned
 * pages.
 */
void __init xenbus_ring_ops_init(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
		ring_ops = &ring_ops_hvm;
}