/*
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 */
/**
 * xenbus_watch_path - register a watch
 *
 * On success, the given @path will be saved as @watch->node, and remains the
 * caller's to free.  On error, @watch->node will be NULL, the device will
 * switch to %XenbusStateClosing, and the error will be saved in the store.
 *
 * Returns: %0 on success or -errno on error
 */
	watch->node = path;
	watch->will_handle = will_handle;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->will_handle = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}
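/*
 * Illustrative sketch, not part of this file: registering a watch with
 * xenbus_watch_path().  The "ring-ref" node name and both helper
 * functions are made up; the callback signature and the ownership rules
 * for @path follow the kernel-doc above.
 */
static void example_watch_cb(struct xenbus_watch *watch,
			     const char *path, const char *token)
{
	pr_info("xenstore node %s changed\n", path);
}

static int example_register_watch(struct xenbus_device *dev,
				  struct xenbus_watch *watch)
{
	char *path = kasprintf(GFP_KERNEL, "%s/ring-ref", dev->otherend);
	int err;

	if (!path)
		return -ENOMEM;

	/* will_handle may be NULL; every event is then handled. */
	err = xenbus_watch_path(dev, path, watch, NULL, example_watch_cb);
	if (err)
		kfree(path);	/* on error the watch kept nothing */
	return err;
}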
/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 *
 * On success, the forged path will be saved as @watch->node, and becomes
 * the caller's to kfree().  On error, @watch->node will be NULL, so the
 * caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 *
 * Returns: %0 on success or -errno on error
 */
	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
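/*
 * Sketch of the same registration via xenbus_watch_pathfmt(), which
 * forges the path itself; on success the caller eventually does
 * kfree(watch->node) after unregistering.  "feature-foo" is a made-up
 * node name, and example_watch_cb is the hypothetical callback above.
 */
static int example_register_watch_fmt(struct xenbus_device *dev,
				      struct xenbus_watch *watch)
{
	return xenbus_watch_pathfmt(dev, watch, NULL, example_watch_cb,
				    "%s/feature-foo", dev->otherend);
}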
	/*
	 * Note that, because of this cached value of our state, this
	 * function will not take a caller's Xenstore transaction
	 * (something it was trying to in the past) because dev->state
	 * would not get reset if the transaction was aborted.
	 */

	if (state == dev->state)
		return 0;
	/* ... */
	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	/* ... */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	/* ... */
	if (err == -EAGAIN && !abort)
		goto again;
	/* ... */
	dev->state = state;
/**
 * xenbus_switch_state - save the new state of a driver
 *
 * Advertise in the store a change of the given driver to the given
 * new state.
 *
 * Returns: %0 on success or -errno on error
 */
	complete(&dev->down);
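/*
 * Sketch: a driver advertising a state change.  Only
 * xenbus_switch_state() and the XenbusState* values are real; the
 * surrounding function is illustrative.  Any keys the peer must see
 * should be published before the "state" node flips, since the flip is
 * what fires the peer's watch.
 */
static int example_connect(struct xenbus_device *dev)
{
	return xenbus_switch_state(dev, XenbusStateConnected);
}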
	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
/**
 * xenbus_dev_error - place an error message into the store
 */

/**
 * xenbus_dev_fatal - put an error message into the store and then shut down
 */
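/*
 * Sketch of the intended division of labour between the two reporting
 * helpers: xenbus_dev_error() records the problem in the store and the
 * kernel log, while xenbus_dev_fatal() additionally moves the device
 * towards Closing.  The trigger condition here is hypothetical.
 */
static void example_report(struct xenbus_device *dev, int err, bool fatal)
{
	if (fatal)
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dev->otherend);
	else
		xenbus_dev_error(dev, err, "reading %s/ring-ref", dev->otherend);
}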
		ret = -ENOMEM;
	/* ... */
		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
						gfn, 0);
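/*
 * Sketch, assuming the xenbus_setup_ring()/xenbus_teardown_ring() pair
 * exported by this file: a frontend allocating a one-page shared ring
 * and granting it to the backend.  Error reporting is left to the
 * caller; the helper already calls xenbus_dev_fatal() on grant failure.
 */
static int example_alloc_ring(struct xenbus_device *dev, void **ring,
			      grant_ref_t *gref)
{
	int err = xenbus_setup_ring(dev, GFP_KERNEL, ring, 1, gref);

	if (err)
		return err;
	/* on teardown: xenbus_teardown_ring(ring, 1, gref); */
	return 0;
}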
/*
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
	alloc_unbound.remote_dom = dev->otherend_id;
/*
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
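/*
 * Sketch: allocating an unbound event channel for the peer domain and
 * releasing it again.  Both helpers are the APIs described above; the
 * xenstore publication step is only indicated in the comment.
 */
static int example_evtchn(struct xenbus_device *dev)
{
	evtchn_port_t port;
	int err = xenbus_alloc_evtchn(dev, &port);

	if (err)
		return err;
	/* ... bind a handler, write "event-channel" to xenstore ... */
	xenbus_free_evtchn(dev, port);
	return 0;
}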
/**
 * xenbus_map_ring_valloc - allocate & map pages of VA space
 *
 * Returns: %0 on success or -errno on error
 */
	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node)
		err = -ENOMEM;
	else
		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	kfree(info->node);
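/*
 * Sketch: the backend-side counterpart, mapping a single grant from the
 * peer into this domain's VA space.  The gref would normally be read
 * from the frontend's xenstore directory; error reporting already
 * happens inside the map path.
 */
static int example_map_peer_ring(struct xenbus_device *dev,
				 grant_ref_t gref, void **vaddr)
{
	return xenbus_map_ring_valloc(dev, &gref, 1, vaddr);
}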
	/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
	 * long), e.g. 32-on-64.  The caller is responsible for preparing the
	 * right array to feed into this function.
	 */
	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;
	/* ... */
		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
				  gnt_refs[i], dev->otherend_id);
	/* ... */
	gnttab_batch_map(info->map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (info->map[i].status != GNTST_okay) {
			xenbus_dev_fatal(dev, info->map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = info->map[i].handle;
	}

 fail:
	/* Batch-unmap whatever did map, so a partial failure leaves
	 * nothing behind. */
			gnttab_set_unmap_op(&info->unmap[j],
					    info->phys_addrs[i],
					    GNTMAP_host_map, handles[i]);
	/* ... */
	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j));
	/* ... */
		if (info->unmap[i].status != GNTST_okay) {
	/* ... */
	return -ENOENT;
/**
 * xenbus_unmap_ring - unmap memory from another domain
 *
 * Unmap memory in this domain that was imported from another domain.
 */
	/* ... */
	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;
	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
	struct xenbus_map_node *node = info->node;
	/* ... */
	err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
	/* ... */
	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm, info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;
	/* ... */
	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;
	/* ... */
	list_add(&node->next, &xenbus_valloc_pages);
	/* ... */
	info->node = NULL;
	/* ... error paths: */
	xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
	/* ... */
	xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
/**
 * xenbus_unmap_ring_vfree - unmap memory from another domain
 *
 * Use xenbus_unmap_ring_vfree() if the memory was mapped with
 * xenbus_map_ring_valloc(); it also frees the virtual address space.
 */
	return ring_ops->unmap(dev, vaddr);
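/*
 * Sketch: the matching teardown.  The address passed in must be exactly
 * the one returned by xenbus_map_ring_valloc(), since it is the lookup
 * key for the bookkeeping node.
 */
static void example_unmap_peer_ring(struct xenbus_device *dev, void *vaddr)
{
	if (vaddr)
		xenbus_unmap_ring_vfree(dev, vaddr);
}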
	info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
	struct xenbus_map_node *node = info->node;
	struct vm_struct *area;
	int err = -ENOMEM;

	area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
	if (!area)
		return -ENOMEM;
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
		goto failed;
	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				info, GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;
	/* ... */
	list_add(&node->next, &xenbus_valloc_pages);
	/* ... */
	*vaddr = area->addr;
	info->node = NULL;
	if (node->pv.area->addr == vaddr) {
		list_del(&node->next);
		goto found;
	}
	/* ... */
	xenbus_dev_error(dev, -ENOENT,
			 "can't find mapped virtual address %p", vaddr);
	/* ... */
	for (i = 0; i < node->nr_handles; i++) {
		/* ... */
		unmap[i].handle = node->handles[i];
	}
	/* ... */
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay)
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
	}
	/* ... */
	free_vm_area(node->pv.area);
	/* ... */
	pr_alert("leaking VM area %p size %u page(s)",
		 node->pv.area, node->nr_handles);
	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
	addr = node->hvm.addr;
	/* ... */
	list_del(&node->next);
	/* ... */
	xenbus_dev_error(dev, -ENOENT,
			 "can't find mapped virtual address %p", vaddr);
	/* ... */
	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm, &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	/* ... */
	xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
/**
 * xenbus_read_driver_state - read state from a store path
 */
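/*
 * Sketch: reading the peer's state.  dev->otherend is the peer's
 * xenstore directory; xenbus_read_driver_state() reads its "state"
 * node, the same node xenbus_switch_state() maintains on our side.
 */
static bool example_peer_connected(struct xenbus_device *dev)
{
	return xenbus_read_driver_state(dev->otherend) == XenbusStateConnected;
}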