Lines Matching refs:xbb
154 static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
156 static int xbb_shutdown(struct xbb_softc *xbb);
172 struct xbb_softc *xbb; member
487 typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
770 xbb_get_req(struct xbb_softc *xbb) in xbb_get_req() argument
776 mtx_assert(&xbb->lock, MA_OWNED); in xbb_get_req()
778 if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) { in xbb_get_req()
779 STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links); in xbb_get_req()
780 xbb->active_request_count++; in xbb_get_req()
793 xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req) in xbb_release_req() argument
795 mtx_assert(&xbb->lock, MA_OWNED); in xbb_release_req()
797 STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links); in xbb_release_req()
798 xbb->active_request_count--; in xbb_release_req()
800 KASSERT(xbb->active_request_count >= 0, in xbb_release_req()
812 xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list, in xbb_release_reqs() argument
815 mtx_assert(&xbb->lock, MA_OWNED); in xbb_release_reqs()
817 STAILQ_CONCAT(&xbb->request_free_stailq, req_list); in xbb_release_reqs()
818 xbb->active_request_count -= nreqs; in xbb_release_reqs()
820 KASSERT(xbb->active_request_count >= 0, in xbb_release_reqs()
883 struct xbb_softc *xbb; in xbb_get_gntaddr() local
885 xbb = reqlist->xbb; in xbb_get_gntaddr()
887 return ((uintptr_t)(xbb->gnt_base_addr + in xbb_get_gntaddr()
888 (uintptr_t)(reqlist->kva - xbb->kva) + in xbb_get_gntaddr()
908 xbb_get_kva(struct xbb_softc *xbb, int nr_pages) in xbb_get_kva() argument
920 mtx_lock(&xbb->lock); in xbb_get_kva()
925 bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear); in xbb_get_kva()
934 for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) { in xbb_get_kva()
941 if (bit_test(xbb->kva_free, i)) { in xbb_get_kva()
955 bit_nset(xbb->kva_free, first_clear, in xbb_get_kva()
958 free_kva = xbb->kva + in xbb_get_kva()
961 KASSERT(free_kva >= (uint8_t *)xbb->kva && in xbb_get_kva()
963 (uint8_t *)xbb->ring_config.va, in xbb_get_kva()
966 nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva, in xbb_get_kva()
967 (uintmax_t)xbb->ring_config.va)); in xbb_get_kva()
975 xbb->flags |= XBBF_RESOURCE_SHORTAGE; in xbb_get_kva()
976 xbb->kva_shortages++; in xbb_get_kva()
979 mtx_unlock(&xbb->lock); in xbb_get_kva()
992 xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages) in xbb_free_kva() argument
996 mtx_assert(&xbb->lock, MA_OWNED); in xbb_free_kva()
998 start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT; in xbb_free_kva()
999 bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1); in xbb_free_kva()
1042 xbb_get_reqlist(struct xbb_softc *xbb) in xbb_get_reqlist() argument
1048 mtx_assert(&xbb->lock, MA_OWNED); in xbb_get_reqlist()
1050 if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) { in xbb_get_reqlist()
1051 STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links); in xbb_get_reqlist()
1073 xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, in xbb_release_reqlist() argument
1077 mtx_assert(&xbb->lock, MA_OWNED); in xbb_release_reqlist()
1080 wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE; in xbb_release_reqlist()
1081 xbb->flags &= ~XBBF_RESOURCE_SHORTAGE; in xbb_release_reqlist()
1085 xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments); in xbb_release_reqlist()
1087 xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children); in xbb_release_reqlist()
1089 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links); in xbb_release_reqlist()
1091 if ((xbb->flags & XBBF_SHUTDOWN) != 0) { in xbb_release_reqlist()
1098 xbb_shutdown(xbb); in xbb_release_reqlist()
1102 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); in xbb_release_reqlist()
1116 xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist, in xbb_get_resources() argument
1125 mtx_lock(&xbb->lock); in xbb_get_resources()
1131 if ((xbb->flags & XBBF_SHUTDOWN) != 0) { in xbb_get_resources()
1132 mtx_unlock(&xbb->lock); in xbb_get_resources()
1140 nreqlist = xbb_get_reqlist(xbb); in xbb_get_resources()
1146 nreq = xbb_get_req(xbb); in xbb_get_resources()
1150 mtx_unlock(&xbb->lock); in xbb_get_resources()
1157 xbb->sector_size_shift; in xbb_get_resources()
1158 STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist, in xbb_get_resources()
1167 if (xbb->abi != BLKIF_PROTOCOL_NATIVE) { in xbb_get_resources()
1175 devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0); in xbb_get_resources()
1189 xbb->flags |= XBBF_RESOURCE_SHORTAGE; in xbb_get_resources()
1190 xbb->request_shortages++; in xbb_get_resources()
1193 xbb_release_req(xbb, nreq); in xbb_get_resources()
1196 xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0); in xbb_get_resources()
1198 mtx_unlock(&xbb->lock); in xbb_get_resources()
1212 xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status) in xbb_queue_response() argument
1227 mtx_assert(&xbb->lock, MA_OWNED); in xbb_queue_response()
1234 switch (xbb->abi) { in xbb_queue_response()
1236 resp = RING_GET_RESPONSE(&xbb->rings.native, in xbb_queue_response()
1237 xbb->rings.native.rsp_prod_pvt); in xbb_queue_response()
1241 RING_GET_RESPONSE(&xbb->rings.x86_32, in xbb_queue_response()
1242 xbb->rings.x86_32.rsp_prod_pvt); in xbb_queue_response()
1246 RING_GET_RESPONSE(&xbb->rings.x86_64, in xbb_queue_response()
1247 xbb->rings.x86_64.rsp_prod_pvt); in xbb_queue_response()
1258 xbb->reqs_completed_with_error++; in xbb_queue_response()
1260 xbb->rings.common.rsp_prod_pvt++; in xbb_queue_response()
1262 xbb->reqs_queued_for_completion++; in xbb_queue_response()
1277 xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify) in xbb_push_responses() argument
1284 mtx_assert(&xbb->lock, MA_OWNED); in xbb_push_responses()
1288 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify); in xbb_push_responses()
1290 if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) { in xbb_push_responses()
1296 RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do); in xbb_push_responses()
1297 } else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) { in xbb_push_responses()
1301 xbb->reqs_completed += xbb->reqs_queued_for_completion; in xbb_push_responses()
1302 xbb->reqs_queued_for_completion = 0; in xbb_push_responses()
1314 xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist) in xbb_complete_reqlist() argument
1325 mtx_lock(&xbb->lock); in xbb_complete_reqlist()
1340 xbb_queue_response(xbb, nreq, reqlist->status); in xbb_complete_reqlist()
1350 devstat_end_transaction(xbb->xbb_stats_in, in xbb_complete_reqlist()
1367 devstat_end_transaction(xbb->xbb_stats, in xbb_complete_reqlist()
1374 xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1); in xbb_complete_reqlist()
1376 xbb_push_responses(xbb, &run_taskqueue, &notify); in xbb_complete_reqlist()
1378 mtx_unlock(&xbb->lock); in xbb_complete_reqlist()
1381 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); in xbb_complete_reqlist()
1384 xen_intr_signal(xbb->xen_intr_handle); in xbb_complete_reqlist()
1397 struct xbb_softc *xbb; in xbb_bio_done() local
1401 xbb = reqlist->xbb; in xbb_bio_done()
1423 bio->bio_error, xbb->dev_name); in xbb_bio_done()
1427 && xenbus_get_state(xbb->dev) == XenbusStateConnected) { in xbb_bio_done()
1433 xenbus_set_state(xbb->dev, XenbusStateClosing); in xbb_bio_done()
1442 xbb_complete_reqlist(xbb, reqlist); in xbb_bio_done()
1462 xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist) in xbb_dispatch_io() argument
1490 reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments); in xbb_dispatch_io()
1500 devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0); in xbb_dispatch_io()
1510 if ((xbb->flags & XBBF_READ_ONLY) != 0) { in xbb_dispatch_io()
1512 xbb->dev_name); in xbb_dispatch_io()
1527 if (xbb->disable_flush != 0) { in xbb_dispatch_io()
1537 if (xbb->flush_interval != 0) { in xbb_dispatch_io()
1538 if (++(xbb->flush_count) < xbb->flush_interval) { in xbb_dispatch_io()
1541 xbb->flush_count = 0; in xbb_dispatch_io()
1556 reqlist->xbb = xbb; in xbb_dispatch_io()
1557 xbb_sg = xbb->xbb_sgs; in xbb_dispatch_io()
1558 map = xbb->maps; in xbb_dispatch_io()
1573 || __predict_false(nseg > xbb->max_request_segments)) { in xbb_dispatch_io()
1607 xbb->ring_config.gnt_addr, in xbb_dispatch_io()
1611 (uintmax_t)xbb->ring_config.gnt_addr)); in xbb_dispatch_io()
1615 map->dom = xbb->otherend_id; in xbb_dispatch_io()
1626 nr_sects = (nr_sects << 9) >> xbb->sector_size_shift; in xbb_dispatch_io()
1630 ((xbb->sector_size >> 9) - 1)) != 0) { in xbb_dispatch_io()
1631 device_printf(xbb->dev, "%s: I/O size (%d) is not " in xbb_dispatch_io()
1635 xbb->sector_size); in xbb_dispatch_io()
1642 xbb->maps, reqlist->nr_segments); in xbb_dispatch_io()
1648 for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments; in xbb_dispatch_io()
1664 xbb->media_num_sectors) { in xbb_dispatch_io()
1670 xbb->dev_name); in xbb_dispatch_io()
1677 error = xbb->dispatch_io(xbb, in xbb_dispatch_io()
1691 xbb_complete_reqlist(xbb, reqlist); in xbb_dispatch_io()
1728 struct xbb_softc *xbb; in xbb_run_queue() local
1735 xbb = (struct xbb_softc *)context; in xbb_run_queue()
1736 rings = &xbb->rings; in xbb_run_queue()
1757 reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq, in xbb_run_queue()
1792 switch (xbb->abi) { in xbb_run_queue()
1794 ring_req = RING_GET_REQUEST(&xbb->rings.native, in xbb_run_queue()
1802 &xbb->rings.x86_32, rings->common.req_cons); in xbb_run_queue()
1812 ring_req64 = RING_GET_REQUEST(&xbb->rings.x86_64, in xbb_run_queue()
1833 && ((xbb->no_coalesce_reqs != 0) in xbb_run_queue()
1834 || ((xbb->no_coalesce_reqs == 0) in xbb_run_queue()
1838 xbb->max_reqlist_segments))))) { in xbb_run_queue()
1848 retval = xbb_get_resources(xbb, &reqlist, ring_req, in xbb_run_queue()
1849 xbb->rings.common.req_cons); in xbb_run_queue()
1872 xbb->rings.common.req_cons++; in xbb_run_queue()
1873 xbb->reqs_received++; in xbb_run_queue()
1882 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq); in xbb_run_queue()
1895 STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links); in xbb_run_queue()
1897 retval = xbb_dispatch_io(xbb, reqlist); in xbb_run_queue()
1906 STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq, in xbb_run_queue()
1921 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq); in xbb_run_queue()
1924 xbb->forced_dispatch++; in xbb_run_queue()
1926 xbb->normal_dispatch++; in xbb_run_queue()
1928 xbb->total_dispatch++; in xbb_run_queue()
1942 struct xbb_softc *xbb; in xbb_filter() local
1945 xbb = (struct xbb_softc *)arg; in xbb_filter()
1946 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); in xbb_filter()
1951 SDT_PROVIDER_DEFINE(xbb);
1952 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int");
1953 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t",
1955 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int",
1971 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, in xbb_dispatch_dev() argument
1985 dev_data = &xbb->backend.dev; in xbb_dispatch_dev()
1987 << xbb->sector_size_shift; in xbb_dispatch_dev()
2011 SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush, in xbb_dispatch_dev()
2012 device_get_unit(xbb->dev)); in xbb_dispatch_dev()
2019 xbb_sg = xbb->xbb_sgs; in xbb_dispatch_dev()
2030 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { in xbb_dispatch_dev()
2034 __func__, xbb->otherend_id); in xbb_dispatch_dev()
2046 if ((bio_offset & (xbb->sector_size - 1)) != 0) { in xbb_dispatch_dev()
2049 xbb->otherend_id); in xbb_dispatch_dev()
2067 bio->bio_pblkno = bio_offset >> xbb->sector_size_shift; in xbb_dispatch_dev()
2075 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { in xbb_dispatch_dev()
2079 __func__, xbb->otherend_id); in xbb_dispatch_dev()
2096 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read, in xbb_dispatch_dev()
2097 device_get_unit(xbb->dev), in xbb_dispatch_dev()
2101 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write, in xbb_dispatch_dev()
2102 device_get_unit(xbb->dev), in xbb_dispatch_dev()
2118 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int");
2119 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t",
2121 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int",
2136 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, in xbb_dispatch_file() argument
2147 file_data = &xbb->backend.file; in xbb_dispatch_file()
2161 SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush, in xbb_dispatch_file()
2162 device_get_unit(xbb->dev)); in xbb_dispatch_file()
2164 (void) vn_start_write(xbb->vn, &mountpoint, V_WAIT); in xbb_dispatch_file()
2166 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); in xbb_dispatch_file()
2167 error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread); in xbb_dispatch_file()
2168 VOP_UNLOCK(xbb->vn); in xbb_dispatch_file()
2180 << xbb->sector_size_shift; in xbb_dispatch_file()
2184 xbb_sg = xbb->xbb_sgs; in xbb_dispatch_file()
2222 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read, in xbb_dispatch_file()
2223 device_get_unit(xbb->dev), xuio.uio_offset, in xbb_dispatch_file()
2226 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); in xbb_dispatch_file()
2248 error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ? in xbb_dispatch_file()
2251 VOP_UNLOCK(xbb->vn); in xbb_dispatch_file()
2256 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write, in xbb_dispatch_file()
2257 device_get_unit(xbb->dev), xuio.uio_offset, in xbb_dispatch_file()
2260 (void)vn_start_write(xbb->vn, &mountpoint, V_WAIT); in xbb_dispatch_file()
2262 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); in xbb_dispatch_file()
2282 error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ? in xbb_dispatch_file()
2284 VOP_UNLOCK(xbb->vn); in xbb_dispatch_file()
2300 xbb_complete_reqlist(xbb, reqlist); in xbb_dispatch_file()
2313 xbb_close_backend(struct xbb_softc *xbb) in xbb_close_backend() argument
2316 DPRINTF("closing dev=%s\n", xbb->dev_name); in xbb_close_backend()
2317 if (xbb->vn) { in xbb_close_backend()
2320 if ((xbb->flags & XBBF_READ_ONLY) == 0) in xbb_close_backend()
2323 switch (xbb->device_type) { in xbb_close_backend()
2325 if (xbb->backend.dev.csw) { in xbb_close_backend()
2326 dev_relthread(xbb->backend.dev.cdev, in xbb_close_backend()
2327 xbb->backend.dev.dev_ref); in xbb_close_backend()
2328 xbb->backend.dev.csw = NULL; in xbb_close_backend()
2329 xbb->backend.dev.cdev = NULL; in xbb_close_backend()
2340 (void)vn_close(xbb->vn, flags, NOCRED, curthread); in xbb_close_backend()
2341 xbb->vn = NULL; in xbb_close_backend()
2343 switch (xbb->device_type) { in xbb_close_backend()
2347 if (xbb->backend.file.cred != NULL) { in xbb_close_backend()
2348 crfree(xbb->backend.file.cred); in xbb_close_backend()
2349 xbb->backend.file.cred = NULL; in xbb_close_backend()
2369 xbb_open_dev(struct xbb_softc *xbb) in xbb_open_dev() argument
2376 xbb->device_type = XBB_TYPE_DISK; in xbb_open_dev()
2377 xbb->dispatch_io = xbb_dispatch_dev; in xbb_open_dev()
2378 xbb->backend.dev.cdev = xbb->vn->v_rdev; in xbb_open_dev()
2379 xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev, in xbb_open_dev()
2380 &xbb->backend.dev.dev_ref); in xbb_open_dev()
2381 if (xbb->backend.dev.csw == NULL) in xbb_open_dev()
2384 error = VOP_GETATTR(xbb->vn, &vattr, NOCRED); in xbb_open_dev()
2386 xenbus_dev_fatal(xbb->dev, error, "error getting " in xbb_open_dev()
2388 xbb->dev_name); in xbb_open_dev()
2392 dev = xbb->vn->v_rdev; in xbb_open_dev()
2395 xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for " in xbb_open_dev()
2396 "device %s!", xbb->dev_name); in xbb_open_dev()
2401 (caddr_t)&xbb->sector_size, FREAD, in xbb_open_dev()
2404 xenbus_dev_fatal(xbb->dev, error, in xbb_open_dev()
2406 "for device %s", xbb->dev_name); in xbb_open_dev()
2411 (caddr_t)&xbb->media_size, FREAD, in xbb_open_dev()
2414 xenbus_dev_fatal(xbb->dev, error, in xbb_open_dev()
2416 "for device %s", xbb->dev_name); in xbb_open_dev()
2431 xbb_open_file(struct xbb_softc *xbb) in xbb_open_file() argument
2437 file_data = &xbb->backend.file; in xbb_open_file()
2438 xbb->device_type = XBB_TYPE_FILE; in xbb_open_file()
2439 xbb->dispatch_io = xbb_dispatch_file; in xbb_open_file()
2440 error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred); in xbb_open_file()
2442 xenbus_dev_fatal(xbb->dev, error, in xbb_open_file()
2444 "for file %s", xbb->dev_name); in xbb_open_file()
2453 if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) { in xbb_open_file()
2454 vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY); in xbb_open_file()
2455 if (VN_IS_DOOMED(xbb->vn)) { in xbb_open_file()
2457 xenbus_dev_fatal(xbb->dev, error, in xbb_open_file()
2459 xbb->dev_name); in xbb_open_file()
2466 xbb->media_size = vattr.va_size; in xbb_open_file()
2478 xbb->sector_size = vattr.va_blocksize; in xbb_open_file()
2480 xbb->sector_size = 512; in xbb_open_file()
2486 if ((xbb->media_size % xbb->sector_size) != 0) { in xbb_open_file()
2488 xenbus_dev_fatal(xbb->dev, error, in xbb_open_file()
2490 xbb->dev_name, in xbb_open_file()
2491 (uintmax_t)xbb->media_size, in xbb_open_file()
2492 xbb->sector_size); in xbb_open_file()
2505 xbb_open_backend(struct xbb_softc *xbb) in xbb_open_backend() argument
2514 DPRINTF("opening dev=%s\n", xbb->dev_name); in xbb_open_backend()
2517 xenbus_dev_fatal(xbb->dev, ENOENT, in xbb_open_backend()
2522 if ((xbb->flags & XBBF_READ_ONLY) == 0) in xbb_open_backend()
2528 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name); in xbb_open_backend()
2537 if (xbb->dev_name[0] != '/') { in xbb_open_backend()
2542 dev_name = malloc(strlen(xbb->dev_name) in xbb_open_backend()
2547 xbb->dev_name); in xbb_open_backend()
2548 free(xbb->dev_name, M_XENBLOCKBACK); in xbb_open_backend()
2549 xbb->dev_name = dev_name; in xbb_open_backend()
2553 xenbus_dev_fatal(xbb->dev, error, "error opening device %s", in xbb_open_backend()
2554 xbb->dev_name); in xbb_open_backend()
2560 xbb->vn = nd.ni_vp; in xbb_open_backend()
2563 if (vn_isdisk_error(xbb->vn, &error)) { in xbb_open_backend()
2564 error = xbb_open_dev(xbb); in xbb_open_backend()
2565 } else if (xbb->vn->v_type == VREG) { in xbb_open_backend()
2566 error = xbb_open_file(xbb); in xbb_open_backend()
2569 xenbus_dev_fatal(xbb->dev, error, "%s is not a disk " in xbb_open_backend()
2570 "or file", xbb->dev_name); in xbb_open_backend()
2572 VOP_UNLOCK(xbb->vn); in xbb_open_backend()
2575 xbb_close_backend(xbb); in xbb_open_backend()
2579 xbb->sector_size_shift = fls(xbb->sector_size) - 1; in xbb_open_backend()
2580 xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift; in xbb_open_backend()
2583 (xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file", in xbb_open_backend()
2584 xbb->dev_name, xbb->sector_size, xbb->media_size); in xbb_open_backend()
2596 xbb_free_communication_mem(struct xbb_softc *xbb) in xbb_free_communication_mem() argument
2598 if (xbb->kva != 0) { in xbb_free_communication_mem()
2599 if (xbb->pseudo_phys_res != NULL) { in xbb_free_communication_mem()
2600 xenmem_free(xbb->dev, xbb->pseudo_phys_res_id, in xbb_free_communication_mem()
2601 xbb->pseudo_phys_res); in xbb_free_communication_mem()
2602 xbb->pseudo_phys_res = NULL; in xbb_free_communication_mem()
2605 xbb->kva = 0; in xbb_free_communication_mem()
2606 xbb->gnt_base_addr = 0; in xbb_free_communication_mem()
2607 if (xbb->kva_free != NULL) { in xbb_free_communication_mem()
2608 free(xbb->kva_free, M_XENBLOCKBACK); in xbb_free_communication_mem()
2609 xbb->kva_free = NULL; in xbb_free_communication_mem()
2619 xbb_disconnect(struct xbb_softc *xbb) in xbb_disconnect() argument
2623 mtx_unlock(&xbb->lock); in xbb_disconnect()
2624 xen_intr_unbind(&xbb->xen_intr_handle); in xbb_disconnect()
2625 if (xbb->io_taskqueue != NULL) in xbb_disconnect()
2626 taskqueue_drain(xbb->io_taskqueue, &xbb->io_task); in xbb_disconnect()
2627 mtx_lock(&xbb->lock); in xbb_disconnect()
2633 if (xbb->active_request_count != 0) in xbb_disconnect()
2636 if (xbb->flags & XBBF_RING_CONNECTED) { in xbb_disconnect()
2643 ring_idx < xbb->ring_config.ring_pages; in xbb_disconnect()
2645 op->host_addr = xbb->ring_config.gnt_addr in xbb_disconnect()
2647 op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx]; in xbb_disconnect()
2648 op->handle = xbb->ring_config.handle[ring_idx]; in xbb_disconnect()
2652 xbb->ring_config.ring_pages); in xbb_disconnect()
2656 xbb->flags &= ~XBBF_RING_CONNECTED; in xbb_disconnect()
2659 xbb_free_communication_mem(xbb); in xbb_disconnect()
2661 if (xbb->requests != NULL) { in xbb_disconnect()
2662 free(xbb->requests, M_XENBLOCKBACK); in xbb_disconnect()
2663 xbb->requests = NULL; in xbb_disconnect()
2666 if (xbb->request_lists != NULL) { in xbb_disconnect()
2671 for (i = 0, reqlist = xbb->request_lists; in xbb_disconnect()
2672 i < xbb->max_requests; i++, reqlist++) { in xbb_disconnect()
2678 free(xbb->request_lists, M_XENBLOCKBACK); in xbb_disconnect()
2679 xbb->request_lists = NULL; in xbb_disconnect()
2693 xbb_connect_ring(struct xbb_softc *xbb) in xbb_connect_ring() argument
2700 if ((xbb->flags & XBBF_RING_CONNECTED) != 0) in xbb_connect_ring()
2707 xbb->ring_config.va = xbb->kva in xbb_connect_ring()
2708 + (xbb->kva_size in xbb_connect_ring()
2709 - (xbb->ring_config.ring_pages * PAGE_SIZE)); in xbb_connect_ring()
2710 xbb->ring_config.gnt_addr = xbb->gnt_base_addr in xbb_connect_ring()
2711 + (xbb->kva_size in xbb_connect_ring()
2712 - (xbb->ring_config.ring_pages * PAGE_SIZE)); in xbb_connect_ring()
2715 ring_idx < xbb->ring_config.ring_pages; in xbb_connect_ring()
2717 gnt->host_addr = xbb->ring_config.gnt_addr in xbb_connect_ring()
2720 gnt->ref = xbb->ring_config.ring_ref[ring_idx]; in xbb_connect_ring()
2721 gnt->dom = xbb->otherend_id; in xbb_connect_ring()
2725 xbb->ring_config.ring_pages); in xbb_connect_ring()
2730 ring_idx < xbb->ring_config.ring_pages; in xbb_connect_ring()
2736 xbb->ring_config.va = 0; in xbb_connect_ring()
2737 xenbus_dev_fatal(xbb->dev, EACCES, in xbb_connect_ring()
2742 for (i = 0, j = 0; i < xbb->ring_config.ring_pages; in xbb_connect_ring()
2760 xbb->ring_config.handle[ring_idx] = gnt->handle; in xbb_connect_ring()
2761 xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr; in xbb_connect_ring()
2765 switch (xbb->abi) { in xbb_connect_ring()
2769 sring = (blkif_sring_t *)xbb->ring_config.va; in xbb_connect_ring()
2770 BACK_RING_INIT(&xbb->rings.native, sring, in xbb_connect_ring()
2771 xbb->ring_config.ring_pages * PAGE_SIZE); in xbb_connect_ring()
2777 sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va; in xbb_connect_ring()
2778 BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32, in xbb_connect_ring()
2779 xbb->ring_config.ring_pages * PAGE_SIZE); in xbb_connect_ring()
2785 sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va; in xbb_connect_ring()
2786 BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64, in xbb_connect_ring()
2787 xbb->ring_config.ring_pages * PAGE_SIZE); in xbb_connect_ring()
2794 xbb->flags |= XBBF_RING_CONNECTED; in xbb_connect_ring()
2796 error = xen_intr_bind_remote_port(xbb->dev, in xbb_connect_ring()
2797 xbb->otherend_id, in xbb_connect_ring()
2798 xbb->ring_config.evtchn, in xbb_connect_ring()
2801 /*arg*/xbb, in xbb_connect_ring()
2803 &xbb->xen_intr_handle); in xbb_connect_ring()
2805 xenbus_dev_fatal(xbb->dev, error, "binding event channel"); in xbb_connect_ring()
2825 xbb_alloc_communication_mem(struct xbb_softc *xbb) in xbb_alloc_communication_mem() argument
2827 xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments; in xbb_alloc_communication_mem()
2828 xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE; in xbb_alloc_communication_mem()
2829 xbb->kva_size = xbb->reqlist_kva_size + in xbb_alloc_communication_mem()
2830 (xbb->ring_config.ring_pages * PAGE_SIZE); in xbb_alloc_communication_mem()
2832 xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT); in xbb_alloc_communication_mem()
2833 if (xbb->kva_free == NULL) in xbb_alloc_communication_mem()
2837 device_get_nameunit(xbb->dev), xbb->kva_size, in xbb_alloc_communication_mem()
2838 xbb->reqlist_kva_size); in xbb_alloc_communication_mem()
2845 xbb->pseudo_phys_res_id = 0; in xbb_alloc_communication_mem()
2846 xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id, in xbb_alloc_communication_mem()
2847 xbb->kva_size); in xbb_alloc_communication_mem()
2848 if (xbb->pseudo_phys_res == NULL) { in xbb_alloc_communication_mem()
2849 xbb->kva = 0; in xbb_alloc_communication_mem()
2852 xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res); in xbb_alloc_communication_mem()
2853 xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res); in xbb_alloc_communication_mem()
2856 device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva, in xbb_alloc_communication_mem()
2857 (uintmax_t)xbb->gnt_base_addr); in xbb_alloc_communication_mem()
2867 xbb_collect_frontend_info(struct xbb_softc *xbb) in xbb_collect_frontend_info() argument
2876 otherend_path = xenbus_get_otherend_path(xbb->dev); in xbb_collect_frontend_info()
2881 xbb->ring_config.ring_pages = 1; in xbb_collect_frontend_info()
2882 xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; in xbb_collect_frontend_info()
2883 xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE; in xbb_collect_frontend_info()
2890 &xbb->ring_config.evtchn); in xbb_collect_frontend_info()
2892 xenbus_dev_fatal(xbb->dev, error, in xbb_collect_frontend_info()
2895 xenbus_get_otherend_path(xbb->dev)); in xbb_collect_frontend_info()
2913 xbb->max_requests = 32; in xbb_collect_frontend_info()
2918 xbb->ring_config.ring_pages = 1 << ring_page_order; in xbb_collect_frontend_info()
2919 ring_size = PAGE_SIZE * xbb->ring_config.ring_pages; in xbb_collect_frontend_info()
2920 xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size); in xbb_collect_frontend_info()
2922 if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) { in xbb_collect_frontend_info()
2923 xenbus_dev_fatal(xbb->dev, EINVAL, in xbb_collect_frontend_info()
2927 xbb->ring_config.ring_pages, in xbb_collect_frontend_info()
2932 if (xbb->ring_config.ring_pages == 1) { in xbb_collect_frontend_info()
2935 &xbb->ring_config.ring_ref[0], in xbb_collect_frontend_info()
2938 xenbus_dev_fatal(xbb->dev, error, in xbb_collect_frontend_info()
2942 xenbus_get_otherend_path(xbb->dev)); in xbb_collect_frontend_info()
2947 for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages; in xbb_collect_frontend_info()
2955 &xbb->ring_config.ring_ref[ring_idx]); in xbb_collect_frontend_info()
2957 xenbus_dev_fatal(xbb->dev, error, in xbb_collect_frontend_info()
2977 xbb->abi = BLKIF_PROTOCOL_NATIVE; in xbb_collect_frontend_info()
2979 xbb->abi = BLKIF_PROTOCOL_X86_32; in xbb_collect_frontend_info()
2981 xbb->abi = BLKIF_PROTOCOL_X86_64; in xbb_collect_frontend_info()
2983 xenbus_dev_fatal(xbb->dev, EINVAL, in xbb_collect_frontend_info()
2998 xbb_alloc_requests(struct xbb_softc *xbb) in xbb_alloc_requests() argument
3006 xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests), in xbb_alloc_requests()
3008 if (xbb->requests == NULL) { in xbb_alloc_requests()
3009 xenbus_dev_fatal(xbb->dev, ENOMEM, in xbb_alloc_requests()
3014 req = xbb->requests; in xbb_alloc_requests()
3015 last_req = &xbb->requests[xbb->max_requests - 1]; in xbb_alloc_requests()
3016 STAILQ_INIT(&xbb->request_free_stailq); in xbb_alloc_requests()
3018 STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links); in xbb_alloc_requests()
3025 xbb_alloc_request_lists(struct xbb_softc *xbb) in xbb_alloc_request_lists() argument
3034 xbb->request_lists = malloc(xbb->max_requests * in xbb_alloc_request_lists()
3035 sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO); in xbb_alloc_request_lists()
3036 if (xbb->request_lists == NULL) { in xbb_alloc_request_lists()
3037 xenbus_dev_fatal(xbb->dev, ENOMEM, in xbb_alloc_request_lists()
3042 STAILQ_INIT(&xbb->reqlist_free_stailq); in xbb_alloc_request_lists()
3043 STAILQ_INIT(&xbb->reqlist_pending_stailq); in xbb_alloc_request_lists()
3044 for (i = 0; i < xbb->max_requests; i++) { in xbb_alloc_request_lists()
3047 reqlist = &xbb->request_lists[i]; in xbb_alloc_request_lists()
3049 reqlist->xbb = xbb; in xbb_alloc_request_lists()
3051 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments * in xbb_alloc_request_lists()
3055 xenbus_dev_fatal(xbb->dev, ENOMEM, in xbb_alloc_request_lists()
3061 for (seg = 0; seg < xbb->max_reqlist_segments; seg++) in xbb_alloc_request_lists()
3064 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links); in xbb_alloc_request_lists()
3076 xbb_publish_backend_info(struct xbb_softc *xbb) in xbb_publish_backend_info() argument
3083 our_path = xenbus_get_node(xbb->dev); in xbb_publish_backend_info()
3087 xenbus_dev_fatal(xbb->dev, error, in xbb_publish_backend_info()
3099 (uintmax_t)(xbb->media_size >> XBD_SECTOR_SHFT)); in xbb_publish_backend_info()
3106 xbb->flags & XBBF_READ_ONLY in xbb_publish_backend_info()
3113 xbb->sector_size); in xbb_publish_backend_info()
3121 xenbus_dev_fatal(xbb->dev, error, "ending transaction"); in xbb_publish_backend_info()
3126 xenbus_dev_fatal(xbb->dev, error, "writing %s/%s", in xbb_publish_backend_info()
3139 xbb_connect(struct xbb_softc *xbb) in xbb_connect() argument
3143 if (!xbb->hotplug_done || in xbb_connect()
3144 (xenbus_get_state(xbb->dev) != XenbusStateInitWait) || in xbb_connect()
3145 (xbb_collect_frontend_info(xbb) != 0)) in xbb_connect()
3148 xbb->flags &= ~XBBF_SHUTDOWN; in xbb_connect()
3155 xbb->max_reqlist_segments = MIN(xbb->max_request_segments * in xbb_connect()
3156 xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST); in xbb_connect()
3162 xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE; in xbb_connect()
3165 error = xbb_alloc_communication_mem(xbb); in xbb_connect()
3167 xenbus_dev_fatal(xbb->dev, error, in xbb_connect()
3172 error = xbb_publish_backend_info(xbb); in xbb_connect()
3174 xenbus_dev_fatal(xbb->dev, error, in xbb_connect()
3179 error = xbb_alloc_requests(xbb); in xbb_connect()
3185 error = xbb_alloc_request_lists(xbb); in xbb_connect()
3194 error = xbb_connect_ring(xbb); in xbb_connect()
3201 xenbus_set_state(xbb->dev, XenbusStateConnected); in xbb_connect()
3216 xbb_shutdown(struct xbb_softc *xbb) in xbb_shutdown() argument
3230 if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0) in xbb_shutdown()
3233 xbb->flags |= XBBF_IN_SHUTDOWN; in xbb_shutdown()
3234 mtx_unlock(&xbb->lock); in xbb_shutdown()
3236 if (xbb->hotplug_watch.node != NULL) { in xbb_shutdown()
3237 xs_unregister_watch(&xbb->hotplug_watch); in xbb_shutdown()
3238 free(xbb->hotplug_watch.node, M_XENBLOCKBACK); in xbb_shutdown()
3239 xbb->hotplug_watch.node = NULL; in xbb_shutdown()
3242 if (xenbus_get_state(xbb->dev) < XenbusStateClosing) in xbb_shutdown()
3243 xenbus_set_state(xbb->dev, XenbusStateClosing); in xbb_shutdown()
3245 frontState = xenbus_get_otherend_state(xbb->dev); in xbb_shutdown()
3246 mtx_lock(&xbb->lock); in xbb_shutdown()
3247 xbb->flags &= ~XBBF_IN_SHUTDOWN; in xbb_shutdown()
3256 xbb->flags |= XBBF_SHUTDOWN; in xbb_shutdown()
3259 error = xbb_disconnect(xbb); in xbb_shutdown()
3275 wakeup(xbb); in xbb_shutdown()
3289 xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...) in xbb_attach_failed() argument
3296 xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach_failed()
3299 xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach_failed()
3302 xenbus_dev_vfatal(xbb->dev, err, fmt, ap); in xbb_attach_failed()
3305 xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach_failed()
3307 mtx_lock(&xbb->lock); in xbb_attach_failed()
3308 xbb_shutdown(xbb); in xbb_attach_failed()
3309 mtx_unlock(&xbb->lock); in xbb_attach_failed()
3351 xbb_setup_sysctl(struct xbb_softc *xbb) in xbb_setup_sysctl() argument
3356 sysctl_ctx = device_get_sysctl_ctx(xbb->dev); in xbb_setup_sysctl()
3360 sysctl_tree = device_get_sysctl_tree(xbb->dev); in xbb_setup_sysctl()
3365 "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0, in xbb_setup_sysctl()
3369 "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0, in xbb_setup_sysctl()
3373 "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0, in xbb_setup_sysctl()
3377 "reqs_received", CTLFLAG_RW, &xbb->reqs_received, in xbb_setup_sysctl()
3381 "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed, in xbb_setup_sysctl()
3386 &xbb->reqs_queued_for_completion, in xbb_setup_sysctl()
3391 &xbb->reqs_completed_with_error, in xbb_setup_sysctl()
3395 "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch, in xbb_setup_sysctl()
3399 "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch, in xbb_setup_sysctl()
3403 "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch, in xbb_setup_sysctl()
3407 "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages, in xbb_setup_sysctl()
3412 &xbb->request_shortages, in xbb_setup_sysctl()
3416 "max_requests", CTLFLAG_RD, &xbb->max_requests, 0, in xbb_setup_sysctl()
3421 &xbb->max_request_segments, 0, in xbb_setup_sysctl()
3426 &xbb->max_request_size, 0, in xbb_setup_sysctl()
3431 &xbb->ring_config.ring_pages, 0, in xbb_setup_sysctl()
3438 struct xbb_softc *xbb; in xbb_attach_disk() local
3441 xbb = device_get_softc(dev); in xbb_attach_disk()
3443 KASSERT(xbb->hotplug_done, ("Missing hotplug execution")); in xbb_attach_disk()
3446 if (strchr(xbb->dev_mode, 'w') == NULL) in xbb_attach_disk()
3447 xbb->flags |= XBBF_READ_ONLY; in xbb_attach_disk()
3453 error = xbb_open_backend(xbb); in xbb_attach_disk()
3455 xbb_attach_failed(xbb, error, "Unable to open %s", in xbb_attach_disk()
3456 xbb->dev_name); in xbb_attach_disk()
3461 xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev), in xbb_attach_disk()
3462 xbb->sector_size, in xbb_attach_disk()
3468 xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev), in xbb_attach_disk()
3469 xbb->sector_size, in xbb_attach_disk()
3477 xbb_setup_sysctl(xbb); in xbb_attach_disk()
3483 xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev), in xbb_attach_disk()
3486 /*context*/&xbb->io_taskqueue); in xbb_attach_disk()
3487 if (xbb->io_taskqueue == NULL) { in xbb_attach_disk()
3488 xbb_attach_failed(xbb, error, "Unable to create taskqueue"); in xbb_attach_disk()
3492 taskqueue_start_threads(&xbb->io_taskqueue, in xbb_attach_disk()
3499 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach_disk()
3502 xbb_attach_failed(xbb, error, "writing %s/hotplug-status", in xbb_attach_disk()
3503 xenbus_get_node(xbb->dev)); in xbb_attach_disk()
3508 if (xenbus_get_otherend_state(xbb->dev) == XenbusStateInitialised) in xbb_attach_disk()
3509 xbb_connect(xbb); in xbb_attach_disk()
3516 struct xbb_softc *xbb; in xbb_attach_cb() local
3520 xbb = device_get_softc(dev); in xbb_attach_cb()
3523 NULL, &xbb->dev_name, NULL); in xbb_attach_cb()
3530 xbb->hotplug_done = true; in xbb_attach_cb()
3534 NULL, &xbb->dev_type, NULL); in xbb_attach_cb()
3536 xbb->dev_type = NULL; in xbb_attach_cb()
3539 &xbb->dev_mode, NULL); in xbb_attach_cb()
3541 xbb_attach_failed(xbb, error, "reading backend fields at %s", in xbb_attach_cb()
3559 struct xbb_softc *xbb; in xbb_attach() local
3571 xbb = device_get_softc(dev); in xbb_attach()
3572 xbb->dev = dev; in xbb_attach()
3573 xbb->otherend_id = xenbus_get_otherend_id(dev); in xbb_attach()
3574 TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb); in xbb_attach()
3575 mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF); in xbb_attach()
3581 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach()
3584 xbb_attach_failed(xbb, error, "writing %s/feature-barrier", in xbb_attach()
3585 xenbus_get_node(xbb->dev)); in xbb_attach()
3589 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach()
3592 xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache", in xbb_attach()
3593 xenbus_get_node(xbb->dev)); in xbb_attach()
3598 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach()
3601 xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order", in xbb_attach()
3602 xenbus_get_node(xbb->dev)); in xbb_attach()
3609 if (xbb->hotplug_done) { in xbb_attach()
3618 watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path"); in xbb_attach()
3619 xbb->hotplug_watch.callback_data = (uintptr_t)dev; in xbb_attach()
3620 xbb->hotplug_watch.callback = xbb_attach_cb; in xbb_attach()
3621 KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup")); in xbb_attach()
3622 xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK); in xbb_attach()
3628 xbb->hotplug_watch.max_pending = 1; in xbb_attach()
3630 error = xs_register_watch(&xbb->hotplug_watch); in xbb_attach()
3632 xbb_attach_failed(xbb, error, "failed to create watch on %s", in xbb_attach()
3633 xbb->hotplug_watch.node); in xbb_attach()
3634 free(xbb->hotplug_watch.node, M_XENBLOCKBACK); in xbb_attach()
3657 struct xbb_softc *xbb; in xbb_detach() local
3661 xbb = device_get_softc(dev); in xbb_detach()
3662 mtx_lock(&xbb->lock); in xbb_detach()
3663 while (xbb_shutdown(xbb) == EAGAIN) { in xbb_detach()
3664 msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0, in xbb_detach()
3667 mtx_unlock(&xbb->lock); in xbb_detach()
3671 if (xbb->io_taskqueue != NULL) in xbb_detach()
3672 taskqueue_free(xbb->io_taskqueue); in xbb_detach()
3674 if (xbb->xbb_stats != NULL) in xbb_detach()
3675 devstat_remove_entry(xbb->xbb_stats); in xbb_detach()
3677 if (xbb->xbb_stats_in != NULL) in xbb_detach()
3678 devstat_remove_entry(xbb->xbb_stats_in); in xbb_detach()
3680 xbb_close_backend(xbb); in xbb_detach()
3682 if (xbb->dev_mode != NULL) { in xbb_detach()
3683 free(xbb->dev_mode, M_XENSTORE); in xbb_detach()
3684 xbb->dev_mode = NULL; in xbb_detach()
3687 if (xbb->dev_type != NULL) { in xbb_detach()
3688 free(xbb->dev_type, M_XENSTORE); in xbb_detach()
3689 xbb->dev_type = NULL; in xbb_detach()
3692 if (xbb->dev_name != NULL) { in xbb_detach()
3693 free(xbb->dev_name, M_XENSTORE); in xbb_detach()
3694 xbb->dev_name = NULL; in xbb_detach()
3697 mtx_destroy(&xbb->lock); in xbb_detach()
3748 struct xbb_softc *xbb = device_get_softc(dev); in xbb_frontend_changed() local
3752 xenbus_strstate(xenbus_get_state(xbb->dev))); in xbb_frontend_changed()
3759 xbb_connect(xbb); in xbb_frontend_changed()
3763 mtx_lock(&xbb->lock); in xbb_frontend_changed()
3764 xbb_shutdown(xbb); in xbb_frontend_changed()
3765 mtx_unlock(&xbb->lock); in xbb_frontend_changed()
3767 xenbus_set_state(xbb->dev, XenbusStateClosed); in xbb_frontend_changed()
3770 xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend", in xbb_frontend_changed()