Lines Matching +full:grant +full:- +full:dma

4  * Copyright (c) 2010-2013 Spectra Logic Corporation
9 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
49 #include <xen/xen-os.h>
65 /*--------------------------- Forward Declarations ---------------------------*/
69 /*---------------------------------- Macros ----------------------------------*/
78 /*---------------------------- Global Static Data ----------------------------*/
87 /*---------------------------- Command Processing ----------------------------*/
91 if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) != 0) in xbd_freeze()
94 sc->xbd_flags |= xbd_flag; in xbd_freeze()
95 sc->xbd_qfrozen_cnt++; in xbd_freeze()
101 if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) == 0) in xbd_thaw()
104 if (sc->xbd_qfrozen_cnt == 0) in xbd_thaw()
108 sc->xbd_flags &= ~xbd_flag; in xbd_thaw()
109 sc->xbd_qfrozen_cnt--; in xbd_thaw()
115 if ((cm->cm_flags & XBDCF_FROZEN) != 0) in xbd_cm_freeze()
118 cm->cm_flags |= XBDCF_FROZEN|cm_flag; in xbd_cm_freeze()
125 if ((cm->cm_flags & XBDCF_FROZEN) == 0) in xbd_cm_thaw()
128 cm->cm_flags &= ~XBDCF_FROZEN; in xbd_cm_thaw()
137 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->xbd_ring, notify); in xbd_flush_requests()
140 xen_intr_signal(sc->xen_intr_handle); in xbd_flush_requests()
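
The xbd_flush_requests() fragments above use the standard Xen front-ring producer idiom: the privately accumulated requests are pushed into the shared ring, and the event channel is signalled only when the backend asked to be notified. A minimal sketch of that idiom, assuming the stock ring macros from xen/interface/io/ring.h and the softc field names visible in this listing:

static void
flush_requests_sketch(struct xbd_softc *sc)
{
	int notify;

	/*
	 * Publish req_prod_pvt to the shared producer index; "notify" is
	 * set when the backend requested an event for new requests.
	 */
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->xbd_ring, notify);
	if (notify)
		xen_intr_signal(sc->xen_intr_handle);
}
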
147 KASSERT((cm->cm_flags & XBDCF_Q_MASK) == XBD_Q_NONE, in xbd_free_command()
149 cm->cm_flags & XBDCF_Q_MASK)); in xbd_free_command()
151 cm->cm_flags = XBDCF_INITIALIZER; in xbd_free_command()
152 cm->cm_bp = NULL; in xbd_free_command()
153 cm->cm_complete = NULL; in xbd_free_command()
155 xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE); in xbd_free_command()
170 KASSERT((segs->ds_addr & (sector_size - 1)) == 0, in xbd_mksegarray()
172 KASSERT((segs->ds_len & (sector_size - 1)) == 0, in xbd_mksegarray()
175 buffer_ma = segs->ds_addr; in xbd_mksegarray()
177 lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1; in xbd_mksegarray()
182 /* install a grant reference. */ in xbd_mksegarray()
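
The xbd_mksegarray() fragments show how each bus_dma segment becomes one blkif segment descriptor: the segment must be sector-aligned, its first and last 512-byte sectors within the page are computed, and a grant reference from a pre-allocated chain is installed so the backend can map the frame. A hedged sketch of that per-segment step, using the FreeBSD grant-table API and the names from this listing (XBD_SECTOR_SHFT is the 512-byte blkif sector shift); the helper below is illustrative, not the driver's own function:

static void
fill_blkif_segment_sketch(struct blkif_request_segment *bseg,
    const bus_dma_segment_t *seg, grant_ref_t *gref_head,
    domid_t otherend_id, int readonly)
{
	grant_ref_t ref;
	uint8_t fsect, lsect;

	/* First and last 512-byte sectors of the segment within its page. */
	fsect = (seg->ds_addr & PAGE_MASK) >> XBD_SECTOR_SHFT;
	lsect = fsect + (seg->ds_len >> XBD_SECTOR_SHFT) - 1;

	/* Take a reference from the pre-allocated chain and install it. */
	ref = gnttab_claim_grant_reference(gref_head);
	gnttab_grant_foreign_access_ref(ref, otherend_id,
	    seg->ds_addr >> PAGE_SHIFT, readonly);

	bseg->gref = ref;
	bseg->first_sect = fsect;
	bseg->last_sect = lsect;
}
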
217 sc = cm->cm_sc; in xbd_queue_cb()
220 cm->cm_bp->bio_error = EIO; in xbd_queue_cb()
221 biodone(cm->cm_bp); in xbd_queue_cb()
226 KASSERT(nsegs <= sc->xbd_max_request_segments, in xbd_queue_cb()
234 RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt); in xbd_queue_cb()
235 sc->xbd_ring.req_prod_pvt++; in xbd_queue_cb()
236 ring_req->id = cm->cm_id; in xbd_queue_cb()
237 ring_req->operation = cm->cm_operation; in xbd_queue_cb()
238 ring_req->sector_number = cm->cm_sector_number; in xbd_queue_cb()
239 ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk; in xbd_queue_cb()
240 ring_req->nr_segments = nsegs; in xbd_queue_cb()
241 cm->cm_nseg = nsegs; in xbd_queue_cb()
242 xbd_mksegarray(segs, nsegs, &cm->cm_gref_head, in xbd_queue_cb()
243 xenbus_get_otherend_id(sc->xbd_dev), in xbd_queue_cb()
244 cm->cm_operation == BLKIF_OP_WRITE, in xbd_queue_cb()
245 cm->cm_sg_refs, ring_req->seg, in xbd_queue_cb()
246 sc->xbd_disk->d_sectorsize); in xbd_queue_cb()
252 RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt); in xbd_queue_cb()
253 sc->xbd_ring.req_prod_pvt++; in xbd_queue_cb()
254 ring_req->id = cm->cm_id; in xbd_queue_cb()
255 ring_req->operation = BLKIF_OP_INDIRECT; in xbd_queue_cb()
256 ring_req->indirect_op = cm->cm_operation; in xbd_queue_cb()
257 ring_req->sector_number = cm->cm_sector_number; in xbd_queue_cb()
258 ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk; in xbd_queue_cb()
259 ring_req->nr_segments = nsegs; in xbd_queue_cb()
260 cm->cm_nseg = nsegs; in xbd_queue_cb()
261 xbd_mksegarray(segs, nsegs, &cm->cm_gref_head, in xbd_queue_cb()
262 xenbus_get_otherend_id(sc->xbd_dev), in xbd_queue_cb()
263 cm->cm_operation == BLKIF_OP_WRITE, in xbd_queue_cb()
264 cm->cm_sg_refs, cm->cm_indirectionpages, in xbd_queue_cb()
265 sc->xbd_disk->d_sectorsize); in xbd_queue_cb()
266 memcpy(ring_req->indirect_grefs, &cm->cm_indirectionrefs, in xbd_queue_cb()
267 sizeof(grant_ref_t) * sc->xbd_max_request_indirectpages); in xbd_queue_cb()
270 if (cm->cm_operation == BLKIF_OP_READ) in xbd_queue_cb()
272 else if (cm->cm_operation == BLKIF_OP_WRITE) in xbd_queue_cb()
276 bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op); in xbd_queue_cb()
278 gnttab_free_grant_references(cm->cm_gref_head); in xbd_queue_cb()
283 * If bus dma had to asynchronously call us back to dispatch in xbd_queue_cb()
289 if ((cm->cm_flags & XBDCF_ASYNC_MAPPING) != 0) in xbd_queue_cb()
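
The xbd_queue_cb() fragments show two request layouts on the same ring: a direct blkif request, whose embedded seg[] array holds at most BLKIF_MAX_SEGMENTS_PER_REQUEST descriptors, and a BLKIF_OP_INDIRECT request, whose descriptors live in separately granted indirection pages whose references are copied into indirect_grefs[]. A condensed sketch of that selection, with the segment fill (see the xbd_mksegarray lines above), DMA sync, and error handling omitted:

static void
queue_one_request_sketch(struct xbd_softc *sc, struct xbd_command *cm,
    int nsegs)
{
	if (nsegs <= BLKIF_MAX_SEGMENTS_PER_REQUEST) {
		blkif_request_t *req;

		/* Direct request: descriptors travel inside the request. */
		req = RING_GET_REQUEST(&sc->xbd_ring,
		    sc->xbd_ring.req_prod_pvt);
		req->operation = cm->cm_operation;
		req->nr_segments = nsegs;
		req->id = cm->cm_id;
		req->sector_number = cm->cm_sector_number;
	} else {
		blkif_request_indirect_t *req;

		/* Indirect request: descriptors live in granted pages. */
		req = (blkif_request_indirect_t *)RING_GET_REQUEST(
		    &sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
		req->operation = BLKIF_OP_INDIRECT;
		req->indirect_op = cm->cm_operation;
		req->nr_segments = nsegs;
		req->id = cm->cm_id;
		req->sector_number = cm->cm_sector_number;
		memcpy(req->indirect_grefs, cm->cm_indirectionrefs,
		    sizeof(grant_ref_t) * sc->xbd_max_request_indirectpages);
	}
	sc->xbd_ring.req_prod_pvt++;
}
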
300 if (cm->cm_bp != NULL) in xbd_queue_request()
301 error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map, in xbd_queue_request()
302 cm->cm_bp, xbd_queue_cb, cm, 0); in xbd_queue_request()
304 error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map, in xbd_queue_request()
305 cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0); in xbd_queue_request()
310 * we just attempted to map, so we can't rely on bus dma in xbd_queue_request()
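
xbd_queue_request() hands the command to busdma with xbd_queue_cb as the completion callback: bus_dmamap_load_bio(9) (or bus_dmamap_load(9) for the dump path) may either invoke the callback synchronously or return EINPROGRESS and run it later from the busdma deferral context. A sketch of the caller-side handling, reusing the xbd_cm_freeze() helper and XBDCF_ASYNC_MAPPING flag seen in this listing; anything beyond those fragments is an assumption noted in the comments:

static int
queue_request_sketch(struct xbd_softc *sc, struct xbd_command *cm)
{
	int error;

	if (cm->cm_bp != NULL)
		error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
		    cm->cm_bp, xbd_queue_cb, cm, 0);
	else
		error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
		    cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
	if (error == EINPROGRESS) {
		/*
		 * Mapping was deferred.  Freeze the queue so later commands
		 * cannot jump ahead of the one still being mapped, which
		 * would break dispatch ordering.
		 */
		xbd_cm_freeze(sc, cm, XBDCF_ASYNC_MAPPING);
		return (0);
	}
	return (error);
}
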
325 mtx_lock(&sc->xbd_io_lock); in xbd_restart_queue_callback()
331 mtx_unlock(&sc->xbd_io_lock); in xbd_restart_queue_callback()
340 if (__predict_false(sc->xbd_state != XBD_STATE_CONNECTED)) in xbd_bio_command()
353 if (gnttab_alloc_grant_references(sc->xbd_max_request_segments, in xbd_bio_command()
354 &cm->cm_gref_head) != 0) { in xbd_bio_command()
355 gnttab_request_free_callback(&sc->xbd_callback, in xbd_bio_command()
357 sc->xbd_max_request_segments); in xbd_bio_command()
364 cm->cm_bp = bp; in xbd_bio_command()
365 cm->cm_sector_number = in xbd_bio_command()
366 ((blkif_sector_t)bp->bio_pblkno * sc->xbd_disk->d_sectorsize) >> in xbd_bio_command()
369 switch (bp->bio_cmd) { in xbd_bio_command()
371 cm->cm_operation = BLKIF_OP_READ; in xbd_bio_command()
374 cm->cm_operation = BLKIF_OP_WRITE; in xbd_bio_command()
375 if ((bp->bio_flags & BIO_ORDERED) != 0) { in xbd_bio_command()
376 if ((sc->xbd_flags & XBDF_BARRIER) != 0) { in xbd_bio_command()
377 cm->cm_operation = BLKIF_OP_WRITE_BARRIER; in xbd_bio_command()
382 cm->cm_flags |= XBDCF_Q_FREEZE; in xbd_bio_command()
385 * Wait for in-flight requests to in xbd_bio_command()
396 if ((sc->xbd_flags & XBDF_FLUSH) != 0) in xbd_bio_command()
397 cm->cm_operation = BLKIF_OP_FLUSH_DISKCACHE; in xbd_bio_command()
398 else if ((sc->xbd_flags & XBDF_BARRIER) != 0) in xbd_bio_command()
399 cm->cm_operation = BLKIF_OP_WRITE_BARRIER; in xbd_bio_command()

425 mtx_assert(&sc->xbd_io_lock, MA_OWNED); in xbd_startio()
427 if (sc->xbd_state != XBD_STATE_CONNECTED) in xbd_startio()
430 while (!RING_FULL(&sc->xbd_ring)) { in xbd_startio()
431 if (sc->xbd_qfrozen_cnt != 0) in xbd_startio()
442 if ((cm->cm_flags & XBDCF_Q_FREEZE) != 0) { in xbd_startio()
466 bp = cm->cm_bp; in xbd_bio_complete()
468 if (__predict_false(cm->cm_status != BLKIF_RSP_OKAY)) { in xbd_bio_complete()
469 disk_err(bp, "disk error", -1, 0); in xbd_bio_complete()
470 printf(" status: %x\n", cm->cm_status); in xbd_bio_complete()
471 bp->bio_flags |= BIO_ERROR; in xbd_bio_complete()
474 if (bp->bio_flags & BIO_ERROR) in xbd_bio_complete()
475 bp->bio_error = EIO; in xbd_bio_complete()
477 bp->bio_resid = 0; in xbd_bio_complete()
492 mtx_lock(&sc->xbd_io_lock); in xbd_int()
494 if (__predict_false(sc->xbd_state == XBD_STATE_DISCONNECTED)) { in xbd_int()
495 mtx_unlock(&sc->xbd_io_lock); in xbd_int()
500 rp = sc->xbd_ring.sring->rsp_prod; in xbd_int()
503 for (i = sc->xbd_ring.rsp_cons; i != rp;) { in xbd_int()
504 bret = RING_GET_RESPONSE(&sc->xbd_ring, i); in xbd_int()
505 cm = &sc->xbd_shadow[bret->id]; in xbd_int()
508 gnttab_end_foreign_access_references(cm->cm_nseg, in xbd_int()
509 cm->cm_sg_refs); in xbd_int()
512 if (cm->cm_operation == BLKIF_OP_READ) in xbd_int()
514 else if (cm->cm_operation == BLKIF_OP_WRITE || in xbd_int()
515 cm->cm_operation == BLKIF_OP_WRITE_BARRIER) in xbd_int()
519 bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op); in xbd_int()
520 bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map); in xbd_int()
532 cm->cm_status = bret->status; in xbd_int()
533 if (cm->cm_bp) in xbd_int()
535 else if (cm->cm_complete != NULL) in xbd_int()
536 cm->cm_complete(cm); in xbd_int()
541 sc->xbd_ring.rsp_cons = i; in xbd_int()
543 if (i != sc->xbd_ring.req_prod_pvt) { in xbd_int()
545 RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, more_to_do); in xbd_int()
549 sc->xbd_ring.sring->rsp_event = i + 1; in xbd_int()
557 if (__predict_false(sc->xbd_state == XBD_STATE_SUSPENDED)) in xbd_int()
558 wakeup(&sc->xbd_cm_q[XBD_Q_BUSY]); in xbd_int()
560 mtx_unlock(&sc->xbd_io_lock); in xbd_int()
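
The xbd_int() fragments implement the matching consumer side: walk rsp_cons up to the shared rsp_prod, look up the originating command by response id, revoke the per-segment grants, finish the busdma transaction, and finally use RING_FINAL_CHECK_FOR_RESPONSES() to close the race with responses that arrive while the loop runs. A simplified sketch of that loop, without the locking, queue bookkeeping, and biodone() dispatch shown above:

static void
drain_responses_sketch(struct xbd_softc *sc)
{
	struct blkif_response *rsp;
	struct xbd_command *cm;
	RING_IDX i, rp;
	int more_to_do;

	do {
		rp = sc->xbd_ring.sring->rsp_prod;
		rmb();	/* Read rsp_prod before the responses themselves. */

		for (i = sc->xbd_ring.rsp_cons; i != rp; i++) {
			rsp = RING_GET_RESPONSE(&sc->xbd_ring, i);
			cm = &sc->xbd_shadow[rsp->id];

			/* Revoke the grants that backed this request. */
			gnttab_end_foreign_access_references(cm->cm_nseg,
			    cm->cm_sg_refs);

			/* Complete the DMA transaction for the data. */
			bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map,
			    cm->cm_operation == BLKIF_OP_READ ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);

			cm->cm_status = rsp->status;
		}
		sc->xbd_ring.rsp_cons = i;

		/* Catch responses that raced with this pass. */
		RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, more_to_do);
	} while (more_to_do);
}
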
563 /*------------------------------- Dump Support -------------------------------*/
574 RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, mtd); in xbd_quiesce()
601 struct xbd_softc *sc = dp->d_drv1; in xbd_dump()
615 mtx_lock(&sc->xbd_io_lock); in xbd_dump()
621 mtx_unlock(&sc->xbd_io_lock); in xbd_dump()
622 device_printf(sc->xbd_dev, "dump: no more commands?\n"); in xbd_dump()
626 if (gnttab_alloc_grant_references(sc->xbd_max_request_segments, in xbd_dump()
627 &cm->cm_gref_head) != 0) { in xbd_dump()
629 mtx_unlock(&sc->xbd_io_lock); in xbd_dump()
630 device_printf(sc->xbd_dev, "no more grant allocs?\n"); in xbd_dump()
634 chunk = length > sc->xbd_max_request_size ? in xbd_dump()
635 sc->xbd_max_request_size : length; in xbd_dump()
636 cm->cm_data = virtual; in xbd_dump()
637 cm->cm_datalen = chunk; in xbd_dump()
638 cm->cm_operation = BLKIF_OP_WRITE; in xbd_dump()
639 cm->cm_sector_number = offset >> XBD_SECTOR_SHFT; in xbd_dump()
640 cm->cm_complete = xbd_dump_complete; in xbd_dump()
644 length -= chunk; in xbd_dump()
651 mtx_unlock(&sc->xbd_io_lock); in xbd_dump()
658 if (cm->cm_status != BLKIF_RSP_OKAY) { in xbd_dump()
659 device_printf(sc->xbd_dev, in xbd_dump()
661 cm->cm_sector_number); in xbd_dump()
670 /*----------------------------- Disk Entrypoints -----------------------------*/
674 struct xbd_softc *sc = dp->d_drv1; in xbd_open()
677 printf("xbd%d: not found", dp->d_unit); in xbd_open()
681 sc->xbd_flags |= XBDF_OPEN; in xbd_open()
682 sc->xbd_users++; in xbd_open()
689 struct xbd_softc *sc = dp->d_drv1; in xbd_close()
693 sc->xbd_flags &= ~XBDF_OPEN; in xbd_close()
694 if (--(sc->xbd_users) == 0) { in xbd_close()
700 if (xenbus_get_otherend_state(sc->xbd_dev) == in xbd_close()
702 xbd_closing(sc->xbd_dev); in xbd_close()
710 struct xbd_softc *sc = dp->d_drv1; in xbd_ioctl()
725 struct xbd_softc *sc = bp->bio_disk->d_drv1; in xbd_strategy()
729 bp->bio_error = EINVAL; in xbd_strategy()
730 bp->bio_flags |= BIO_ERROR; in xbd_strategy()
731 bp->bio_resid = bp->bio_bcount; in xbd_strategy()
739 mtx_lock(&sc->xbd_io_lock); in xbd_strategy()
744 mtx_unlock(&sc->xbd_io_lock); in xbd_strategy()
748 /*------------------------------ Ring Management -----------------------------*/
757 sring = malloc(sc->xbd_ring_pages * PAGE_SIZE, M_XENBLOCKFRONT, in xbd_alloc_ring()
760 xenbus_dev_fatal(sc->xbd_dev, ENOMEM, "allocating shared ring"); in xbd_alloc_ring()
764 FRONT_RING_INIT(&sc->xbd_ring, sring, sc->xbd_ring_pages * PAGE_SIZE); in xbd_alloc_ring()
767 i < sc->xbd_ring_pages; in xbd_alloc_ring()
769 error = xenbus_grant_ring(sc->xbd_dev, in xbd_alloc_ring()
771 &sc->xbd_ring_ref[i]); in xbd_alloc_ring()
773 xenbus_dev_fatal(sc->xbd_dev, error, in xbd_alloc_ring()
778 if (sc->xbd_ring_pages == 1) { in xbd_alloc_ring()
779 error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev), in xbd_alloc_ring()
780 "ring-ref", "%u", sc->xbd_ring_ref[0]); in xbd_alloc_ring()
782 xenbus_dev_fatal(sc->xbd_dev, error, in xbd_alloc_ring()
783 "writing %s/ring-ref", in xbd_alloc_ring()
784 xenbus_get_node(sc->xbd_dev)); in xbd_alloc_ring()
788 for (i = 0; i < sc->xbd_ring_pages; i++) { in xbd_alloc_ring()
792 "ring-ref%u", i); in xbd_alloc_ring()
793 error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev), in xbd_alloc_ring()
794 ring_ref_name, "%u", sc->xbd_ring_ref[i]); in xbd_alloc_ring()
796 xenbus_dev_fatal(sc->xbd_dev, error, in xbd_alloc_ring()
798 xenbus_get_node(sc->xbd_dev), in xbd_alloc_ring()
805 error = xen_intr_alloc_and_bind_local_port(sc->xbd_dev, in xbd_alloc_ring()
806 xenbus_get_otherend_id(sc->xbd_dev), NULL, xbd_int, sc, in xbd_alloc_ring()
807 INTR_TYPE_BIO | INTR_MPSAFE, &sc->xen_intr_handle); in xbd_alloc_ring()
809 xenbus_dev_fatal(sc->xbd_dev, error, in xbd_alloc_ring()
822 if (sc->xbd_ring.sring == NULL) in xbd_free_ring()
825 for (i = 0; i < sc->xbd_ring_pages; i++) { in xbd_free_ring()
826 if (sc->xbd_ring_ref[i] != GRANT_REF_INVALID) { in xbd_free_ring()
827 gnttab_end_foreign_access_ref(sc->xbd_ring_ref[i]); in xbd_free_ring()
828 sc->xbd_ring_ref[i] = GRANT_REF_INVALID; in xbd_free_ring()
831 free(sc->xbd_ring.sring, M_XENBLOCKFRONT); in xbd_free_ring()
832 sc->xbd_ring.sring = NULL; in xbd_free_ring()
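
xbd_alloc_ring() and xbd_free_ring() above show the shared-ring life cycle: allocate the ring pages, initialize the shared and front rings, grant each page to the backend, publish the grant reference(s) under the device's xenstore node ("ring-ref" for a single page, "ring-refN" per page otherwise), and bind an event channel; teardown ends the foreign access and frees the pages. A sketch of the single-page setup path, assuming the helpers visible in this listing and omitting the multi-page negotiation, event-channel binding, and error unwinding:

static int
alloc_ring_sketch(struct xbd_softc *sc)
{
	blkif_sring_t *sring;
	int error;

	sring = malloc(PAGE_SIZE, M_XENBLOCKFRONT, M_NOWAIT | M_ZERO);
	if (sring == NULL)
		return (ENOMEM);
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&sc->xbd_ring, sring, PAGE_SIZE);

	/* Grant the backend access to the ring page. */
	error = xenbus_grant_ring(sc->xbd_dev, vtophys(sring) >> PAGE_SHIFT,
	    &sc->xbd_ring_ref[0]);
	if (error != 0)
		return (error);

	/* Publish the grant reference for the backend to map. */
	return (xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
	    "ring-ref", "%u", sc->xbd_ring_ref[0]));
}
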
835 /*-------------------------- Initialization/Teardown -------------------------*/
845 if ((sc->xbd_flags & XBDF_FLUSH) != 0) { in xbd_feature_string()
850 if ((sc->xbd_flags & XBDF_BARRIER) != 0) { in xbd_feature_string()
857 if ((sc->xbd_flags & XBDF_DISCARD) != 0) { in xbd_feature_string()
864 if ((sc->xbd_flags & XBDF_PERSISTENT) != 0) { in xbd_feature_string()
889 /* len is -1 on error, which will make the SYSCTL_OUT a no-op. */ in xbd_sysctl_features()
900 sysctl_ctx = device_get_sysctl_ctx(xbd->xbd_dev); in xbd_setup_sysctl()
904 sysctl_tree = device_get_sysctl_tree(xbd->xbd_dev); in xbd_setup_sysctl()
910 "max_requests", CTLFLAG_RD, &xbd->xbd_max_requests, -1, in xbd_setup_sysctl()
915 &xbd->xbd_max_request_segments, 0, in xbd_setup_sysctl()
919 "max_request_size", CTLFLAG_RD, &xbd->xbd_max_request_size, 0, in xbd_setup_sysctl()
923 "ring_pages", CTLFLAG_RD, &xbd->xbd_ring_pages, 0, in xbd_setup_sysctl()
982 *unit = (vdevice & ((1 << 28) - 1)) >> 8; in xbd_vdevice_to_unit()
1010 sc->xbd_unit = unit; in xbd_instance_create()
1013 device_printf(sc->xbd_dev, "attaching as %s%d\n", name, unit); in xbd_instance_create()
1016 device_printf(sc->xbd_dev, "features: %s\n", in xbd_instance_create()
1020 sc->xbd_disk = disk_alloc(); in xbd_instance_create()
1021 sc->xbd_disk->d_unit = sc->xbd_unit; in xbd_instance_create()
1022 sc->xbd_disk->d_open = xbd_open; in xbd_instance_create()
1023 sc->xbd_disk->d_close = xbd_close; in xbd_instance_create()
1024 sc->xbd_disk->d_ioctl = xbd_ioctl; in xbd_instance_create()
1025 sc->xbd_disk->d_strategy = xbd_strategy; in xbd_instance_create()
1026 sc->xbd_disk->d_dump = xbd_dump; in xbd_instance_create()
1027 sc->xbd_disk->d_name = name; in xbd_instance_create()
1028 sc->xbd_disk->d_drv1 = sc; in xbd_instance_create()
1029 sc->xbd_disk->d_sectorsize = sector_size; in xbd_instance_create()
1030 sc->xbd_disk->d_stripesize = phys_sector_size; in xbd_instance_create()
1031 sc->xbd_disk->d_stripeoffset = 0; in xbd_instance_create()
1035 * the 'sector-size' xenbus node value. in xbd_instance_create()
1037 sc->xbd_disk->d_mediasize = sectors << XBD_SECTOR_SHFT; in xbd_instance_create()
1038 if ((sc->xbd_disk->d_mediasize % sc->xbd_disk->d_sectorsize) != 0) { in xbd_instance_create()
1040 xenbus_dev_fatal(sc->xbd_dev, error, in xbd_instance_create()
1042 (uintmax_t)sc->xbd_disk->d_mediasize, in xbd_instance_create()
1043 (uintmax_t)sc->xbd_disk->d_sectorsize); in xbd_instance_create()
1046 sc->xbd_disk->d_maxsize = sc->xbd_max_request_size; in xbd_instance_create()
1047 sc->xbd_disk->d_flags = DISKFLAG_UNMAPPED_BIO; in xbd_instance_create()
1048 if ((sc->xbd_flags & (XBDF_FLUSH|XBDF_BARRIER)) != 0) { in xbd_instance_create()
1049 sc->xbd_disk->d_flags |= DISKFLAG_CANFLUSHCACHE; in xbd_instance_create()
1050 device_printf(sc->xbd_dev, in xbd_instance_create()
1053 disk_create(sc->xbd_disk, DISK_VERSION); in xbd_instance_create()
1064 mtx_lock(&sc->xbd_io_lock); in xbd_free()
1065 sc->xbd_state = XBD_STATE_DISCONNECTED; in xbd_free()
1066 mtx_unlock(&sc->xbd_io_lock); in xbd_free()
1070 if (sc->xbd_shadow) { in xbd_free()
1071 for (i = 0; i < sc->xbd_max_requests; i++) { in xbd_free()
1074 cm = &sc->xbd_shadow[i]; in xbd_free()
1075 if (cm->cm_sg_refs != NULL) { in xbd_free()
1076 free(cm->cm_sg_refs, M_XENBLOCKFRONT); in xbd_free()
1077 cm->cm_sg_refs = NULL; in xbd_free()
1080 if (cm->cm_indirectionpages != NULL) { in xbd_free()
1082 sc->xbd_max_request_indirectpages, in xbd_free()
1083 &cm->cm_indirectionrefs[0]); in xbd_free()
1084 free(cm->cm_indirectionpages, M_XENBLOCKFRONT); in xbd_free()
1085 cm->cm_indirectionpages = NULL; in xbd_free()
1088 bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map); in xbd_free()
1090 free(sc->xbd_shadow, M_XENBLOCKFRONT); in xbd_free()
1091 sc->xbd_shadow = NULL; in xbd_free()
1093 bus_dma_tag_destroy(sc->xbd_io_dmat); in xbd_free()
1100 xen_intr_unbind(&sc->xen_intr_handle); in xbd_free()
1104 /*--------------------------- State Change Handlers --------------------------*/
1113 if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) { in xbd_initialize()
1123 sc->xbd_ring_pages = 1; in xbd_initialize()
1130 * we don't miss information in a sparsely populated back-end in xbd_initialize()
1136 otherend_path = xenbus_get_otherend_path(sc->xbd_dev); in xbd_initialize()
1137 node_path = xenbus_get_node(sc->xbd_dev); in xbd_initialize()
1141 "max-ring-page-order", NULL, "%" PRIu32, in xbd_initialize()
1143 sc->xbd_ring_pages = 1 << max_ring_page_order; in xbd_initialize()
1145 "max-ring-pages", NULL, "%" PRIu32, in xbd_initialize()
1146 &sc->xbd_ring_pages); in xbd_initialize()
1147 if (sc->xbd_ring_pages < 1) in xbd_initialize()
1148 sc->xbd_ring_pages = 1; in xbd_initialize()
1150 if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) { in xbd_initialize()
1151 device_printf(sc->xbd_dev, in xbd_initialize()
1152 "Back-end specified ring-pages of %u " in xbd_initialize()
1153 "limited to front-end limit of %u.\n", in xbd_initialize()
1154 sc->xbd_ring_pages, XBD_MAX_RING_PAGES); in xbd_initialize()
1155 sc->xbd_ring_pages = XBD_MAX_RING_PAGES; in xbd_initialize()
1158 if (powerof2(sc->xbd_ring_pages) == 0) { in xbd_initialize()
1161 new_page_limit = 0x01 << (fls(sc->xbd_ring_pages) - 1); in xbd_initialize()
1162 device_printf(sc->xbd_dev, in xbd_initialize()
1163 "Back-end specified ring-pages of %u " in xbd_initialize()
1165 sc->xbd_ring_pages, new_page_limit); in xbd_initialize()
1166 sc->xbd_ring_pages = new_page_limit; in xbd_initialize()
1169 sc->xbd_max_requests = in xbd_initialize()
1170 BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE); in xbd_initialize()
1171 if (sc->xbd_max_requests > XBD_MAX_REQUESTS) { in xbd_initialize()
1172 device_printf(sc->xbd_dev, in xbd_initialize()
1173 "Back-end specified max_requests of %u " in xbd_initialize()
1174 "limited to front-end limit of %zu.\n", in xbd_initialize()
1175 sc->xbd_max_requests, XBD_MAX_REQUESTS); in xbd_initialize()
1176 sc->xbd_max_requests = XBD_MAX_REQUESTS; in xbd_initialize()
1183 if (sc->xbd_ring_pages > 1) { in xbd_initialize()
1185 "num-ring-pages","%u", in xbd_initialize()
1186 sc->xbd_ring_pages); in xbd_initialize()
1188 xenbus_dev_fatal(sc->xbd_dev, error, in xbd_initialize()
1189 "writing %s/num-ring-pages", in xbd_initialize()
1195 "ring-page-order", "%u", in xbd_initialize()
1196 fls(sc->xbd_ring_pages) - 1); in xbd_initialize()
1198 xenbus_dev_fatal(sc->xbd_dev, error, in xbd_initialize()
1199 "writing %s/ring-page-order", in xbd_initialize()
1205 error = xs_printf(XST_NIL, node_path, "event-channel", in xbd_initialize()
1206 "%u", xen_intr_port(sc->xen_intr_handle)); in xbd_initialize()
1208 xenbus_dev_fatal(sc->xbd_dev, error, in xbd_initialize()
1209 "writing %s/event-channel", in xbd_initialize()
1217 xenbus_dev_fatal(sc->xbd_dev, error, in xbd_initialize()
1223 xenbus_set_state(sc->xbd_dev, XenbusStateInitialised); in xbd_initialize()
1228 * the details about the physical device - #sectors, size, etc).
1233 device_t dev = sc->xbd_dev; in xbd_connect()
1242 if (sc->xbd_state == XBD_STATE_SUSPENDED) { in xbd_connect()
1246 if (sc->xbd_state == XBD_STATE_CONNECTED) { in xbd_connect()
1249 disk = sc->xbd_disk; in xbd_connect()
1261 disk->d_mediasize = disk->d_sectorsize * sectors; in xbd_connect()
1266 disk->d_name, disk->d_unit); in xbd_connect()
1269 device_printf(sc->xbd_dev, in xbd_connect()
1271 (intmax_t)disk->d_mediasize); in xbd_connect()
1278 "sector-size", "%lu", &sector_size, in xbd_connect()
1295 "physical-sector-size", "%lu", &phys_sector_size, in xbd_connect()
1300 "feature-barrier", "%d", &feature_barrier, in xbd_connect()
1303 sc->xbd_flags |= XBDF_BARRIER; in xbd_connect()
1306 "feature-flush-cache", "%d", &feature_flush, in xbd_connect()
1309 sc->xbd_flags |= XBDF_FLUSH; in xbd_connect()
1312 "feature-max-indirect-segments", "%" PRIu32, in xbd_connect()
1313 &sc->xbd_max_request_segments, NULL); in xbd_connect()
1315 sc->xbd_max_request_segments = 0; in xbd_connect()
1316 if (sc->xbd_max_request_segments > XBD_MAX_INDIRECT_SEGMENTS) in xbd_connect()
1317 sc->xbd_max_request_segments = XBD_MAX_INDIRECT_SEGMENTS; in xbd_connect()
1318 if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(maxphys)) in xbd_connect()
1319 sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(maxphys); in xbd_connect()
1320 sc->xbd_max_request_indirectpages = in xbd_connect()
1321 XBD_INDIRECT_SEGS_TO_PAGES(sc->xbd_max_request_segments); in xbd_connect()
1322 if (sc->xbd_max_request_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) in xbd_connect()
1323 sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; in xbd_connect()
1324 sc->xbd_max_request_size = in xbd_connect()
1325 XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments); in xbd_connect()
1329 bus_get_dma_tag(sc->xbd_dev), /* parent */ in xbd_connect()
1334 sc->xbd_max_request_size, in xbd_connect()
1335 sc->xbd_max_request_segments, in xbd_connect()
1339 &sc->xbd_io_lock, /* lockarg */ in xbd_connect()
1340 &sc->xbd_io_dmat); in xbd_connect()
1342 xenbus_dev_fatal(sc->xbd_dev, err, in xbd_connect()
1343 "Cannot allocate parent DMA tag\n"); in xbd_connect()
1347 /* Per-transaction data allocation. */ in xbd_connect()
1348 sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests, in xbd_connect()
1350 if (sc->xbd_shadow == NULL) { in xbd_connect()
1351 bus_dma_tag_destroy(sc->xbd_io_dmat); in xbd_connect()
1352 xenbus_dev_fatal(sc->xbd_dev, ENOMEM, in xbd_connect()
1357 for (i = 0; i < sc->xbd_max_requests; i++) { in xbd_connect()
1361 cm = &sc->xbd_shadow[i]; in xbd_connect()
1362 cm->cm_sg_refs = malloc( in xbd_connect()
1363 sizeof(grant_ref_t) * sc->xbd_max_request_segments, in xbd_connect()
1365 if (cm->cm_sg_refs == NULL) in xbd_connect()
1367 cm->cm_id = i; in xbd_connect()
1368 cm->cm_flags = XBDCF_INITIALIZER; in xbd_connect()
1369 cm->cm_sc = sc; in xbd_connect()
1370 if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0) in xbd_connect()
1372 if (sc->xbd_max_request_indirectpages > 0) { in xbd_connect()
1374 PAGE_SIZE * sc->xbd_max_request_indirectpages, in xbd_connect()
1378 sc->xbd_max_request_indirectpages = 0; in xbd_connect()
1382 for (j = 0; j < sc->xbd_max_request_indirectpages; j++) { in xbd_connect()
1384 xenbus_get_otherend_id(sc->xbd_dev), in xbd_connect()
1386 1 /* grant read-only access */, in xbd_connect()
1387 &cm->cm_indirectionrefs[j])) in xbd_connect()
1390 if (j < sc->xbd_max_request_indirectpages) { in xbd_connect()
1394 cm->cm_indirectionpages = indirectpages; in xbd_connect()
1398 if (sc->xbd_disk == NULL) { in xbd_connect()
1405 err = xbd_instance_create(sc, sectors, sc->xbd_vdevice, binfo, in xbd_connect()
1416 mtx_lock(&sc->xbd_io_lock); in xbd_connect()
1417 sc->xbd_state = XBD_STATE_CONNECTED; in xbd_connect()
1419 sc->xbd_flags |= XBDF_READY; in xbd_connect()
1420 mtx_unlock(&sc->xbd_io_lock); in xbd_connect()
1425 * device-layer structures now, to ensure that writes are flushed through to
1438 if (sc->xbd_disk != NULL) { in xbd_closing()
1439 disk_destroy(sc->xbd_disk); in xbd_closing()
1440 sc->xbd_disk = NULL; in xbd_closing()
1446 /*---------------------------- NewBus Entrypoints ----------------------------*/
1475 "device-type", NULL, (void **) &type); in xbd_probe()
1508 "virtual-device", NULL, "%" PRIu32, &vdevice); in xbd_attach()
1511 "virtual-device-ext", NULL, "%" PRIu32, &vdevice); in xbd_attach()
1513 xenbus_dev_fatal(dev, error, "reading virtual-device"); in xbd_attach()
1523 mtx_init(&sc->xbd_io_lock, "blkfront i/o lock", NULL, MTX_DEF); in xbd_attach()
1526 sc->xbd_ring_ref[i] = GRANT_REF_INVALID; in xbd_attach()
1528 sc->xbd_dev = dev; in xbd_attach()
1529 sc->xbd_vdevice = vdevice; in xbd_attach()
1530 sc->xbd_state = XBD_STATE_DISCONNECTED; in xbd_attach()
1548 mtx_destroy(&sc->xbd_io_lock); in xbd_detach()
1561 mtx_lock(&sc->xbd_io_lock); in xbd_suspend()
1562 saved_state = sc->xbd_state; in xbd_suspend()
1563 sc->xbd_state = XBD_STATE_SUSPENDED; in xbd_suspend()
1568 if (msleep(&sc->xbd_cm_q[XBD_Q_BUSY], &sc->xbd_io_lock, in xbd_suspend()
1574 mtx_unlock(&sc->xbd_io_lock); in xbd_suspend()
1577 sc->xbd_state = saved_state; in xbd_suspend()
1588 sc->xbd_state = XBD_STATE_CONNECTED; in xbd_resume()
1628 if (sc->xbd_users > 0) { in xbd_backend_changed()
1630 KASSERT(sc->xbd_disk != NULL, in xbd_backend_changed()
1632 disk_gone(sc->xbd_disk); in xbd_backend_changed()
1640 /*---------------------------- NewBus Registration ---------------------------*/