1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2009-2012 Spectra Logic Corporation
76 #include <xen/xen-os.h>
86 /*--------------------------- Compile-time Tunables --------------------------*/
89 * negotiated block-front/back communication channel. Allow enough
96 * additional segment blocks) we will allow in a negotiated block-front/back
107 /*---------------------------------- Macros ----------------------------------*/
122 * block-front/back communication channel.
130 * segment blocks) per request we will allow in a negotiated block-front/back
150 /*--------------------------- Forward Declarations ---------------------------*/
158 /*------------------------------ Data Structures -----------------------------*/
234 * Base pseudo-physical address corresponding to the start
266 * request list free pool (xbb->reqlist_free_stailq) and pending
267 * requests waiting for execution (xbb->reqlist_pending_stailq).
275 * \brief Object tracking an in-flight I/O from a Xen VBD consumer.
305 * Storage used for non-native ring requests.
333 * used to communicate with the front-end client of this
340 /** The pseudo-physical address where ring memory is mapped.*/
344 * Grant table handles, one per-ring page, returned by the
361 * The grant references, one per-ring page, supplied by the
362 * front-end, allowing us to reference the ring pages in the
363 * front-end's domain and to map these pages into our own domain.
372 * Per-instance connection state flags.
377 * The front-end requested a read-only mount of the
378 * back-end device/file.
382 /** Communication with the front-end has been established. */
386 * Front-end requests exist in the ring and are waiting for
414 * \brief Structure used to memoize information about a per-request
415 * scatter-gather list.
470 * Only a single file-based request is outstanding per xbb instance,
492 * Per-instance configuration data.
496 * Task-queue used to process I/O requests.
539 /** Pseudo-physical address corresponding to kva. */
555 * \brief Cached value of the front-end's domain id.
568 * 32bit x86 domains on the same machine). The back-end
569 * always accommodates the front-end's native abi. That
599 * The maximum size of any request to this back-end
618 /** Runtime, cross-abi safe, structures for ring access. */
619 blkif_back_rings_t rings; member
625 * \brief Backend access mode flags (e.g. write, or read-only).
627 * This value is passed to us by the front-end via the XenStore.
634 * This value is passed to us by the front-end via the XenStore.
642 * This value is passed to us by the front-end via the XenStore.
669 * (e.g. xbb->media_size >> xbb->sector_size_shift).
689 /** Mutex protecting per-instance data. */
694 * associated with our per-instance kva region.
760 /*---------------------------- Request Processing ----------------------------*/
764 * \param xbb Per-instance xbb configuration structure.
776 mtx_assert(&xbb->lock, MA_OWNED); in xbb_get_req()
778 if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) { in xbb_get_req()
779 STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links); in xbb_get_req()
780 xbb->active_request_count++; in xbb_get_req()
789 * \param xbb Per-instance xbb configuration structure.
795 mtx_assert(&xbb->lock, MA_OWNED); in xbb_release_req()
797 STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links); in xbb_release_req()
798 xbb->active_request_count--; in xbb_release_req()
800 KASSERT(xbb->active_request_count >= 0, in xbb_release_req()
807 * \param xbb Per-instance xbb configuration structure.
815 mtx_assert(&xbb->lock, MA_OWNED); in xbb_release_reqs()
817 STAILQ_CONCAT(&xbb->request_free_stailq, req_list); in xbb_release_reqs()
818 xbb->active_request_count -= nreqs; in xbb_release_reqs()
820 KASSERT(xbb->active_request_count >= 0, in xbb_release_reqs()
838 return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9)); in xbb_reqlist_vaddr()
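/*
 * The request list's KVA is a contiguous window of granted pages: pagenr
 * selects the page (PAGE_SIZE * pagenr bytes into the window) and sector
 * indexes 512-byte units within that page.
 */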
854 * or a pointer to the memory mapped in from the front-end domain for
865 * an offset into the local pseudo-physical address space used to map a
866 * front-end's request data into a request.
868 * \param reqlist The request list structure whose pseudo-physical region
870 * \param pagenr The page index used to compute the pseudo-physical offset.
872 * pseudo-physical offset.
874 * \return The computed global pseudo-physical address.
877 * or a pointer to the memory mapped in from the front-end domain for
885 xbb = reqlist->xbb; in xbb_get_gntaddr()
887 return ((uintptr_t)(xbb->gnt_base_addr + in xbb_get_gntaddr()
888 (uintptr_t)(reqlist->kva - xbb->kva) + in xbb_get_gntaddr()
895 * \param xbb Per-instance xbb configuration structure.
920 mtx_lock(&xbb->lock); in xbb_get_kva()
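/*
 * First-fit scan of the kva_free bitmap for nr_pages contiguous free
 * pages; a successful allocation is recorded by setting those bits.
 */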
925 bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear); in xbb_get_kva()
927 if (first_clear == -1) in xbb_get_kva()
934 for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) { in xbb_get_kva()
941 if (bit_test(xbb->kva_free, i)) { in xbb_get_kva()
943 first_clear = -1; in xbb_get_kva()
947 if (first_clear == -1) in xbb_get_kva()
955 bit_nset(xbb->kva_free, first_clear, in xbb_get_kva()
956 first_clear + nr_pages - 1); in xbb_get_kva()
958 free_kva = xbb->kva + in xbb_get_kva()
961 KASSERT(free_kva >= (uint8_t *)xbb->kva && in xbb_get_kva()
963 (uint8_t *)xbb->ring_config.va, in xbb_get_kva()
966 nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva, in xbb_get_kva()
967 (uintmax_t)xbb->ring_config.va)); in xbb_get_kva()
975 xbb->flags |= XBBF_RESOURCE_SHORTAGE; in xbb_get_kva()
976 xbb->kva_shortages++; in xbb_get_kva()
979 mtx_unlock(&xbb->lock); in xbb_get_kva()
987 * \param xbb Per-instance xbb configuration structure.
996 mtx_assert(&xbb->lock, MA_OWNED); in xbb_free_kva()
998 start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT; in xbb_free_kva()
999 bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1); in xbb_free_kva()
1004 * Unmap the front-end pages associated with this I/O request.
1017 for (i = 0; i < reqlist->nr_segments; i++) { in xbb_unmap_reqlist()
1018 if (reqlist->gnt_handles[i] == GRANT_REF_INVALID) in xbb_unmap_reqlist()
1023 unmap[invcount].handle = reqlist->gnt_handles[i]; in xbb_unmap_reqlist()
1024 reqlist->gnt_handles[i] = GRANT_REF_INVALID; in xbb_unmap_reqlist()
1036 * \param xbb Per-instance xbb configuration structure.
1048 mtx_assert(&xbb->lock, MA_OWNED); in xbb_get_reqlist()
1050 if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) { in xbb_get_reqlist()
1051 STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links); in xbb_get_reqlist()
1052 reqlist->flags = XBB_REQLIST_NONE; in xbb_get_reqlist()
1053 reqlist->kva = NULL; in xbb_get_reqlist()
1054 reqlist->status = BLKIF_RSP_OKAY; in xbb_get_reqlist()
1055 reqlist->residual_512b_sectors = 0; in xbb_get_reqlist()
1056 reqlist->num_children = 0; in xbb_get_reqlist()
1057 reqlist->nr_segments = 0; in xbb_get_reqlist()
1058 STAILQ_INIT(&reqlist->contig_req_list); in xbb_get_reqlist()
1067 * \param xbb Per-instance xbb configuration structure.
1077 mtx_assert(&xbb->lock, MA_OWNED); in xbb_release_reqlist()
1080 wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE; in xbb_release_reqlist()
1081 xbb->flags &= ~XBBF_RESOURCE_SHORTAGE; in xbb_release_reqlist()
1084 if (reqlist->kva != NULL) in xbb_release_reqlist()
1085 xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments); in xbb_release_reqlist()
1087 xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children); in xbb_release_reqlist()
1089 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links); in xbb_release_reqlist()
1091 if ((xbb->flags & XBBF_SHUTDOWN) != 0) { in xbb_release_reqlist()
1102 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); in xbb_release_reqlist()
1108 * \param xbb Per-instance xbb configuration structure.
1113 * \return 0 for success, non-zero for failure.
1125 mtx_lock(&xbb->lock); in xbb_get_resources()
1131 if ((xbb->flags & XBBF_SHUTDOWN) != 0) { in xbb_get_resources()
1132 mtx_unlock(&xbb->lock); in xbb_get_resources()
1150 mtx_unlock(&xbb->lock); in xbb_get_resources()
1154 nreqlist->operation = ring_req->operation; in xbb_get_resources()
1155 nreqlist->starting_sector_number = in xbb_get_resources()
1156 (ring_req->sector_number << XBD_SECTOR_SHFT) >> in xbb_get_resources()
1157 xbb->sector_size_shift; in xbb_get_resources()
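/*
 * The front-end supplies sector_number in 512-byte units; the left shift
 * by XBD_SECTOR_SHFT converts it to bytes and the right shift rescales it
 * to the backend device's native sector size.
 */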
1158 STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist, in xbb_get_resources()
1162 nreq->reqlist = *reqlist; in xbb_get_resources()
1163 nreq->req_ring_idx = ring_idx; in xbb_get_resources()
1164 nreq->id = ring_req->id; in xbb_get_resources()
1165 nreq->operation = ring_req->operation; in xbb_get_resources()
1167 if (xbb->abi != BLKIF_PROTOCOL_NATIVE) { in xbb_get_resources()
1168 bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req)); in xbb_get_resources()
1169 nreq->ring_req = &nreq->ring_req_storage; in xbb_get_resources()
1171 nreq->ring_req = ring_req; in xbb_get_resources()
1174 binuptime(&nreq->ds_t0); in xbb_get_resources()
1175 devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0); in xbb_get_resources()
1176 STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links); in xbb_get_resources()
1177 (*reqlist)->num_children++; in xbb_get_resources()
1178 (*reqlist)->nr_segments += ring_req->nr_segments; in xbb_get_resources()
1189 xbb->flags |= XBBF_RESOURCE_SHORTAGE; in xbb_get_resources()
1190 xbb->request_shortages++; in xbb_get_resources()
1198 mtx_unlock(&xbb->lock); in xbb_get_resources()
1206 * \param xbb Per-instance xbb configuration structure.
1227 mtx_assert(&xbb->lock, MA_OWNED); in xbb_queue_response()
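/*
 * Responses must be laid out in the format the front-end expects, so
 * select the ring view matching the negotiated ABI.
 */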
1234 switch (xbb->abi) { in xbb_queue_response()
1236 resp = RING_GET_RESPONSE(&xbb->rings.native, in xbb_queue_response()
1237 xbb->rings.native.rsp_prod_pvt); in xbb_queue_response()
1241 RING_GET_RESPONSE(&xbb->rings.x86_32, in xbb_queue_response()
1242 xbb->rings.x86_32.rsp_prod_pvt); in xbb_queue_response()
1246 RING_GET_RESPONSE(&xbb->rings.x86_64, in xbb_queue_response()
1247 xbb->rings.x86_64.rsp_prod_pvt); in xbb_queue_response()
1253 resp->id = req->id; in xbb_queue_response()
1254 resp->operation = req->operation; in xbb_queue_response()
1255 resp->status = status; in xbb_queue_response()
1258 xbb->reqs_completed_with_error++; in xbb_queue_response()
1260 xbb->rings.common.rsp_prod_pvt++; in xbb_queue_response()
1262 xbb->reqs_queued_for_completion++; in xbb_queue_response()
1269 * \param xbb Per-instance xbb configuration structure.
1284 mtx_assert(&xbb->lock, MA_OWNED); in xbb_push_responses()
1288 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify); in xbb_push_responses()
1290 if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) { in xbb_push_responses()
1296 RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do); in xbb_push_responses()
1297 } else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) { in xbb_push_responses()
1301 xbb->reqs_completed += xbb->reqs_queued_for_completion; in xbb_push_responses()
1302 xbb->reqs_queued_for_completion = 0; in xbb_push_responses()
1310 * \param xbb Per-instance xbb configuration structure.
1322 if (reqlist->flags & XBB_REQLIST_MAPPED) in xbb_complete_reqlist()
1325 mtx_lock(&xbb->lock); in xbb_complete_reqlist()
1336 STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) { in xbb_complete_reqlist()
1340 xbb_queue_response(xbb, nreq, reqlist->status); in xbb_complete_reqlist()
1343 if (reqlist->status == BLKIF_RSP_OKAY) in xbb_complete_reqlist()
1344 cur_sectors_sent = nreq->nr_512b_sectors; in xbb_complete_reqlist()
1350 devstat_end_transaction(xbb->xbb_stats_in, in xbb_complete_reqlist()
1352 reqlist->ds_tag_type, in xbb_complete_reqlist()
1353 reqlist->ds_trans_type, in xbb_complete_reqlist()
1355 /*then*/&nreq->ds_t0); in xbb_complete_reqlist()
1363 sectors_sent -= reqlist->residual_512b_sectors; in xbb_complete_reqlist()
1367 devstat_end_transaction(xbb->xbb_stats, in xbb_complete_reqlist()
1369 reqlist->ds_tag_type, in xbb_complete_reqlist()
1370 reqlist->ds_trans_type, in xbb_complete_reqlist()
1372 /*then*/&reqlist->ds_t0); in xbb_complete_reqlist()
1378 mtx_unlock(&xbb->lock); in xbb_complete_reqlist()
1381 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); in xbb_complete_reqlist()
1384 xen_intr_signal(xbb->xen_intr_handle); in xbb_complete_reqlist()
1400 reqlist = bio->bio_caller1; in xbb_bio_done()
1401 xbb = reqlist->xbb; in xbb_bio_done()
1403 reqlist->residual_512b_sectors += bio->bio_resid >> 9; in xbb_bio_done()
1407 * request list can contain multiple front-end requests and in xbb_bio_done()
1410 * back to the original front-end request, but the interface in xbb_bio_done()
1418 * do the work to determine which front-end request to which the in xbb_bio_done()
1421 if (bio->bio_error) { in xbb_bio_done()
1423 bio->bio_error, xbb->dev_name); in xbb_bio_done()
1424 reqlist->status = BLKIF_RSP_ERROR; in xbb_bio_done()
1426 if (bio->bio_error == ENXIO in xbb_bio_done()
1427 && xenbus_get_state(xbb->dev) == XenbusStateConnected) { in xbb_bio_done()
1430 * front-end that we (the device proxy) want to in xbb_bio_done()
1433 xenbus_set_state(xbb->dev, XenbusStateClosing); in xbb_bio_done()
1441 if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1) in xbb_bio_done()
1451 * \param xbb Per-instance xbb configuration structure.
1454 * \return On success, 0. For resource shortages, non-zero.
1459 * the mapping of front-end I/O pages into our domain.
1478 reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE; in xbb_dispatch_io()
1488 reqlist->kva = NULL; in xbb_dispatch_io()
1489 if (reqlist->nr_segments != 0) { in xbb_dispatch_io()
1490 reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments); in xbb_dispatch_io()
1491 if (reqlist->kva == NULL) { in xbb_dispatch_io()
1499 binuptime(&reqlist->ds_t0); in xbb_dispatch_io()
1500 devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0); in xbb_dispatch_io()
1502 switch (reqlist->operation) { in xbb_dispatch_io()
1505 reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED; in xbb_dispatch_io()
1509 reqlist->ds_trans_type = DEVSTAT_WRITE; in xbb_dispatch_io()
1510 if ((xbb->flags & XBBF_READ_ONLY) != 0) { in xbb_dispatch_io()
1512 xbb->dev_name); in xbb_dispatch_io()
1513 reqlist->status = BLKIF_RSP_ERROR; in xbb_dispatch_io()
1519 reqlist->ds_trans_type = DEVSTAT_READ; in xbb_dispatch_io()
1527 if (xbb->disable_flush != 0) { in xbb_dispatch_io()
1537 if (xbb->flush_interval != 0) { in xbb_dispatch_io()
1538 if (++(xbb->flush_count) < xbb->flush_interval) { in xbb_dispatch_io()
1541 xbb->flush_count = 0; in xbb_dispatch_io()
1545 reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED; in xbb_dispatch_io()
1546 reqlist->ds_trans_type = DEVSTAT_NO_DATA; in xbb_dispatch_io()
1551 reqlist->operation); in xbb_dispatch_io()
1552 reqlist->status = BLKIF_RSP_ERROR; in xbb_dispatch_io()
1556 reqlist->xbb = xbb; in xbb_dispatch_io()
1557 xbb_sg = xbb->xbb_sgs; in xbb_dispatch_io()
1558 map = xbb->maps; in xbb_dispatch_io()
1561 STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) { in xbb_dispatch_io()
1564 ring_req = nreq->ring_req; in xbb_dispatch_io()
1566 nseg = ring_req->nr_segments; in xbb_dispatch_io()
1567 nreq->nr_pages = nseg; in xbb_dispatch_io()
1568 nreq->nr_512b_sectors = 0; in xbb_dispatch_io()
1573 || __predict_false(nseg > xbb->max_request_segments)) { in xbb_dispatch_io()
1576 reqlist->status = BLKIF_RSP_ERROR; in xbb_dispatch_io()
1581 sg = ring_req->seg; in xbb_dispatch_io()
1591 xbb_sg->first_sect = sg->first_sect; in xbb_dispatch_io()
1592 xbb_sg->last_sect = sg->last_sect; in xbb_dispatch_io()
1593 xbb_sg->nsect = in xbb_dispatch_io()
1594 (int8_t)(sg->last_sect - in xbb_dispatch_io()
1595 sg->first_sect + 1); in xbb_dispatch_io()
1597 if ((sg->last_sect >= (PAGE_SIZE >> 9)) in xbb_dispatch_io()
1598 || (xbb_sg->nsect <= 0)) { in xbb_dispatch_io()
1599 reqlist->status = BLKIF_RSP_ERROR; in xbb_dispatch_io()
1603 nr_sects += xbb_sg->nsect; in xbb_dispatch_io()
1604 map->host_addr = xbb_get_gntaddr(reqlist, in xbb_dispatch_io()
1606 KASSERT(map->host_addr + PAGE_SIZE <= in xbb_dispatch_io()
1607 xbb->ring_config.gnt_addr, in xbb_dispatch_io()
1610 (uintmax_t)map->host_addr, PAGE_SIZE, in xbb_dispatch_io()
1611 (uintmax_t)xbb->ring_config.gnt_addr)); in xbb_dispatch_io()
1613 map->flags = GNTMAP_host_map; in xbb_dispatch_io()
1614 map->ref = sg->gref; in xbb_dispatch_io()
1615 map->dom = xbb->otherend_id; in xbb_dispatch_io()
1617 map->flags |= GNTMAP_readonly; in xbb_dispatch_io()
1625 nreq->nr_512b_sectors = nr_sects; in xbb_dispatch_io()
1626 nr_sects = (nr_sects << 9) >> xbb->sector_size_shift; in xbb_dispatch_io()
1629 if ((nreq->nr_512b_sectors & in xbb_dispatch_io()
1630 ((xbb->sector_size >> 9) - 1)) != 0) { in xbb_dispatch_io()
1631 device_printf(xbb->dev, "%s: I/O size (%d) is not " in xbb_dispatch_io()
1634 nreq->nr_512b_sectors << 9, in xbb_dispatch_io()
1635 xbb->sector_size); in xbb_dispatch_io()
1636 reqlist->status = BLKIF_RSP_ERROR; in xbb_dispatch_io()
1642 xbb->maps, reqlist->nr_segments); in xbb_dispatch_io()
1646 reqlist->flags |= XBB_REQLIST_MAPPED; in xbb_dispatch_io()
1648 for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments; in xbb_dispatch_io()
1650 if (__predict_false(map->status != 0)) { in xbb_dispatch_io()
1651 DPRINTF("invalid buffer -- could not remap " in xbb_dispatch_io()
1652 "it (%d)\n", map->status); in xbb_dispatch_io()
1655 map->host_addr, map->flags, map->ref, in xbb_dispatch_io()
1656 map->dom); in xbb_dispatch_io()
1657 reqlist->status = BLKIF_RSP_ERROR; in xbb_dispatch_io()
1661 reqlist->gnt_handles[seg_idx] = map->handle; in xbb_dispatch_io()
1663 if (reqlist->starting_sector_number + total_sects > in xbb_dispatch_io()
1664 xbb->media_num_sectors) { in xbb_dispatch_io()
1668 reqlist->starting_sector_number, in xbb_dispatch_io()
1669 reqlist->starting_sector_number + total_sects, in xbb_dispatch_io()
1670 xbb->dev_name); in xbb_dispatch_io()
1671 reqlist->status = BLKIF_RSP_ERROR; in xbb_dispatch_io()
1677 error = xbb->dispatch_io(xbb, in xbb_dispatch_io()
1683 reqlist->status = BLKIF_RSP_ERROR; in xbb_dispatch_io()
1702 for (i = 0; i < ring_req->nr_segments; i++) { in xbb_count_sects()
1705 nsect = (int8_t)(ring_req->seg[i].last_sect - in xbb_count_sects()
1706 ring_req->seg[i].first_sect + 1); in xbb_count_sects()
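/*
 * Each segment covers the inclusive range [first_sect, last_sect] of
 * 512-byte sectors within its page, hence last_sect - first_sect + 1
 * sectors per segment.
 */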
1720 * \param context Callback argument registered during task initialization -
1729 blkif_back_rings_t *rings; in xbb_run_queue() local
1736 rings = &xbb->rings; in xbb_run_queue()
1757 reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq, in xbb_run_queue()
1760 cur_sector = reqlist->next_contig_sector; in xbb_run_queue()
1761 cur_operation = reqlist->operation; in xbb_run_queue()
1771 rp = rings->common.sring->req_prod; in xbb_run_queue()
1785 while (rings->common.req_cons != rp in xbb_run_queue()
1786 && RING_REQUEST_CONS_OVERFLOW(&rings->common, in xbb_run_queue()
1787 rings->common.req_cons) == 0){ in xbb_run_queue()
1792 switch (xbb->abi) { in xbb_run_queue()
1794 ring_req = RING_GET_REQUEST(&xbb->rings.native, in xbb_run_queue()
1795 rings->common.req_cons); in xbb_run_queue()
1802 &xbb->rings.x86_32, rings->common.req_cons); in xbb_run_queue()
1812 ring_req64 =RING_GET_REQUEST(&xbb->rings.x86_64, in xbb_run_queue()
1813 rings->common.req_cons); in xbb_run_queue()
1827 * - Coalescing is turned off. in xbb_run_queue()
1828 * - Current I/O is out of sequence with the previous in xbb_run_queue()
1830 * - Coalesced I/O would be too large. in xbb_run_queue()
1833 && ((xbb->no_coalesce_reqs != 0) in xbb_run_queue()
1834 || ((xbb->no_coalesce_reqs == 0) in xbb_run_queue()
1835 && ((ring_req->sector_number != cur_sector) in xbb_run_queue()
1836 || (ring_req->operation != cur_operation) in xbb_run_queue()
1837 || ((ring_req->nr_segments + reqlist->nr_segments) > in xbb_run_queue()
1838 xbb->max_reqlist_segments))))) { in xbb_run_queue()
1849 xbb->rings.common.req_cons); in xbb_run_queue()
1867 * index before issuing back-end I/O so there is in xbb_run_queue()
1872 xbb->rings.common.req_cons++; in xbb_run_queue()
1873 xbb->reqs_received++; in xbb_run_queue()
1876 cur_sector = ring_req->sector_number + cur_size; in xbb_run_queue()
1877 reqlist->next_contig_sector = cur_sector; in xbb_run_queue()
1878 cur_operation = ring_req->operation; in xbb_run_queue()
1882 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq); in xbb_run_queue()
1895 STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links); in xbb_run_queue()
1900 * xbb_dispatch_io() returns non-zero only when in xbb_run_queue()
1902 * case, re-queue this request on the head of the in xbb_run_queue()
1906 STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq, in xbb_run_queue()
1921 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq); in xbb_run_queue()
1924 xbb->forced_dispatch++; in xbb_run_queue()
1926 xbb->normal_dispatch++; in xbb_run_queue()
1928 xbb->total_dispatch++; in xbb_run_queue()
1937 * binding - the xbb_softc for this instance.
1946 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); in xbb_filter()
1958 /*----------------------------- Backend Handlers -----------------------------*/
1962 * \param xbb Per-instance xbb configuration structure.
1985 dev_data = &xbb->backend.dev; in xbb_dispatch_dev()
1986 bio_offset = (off_t)reqlist->starting_sector_number in xbb_dispatch_dev()
1987 << xbb->sector_size_shift; in xbb_dispatch_dev()
2000 bio->bio_cmd = BIO_FLUSH; in xbb_dispatch_dev()
2001 bio->bio_flags |= BIO_ORDERED; in xbb_dispatch_dev()
2002 bio->bio_dev = dev_data->cdev; in xbb_dispatch_dev()
2003 bio->bio_offset = 0; in xbb_dispatch_dev()
2004 bio->bio_data = 0; in xbb_dispatch_dev()
2005 bio->bio_done = xbb_bio_done; in xbb_dispatch_dev()
2006 bio->bio_caller1 = reqlist; in xbb_dispatch_dev()
2007 bio->bio_pblkno = 0; in xbb_dispatch_dev()
2009 reqlist->pendcnt = 1; in xbb_dispatch_dev()
2012 device_get_unit(xbb->dev)); in xbb_dispatch_dev()
2014 (*dev_data->csw->d_strategy)(bio); in xbb_dispatch_dev()
2019 xbb_sg = xbb->xbb_sgs; in xbb_dispatch_dev()
2021 nseg = reqlist->nr_segments; in xbb_dispatch_dev()
2029 && (xbb_sg->first_sect != 0)) { in xbb_dispatch_dev()
2030 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { in xbb_dispatch_dev()
2033 "non-sector boundary\n", in xbb_dispatch_dev()
2034 __func__, xbb->otherend_id); in xbb_dispatch_dev()
2046 if ((bio_offset & (xbb->sector_size - 1)) != 0){ in xbb_dispatch_dev()
2049 xbb->otherend_id); in xbb_dispatch_dev()
2059 bio->bio_cmd = operation; in xbb_dispatch_dev()
2060 bio->bio_flags |= bio_flags; in xbb_dispatch_dev()
2061 bio->bio_dev = dev_data->cdev; in xbb_dispatch_dev()
2062 bio->bio_offset = bio_offset; in xbb_dispatch_dev()
2063 bio->bio_data = xbb_reqlist_ioaddr(reqlist, seg_idx, in xbb_dispatch_dev()
2064 xbb_sg->first_sect); in xbb_dispatch_dev()
2065 bio->bio_done = xbb_bio_done; in xbb_dispatch_dev()
2066 bio->bio_caller1 = reqlist; in xbb_dispatch_dev()
2067 bio->bio_pblkno = bio_offset >> xbb->sector_size_shift; in xbb_dispatch_dev()
2070 bio->bio_length += xbb_sg->nsect << 9; in xbb_dispatch_dev()
2071 bio->bio_bcount = bio->bio_length; in xbb_dispatch_dev()
2072 bio_offset += xbb_sg->nsect << 9; in xbb_dispatch_dev()
2074 if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) { in xbb_dispatch_dev()
2075 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { in xbb_dispatch_dev()
2078 "non-sector boundary\n", in xbb_dispatch_dev()
2079 __func__, xbb->otherend_id); in xbb_dispatch_dev()
2091 reqlist->pendcnt = nbio; in xbb_dispatch_dev()
2097 device_get_unit(xbb->dev), in xbb_dispatch_dev()
2098 bios[bio_idx]->bio_offset, in xbb_dispatch_dev()
2099 bios[bio_idx]->bio_length); in xbb_dispatch_dev()
2102 device_get_unit(xbb->dev), in xbb_dispatch_dev()
2103 bios[bio_idx]->bio_offset, in xbb_dispatch_dev()
2104 bios[bio_idx]->bio_length); in xbb_dispatch_dev()
2106 (*dev_data->csw->d_strategy)(bios[bio_idx]); in xbb_dispatch_dev()
2112 for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++) in xbb_dispatch_dev()
2127 * \param xbb Per-instance xbb configuration structure.
2147 file_data = &xbb->backend.file; in xbb_dispatch_file()
2162 device_get_unit(xbb->dev)); in xbb_dispatch_file()
2164 (void) vn_start_write(xbb->vn, &mountpoint, V_WAIT); in xbb_dispatch_file()
2166 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); in xbb_dispatch_file()
2167 error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread); in xbb_dispatch_file()
2168 VOP_UNLOCK(xbb->vn); in xbb_dispatch_file()
2179 xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number in xbb_dispatch_file()
2180 << xbb->sector_size_shift; in xbb_dispatch_file()
2182 xuio.uio_iov = file_data->xiovecs; in xbb_dispatch_file()
2184 xbb_sg = xbb->xbb_sgs; in xbb_dispatch_file()
2185 nseg = reqlist->nr_segments; in xbb_dispatch_file()
2193 if (xbb_sg->first_sect != 0) in xbb_dispatch_file()
2197 xiovec = &file_data->xiovecs[xuio.uio_iovcnt]; in xbb_dispatch_file()
2198 xiovec->iov_base = xbb_reqlist_ioaddr(reqlist, in xbb_dispatch_file()
2199 seg_idx, xbb_sg->first_sect); in xbb_dispatch_file()
2200 xiovec->iov_len = 0; in xbb_dispatch_file()
2204 xiovec->iov_len += xbb_sg->nsect << 9; in xbb_dispatch_file()
2206 xuio.uio_resid += xbb_sg->nsect << 9; in xbb_dispatch_file()
2213 if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) in xbb_dispatch_file()
2223 device_get_unit(xbb->dev), xuio.uio_offset, in xbb_dispatch_file()
2226 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); in xbb_dispatch_file()
2231 * ffs_rawread(). But that only works for single-segment in xbb_dispatch_file()
2248 error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ? in xbb_dispatch_file()
2249 (IO_DIRECT|IO_SYNC) : 0, file_data->cred); in xbb_dispatch_file()
2251 VOP_UNLOCK(xbb->vn); in xbb_dispatch_file()
2257 device_get_unit(xbb->dev), xuio.uio_offset, in xbb_dispatch_file()
2260 (void)vn_start_write(xbb->vn, &mountpoint, V_WAIT); in xbb_dispatch_file()
2262 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); in xbb_dispatch_file()
2282 error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ? in xbb_dispatch_file()
2283 IO_SYNC : 0, file_data->cred); in xbb_dispatch_file()
2284 VOP_UNLOCK(xbb->vn); in xbb_dispatch_file()
2298 reqlist->status = BLKIF_RSP_ERROR; in xbb_dispatch_file()
2305 /*--------------------------- Backend Configuration --------------------------*/
2310 * \param xbb Per-instance xbb configuration structure.
2316 DPRINTF("closing dev=%s\n", xbb->dev_name); in xbb_close_backend()
2317 if (xbb->vn) { in xbb_close_backend()
2320 if ((xbb->flags & XBBF_READ_ONLY) == 0) in xbb_close_backend()
2323 switch (xbb->device_type) { in xbb_close_backend()
2325 if (xbb->backend.dev.csw) { in xbb_close_backend()
2326 dev_relthread(xbb->backend.dev.cdev, in xbb_close_backend()
2327 xbb->backend.dev.dev_ref); in xbb_close_backend()
2328 xbb->backend.dev.csw = NULL; in xbb_close_backend()
2329 xbb->backend.dev.cdev = NULL; in xbb_close_backend()
2340 (void)vn_close(xbb->vn, flags, NOCRED, curthread); in xbb_close_backend()
2341 xbb->vn = NULL; in xbb_close_backend()
2343 switch (xbb->device_type) { in xbb_close_backend()
2347 if (xbb->backend.file.cred != NULL) { in xbb_close_backend()
2348 crfree(xbb->backend.file.cred); in xbb_close_backend()
2349 xbb->backend.file.cred = NULL; in xbb_close_backend()
2364 * \param xbb Per-instance xbb configuration structure.
2376 xbb->device_type = XBB_TYPE_DISK; in xbb_open_dev()
2377 xbb->dispatch_io = xbb_dispatch_dev; in xbb_open_dev()
2378 xbb->backend.dev.cdev = xbb->vn->v_rdev; in xbb_open_dev()
2379 xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev, in xbb_open_dev()
2380 &xbb->backend.dev.dev_ref); in xbb_open_dev()
2381 if (xbb->backend.dev.csw == NULL) in xbb_open_dev()
2384 error = VOP_GETATTR(xbb->vn, &vattr, NOCRED); in xbb_open_dev()
2386 xenbus_dev_fatal(xbb->dev, error, "error getting " in xbb_open_dev()
2388 xbb->dev_name); in xbb_open_dev()
2392 dev = xbb->vn->v_rdev; in xbb_open_dev()
2393 devsw = dev->si_devsw; in xbb_open_dev()
2394 if (!devsw->d_ioctl) { in xbb_open_dev()
2395 xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for " in xbb_open_dev()
2396 "device %s!", xbb->dev_name); in xbb_open_dev()
2400 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, in xbb_open_dev()
2401 (caddr_t)&xbb->sector_size, FREAD, in xbb_open_dev()
2404 xenbus_dev_fatal(xbb->dev, error, in xbb_open_dev()
2406 "for device %s", xbb->dev_name); in xbb_open_dev()
2410 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, in xbb_open_dev()
2411 (caddr_t)&xbb->media_size, FREAD, in xbb_open_dev()
2414 xenbus_dev_fatal(xbb->dev, error, in xbb_open_dev()
2416 "for device %s", xbb->dev_name); in xbb_open_dev()
2426 * \param xbb Per-instance xbb configuration structure.
2437 file_data = &xbb->backend.file; in xbb_open_file()
2438 xbb->device_type = XBB_TYPE_FILE; in xbb_open_file()
2439 xbb->dispatch_io = xbb_dispatch_file; in xbb_open_file()
2440 error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred); in xbb_open_file()
2442 xenbus_dev_fatal(xbb->dev, error, in xbb_open_file()
2444 "for file %s", xbb->dev_name); in xbb_open_file()
2453 if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) { in xbb_open_file()
2454 vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY); in xbb_open_file()
2455 if (VN_IS_DOOMED(xbb->vn)) { in xbb_open_file()
2457 xenbus_dev_fatal(xbb->dev, error, in xbb_open_file()
2459 xbb->dev_name); in xbb_open_file()
2465 file_data->cred = crhold(curthread->td_ucred); in xbb_open_file()
2466 xbb->media_size = vattr.va_size; in xbb_open_file()
2478 xbb->sector_size = vattr.va_blocksize; in xbb_open_file()
2480 xbb->sector_size = 512; in xbb_open_file()
2486 if ((xbb->media_size % xbb->sector_size) != 0) { in xbb_open_file()
2488 xenbus_dev_fatal(xbb->dev, error, in xbb_open_file()
2490 xbb->dev_name, in xbb_open_file()
2491 (uintmax_t)xbb->media_size, in xbb_open_file()
2492 xbb->sector_size); in xbb_open_file()
2500 * \param xbb Per-instance xbb configuration structure.
2514 DPRINTF("opening dev=%s\n", xbb->dev_name); in xbb_open_backend()
2517 xenbus_dev_fatal(xbb->dev, ENOENT, in xbb_open_backend()
2522 if ((xbb->flags & XBBF_READ_ONLY) == 0) in xbb_open_backend()
2528 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name); in xbb_open_backend()
2537 if (xbb->dev_name[0] != '/') { in xbb_open_backend()
2542 dev_name = malloc(strlen(xbb->dev_name) in xbb_open_backend()
2547 xbb->dev_name); in xbb_open_backend()
2548 free(xbb->dev_name, M_XENBLOCKBACK); in xbb_open_backend()
2549 xbb->dev_name = dev_name; in xbb_open_backend()
2553 xenbus_dev_fatal(xbb->dev, error, "error opening device %s", in xbb_open_backend()
2554 xbb->dev_name); in xbb_open_backend()
2560 xbb->vn = nd.ni_vp; in xbb_open_backend()
2563 if (vn_isdisk_error(xbb->vn, &error)) { in xbb_open_backend()
2565 } else if (xbb->vn->v_type == VREG) { in xbb_open_backend()
2569 xenbus_dev_fatal(xbb->dev, error, "%s is not a disk " in xbb_open_backend()
2570 "or file", xbb->dev_name); in xbb_open_backend()
2572 VOP_UNLOCK(xbb->vn); in xbb_open_backend()
2579 xbb->sector_size_shift = fls(xbb->sector_size) - 1; in xbb_open_backend()
2580 xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift; in xbb_open_backend()
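/*
 * For a power-of-two sector size, fls(sector_size) - 1 is
 * log2(sector_size) (e.g. 9 for 512-byte sectors), so the shift above
 * turns the media size in bytes into a whole number of sectors.
 */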
2583 (xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file", in xbb_open_backend()
2584 xbb->dev_name, xbb->sector_size, xbb->media_size); in xbb_open_backend()
2589 /*------------------------ Inter-Domain Communication ------------------------*/
2591 * Free dynamically allocated KVA or pseudo-physical address allocations.
2593 * \param xbb Per-instance xbb configuration structure.
2598 if (xbb->kva != 0) { in xbb_free_communication_mem()
2599 if (xbb->pseudo_phys_res != NULL) { in xbb_free_communication_mem()
2600 xenmem_free(xbb->dev, xbb->pseudo_phys_res_id, in xbb_free_communication_mem()
2601 xbb->pseudo_phys_res); in xbb_free_communication_mem()
2602 xbb->pseudo_phys_res = NULL; in xbb_free_communication_mem()
2605 xbb->kva = 0; in xbb_free_communication_mem()
2606 xbb->gnt_base_addr = 0; in xbb_free_communication_mem()
2607 if (xbb->kva_free != NULL) { in xbb_free_communication_mem()
2608 free(xbb->kva_free, M_XENBLOCKBACK); in xbb_free_communication_mem()
2609 xbb->kva_free = NULL; in xbb_free_communication_mem()
2614 * Cleanup all inter-domain communication mechanisms.
2616 * \param xbb Per-instance xbb configuration structure.
2623 mtx_unlock(&xbb->lock); in xbb_disconnect()
2624 xen_intr_unbind(&xbb->xen_intr_handle); in xbb_disconnect()
2625 if (xbb->io_taskqueue != NULL) in xbb_disconnect()
2626 taskqueue_drain(xbb->io_taskqueue, &xbb->io_task); in xbb_disconnect()
2627 mtx_lock(&xbb->lock); in xbb_disconnect()
2633 if (xbb->active_request_count != 0) in xbb_disconnect()
2636 if (xbb->flags & XBBF_RING_CONNECTED) { in xbb_disconnect()
2643 ring_idx < xbb->ring_config.ring_pages; in xbb_disconnect()
2645 op->host_addr = xbb->ring_config.gnt_addr in xbb_disconnect()
2647 op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx]; in xbb_disconnect()
2648 op->handle = xbb->ring_config.handle[ring_idx]; in xbb_disconnect()
2652 xbb->ring_config.ring_pages); in xbb_disconnect()
2656 xbb->flags &= ~XBBF_RING_CONNECTED; in xbb_disconnect()
2661 if (xbb->requests != NULL) { in xbb_disconnect()
2662 free(xbb->requests, M_XENBLOCKBACK); in xbb_disconnect()
2663 xbb->requests = NULL; in xbb_disconnect()
2666 if (xbb->request_lists != NULL) { in xbb_disconnect()
2671 for (i = 0, reqlist = xbb->request_lists; in xbb_disconnect()
2672 i < xbb->max_requests; i++, reqlist++){ in xbb_disconnect()
2673 if (reqlist->gnt_handles != NULL) { in xbb_disconnect()
2674 free(reqlist->gnt_handles, M_XENBLOCKBACK); in xbb_disconnect()
2675 reqlist->gnt_handles = NULL; in xbb_disconnect()
2678 free(xbb->request_lists, M_XENBLOCKBACK); in xbb_disconnect()
2679 xbb->request_lists = NULL; in xbb_disconnect()
2690 * \param xbb Per-instance xbb configuration structure.
2700 if ((xbb->flags & XBBF_RING_CONNECTED) != 0) in xbb_connect_ring()
2707 xbb->ring_config.va = xbb->kva in xbb_connect_ring()
2708 + (xbb->kva_size in xbb_connect_ring()
2709 - (xbb->ring_config.ring_pages * PAGE_SIZE)); in xbb_connect_ring()
2710 xbb->ring_config.gnt_addr = xbb->gnt_base_addr in xbb_connect_ring()
2711 + (xbb->kva_size in xbb_connect_ring()
2712 - (xbb->ring_config.ring_pages * PAGE_SIZE)); in xbb_connect_ring()
2715 ring_idx < xbb->ring_config.ring_pages; in xbb_connect_ring()
2717 gnt->host_addr = xbb->ring_config.gnt_addr in xbb_connect_ring()
2719 gnt->flags = GNTMAP_host_map; in xbb_connect_ring()
2720 gnt->ref = xbb->ring_config.ring_ref[ring_idx]; in xbb_connect_ring()
2721 gnt->dom = xbb->otherend_id; in xbb_connect_ring()
2725 xbb->ring_config.ring_pages); in xbb_connect_ring()
2730 ring_idx < xbb->ring_config.ring_pages; in xbb_connect_ring()
2732 if (gnt->status != 0) { in xbb_connect_ring()
2736 xbb->ring_config.va = 0; in xbb_connect_ring()
2737 xenbus_dev_fatal(xbb->dev, EACCES, in xbb_connect_ring()
2739 "Status %d.", gnt->status); in xbb_connect_ring()
2742 for (i = 0, j = 0; i < xbb->ring_config.ring_pages; in xbb_connect_ring()
2760 xbb->ring_config.handle[ring_idx] = gnt->handle; in xbb_connect_ring()
2761 xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr; in xbb_connect_ring()
2765 switch (xbb->abi) { in xbb_connect_ring()
2769 sring = (blkif_sring_t *)xbb->ring_config.va; in xbb_connect_ring()
2770 BACK_RING_INIT(&xbb->rings.native, sring, in xbb_connect_ring()
2771 xbb->ring_config.ring_pages * PAGE_SIZE); in xbb_connect_ring()
2777 sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va; in xbb_connect_ring()
2778 BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32, in xbb_connect_ring()
2779 xbb->ring_config.ring_pages * PAGE_SIZE); in xbb_connect_ring()
2785 sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va; in xbb_connect_ring()
2786 BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64, in xbb_connect_ring()
2787 xbb->ring_config.ring_pages * PAGE_SIZE); in xbb_connect_ring()
2794 xbb->flags |= XBBF_RING_CONNECTED; in xbb_connect_ring()
2796 error = xen_intr_bind_remote_port(xbb->dev, in xbb_connect_ring()
2797 xbb->otherend_id, in xbb_connect_ring()
2798 xbb->ring_config.evtchn, in xbb_connect_ring()
2803 &xbb->xen_intr_handle); in xbb_connect_ring()
2805 xenbus_dev_fatal(xbb->dev, error, "binding event channel"); in xbb_connect_ring()
2809 DPRINTF("rings connected!\n"); in xbb_connect_ring()
2815 * Size KVA and pseudo-physical address allocations based on negotiated
2819 * \param xbb Per-instance xbb configuration structure.
2822 * front-end's domain into our own.
2827 xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments; in xbb_alloc_communication_mem()
2828 xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE; in xbb_alloc_communication_mem()
2829 xbb->kva_size = xbb->reqlist_kva_size + in xbb_alloc_communication_mem()
2830 (xbb->ring_config.ring_pages * PAGE_SIZE); in xbb_alloc_communication_mem()
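/*
 * Rough worked example, assuming the usual BLKIF_MAX_SEGMENTS_PER_REQUEST
 * of 11: with the defaults negotiated in xbb_collect_frontend_info()
 * (32 requests, one ring page) this reserves 32 * 11 + 1 = 353 pages of
 * KVA on a 4 KiB page system.
 */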
2832 xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT); in xbb_alloc_communication_mem()
2833 if (xbb->kva_free == NULL) in xbb_alloc_communication_mem()
2837 device_get_nameunit(xbb->dev), xbb->kva_size, in xbb_alloc_communication_mem()
2838 xbb->reqlist_kva_size); in xbb_alloc_communication_mem()
2842 * pages ("real memory") during the lifetime of front-end requests in xbb_alloc_communication_mem()
2845 xbb->pseudo_phys_res_id = 0; in xbb_alloc_communication_mem()
2846 xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id, in xbb_alloc_communication_mem()
2847 xbb->kva_size); in xbb_alloc_communication_mem()
2848 if (xbb->pseudo_phys_res == NULL) { in xbb_alloc_communication_mem()
2849 xbb->kva = 0; in xbb_alloc_communication_mem()
2852 xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res); in xbb_alloc_communication_mem()
2853 xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res); in xbb_alloc_communication_mem()
2856 device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva, in xbb_alloc_communication_mem()
2857 (uintmax_t)xbb->gnt_base_addr); in xbb_alloc_communication_mem()
2862 * Collect front-end information from the XenStore.
2864 * \param xbb Per-instance xbb configuration structure.
2876 otherend_path = xenbus_get_otherend_path(xbb->dev); in xbb_collect_frontend_info()
2881 xbb->ring_config.ring_pages = 1; in xbb_collect_frontend_info()
2882 xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; in xbb_collect_frontend_info()
2883 xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE; in xbb_collect_frontend_info()
2889 "event-channel", NULL, "%" PRIu32, in xbb_collect_frontend_info()
2890 &xbb->ring_config.evtchn); in xbb_collect_frontend_info()
2892 xenbus_dev_fatal(xbb->dev, error, in xbb_collect_frontend_info()
2893 "Unable to retrieve event-channel information " in xbb_collect_frontend_info()
2895 xenbus_get_otherend_path(xbb->dev)); in xbb_collect_frontend_info()
2906 * we don't miss information in a sparsely populated front-end in xbb_collect_frontend_info()
2913 xbb->max_requests = 32; in xbb_collect_frontend_info()
2916 "ring-page-order", NULL, "%u", in xbb_collect_frontend_info()
2918 xbb->ring_config.ring_pages = 1 << ring_page_order; in xbb_collect_frontend_info()
2919 ring_size = PAGE_SIZE * xbb->ring_config.ring_pages; in xbb_collect_frontend_info()
2920 xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size); in xbb_collect_frontend_info()
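/*
 * The blkif ring macros round the slot count down to a power of two, so a
 * single 4 KiB ring page yields 32 request slots (matching the
 * single-page default above) and each increment of ring-page-order
 * doubles that.
 */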
2922 if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) { in xbb_collect_frontend_info()
2923 xenbus_dev_fatal(xbb->dev, EINVAL, in xbb_collect_frontend_info()
2924 "Front-end specified ring-pages of %u " in xbb_collect_frontend_info()
2927 xbb->ring_config.ring_pages, in xbb_collect_frontend_info()
2932 if (xbb->ring_config.ring_pages == 1) { in xbb_collect_frontend_info()
2934 "ring-ref", "%" PRIu32, in xbb_collect_frontend_info()
2935 &xbb->ring_config.ring_ref[0], in xbb_collect_frontend_info()
2938 xenbus_dev_fatal(xbb->dev, error, in xbb_collect_frontend_info()
2942 xenbus_get_otherend_path(xbb->dev)); in xbb_collect_frontend_info()
2946 /* Multi-page ring format. */ in xbb_collect_frontend_info()
2947 for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages; in xbb_collect_frontend_info()
2952 "ring-ref%u", ring_idx); in xbb_collect_frontend_info()
2955 &xbb->ring_config.ring_ref[ring_idx]); in xbb_collect_frontend_info()
2957 xenbus_dev_fatal(xbb->dev, error, in xbb_collect_frontend_info()
2977 xbb->abi = BLKIF_PROTOCOL_NATIVE; in xbb_collect_frontend_info()
2979 xbb->abi = BLKIF_PROTOCOL_X86_32; in xbb_collect_frontend_info()
2981 xbb->abi = BLKIF_PROTOCOL_X86_64; in xbb_collect_frontend_info()
2983 xenbus_dev_fatal(xbb->dev, EINVAL, in xbb_collect_frontend_info()
2992 * Allocate per-request data structures given request size and number
2993 * information negotiated with the front-end.
2995 * \param xbb Per-instance xbb configuration structure.
3006 xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests), in xbb_alloc_requests()
3008 if (xbb->requests == NULL) { in xbb_alloc_requests()
3009 xenbus_dev_fatal(xbb->dev, ENOMEM, in xbb_alloc_requests()
3014 req = xbb->requests; in xbb_alloc_requests()
3015 last_req = &xbb->requests[xbb->max_requests - 1]; in xbb_alloc_requests()
3016 STAILQ_INIT(&xbb->request_free_stailq); in xbb_alloc_requests()
3018 STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links); in xbb_alloc_requests()
3034 xbb->request_lists = malloc(xbb->max_requests * in xbb_alloc_request_lists()
3035 sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO); in xbb_alloc_request_lists()
3036 if (xbb->request_lists == NULL) { in xbb_alloc_request_lists()
3037 xenbus_dev_fatal(xbb->dev, ENOMEM, in xbb_alloc_request_lists()
3042 STAILQ_INIT(&xbb->reqlist_free_stailq); in xbb_alloc_request_lists()
3043 STAILQ_INIT(&xbb->reqlist_pending_stailq); in xbb_alloc_request_lists()
3044 for (i = 0; i < xbb->max_requests; i++) { in xbb_alloc_request_lists()
3047 reqlist = &xbb->request_lists[i]; in xbb_alloc_request_lists()
3049 reqlist->xbb = xbb; in xbb_alloc_request_lists()
3051 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments * in xbb_alloc_request_lists()
3052 sizeof(*reqlist->gnt_handles), in xbb_alloc_request_lists()
3054 if (reqlist->gnt_handles == NULL) { in xbb_alloc_request_lists()
3055 xenbus_dev_fatal(xbb->dev, ENOMEM, in xbb_alloc_request_lists()
3061 for (seg = 0; seg < xbb->max_reqlist_segments; seg++) in xbb_alloc_request_lists()
3062 reqlist->gnt_handles[seg] = GRANT_REF_INVALID; in xbb_alloc_request_lists()
3064 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links); in xbb_alloc_request_lists()
3073 * \param xbb Per-instance xbb configuration structure.
3083 our_path = xenbus_get_node(xbb->dev); in xbb_publish_backend_info()
3087 xenbus_dev_fatal(xbb->dev, error, in xbb_publish_backend_info()
3095 * in units of 512b, regardless of the value in 'sector-size'. in xbb_publish_backend_info()
3099 (uintmax_t)(xbb->media_size >> XBD_SECTOR_SHFT)); in xbb_publish_backend_info()
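/*
 * For example, a 1 GiB backend with 4096-byte native sectors is still
 * advertised here as 2097152 512-byte sectors.
 */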
3106 xbb->flags & XBBF_READ_ONLY in xbb_publish_backend_info()
3111 leaf = "sector-size"; in xbb_publish_backend_info()
3113 xbb->sector_size); in xbb_publish_backend_info()
3121 xenbus_dev_fatal(xbb->dev, error, "ending transaction"); in xbb_publish_backend_info()
3126 xenbus_dev_fatal(xbb->dev, error, "writing %s/%s", in xbb_publish_backend_info()
3136 * \param xbb Per-instance xbb configuration structure.
3143 if (!xbb->hotplug_done || in xbb_connect()
3144 (xenbus_get_state(xbb->dev) != XenbusStateInitWait) || in xbb_connect()
3148 xbb->flags &= ~XBBF_SHUTDOWN; in xbb_connect()
3155 xbb->max_reqlist_segments = MIN(xbb->max_request_segments * in xbb_connect()
3156 xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST); in xbb_connect()
3162 xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE; in xbb_connect()
3164 /* Allocate resources whose size depends on front-end configuration. */ in xbb_connect()
3167 xenbus_dev_fatal(xbb->dev, error, in xbb_connect()
3174 xenbus_dev_fatal(xbb->dev, error, in xbb_connect()
3201 xenbus_set_state(xbb->dev, XenbusStateConnected); in xbb_connect()
3204 /*-------------------------- Device Teardown Support -------------------------*/
3208 * \param xbb Per-instance xbb configuration structure.
3211 * backend device/file to drain, disconnect from the front-end, and notify
3230 if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0) in xbb_shutdown()
3233 xbb->flags |= XBBF_IN_SHUTDOWN; in xbb_shutdown()
3234 mtx_unlock(&xbb->lock); in xbb_shutdown()
3236 if (xbb->hotplug_watch.node != NULL) { in xbb_shutdown()
3237 xs_unregister_watch(&xbb->hotplug_watch); in xbb_shutdown()
3238 free(xbb->hotplug_watch.node, M_XENBLOCKBACK); in xbb_shutdown()
3239 xbb->hotplug_watch.node = NULL; in xbb_shutdown()
3242 if (xenbus_get_state(xbb->dev) < XenbusStateClosing) in xbb_shutdown()
3243 xenbus_set_state(xbb->dev, XenbusStateClosing); in xbb_shutdown()
3245 frontState = xenbus_get_otherend_state(xbb->dev); in xbb_shutdown()
3246 mtx_lock(&xbb->lock); in xbb_shutdown()
3247 xbb->flags &= ~XBBF_IN_SHUTDOWN; in xbb_shutdown()
3256 xbb->flags |= XBBF_SHUTDOWN; in xbb_shutdown()
3258 /* Disconnect from the front-end. */ in xbb_shutdown()
3284 * \param xbb Per-instance xbb configuration structure.
3296 xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach_failed()
3297 "hotplug-error", fmt, ap_hotplug); in xbb_attach_failed()
3299 xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach_failed()
3300 "hotplug-status", "error"); in xbb_attach_failed()
3302 xenbus_dev_vfatal(xbb->dev, err, fmt, ap); in xbb_attach_failed()
3305 xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach_failed()
3307 mtx_lock(&xbb->lock); in xbb_attach_failed()
3309 mtx_unlock(&xbb->lock); in xbb_attach_failed()
3312 /*---------------------------- NewBus Entrypoints ----------------------------*/
3334 "xen-blkback disabled due to grant maps lacking IOMMU entries\n"); in xbb_probe()
3356 sysctl_ctx = device_get_sysctl_ctx(xbb->dev); in xbb_setup_sysctl()
3360 sysctl_tree = device_get_sysctl_tree(xbb->dev); in xbb_setup_sysctl()
3365 "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0, in xbb_setup_sysctl()
3369 "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0, in xbb_setup_sysctl()
3373 "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0, in xbb_setup_sysctl()
3377 "reqs_received", CTLFLAG_RW, &xbb->reqs_received, in xbb_setup_sysctl()
3381 "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed, in xbb_setup_sysctl()
3386 &xbb->reqs_queued_for_completion, in xbb_setup_sysctl()
3391 &xbb->reqs_completed_with_error, in xbb_setup_sysctl()
3395 "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch, in xbb_setup_sysctl()
3399 "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch, in xbb_setup_sysctl()
3403 "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch, in xbb_setup_sysctl()
3407 "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages, in xbb_setup_sysctl()
3412 &xbb->request_shortages, in xbb_setup_sysctl()
3416 "max_requests", CTLFLAG_RD, &xbb->max_requests, 0, in xbb_setup_sysctl()
3421 &xbb->max_request_segments, 0, in xbb_setup_sysctl()
3426 &xbb->max_request_size, 0, in xbb_setup_sysctl()
3431 &xbb->ring_config.ring_pages, 0, in xbb_setup_sysctl()
3443 KASSERT(xbb->hotplug_done, ("Missing hotplug execution")); in xbb_attach_disk()
3446 if (strchr(xbb->dev_mode, 'w') == NULL) in xbb_attach_disk()
3447 xbb->flags |= XBBF_READ_ONLY; in xbb_attach_disk()
3456 xbb->dev_name); in xbb_attach_disk()
3461 xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev), in xbb_attach_disk()
3462 xbb->sector_size, in xbb_attach_disk()
3468 xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev), in xbb_attach_disk()
3469 xbb->sector_size, in xbb_attach_disk()
3483 xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev), in xbb_attach_disk()
3486 /*context*/&xbb->io_taskqueue); in xbb_attach_disk()
3487 if (xbb->io_taskqueue == NULL) { in xbb_attach_disk()
3492 taskqueue_start_threads(&xbb->io_taskqueue, in xbb_attach_disk()
3493 /*num threads*/1, in xbb_attach_disk()
3498 /* Update hot-plug status to satisfy xend. */ in xbb_attach_disk()
3499 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach_disk()
3500 "hotplug-status", "connected"); in xbb_attach_disk()
3502 xbb_attach_failed(xbb, error, "writing %s/hotplug-status", in xbb_attach_disk()
3503 xenbus_get_node(xbb->dev)); in xbb_attach_disk()
3508 if (xenbus_get_otherend_state(xbb->dev) == XenbusStateInitialised) in xbb_attach_disk()
3519 dev = (device_t)watch->callback_data; in xbb_attach_cb()
3522 error = xs_gather(XST_NIL, xenbus_get_node(dev), "physical-device-path", in xbb_attach_cb()
3523 NULL, &xbb->dev_name, NULL); in xbb_attach_cb()
3528 free(watch->node, M_XENBLOCKBACK); in xbb_attach_cb()
3529 watch->node = NULL; in xbb_attach_cb()
3530 xbb->hotplug_done = true; in xbb_attach_cb()
3533 error = xs_gather(XST_NIL, xenbus_get_otherend_path(dev), "device-type", in xbb_attach_cb()
3534 NULL, &xbb->dev_type, NULL); in xbb_attach_cb()
3536 xbb->dev_type = NULL; in xbb_attach_cb()
3539 &xbb->dev_mode, NULL); in xbb_attach_cb()
3572 xbb->dev = dev; in xbb_attach()
3573 xbb->otherend_id = xenbus_get_otherend_id(dev); in xbb_attach()
3574 TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb); in xbb_attach()
3575 mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF); in xbb_attach()
3579 * front-end. in xbb_attach()
3581 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach()
3582 "feature-barrier", "1"); in xbb_attach()
3584 xbb_attach_failed(xbb, error, "writing %s/feature-barrier", in xbb_attach()
3585 xenbus_get_node(xbb->dev)); in xbb_attach()
3589 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach()
3590 "feature-flush-cache", "1"); in xbb_attach()
3592 xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache", in xbb_attach()
3593 xenbus_get_node(xbb->dev)); in xbb_attach()
3597 max_ring_page_order = flsl(XBB_MAX_RING_PAGES) - 1; in xbb_attach()
3598 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), in xbb_attach()
3599 "max-ring-page-order", "%u", max_ring_page_order); in xbb_attach()
3601 xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order", in xbb_attach()
3602 xenbus_get_node(xbb->dev)); in xbb_attach()
3609 if (xbb->hotplug_done) { in xbb_attach()
3618 watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path"); in xbb_attach()
3619 xbb->hotplug_watch.callback_data = (uintptr_t)dev; in xbb_attach()
3620 xbb->hotplug_watch.callback = xbb_attach_cb; in xbb_attach()
3621 KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup")); in xbb_attach()
3622 xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK); in xbb_attach()
3628 xbb->hotplug_watch.max_pending = 1; in xbb_attach()
3630 error = xs_register_watch(&xbb->hotplug_watch); in xbb_attach()
3633 xbb->hotplug_watch.node); in xbb_attach()
3634 free(xbb->hotplug_watch.node, M_XENBLOCKBACK); in xbb_attach()
3648 * \note A block back device may be detached at any time in its life-cycle,
3662 mtx_lock(&xbb->lock); in xbb_detach()
3664 msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0, in xbb_detach()
3667 mtx_unlock(&xbb->lock); in xbb_detach()
3671 if (xbb->io_taskqueue != NULL) in xbb_detach()
3672 taskqueue_free(xbb->io_taskqueue); in xbb_detach()
3674 if (xbb->xbb_stats != NULL) in xbb_detach()
3675 devstat_remove_entry(xbb->xbb_stats); in xbb_detach()
3677 if (xbb->xbb_stats_in != NULL) in xbb_detach()
3678 devstat_remove_entry(xbb->xbb_stats_in); in xbb_detach()
3682 if (xbb->dev_mode != NULL) { in xbb_detach()
3683 free(xbb->dev_mode, M_XENSTORE); in xbb_detach()
3684 xbb->dev_mode = NULL; in xbb_detach()
3687 if (xbb->dev_type != NULL) { in xbb_detach()
3688 free(xbb->dev_type, M_XENSTORE); in xbb_detach()
3689 xbb->dev_type = NULL; in xbb_detach()
3692 if (xbb->dev_name != NULL) { in xbb_detach()
3693 free(xbb->dev_name, M_XENSTORE); in xbb_detach()
3694 xbb->dev_name = NULL; in xbb_detach()
3697 mtx_destroy(&xbb->lock); in xbb_detach()
3715 mtx_lock(&sc->xb_io_lock); in xbb_suspend()
3716 sc->connected = BLKIF_STATE_SUSPENDED; in xbb_suspend()
3717 mtx_unlock(&sc->xb_io_lock); in xbb_suspend()
3737 * Handle state changes expressed via the XenStore by our front-end peer.
3741 * \param frontend_state The new state of the front-end.
3752 xenbus_strstate(xenbus_get_state(xbb->dev))); in xbb_frontend_changed()
3763 mtx_lock(&xbb->lock); in xbb_frontend_changed()
3765 mtx_unlock(&xbb->lock); in xbb_frontend_changed()
3767 xenbus_set_state(xbb->dev, XenbusStateClosed); in xbb_frontend_changed()
3770 xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend", in xbb_frontend_changed()
3776 /*---------------------------- NewBus Registration ---------------------------*/