Lines Matching +full:vm +full:- +full:active +full:- +full:channels

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2004-2006 Kip Macy
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
64 #include <xen/xen-os.h>
241 struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
244 #define XN_RX_LOCK(_q) mtx_lock(&(_q)->lock)
245 #define XN_RX_UNLOCK(_q) mtx_unlock(&(_q)->lock)
247 #define XN_TX_LOCK(_q) mtx_lock(&(_q)->lock)
248 #define XN_TX_TRYLOCK(_q) mtx_trylock(&(_q)->lock)
249 #define XN_TX_UNLOCK(_q) mtx_unlock(&(_q)->lock)
251 #define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock)
252 #define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock)
254 #define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED)
255 #define XN_RX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED)
256 #define XN_TX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED)
258 #define netfront_carrier_on(netif) ((netif)->carrier = 1)
259 #define netfront_carrier_off(netif) ((netif)->carrier = 0)
260 #define netfront_carrier_ok(netif) ((netif)->carrier)
290 return idx & (NET_RX_RING_SIZE - 1); in xn_rxidx()
300 m = rxq->mbufs[i]; in xn_get_rx_mbuf()
301 rxq->mbufs[i] = NULL; in xn_get_rx_mbuf()
309 grant_ref_t ref = rxq->grant_ref[i]; in xn_get_rx_ref()
312 rxq->grant_ref[i] = GRANT_REF_INVALID; in xn_get_rx_ref()
326 ref->count++; in mbuf_grab()
336 KASSERT(ref->count > 0, ("Invalid reference count")); in mbuf_release()
338 if (--ref->count == 0) { in mbuf_release()
345 m_tag_delete(m, &ref->tag); in mbuf_release()
354 KASSERT(ref->count == 0, ("Free mbuf tag with pending refcnt")); in tag_free()
355 bus_dmamap_sync(ref->dma_tag, ref->dma_map, BUS_DMASYNC_POSTWRITE); in tag_free()
356 bus_dmamap_destroy(ref->dma_tag, ref->dma_map); in tag_free()
357 SLIST_INSERT_HEAD(&ref->txq->tags, ref, next); in tag_free()
377 * as colon-separated octets, placing the result in the given mac array. mac must be
400 * front-side mac node, even when operating in Dom0. in xen_net_read_mac()
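For reference, a minimal user-space sketch of the colon-separated parsing described above might look as follows; the helper name and error handling are illustrative and are not the body of the driver's xen_net_read_mac():

	#include <stdint.h>
	#include <stdlib.h>

	#define ETHER_ADDR_LEN 6	/* mirrors the constant from <net/ethernet.h> */

	/*
	 * Parse a string such as "00:16:3e:5a:12:34" into a preallocated
	 * array of ETHER_ADDR_LEN octets.  Returns 0 on success, -1 if the
	 * input is malformed.
	 */
	static int
	parse_mac(const char *s, uint8_t mac[ETHER_ADDR_LEN])
	{
		char *e;
		int i;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mac[i] = (uint8_t)strtoul(s, &e, 16);
			if (e == s || (i < ETHER_ADDR_LEN - 1 && *e != ':'))
				return (-1);
			s = e + 1;	/* step past the ':' separator */
		}
		return (0);
	}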
478 for (i = 0; i < np->num_queues; i++) { in netfront_suspend()
479 XN_RX_LOCK(&np->rxq[i]); in netfront_suspend()
480 XN_TX_LOCK(&np->txq[i]); in netfront_suspend()
483 for (i = 0; i < np->num_queues; i++) { in netfront_suspend()
484 XN_RX_UNLOCK(&np->rxq[i]); in netfront_suspend()
485 XN_TX_UNLOCK(&np->txq[i]); in netfront_suspend()
493 * leave the device-layer structures intact so that this is transparent to the
503 for (i = 0; i < info->num_queues; i++) { in netfront_resume()
504 XN_RX_LOCK(&info->rxq[i]); in netfront_resume()
505 XN_TX_LOCK(&info->txq[i]); in netfront_resume()
508 for (i = 0; i < info->num_queues; i++) { in netfront_resume()
509 XN_RX_UNLOCK(&info->rxq[i]); in netfront_resume()
510 XN_TX_UNLOCK(&info->txq[i]); in netfront_resume()
531 KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids")); in write_queue_xenstore_keys()
533 KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle, in write_queue_xenstore_keys()
534 ("Split event channels are not supported")); in write_queue_xenstore_keys()
539 snprintf(path, path_size, "%s/queue-%u", node, rxq->id); in write_queue_xenstore_keys()
546 err = xs_printf(*xst, path, "tx-ring-ref", "%u", txq->ring_ref); in write_queue_xenstore_keys()
548 message = "writing tx ring-ref"; in write_queue_xenstore_keys()
551 err = xs_printf(*xst, path, "rx-ring-ref", "%u", rxq->ring_ref); in write_queue_xenstore_keys()
553 message = "writing rx ring-ref"; in write_queue_xenstore_keys()
556 err = xs_printf(*xst, path, "event-channel", "%u", in write_queue_xenstore_keys()
557 xen_intr_port(rxq->xen_intr_handle)); in write_queue_xenstore_keys()
559 message = "writing event-channel"; in write_queue_xenstore_keys()
585 err = xen_net_read_mac(dev, info->mac); in talk_to_backend()
591 err = xs_scanf(XST_NIL, xenbus_get_otherend_path(info->xbdev), in talk_to_backend()
592 "multi-queue-max-queues", NULL, "%lu", &max_queues); in talk_to_backend()
612 if (info->num_queues == 1) { in talk_to_backend()
613 err = write_queue_xenstore_keys(dev, &info->rxq[0], in talk_to_backend()
614 &info->txq[0], &xst, false); in talk_to_backend()
618 err = xs_printf(xst, node, "multi-queue-num-queues", in talk_to_backend()
619 "%u", info->num_queues); in talk_to_backend()
621 message = "writing multi-queue-num-queues"; in talk_to_backend()
625 for (i = 0; i < info->num_queues; i++) { in talk_to_backend()
626 err = write_queue_xenstore_keys(dev, &info->rxq[i], in talk_to_backend()
627 &info->txq[i], &xst, true); in talk_to_backend()
633 err = xs_printf(xst, node, "request-rx-copy", "%u", 1); in talk_to_backend()
635 message = "writing request-rx-copy"; in talk_to_backend()
638 err = xs_printf(xst, node, "feature-rx-notify", "%d", 1); in talk_to_backend()
640 message = "writing feature-rx-notify"; in talk_to_backend()
643 err = xs_printf(xst, node, "feature-sg", "%d", 1); in talk_to_backend()
645 message = "writing feature-sg"; in talk_to_backend()
648 if ((if_getcapenable(info->xn_ifp) & IFCAP_LRO) != 0) { in talk_to_backend()
649 err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1); in talk_to_backend()
651 message = "writing feature-gso-tcpv4"; in talk_to_backend()
655 if ((if_getcapenable(info->xn_ifp) & IFCAP_RXCSUM) == 0) { in talk_to_backend()
656 err = xs_printf(xst, node, "feature-no-csum-offload", "%d", 1); in talk_to_backend()
658 message = "writing feature-no-csum-offload"; in talk_to_backend()
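Taken together, the writes above describe the negotiated transport in the frontend's xenstore directory. With two queue pairs and the shared (non-split) event channel per pair that this driver supports, the resulting keys would look roughly as follows; the node path and numeric values are illustrative:

	<frontend-node>/multi-queue-num-queues = "2"
	<frontend-node>/queue-0/tx-ring-ref = "768"
	<frontend-node>/queue-0/rx-ring-ref = "769"
	<frontend-node>/queue-0/event-channel = "21"
	<frontend-node>/queue-1/tx-ring-ref = "770"
	<frontend-node>/queue-1/rx-ring-ref = "771"
	<frontend-node>/queue-1/event-channel = "22"
	<frontend-node>/request-rx-copy = "1"
	<frontend-node>/feature-rx-notify = "1"
	<frontend-node>/feature-sg = "1"

With a single queue pair the same ring and event-channel keys are written directly under the frontend node, without the queue-%u level.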
695 struct netfront_info *np = txq->info; in xn_txq_start()
696 if_t ifp = np->xn_ifp; in xn_txq_start()
699 if (!drbr_empty(ifp, txq->br)) in xn_txq_start()
708 if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring)) in xn_txq_intr()
729 gnttab_free_grant_references(rxq->gref_head); in disconnect_rxq()
730 if (rxq->ring_ref != GRANT_REF_INVALID) { in disconnect_rxq()
731 gnttab_end_foreign_access(rxq->ring_ref, NULL); in disconnect_rxq()
732 rxq->ring_ref = GRANT_REF_INVALID; in disconnect_rxq()
739 rxq->xen_intr_handle = 0; in disconnect_rxq()
746 callout_drain(&rxq->rx_refill); in destroy_rxq()
747 free(rxq->ring.sring, M_DEVBUF); in destroy_rxq()
748 rxq->ring.sring = NULL; in destroy_rxq()
756 for (i = 0; i < np->num_queues; i++) in destroy_rxqs()
757 destroy_rxq(&np->rxq[i]); in destroy_rxqs()
759 free(np->rxq, M_DEVBUF); in destroy_rxqs()
760 np->rxq = NULL; in destroy_rxqs()
772 info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues, in setup_rxqs()
776 rxq = &info->rxq[q]; in setup_rxqs()
778 rxq->id = q; in setup_rxqs()
779 rxq->info = info; in setup_rxqs()
781 rxq->gref_head = GNTTAB_LIST_END; in setup_rxqs()
782 rxq->ring_ref = GRANT_REF_INVALID; in setup_rxqs()
783 rxq->ring.sring = NULL; in setup_rxqs()
784 snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q); in setup_rxqs()
785 mtx_init(&rxq->lock, rxq->name, "netfront receive lock", in setup_rxqs()
789 rxq->mbufs[i] = NULL; in setup_rxqs()
790 rxq->grant_ref[i] = GRANT_REF_INVALID; in setup_rxqs()
796 &rxq->gref_head) != 0) { in setup_rxqs()
805 FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE); in setup_rxqs()
808 &rxq->ring_ref); in setup_rxqs()
814 callout_init(&rxq->rx_refill, 1); in setup_rxqs()
820 gnttab_free_grant_references(rxq->gref_head); in setup_rxqs()
821 free(rxq->ring.sring, M_DEVBUF); in setup_rxqs()
823 for (; q >= 0; q--) { in setup_rxqs()
824 disconnect_rxq(&info->rxq[q]); in setup_rxqs()
825 destroy_rxq(&info->rxq[q]); in setup_rxqs()
828 free(info->rxq, M_DEVBUF); in setup_rxqs()
837 gnttab_free_grant_references(txq->gref_head); in disconnect_txq()
838 if (txq->ring_ref != GRANT_REF_INVALID) { in disconnect_txq()
839 gnttab_end_foreign_access(txq->ring_ref, NULL); in disconnect_txq()
840 txq->ring_ref = GRANT_REF_INVALID; in disconnect_txq()
842 xen_intr_unbind(&txq->xen_intr_handle); in disconnect_txq()
850 free(txq->ring.sring, M_DEVBUF); in destroy_txq()
851 txq->ring.sring = NULL; in destroy_txq()
852 buf_ring_free(txq->br, M_DEVBUF); in destroy_txq()
853 txq->br = NULL; in destroy_txq()
854 if (txq->tq) { in destroy_txq()
855 taskqueue_drain_all(txq->tq); in destroy_txq()
856 taskqueue_free(txq->tq); in destroy_txq()
857 txq->tq = NULL; in destroy_txq()
861 bus_dmamap_destroy(txq->info->dma_tag, in destroy_txq()
862 txq->xennet_tag[i].dma_map); in destroy_txq()
863 txq->xennet_tag[i].dma_map = NULL; in destroy_txq()
872 for (i = 0; i < np->num_queues; i++) in destroy_txqs()
873 destroy_txq(&np->txq[i]); in destroy_txqs()
875 free(np->txq, M_DEVBUF); in destroy_txqs()
876 np->txq = NULL; in destroy_txqs()
888 info->txq = malloc(sizeof(struct netfront_txq) * num_queues, in setup_txqs()
892 txq = &info->txq[q]; in setup_txqs()
894 txq->id = q; in setup_txqs()
895 txq->info = info; in setup_txqs()
897 txq->gref_head = GNTTAB_LIST_END; in setup_txqs()
898 txq->ring_ref = GRANT_REF_INVALID; in setup_txqs()
899 txq->ring.sring = NULL; in setup_txqs()
901 snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q); in setup_txqs()
903 mtx_init(&txq->lock, txq->name, "netfront transmit lock", in setup_txqs()
905 SLIST_INIT(&txq->tags); in setup_txqs()
908 txq->mbufs[i] = (void *) ((u_long) i+1); in setup_txqs()
909 txq->grant_ref[i] = GRANT_REF_INVALID; in setup_txqs()
910 txq->xennet_tag[i].txq = txq; in setup_txqs()
911 txq->xennet_tag[i].dma_tag = info->dma_tag; in setup_txqs()
912 error = bus_dmamap_create(info->dma_tag, 0, in setup_txqs()
913 &txq->xennet_tag[i].dma_map); in setup_txqs()
919 m_tag_setup(&txq->xennet_tag[i].tag, in setup_txqs()
921 sizeof(txq->xennet_tag[i]) - in setup_txqs()
922 sizeof(txq->xennet_tag[i].tag)); in setup_txqs()
923 txq->xennet_tag[i].tag.m_tag_free = &tag_free; in setup_txqs()
924 SLIST_INSERT_HEAD(&txq->tags, &txq->xennet_tag[i], in setup_txqs()
927 txq->mbufs[NET_TX_RING_SIZE] = (void *)0; in setup_txqs()
932 &txq->gref_head) != 0) { in setup_txqs()
941 FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE); in setup_txqs()
944 &txq->ring_ref); in setup_txqs()
950 txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF, in setup_txqs()
951 M_WAITOK, &txq->lock); in setup_txqs()
952 TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq); in setup_txqs()
954 txq->tq = taskqueue_create(txq->name, M_WAITOK, in setup_txqs()
955 taskqueue_thread_enqueue, &txq->tq); in setup_txqs()
957 error = taskqueue_start_threads(&txq->tq, 1, PI_NET, in setup_txqs()
958 "%s txq %d", device_get_nameunit(dev), txq->id); in setup_txqs()
961 txq->id); in setup_txqs()
967 &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, in setup_txqs()
968 &txq->xen_intr_handle); in setup_txqs()
979 taskqueue_drain_all(txq->tq); in setup_txqs()
981 buf_ring_free(txq->br, M_DEVBUF); in setup_txqs()
982 taskqueue_free(txq->tq); in setup_txqs()
983 gnttab_end_foreign_access(txq->ring_ref, NULL); in setup_txqs()
985 gnttab_free_grant_references(txq->gref_head); in setup_txqs()
986 free(txq->ring.sring, M_DEVBUF); in setup_txqs()
988 for (; q >= 0; q--) { in setup_txqs()
989 disconnect_txq(&info->txq[q]); in setup_txqs()
990 destroy_txq(&info->txq[q]); in setup_txqs()
993 free(info->txq, M_DEVBUF); in setup_txqs()
1004 if (info->txq) in setup_device()
1007 if (info->rxq) in setup_device()
1010 info->num_queues = 0; in setup_device()
1019 info->num_queues = num_queues; in setup_device()
1023 info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle; in setup_device()
1042 CURVNET_SET(if_getvnet(sc->xn_ifp)); in netfront_backend_changed()
1057 xenbus_set_state(sc->xbdev, XenbusStateConnected); in netfront_backend_changed()
1064 if (sc->xn_reset) { in netfront_backend_changed()
1067 sc->xn_reset = false; in netfront_backend_changed()
1077 EVENTHANDLER_INVOKE(iflladdr_event, sc->xn_ifp); in netfront_backend_changed()
1096 return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2)); in xn_tx_slot_available()
1107 m = txq->mbufs[i]; in xn_release_tx_bufs()
1113 * must be an index from free-list tracking. in xn_release_tx_bufs()
1117 gnttab_end_foreign_access_ref(txq->grant_ref[i]); in xn_release_tx_bufs()
1118 gnttab_release_grant_reference(&txq->gref_head, in xn_release_tx_bufs()
1119 txq->grant_ref[i]); in xn_release_tx_bufs()
1120 txq->grant_ref[i] = GRANT_REF_INVALID; in xn_release_tx_bufs()
1121 add_id_to_freelist(txq->mbufs, i); in xn_release_tx_bufs()
1122 txq->mbufs_cnt--; in xn_release_tx_bufs()
1123 if (txq->mbufs_cnt < 0) { in xn_release_tx_bufs()
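The free-list tracking mentioned in the comment above threads unused ring ids through the txq->mbufs[] pointer array itself: slot 0 acts as the head, and every free slot stores the next free index cast to a pointer, which is what the (void *)((u_long) i+1) initialization in setup_txqs() lays down. A minimal sketch of that scheme, on the assumption that it reflects the driver's add_id_to_freelist()/get_id_from_freelist() helpers:

	#include <stdint.h>

	struct mbuf;			/* opaque here; only the pointer slots matter */

	/* Free a ring id: the freed slot records the old head, then becomes the head. */
	static inline void
	add_id_to_freelist(struct mbuf **list, uintptr_t id)
	{
		list[id] = list[0];
		list[0] = (struct mbuf *)id;
	}

	/* Allocate a ring id: take the head and advance it to the next free index. */
	static inline uintptr_t
	get_id_from_freelist(struct mbuf **list)
	{
		uintptr_t id = (uintptr_t)list[0];

		list[0] = list[id];
		return (id);
	}

Because valid indices are small integers, the release loop above can tell a free-list entry from a real mbuf pointer and skip it.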
1138 m->m_len = m->m_pkthdr.len = MJUMPAGESIZE; in xn_alloc_one_rx_buffer()
1151 if (__predict_false(rxq->info->carrier == 0)) in xn_alloc_rx_buffers()
1154 for (req_prod = rxq->ring.req_prod_pvt; in xn_alloc_rx_buffers()
1155 req_prod - rxq->ring.rsp_cons < NET_RX_RING_SIZE; in xn_alloc_rx_buffers()
1169 KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain")); in xn_alloc_rx_buffers()
1170 rxq->mbufs[id] = m; in xn_alloc_rx_buffers()
1172 ref = gnttab_claim_grant_reference(&rxq->gref_head); in xn_alloc_rx_buffers()
1175 rxq->grant_ref[id] = ref; in xn_alloc_rx_buffers()
1178 req = RING_GET_REQUEST(&rxq->ring, req_prod); in xn_alloc_rx_buffers()
1181 xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0); in xn_alloc_rx_buffers()
1182 req->id = id; in xn_alloc_rx_buffers()
1183 req->gref = ref; in xn_alloc_rx_buffers()
1186 rxq->ring.req_prod_pvt = req_prod; in xn_alloc_rx_buffers()
1189 if (req_prod - rxq->ring.rsp_cons < NET_RX_SLOTS_MIN) { in xn_alloc_rx_buffers()
1190 callout_reset_curcpu(&rxq->rx_refill, hz/10, in xn_alloc_rx_buffers()
1197 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify); in xn_alloc_rx_buffers()
1199 xen_intr_signal(rxq->xen_intr_handle); in xn_alloc_rx_buffers()
1219 m = rxq->mbufs[i]; in xn_release_rx_bufs()
1224 ref = rxq->grant_ref[i]; in xn_release_rx_bufs()
1229 gnttab_release_grant_reference(&rxq->gref_head, ref); in xn_release_rx_bufs()
1230 rxq->mbufs[i] = NULL; in xn_release_rx_bufs()
1231 rxq->grant_ref[i] = GRANT_REF_INVALID; in xn_release_rx_bufs()
1240 struct netfront_info *np = rxq->info; in xn_rxeof()
1242 struct lro_ctrl *lro = &rxq->lro; in xn_rxeof()
1261 ifp = np->xn_ifp; in xn_rxeof()
1264 rp = rxq->ring.sring->rsp_prod; in xn_rxeof()
1267 i = rxq->ring.rsp_cons; in xn_rxeof()
1269 memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx)); in xn_rxeof()
1282 m->m_pkthdr.rcvif = ifp; in xn_rxeof()
1283 if (rx->flags & NETRXF_data_validated) { in xn_rxeof()
1293 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID in xn_rxeof()
1295 m->m_pkthdr.csum_data = 0xffff; in xn_rxeof()
1297 if ((rx->flags & NETRXF_extra_info) != 0 && in xn_rxeof()
1298 (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type == in xn_rxeof()
1300 m->m_pkthdr.tso_segsz = in xn_rxeof()
1301 extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].u.gso.size; in xn_rxeof()
1302 m->m_pkthdr.csum_flags |= CSUM_TSO; in xn_rxeof()
1308 rxq->ring.rsp_cons = i; in xn_rxeof()
1312 RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do); in xn_rxeof()
1325 lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) { in xn_rxeof()
1353 struct netfront_info *np = txq->info; in xn_txeof()
1360 ifp = np->xn_ifp; in xn_txeof()
1363 prod = txq->ring.sring->rsp_prod; in xn_txeof()
1366 for (i = txq->ring.rsp_cons; i != prod; i++) { in xn_txeof()
1367 txr = RING_GET_RESPONSE(&txq->ring, i); in xn_txeof()
1368 if (txr->status == NETIF_RSP_NULL) in xn_txeof()
1371 if (txr->status != NETIF_RSP_OKAY) { in xn_txeof()
1373 __func__, txr->status); in xn_txeof()
1375 id = txr->id; in xn_txeof()
1376 m = txq->mbufs[id]; in xn_txeof()
1384 txq->grant_ref[id]) != 0)) { in xn_txeof()
1388 gnttab_end_foreign_access_ref(txq->grant_ref[id]); in xn_txeof()
1390 &txq->gref_head, txq->grant_ref[id]); in xn_txeof()
1391 txq->grant_ref[id] = GRANT_REF_INVALID; in xn_txeof()
1393 txq->mbufs[id] = NULL; in xn_txeof()
1394 add_id_to_freelist(txq->mbufs, id); in xn_txeof()
1395 txq->mbufs_cnt--; in xn_txeof()
1397 /* Only mark the txq active if we've freed up at least one slot to try */ in xn_txeof()
1400 txq->ring.rsp_cons = prod; in xn_txeof()
1411 txq->ring.sring->rsp_event = in xn_txeof()
1412 prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1; in xn_txeof()
1415 } while (prod != txq->ring.sring->rsp_prod); in xn_txeof()
1417 if (txq->full && in xn_txeof()
1418 ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) { in xn_txeof()
1419 txq->full = false; in xn_txeof()
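The rsp_event update a few lines above re-arms the completion notification roughly halfway into the still-outstanding requests rather than at the very next response. As a worked example, with prod (responses already consumed) at 32 and req_prod at 40, rsp_event becomes 32 + ((40 - 32) >> 1) + 1 = 37, so the backend holds off on another event until its response producer crosses slot 37, batching TX completions instead of interrupting per packet.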
1428 struct netfront_info *np = txq->info; in xn_intr()
1429 struct netfront_rxq *rxq = &np->rxq[txq->id]; in xn_intr()
1440 int new = xn_rxidx(rxq->ring.req_prod_pvt); in xn_move_rx_slot()
1442 KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL")); in xn_move_rx_slot()
1443 rxq->mbufs[new] = m; in xn_move_rx_slot()
1444 rxq->grant_ref[new] = ref; in xn_move_rx_slot()
1445 RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new; in xn_move_rx_slot()
1446 RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref; in xn_move_rx_slot()
1447 rxq->ring.req_prod_pvt++; in xn_move_rx_slot()
1468 RING_GET_RESPONSE(&rxq->ring, ++(*cons)); in xn_get_extras()
1470 if (__predict_false(!extra->type || in xn_get_extras()
1471 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { in xn_get_extras()
1474 memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); in xn_get_extras()
1480 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); in xn_get_extras()
1490 struct netif_rx_response *rx = &rinfo->rx; in xn_get_responses()
1491 struct netif_extra_info *extras = rinfo->extras; in xn_get_responses()
1500 if (rx->flags & NETRXF_extra_info) { in xn_get_responses()
1505 m0->m_pkthdr.len = 0; in xn_get_responses()
1506 m0->m_next = NULL; in xn_get_responses()
1511 DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n", in xn_get_responses()
1512 rx->status, rx->offset, frags); in xn_get_responses()
1514 if (__predict_false(rx->status < 0 || in xn_get_responses()
1515 rx->offset + rx->status > PAGE_SIZE)) { in xn_get_responses()
1530 printf("%s: Bad rx response id %d.\n", __func__, rx->id); in xn_get_responses()
1538 gnttab_release_grant_reference(&rxq->gref_head, ref); in xn_get_responses()
1544 m->m_len = rx->status; in xn_get_responses()
1545 m->m_data += rx->offset; in xn_get_responses()
1546 m0->m_pkthdr.len += rx->status; in xn_get_responses()
1549 if (!(rx->flags & NETRXF_more_data)) in xn_get_responses()
1561 * Note that m can be NULL, if rx->status < 0 or if in xn_get_responses()
1562 * rx->offset + rx->status > PAGE_SIZE above. in xn_get_responses()
1566 rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags); in xn_get_responses()
1570 * m_prev == NULL can happen if rx->status < 0 or if in xn_get_responses()
1571 * rx->offset + rx->status > PAGE_SIZE above. in xn_get_responses()
1574 m_prev->m_next = m; in xn_get_responses()
1577 * m0 can be NULL if rx->status < 0 or if rx->offset + in xn_get_responses()
1578 * rx->status > PAGE_SIZE above. in xn_get_responses()
1582 m->m_next = NULL; in xn_get_responses()
1599 struct netfront_info *np = txq->info; in xn_assemble_tx_request()
1600 if_t ifp = np->xn_ifp; in xn_assemble_tx_request()
1602 bus_dma_segment_t *segs = txq->segs; in xn_assemble_tx_request()
1607 KASSERT(!SLIST_EMPTY(&txq->tags), ("no tags available")); in xn_assemble_tx_request()
1608 tag = SLIST_FIRST(&txq->tags); in xn_assemble_tx_request()
1609 SLIST_REMOVE_HEAD(&txq->tags, next); in xn_assemble_tx_request()
1610 KASSERT(tag->count == 0, ("tag already in-use")); in xn_assemble_tx_request()
1611 map = tag->dma_map; in xn_assemble_tx_request()
1612 error = bus_dmamap_load_mbuf_sg(np->dma_tag, map, m_head, segs, in xn_assemble_tx_request()
1614 if (error == EFBIG || nfrags > np->maxfrags) { in xn_assemble_tx_request()
1617 bus_dmamap_unload(np->dma_tag, map); in xn_assemble_tx_request()
1624 SLIST_INSERT_HEAD(&txq->tags, tag, next); in xn_assemble_tx_request()
1629 error = bus_dmamap_load_mbuf_sg(np->dma_tag, map, m_head, segs, in xn_assemble_tx_request()
1631 if (error != 0 || nfrags > np->maxfrags) { in xn_assemble_tx_request()
1632 bus_dmamap_unload(np->dma_tag, map); in xn_assemble_tx_request()
1633 SLIST_INSERT_HEAD(&txq->tags, tag, next); in xn_assemble_tx_request()
1638 SLIST_INSERT_HEAD(&txq->tags, tag, next); in xn_assemble_tx_request()
1646 * pass a too-long chain over to the other side by dropping the in xn_assemble_tx_request()
1656 SLIST_INSERT_HEAD(&txq->tags, tag, next); in xn_assemble_tx_request()
1657 bus_dmamap_unload(np->dma_tag, map); in xn_assemble_tx_request()
1669 KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE, in xn_assemble_tx_request()
1671 "(%d)!", __func__, (int) txq->mbufs_cnt, in xn_assemble_tx_request()
1679 otherend_id = xenbus_get_otherend_id(np->xbdev); in xn_assemble_tx_request()
1680 m_tag_prepend(m_head, &tag->tag); in xn_assemble_tx_request()
1687 tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt); in xn_assemble_tx_request()
1688 id = get_id_from_freelist(txq->mbufs); in xn_assemble_tx_request()
1692 txq->mbufs_cnt++; in xn_assemble_tx_request()
1693 if (txq->mbufs_cnt > NET_TX_RING_SIZE) in xn_assemble_tx_request()
1697 txq->mbufs[id] = m_head; in xn_assemble_tx_request()
1698 tx->id = id; in xn_assemble_tx_request()
1699 ref = gnttab_claim_grant_reference(&txq->gref_head); in xn_assemble_tx_request()
1704 tx->gref = txq->grant_ref[id] = ref; in xn_assemble_tx_request()
1705 tx->offset = segs[i].ds_addr & PAGE_MASK; in xn_assemble_tx_request()
1706 KASSERT(tx->offset + segs[i].ds_len <= PAGE_SIZE, in xn_assemble_tx_request()
1708 tx->flags = 0; in xn_assemble_tx_request()
1718 tx->size = m_head->m_pkthdr.len; in xn_assemble_tx_request()
1732 if (m_head->m_pkthdr.csum_flags in xn_assemble_tx_request()
1734 tx->flags |= (NETTXF_csum_blank in xn_assemble_tx_request()
1737 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { in xn_assemble_tx_request()
1740 RING_GET_REQUEST(&txq->ring, in xn_assemble_tx_request()
1741 ++txq->ring.req_prod_pvt); in xn_assemble_tx_request()
1743 tx->flags |= NETTXF_extra_info; in xn_assemble_tx_request()
1745 gso->u.gso.size = m_head->m_pkthdr.tso_segsz; in xn_assemble_tx_request()
1746 gso->u.gso.type = in xn_assemble_tx_request()
1748 gso->u.gso.pad = 0; in xn_assemble_tx_request()
1749 gso->u.gso.features = 0; in xn_assemble_tx_request()
1751 gso->type = XEN_NETIF_EXTRA_TYPE_GSO; in xn_assemble_tx_request()
1752 gso->flags = 0; in xn_assemble_tx_request()
1755 tx->size = segs[i].ds_len; in xn_assemble_tx_request()
1757 if (i != nfrags - 1) in xn_assemble_tx_request()
1758 tx->flags |= NETTXF_more_data; in xn_assemble_tx_request()
1760 txq->ring.req_prod_pvt++; in xn_assemble_tx_request()
1762 bus_dmamap_sync(np->dma_tag, map, BUS_DMASYNC_PREWRITE); in xn_assemble_tx_request()
1766 if_inc_counter(ifp, IFCOUNTER_OBYTES, m_head->m_pkthdr.len); in xn_assemble_tx_request()
1767 if (m_head->m_flags & M_MCAST) in xn_assemble_tx_request()
1785 ifp = np->xn_ifp; in xn_ifinit_locked()
1792 for (i = 0; i < np->num_queues; i++) { in xn_ifinit_locked()
1793 rxq = &np->rxq[i]; in xn_ifinit_locked()
1796 rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1; in xn_ifinit_locked()
1797 if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring)) in xn_ifinit_locked()
1828 dev = sc->xbdev; in xn_ioctl()
1834 if (ifa->ifa_addr->sa_family == AF_INET) { in xn_ioctl()
1849 if (if_getmtu(ifp) == ifr->ifr_mtu) in xn_ioctl()
1852 if_setmtu(ifp, ifr->ifr_mtu); in xn_ioctl()
1863 * a full re-init means reloading the firmware and in xn_ioctl()
1873 sc->xn_if_flags = if_getflags(ifp); in xn_ioctl()
1877 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); in xn_ioctl()
1906 device_printf(sc->xbdev, in xn_ioctl()
1910 sc->xn_reset = true; in xn_ioctl()
1926 xs_rm(XST_NIL, xenbus_get_node(dev), "feature-gso-tcpv4"); in xn_ioctl()
1927 xs_rm(XST_NIL, xenbus_get_node(dev), "feature-no-csum-offload"); in xn_ioctl()
1942 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); in xn_ioctl()
1958 ifp = sc->xn_ifp; in xn_stop()
1975 if (rxq->mbufs[i] == NULL) in xn_rebuild_rx_bufs()
1978 m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i); in xn_rebuild_rx_bufs()
1979 ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i); in xn_rebuild_rx_bufs()
1981 req = RING_GET_REQUEST(&rxq->ring, requeue_idx); in xn_rebuild_rx_bufs()
1985 xenbus_get_otherend_id(rxq->info->xbdev), in xn_rebuild_rx_bufs()
1988 req->gref = ref; in xn_rebuild_rx_bufs()
1989 req->id = requeue_idx; in xn_rebuild_rx_bufs()
1994 rxq->ring.req_prod_pvt = requeue_idx; in xn_rebuild_rx_bufs()
2006 error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), in xn_connect()
2007 "feature-rx-copy", NULL, "%u", &feature_rx_copy); in xn_connect()
2016 error = talk_to_backend(np->xbdev, np); in xn_connect()
2025 for (i = 0; i < np->num_queues; i++) { in xn_connect()
2026 txq = &np->txq[i]; in xn_connect()
2031 for (i = 0; i < np->num_queues; i++) { in xn_connect()
2032 rxq = &np->rxq[i]; in xn_connect()
2054 for (i = 0; i < np->num_queues; i++) { in xn_kick_rings()
2055 txq = &np->txq[i]; in xn_kick_rings()
2056 rxq = &np->rxq[i]; in xn_kick_rings()
2057 xen_intr_signal(txq->xen_intr_handle); in xn_kick_rings()
2072 device_printf(np->xbdev, "backend features:"); in xn_query_features()
2074 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), in xn_query_features()
2075 "feature-sg", NULL, "%d", &val) != 0) in xn_query_features()
2078 np->maxfrags = 1; in xn_query_features()
2080 np->maxfrags = MAX_TX_REQ_FRAGS; in xn_query_features()
2081 printf(" feature-sg"); in xn_query_features()
2084 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), in xn_query_features()
2085 "feature-gso-tcpv4", NULL, "%d", &val) != 0) in xn_query_features()
2088 if_setcapabilitiesbit(np->xn_ifp, 0, IFCAP_TSO4 | IFCAP_LRO); in xn_query_features()
2090 if_setcapabilitiesbit(np->xn_ifp, IFCAP_TSO4 | IFCAP_LRO, 0); in xn_query_features()
2091 printf(" feature-gso-tcpv4"); in xn_query_features()
2096 * feature-no-csum-offload is set in xenstore. in xn_query_features()
2098 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), in xn_query_features()
2099 "feature-no-csum-offload", NULL, "%d", &val) != 0) in xn_query_features()
2102 if_setcapabilitiesbit(np->xn_ifp, IFCAP_HWCSUM, 0); in xn_query_features()
2104 if_setcapabilitiesbit(np->xn_ifp, 0, IFCAP_HWCSUM); in xn_query_features()
2105 printf(" feature-no-csum-offload"); in xn_query_features()
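Assuming a backend that advertises scatter-gather and TSOv4 support and leaves checksum offload enabled, the probe message assembled by the printf() calls above would read roughly as follows (device name illustrative):

	xn0: backend features: feature-sg feature-gso-tcpv4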
2120 ifp = np->xn_ifp; in xn_configure_features()
2135 for (i = 0; i < np->num_queues; i++) in xn_configure_features()
2136 tcp_lro_free(&np->rxq[i].lro); in xn_configure_features()
2140 for (i = 0; i < np->num_queues; i++) { in xn_configure_features()
2141 err = tcp_lro_init(&np->rxq[i].lro); in xn_configure_features()
2143 device_printf(np->xbdev, in xn_configure_features()
2148 np->rxq[i].lro.ifp = ifp; in xn_configure_features()
2174 np = txq->info; in xn_txq_mq_start_locked()
2175 br = txq->br; in xn_txq_mq_start_locked()
2176 ifp = np->xn_ifp; in xn_txq_mq_start_locked()
2207 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify); in xn_txq_mq_start_locked()
2209 xen_intr_signal(txq->xen_intr_handle); in xn_txq_mq_start_locked()
2214 if (RING_FULL(&txq->ring)) in xn_txq_mq_start_locked()
2215 txq->full = true; in xn_txq_mq_start_locked()
2228 npairs = np->num_queues; in xn_txq_mq_start()
2237 i = m->m_pkthdr.flowid % npairs; in xn_txq_mq_start()
2241 txq = &np->txq[i]; in xn_txq_mq_start()
2247 error = drbr_enqueue(ifp, txq->br, m); in xn_txq_mq_start()
2248 taskqueue_enqueue(txq->tq, &txq->defrtask); in xn_txq_mq_start()
2264 for (i = 0; i < np->num_queues; i++) { in xn_qflush()
2265 txq = &np->txq[i]; in xn_qflush()
2268 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL) in xn_qflush()
2289 np->xbdev = dev; in create_netdev()
2291 mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF); in create_netdev()
2293 ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts); in create_netdev()
2294 ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); in create_netdev()
2295 ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL); in create_netdev()
2297 err = xen_net_read_mac(dev, np->mac); in create_netdev()
2302 ifp = np->xn_ifp = if_alloc(IFT_ETHER); in create_netdev()
2322 if_sethwtsomax(ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); in create_netdev()
2326 ether_ifattach(ifp, np->mac); in create_netdev()
2340 &np->dma_tag); in create_netdev()
2369 ether_ifdetach(np->xn_ifp); in netif_free()
2370 free(np->rxq, M_DEVBUF); in netif_free()
2371 free(np->txq, M_DEVBUF); in netif_free()
2372 if_free(np->xn_ifp); in netif_free()
2373 np->xn_ifp = NULL; in netif_free()
2374 ifmedia_removeall(&np->sc_media); in netif_free()
2375 bus_dma_tag_destroy(np->dma_tag); in netif_free()
2383 for (i = 0; i < np->num_queues; i++) { in netif_disconnect_backend()
2384 XN_RX_LOCK(&np->rxq[i]); in netif_disconnect_backend()
2385 XN_TX_LOCK(&np->txq[i]); in netif_disconnect_backend()
2388 for (i = 0; i < np->num_queues; i++) { in netif_disconnect_backend()
2389 XN_RX_UNLOCK(&np->rxq[i]); in netif_disconnect_backend()
2390 XN_TX_UNLOCK(&np->txq[i]); in netif_disconnect_backend()
2393 for (i = 0; i < np->num_queues; i++) { in netif_disconnect_backend()
2394 disconnect_rxq(&np->rxq[i]); in netif_disconnect_backend()
2395 disconnect_txq(&np->txq[i]); in netif_disconnect_backend()
2410 ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; in xn_ifmedia_sts()
2411 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; in xn_ifmedia_sts()