netfront.c: f929eb1ed50f079f1638cdabe511ac2e347f541a (old) -> dabb3db7a817f003af3f89c965ba369c67fc4910 (new)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Kip Macy
 * Copyright (c) 2015 Wei Liu <wei.liu2@citrix.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
--- 57 unchanged lines hidden ---
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <contrib/xen/memory.h>
#include <contrib/xen/io/netif.h>
#include <xen/xenbus/xenbusvar.h>
+#include <machine/bus.h>
+
#include "xenbus_if.h"

/* Features supported by all backends. TSO and LRO can be negotiated */
#define XN_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)

#define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE)

--- 39 unchanged lines hidden ---
static void xn_alloc_rx_buffers_callout(void *arg);

static void xn_release_rx_bufs(struct netfront_rxq *);
static void xn_release_tx_bufs(struct netfront_txq *);

static void xn_rxq_intr(struct netfront_rxq *);
static void xn_txq_intr(struct netfront_txq *);
static void xn_intr(void *);
-static inline int xn_count_frags(struct mbuf *m);
static int xn_assemble_tx_request(struct netfront_txq *, struct mbuf *);
static int xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_query_features(struct netfront_info *np);
static int xn_configure_features(struct netfront_info *np);
static void netif_free(struct netfront_info *info);
--- 56 unchanged lines hidden ---

	struct mbuf *mbufs[NET_TX_RING_SIZE + 1];
	int mbufs_cnt;
	struct buf_ring *br;

	struct taskqueue *tq;
	struct task defrtask;

+	bus_dma_segment_t segs[MAX_TX_REQ_FRAGS];
+	struct mbuf_xennet {
+		struct m_tag tag;
+		bus_dma_tag_t dma_tag;
+		bus_dmamap_t dma_map;
+		struct netfront_txq *txq;
+		SLIST_ENTRY(mbuf_xennet) next;
+		u_int count;
+	} xennet_tag[NET_TX_RING_SIZE + 1];
+	SLIST_HEAD(, mbuf_xennet) tags;
+
	bool full;
};

struct netfront_info {
	struct ifnet *xn_ifp;

	struct mtx sc_lock;

--- 6 unchanged lines hidden ---

	device_t xbdev;
	uint8_t mac[ETHER_ADDR_LEN];

	int xn_if_flags;

	struct ifmedia sc_media;

+	bus_dma_tag_t dma_tag;
+
	bool xn_reset;
};

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

--- 64 unchanged lines hidden ---
	int i = xn_rxidx(ri);
	grant_ref_t ref = rxq->grant_ref[i];

	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
	rxq->grant_ref[i] = GRANT_REF_INVALID;
	return (ref);
}

+#define MTAG_COOKIE 1218492000
+#define MTAG_XENNET 0
+
+static void mbuf_grab(struct mbuf *m)
+{
+	struct mbuf_xennet *ref;
+
+	ref = (struct mbuf_xennet *)m_tag_locate(m, MTAG_COOKIE,
+	    MTAG_XENNET, NULL);
+	KASSERT(ref != NULL, ("Cannot find refcount"));
+	ref->count++;
+}
+
+static void mbuf_release(struct mbuf *m)
+{
+	struct mbuf_xennet *ref;
+
+	ref = (struct mbuf_xennet *)m_tag_locate(m, MTAG_COOKIE,
+	    MTAG_XENNET, NULL);
+	KASSERT(ref != NULL, ("Cannot find refcount"));
+	KASSERT(ref->count > 0, ("Invalid reference count"));
+
+	if (--ref->count == 0)
+		m_freem(m);
+}
+
+static void tag_free(struct m_tag *t)
+{
+	struct mbuf_xennet *ref = (struct mbuf_xennet *)t;
+
+	KASSERT(ref->count == 0, ("Free mbuf tag with pending refcnt"));
+	bus_dmamap_sync(ref->dma_tag, ref->dma_map, BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_destroy(ref->dma_tag, ref->dma_map);
+	SLIST_INSERT_HEAD(&ref->txq->tags, ref, next);
+}
+
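The three helpers above give each transmitted mbuf chain a small reference count carried in an m_tag: every TX ring slot that points into the chain holds one reference, xn_txeof() drops one as each slot completes, and only the last release frees the chain, at which point tag_free() tears down the DMA map state and returns the tag to the queue's free list. A minimal sketch of that life cycle, using the names introduced in this diff (the wrapper function itself is illustrative only and is not part of the driver):

static void
example_tx_tag_lifecycle(struct netfront_txq *txq, struct mbuf *m_head,
    int nfrags)
{
	struct mbuf_xennet *tag;
	int i;

	/*
	 * Take a pre-allocated tag from the queue's free list and attach
	 * it to the chain; its dma_map was created at queue setup time
	 * and is loaded before the chain is put on the ring.
	 */
	tag = SLIST_FIRST(&txq->tags);
	SLIST_REMOVE_HEAD(&txq->tags, next);
	m_tag_prepend(m_head, &tag->tag);

	/* One reference per TX ring slot that points into the chain. */
	for (i = 0; i < nfrags; i++)
		mbuf_grab(m_head);

	/*
	 * In the driver these releases happen in xn_txeof(), one per
	 * completed slot; the final one triggers m_freem(), which runs
	 * tag_free() and pushes the tag back onto txq->tags.
	 */
	for (i = 0; i < nfrags; i++)
		mbuf_release(m_head);
}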
#define IPRINTK(fmt, args...) \
	printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
	printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
--- 461 unchanged lines hidden ---
	gnttab_free_grant_references(txq->gref_head);
	gnttab_end_foreign_access(txq->ring_ref, NULL);
	xen_intr_unbind(&txq->xen_intr_handle);
}

static void
destroy_txq(struct netfront_txq *txq)
{
+	unsigned int i;
	free(txq->ring.sring, M_DEVBUF);
	buf_ring_free(txq->br, M_DEVBUF);
	taskqueue_drain_all(txq->tq);
	taskqueue_free(txq->tq);
+
+	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
+		bus_dmamap_destroy(txq->info->dma_tag,
+		    txq->xennet_tag[i].dma_map);
+		txq->xennet_tag[i].dma_map = NULL;
+	}
}

static void
destroy_txqs(struct netfront_info *np)
{
	int i;

	for (i = 0; i < np->num_queues; i++)
--- 23 unchanged lines hidden ---

	txq->ring_ref = GRANT_REF_INVALID;
	txq->ring.sring = NULL;

	snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q);

	mtx_init(&txq->lock, txq->name, "netfront transmit lock",
	    MTX_DEF);
+	SLIST_INIT(&txq->tags);
	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
		txq->mbufs[i] = (void *) ((u_long) i+1);
		txq->grant_ref[i] = GRANT_REF_INVALID;
+		txq->xennet_tag[i].txq = txq;
+		txq->xennet_tag[i].dma_tag = info->dma_tag;
+		error = bus_dmamap_create(info->dma_tag, 0,
+		    &txq->xennet_tag[i].dma_map);
+		if (error != 0) {
+			device_printf(dev,
+			    "failed to allocate dma map\n");
+			goto fail;
+		}
+		m_tag_setup(&txq->xennet_tag[i].tag,
+		    MTAG_COOKIE, MTAG_XENNET,
+		    sizeof(txq->xennet_tag[i]) -
+		    sizeof(txq->xennet_tag[i].tag));
+		txq->xennet_tag[i].tag.m_tag_free = &tag_free;
+		SLIST_INSERT_HEAD(&txq->tags, &txq->xennet_tag[i],
+		    next);
	}
	txq->mbufs[NET_TX_RING_SIZE] = (void *)0;

	/* Start resources allocation. */

	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
	    &txq->gref_head) != 0) {
		device_printf(dev, "failed to allocate tx grant refs\n");
--- 199 unchanged lines hidden ---
		gnttab_release_grant_reference(&txq->gref_head,
		    txq->grant_ref[i]);
		txq->grant_ref[i] = GRANT_REF_INVALID;
		add_id_to_freelist(txq->mbufs, i);
		txq->mbufs_cnt--;
		if (txq->mbufs_cnt < 0) {
			panic("%s: tx_chain_cnt must be >= 0", __func__);
		}
-		m_free(m);
+		mbuf_release(m);
	}
}

static struct mbuf *
xn_alloc_one_rx_buffer(struct netfront_rxq *rxq)
{
	struct mbuf *m;

--- 253 unchanged lines hidden ---
		gnttab_end_foreign_access_ref(txq->grant_ref[id]);
		gnttab_release_grant_reference(
		    &txq->gref_head, txq->grant_ref[id]);
		txq->grant_ref[id] = GRANT_REF_INVALID;

		txq->mbufs[id] = NULL;
		add_id_to_freelist(txq->mbufs, id);
		txq->mbufs_cnt--;
-		m_free(m);
+		mbuf_release(m);
		/* Only mark the txq active if we've freed up at least one slot to try */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	txq->ring.rsp_cons = prod;

	/*
	 * Set a new event, then check for race with update of
	 * tx_cons. Note that it is essential to schedule a
--- 180 unchanged lines hidden ---
	}
	*list = m0;
	*cons += frags;

	return (err);
}

/**
- * \brief Count the number of fragments in an mbuf chain.
- *
- * Surprisingly, there isn't an M* macro for this.
- */
-static inline int
-xn_count_frags(struct mbuf *m)
-{
-	int nfrags;
-
-	for (nfrags = 0; m != NULL; m = m->m_next)
-		nfrags++;
-
-	return (nfrags);
-}
-
-/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
{
-	struct mbuf *m;
	struct netfront_info *np = txq->info;
	struct ifnet *ifp = np->xn_ifp;
-	u_int nfrags;
-	int otherend_id;
+	int otherend_id, error, nfrags;
+	bus_dma_segment_t *segs = txq->segs;
+	struct mbuf_xennet *tag;
+	bus_dmamap_t map;
+	unsigned int i;

-	/**
-	 * Defragment the mbuf if necessary.
-	 */
-	nfrags = xn_count_frags(m_head);
+	KASSERT(!SLIST_EMPTY(&txq->tags), ("no tags available"));
+	tag = SLIST_FIRST(&txq->tags);
+	SLIST_REMOVE_HEAD(&txq->tags, next);
+	KASSERT(tag->count == 0, ("tag already in-use"));
+	map = tag->dma_map;
+	error = bus_dmamap_load_mbuf_sg(np->dma_tag, map, m_head, segs,
+	    &nfrags, 0);
+	if (error == EFBIG || nfrags > np->maxfrags) {
+		struct mbuf *m;

-	/*
-	 * Check to see whether this request is longer than netback
-	 * can handle, and try to defrag it.
-	 */
-	/**
-	 * It is a bit lame, but the netback driver in Linux can't
-	 * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
-	 * the Linux network stack.
-	 */
-	if (nfrags > np->maxfrags) {
+		bus_dmamap_unload(np->dma_tag, map);
		m = m_defrag(m_head, M_NOWAIT);
		if (!m) {
			/*
			 * Defrag failed, so free the mbuf and
			 * therefore drop the packet.
			 */
+			SLIST_INSERT_HEAD(&txq->tags, tag, next);
			m_freem(m_head);
			return (EMSGSIZE);
		}
		m_head = m;
+		error = bus_dmamap_load_mbuf_sg(np->dma_tag, map, m_head, segs,
+		    &nfrags, 0);
+		if (error != 0 || nfrags > np->maxfrags) {
+			bus_dmamap_unload(np->dma_tag, map);
+			SLIST_INSERT_HEAD(&txq->tags, tag, next);
+			m_freem(m_head);
+			return (error ?: EFBIG);
+		}
+	} else if (error != 0) {
+		SLIST_INSERT_HEAD(&txq->tags, tag, next);
+		m_freem(m_head);
+		return (error);
	}

-	/* Determine how many fragments now exist */
-	nfrags = xn_count_frags(m_head);
-
-	/*
-	 * Check to see whether the defragmented packet has too many
-	 * segments for the Linux netback driver.
-	 */
	/**
	 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
	 * of mbufs longer than Linux can handle. Make sure we don't
	 * pass a too-long chain over to the other side by dropping the
	 * packet. It doesn't look like there is currently a way to
	 * tell the TCP stack to generate a shorter chain of packets.
	 */
	if (nfrags > MAX_TX_REQ_FRAGS) {
#ifdef DEBUG
		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
		    "won't be able to handle it, dropping\n",
		    __func__, nfrags, MAX_TX_REQ_FRAGS);
#endif
+		SLIST_INSERT_HEAD(&txq->tags, tag, next);
+		bus_dmamap_unload(np->dma_tag, map);
		m_freem(m_head);
		return (EMSGSIZE);
	}

	/*
	 * This check should be redundant. We've already verified that we
	 * have enough slots in the ring to handle a packet of maximum
	 * size, and that our packet is less than the maximum size. Keep
--- 5 unchanged lines hidden ---
	    "(%d)!", __func__, (int) txq->mbufs_cnt,
	    (int) nfrags, (int) NET_TX_RING_SIZE));

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
-	m = m_head;
	otherend_id = xenbus_get_otherend_id(np->xbdev);
-	for (m = m_head; m; m = m->m_next) {
+	m_tag_prepend(m_head, &tag->tag);
+	for (i = 0; i < nfrags; i++) {
		netif_tx_request_t *tx;
		uintptr_t id;
		grant_ref_t ref;
		u_long mfn; /* XXX Wrong type? */

		tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
		id = get_id_from_freelist(txq->mbufs);
		if (id == 0)
			panic("%s: was allocated the freelist head!\n",
			    __func__);
		txq->mbufs_cnt++;
		if (txq->mbufs_cnt > NET_TX_RING_SIZE)
			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
			    __func__);
-		txq->mbufs[id] = m;
+		mbuf_grab(m_head);
+		txq->mbufs[id] = m_head;
		tx->id = id;
		ref = gnttab_claim_grant_reference(&txq->gref_head);
		KASSERT((short)ref >= 0, ("Negative ref"));
-		mfn = virt_to_mfn(mtod(m, vm_offset_t));
+		mfn = atop(segs[i].ds_addr);
		gnttab_grant_foreign_access_ref(ref, otherend_id,
		    mfn, GNTMAP_readonly);
		tx->gref = txq->grant_ref[id] = ref;
-		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
+		tx->offset = segs[i].ds_addr & PAGE_MASK;
+		KASSERT(tx->offset + segs[i].ds_len <= PAGE_SIZE,
+		    ("mbuf segment crosses a page boundary"));
		tx->flags = 0;
-		if (m == m_head) {
+		if (i == 0) {
			/*
			 * The first fragment has the entire packet
			 * size, subsequent fragments have just the
			 * fragment size. The backend works out the
			 * true size of the first fragment by
			 * subtracting the sizes of the other
			 * fragments.
			 */
-			tx->size = m->m_pkthdr.len;
+			tx->size = m_head->m_pkthdr.len;
			/*
			 * The first fragment contains the checksum flags
			 * and is optionally followed by extra data for
			 * TSO etc.
			 */
			/**
			 * CSUM_TSO requires checksum offloading.
			 * Some versions of FreeBSD fail to
			 * set CSUM_TCP in the CSUM_TSO case,
			 * so we have to test for CSUM_TSO
			 * explicitly.
			 */
-			if (m->m_pkthdr.csum_flags
+			if (m_head->m_pkthdr.csum_flags
			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
				tx->flags |= (NETTXF_csum_blank
				    | NETTXF_data_validated);
			}
-			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+			if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
				struct netif_extra_info *gso =
				    (struct netif_extra_info *)
				    RING_GET_REQUEST(&txq->ring,
				    ++txq->ring.req_prod_pvt);

				tx->flags |= NETTXF_extra_info;

-				gso->u.gso.size = m->m_pkthdr.tso_segsz;
+				gso->u.gso.size = m_head->m_pkthdr.tso_segsz;
				gso->u.gso.type =
				    XEN_NETIF_GSO_TYPE_TCPV4;
				gso->u.gso.pad = 0;
				gso->u.gso.features = 0;

				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
				gso->flags = 0;
			}
		} else {
-			tx->size = m->m_len;
+			tx->size = segs[i].ds_len;
		}
-		if (m->m_next)
+		if (i != nfrags - 1)
			tx->flags |= NETTXF_more_data;

		txq->ring.req_prod_pvt++;
	}
+	bus_dmamap_sync(np->dma_tag, map, BUS_DMASYNC_PREWRITE);
	BPF_MTAP(ifp, m_head);

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, m_head->m_pkthdr.len);
	if (m_head->m_flags & M_MCAST)
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);

	xn_txeof(txq);
--- 544 unchanged lines hidden ---
	    IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_LRO;
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;

	ether_ifattach(ifp, np->mac);
	netfront_carrier_off(np);

-	return (0);
+	err = bus_dma_tag_create(
+	    bus_get_dma_tag(dev),		/* parent */
+	    1, PAGE_SIZE,			/* algnmnt, boundary */
+	    BUS_SPACE_MAXADDR,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filter, filterarg */
+	    PAGE_SIZE * MAX_TX_REQ_FRAGS,	/* max request size */
+	    MAX_TX_REQ_FRAGS,			/* max segments */
+	    PAGE_SIZE,				/* maxsegsize */
+	    BUS_DMA_ALLOCNOW,			/* flags */
+	    NULL, NULL,				/* lockfunc, lockarg */
+	    &np->dma_tag);

+	return (err);
+
error:
	KASSERT(err != 0, ("Error path with no error code specified"));
	return (err);
}

static int
netfront_detach(device_t dev)
{
--- 15 unchanged lines hidden ---
	XN_UNLOCK(np);
	netif_disconnect_backend(np);
	ether_ifdetach(np->xn_ifp);
	free(np->rxq, M_DEVBUF);
	free(np->txq, M_DEVBUF);
	if_free(np->xn_ifp);
	np->xn_ifp = NULL;
	ifmedia_removeall(&np->sc_media);
+	bus_dma_tag_destroy(np->dma_tag);
}

static void
netif_disconnect_backend(struct netfront_info *np)
{
	u_int i;

	for (i = 0; i < np->num_queues; i++) {
--- 53 unchanged lines hidden ---
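For reference, the transmit-path rewrite above follows the usual FreeBSD idiom for mapping an mbuf chain with busdma: try bus_dmamap_load_mbuf_sg(), and if the chain has too many segments (or the load returns EFBIG), compact it with m_defrag() and try once more. Together with the tag created in create_netdev() (boundary and maxsegsize of PAGE_SIZE, at most MAX_TX_REQ_FRAGS segments), this keeps every returned segment within a single page, so each segment can be covered by exactly one grant reference. The helper below is an illustrative sketch of that idiom only; its name and parameters are not part of the driver.

static int
example_load_mbuf_chain(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **m_head, bus_dma_segment_t *segs, int maxsegs, int *nsegs)
{
	struct mbuf *m;
	int error;

	error = bus_dmamap_load_mbuf_sg(tag, map, *m_head, segs, nsegs, 0);
	if (error == EFBIG || *nsegs > maxsegs) {
		/* Too fragmented for the tag: compact the chain and retry. */
		bus_dmamap_unload(tag, map);
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (EMSGSIZE);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(tag, map, *m_head, segs,
		    nsegs, 0);
		if (error != 0 || *nsegs > maxsegs) {
			bus_dmamap_unload(tag, map);
			m_freem(*m_head);
			*m_head = NULL;
			return (error != 0 ? error : EFBIG);
		}
	} else if (error != 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (error);
	}
	return (0);
}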