/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>      /* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/intr_machdep.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include "xenbus_if.h"

/* Features supported by all backends.  TSO and LRO can be negotiated. */
#define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

/*
 * Should the driver do LRO on the RX end?  This can be toggled on the fly,
 * but the interface must be reset (down/up) for it to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);

/**
 * \brief The maximum allowed data fragments in a single transmit
 *        request.
 *
 * This limit is imposed by the backend driver.  We assume here that
 * we are dealing with a Linux driver domain and have set our limit
 * to mirror the Linux MAX_SKB_FRAGS constant.
 */
#define	MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)

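/*
 * Illustrative arithmetic only (not used by the code): with the usual 4096
 * byte x86 PAGE_SIZE, __RING_SIZE() above yields 256 slots for both the TX
 * and RX rings, and MAX_TX_REQ_FRAGS works out to 65536 / 4096 + 2 = 18,
 * the value of the Linux MAX_SKB_FRAGS constant mentioned in the comment.
 */
#if 0
CTASSERT(MAX_TX_REQ_FRAGS == 18);	/* Only holds for 4 KiB pages. */
#endif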
#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0

struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
static int xn_assemble_tx_request(struct netfront_info *sc,
    struct mbuf *m_head);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_query_features(struct netfront_info *np);
static int xn_configure_features(struct netfront_info *np);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

#ifdef notyet
static void netfront_closing(device_t dev);
#endif
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void free_ring(int *ref, void *ring_ptr_ref);

static int  xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list);

#define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)

/*
 * Mbuf pointers.  We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
	struct mbuf    *xn_tx_chain[NET_TX_RING_SIZE+1];
	int		xn_tx_chain_cnt;
	struct mbuf    *xn_rx_chain[NET_RX_RING_SIZE+1];
};

struct netfront_stats
{
	u_long	rx_packets;	/* total packets received	*/
	u_long	tx_packets;	/* total packets transmitted	*/
	u_long	rx_bytes;	/* total bytes received		*/
	u_long	tx_bytes;	/* total bytes transmitted	*/
	u_long	rx_errors;	/* bad packets received		*/
	u_long	tx_errors;	/* packet transmit problems	*/
};

struct netfront_info {
	struct ifnet *xn_ifp;
	struct lro_ctrl xn_lro;

	struct netfront_stats stats;
	u_int tx_full;

	netif_tx_front_ring_t tx;
	netif_rx_front_ring_t rx;

	struct mtx   tx_lock;
	struct mtx   rx_lock;
	struct mtx   sc_lock;

	xen_intr_handle_t xen_intr_handle;
	u_int carrier;
	u_int maxfrags;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
	int rx_min_target;
	int rx_max_target;
	int rx_target;

	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];

	device_t		xbdev;
	int			tx_ring_ref;
	int			rx_ring_ref;
	uint8_t			mac[ETHER_ADDR_LEN];
	struct xn_chain_data	xn_cdata;	/* mbufs */
	struct mbufq		xn_rx_batch;	/* batch queue */

	int			xn_if_flags;
	struct callout		xn_stat_ch;

	u_long			rx_pfn_array[NET_RX_RING_SIZE];
	struct ifmedia		sc_media;

	bool			xn_resume;
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_RX_LOCK(_sc)           mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)         mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)           mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)         mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)              mtx_lock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)            mtx_unlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)       mtx_assert(&(_sc)->sc_lock, MA_OWNED);
#define XN_RX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->tx_lock, MA_OWNED);

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)

/* Access macros for acquiring/freeing slots in xn_free_{tx,rx}_idxs[]. */

static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{
	KASSERT(id != 0,
		("%s: the head item (0) must always be free.", __func__));
	list[id] = list[0];
	list[0]  = (struct mbuf *)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	uintptr_t id;

	id = (uintptr_t)list[0];
	KASSERT(id != 0,
		("%s: the head item (0) must always remain free.", __func__));
	list[0] = list[id];
	return (id);
}
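/*
 * A minimal sketch (not compiled into the driver) of how the free list above
 * is threaded through the mbuf pointer array itself: slot 0 always holds the
 * index of the first free slot, cast to a pointer, and every free slot holds
 * the index of the next free one.  Indices are assumed to be small enough to
 * never collide with real kernel addresses (see netif_release_tx_bufs()).
 */
#if 0
static void
freelist_example(void)
{
	struct mbuf *chain[4 + 1];
	uintptr_t i;
	unsigned short id;

	/* Slot 0 is the head; link the free slots together: 0->1->2->... */
	for (i = 0; i <= 4; i++)
		chain[i] = (struct mbuf *)(i + 1);

	id = get_id_from_freelist(chain);	/* id == 1, head is now 2. */
	/* While slot 'id' is in use, chain[id] stores a real mbuf pointer. */
	add_id_to_freelist(chain, id);		/* Slot 1 is free again. */
}
#endif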

static inline int
xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct mbuf *m;

	m = np->rx_mbufs[i];
	np->rx_mbufs[i] = NULL;
	return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
	np->grant_rx_ref[i] = GRANT_REF_INVALID;
	return ref;
}

#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/**
 * Read the 'mac' node at the given device's node in the store, and parse that
 * as colon-separated octets, placing the result in the given mac array.  mac
 * must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	int error, i;
	char *s, *e, *macstr;
	const char *path;

	path = xenbus_get_node(dev);
	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	if (error == ENOENT) {
		/*
		 * Deal with missing mac XenStore nodes on devices with
		 * HVM emulation (the 'ioemu' configuration attribute)
		 * enabled.
		 *
		 * The HVM emulator may execute in a stub device model
		 * domain which lacks the permission, only given to Dom0,
		 * to update the guest's XenStore tree.  For this reason,
		 * the HVM emulator doesn't even attempt to write the
		 * front-side mac node, even when operating in Dom0.
		 * However, there should always be a mac listed in the
		 * backend tree.  Fallback to this version if our query
		 * of the front side XenStore location doesn't find
		 * anything.
		 */
		path = xenbus_get_otherend_path(dev);
		error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	}
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
		return (error);
	}

	s = macstr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || (e[0] != ':' && e[0] != 0)) {
			free(macstr, M_XENBUS);
			return (ENOENT);
		}
		s = &e[1];
	}
	free(macstr, M_XENBUS);
	return (0);
}
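/*
 * For illustration only (values are hypothetical): the backend publishes the
 * address as a colon-separated string, conventionally from the Xen range
 * 00:16:3e:xx:xx:xx, e.g.
 *
 *	/local/domain/<domid>/device/vif/0/mac = "00:16:3e:12:34:56"
 *
 * A hypothetical caller could fetch and print it like this:
 */
#if 0
static void
xn_print_mac_example(device_t dev)
{
	uint8_t mac[ETHER_ADDR_LEN];

	if (xen_net_read_mac(dev, mac) == 0)
		device_printf(dev, "MAC %6D\n", mac, ":");
}
#endif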

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

	if (xen_hvm_domain() && xen_disable_pv_nics != 0)
		return (ENXIO);

	if (!strcmp(xenbus_get_type(dev), "vif")) {
		device_set_desc(dev, "Virtual Network Interface");
		return (0);
	}

	return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
	int err;

	err = create_netdev(dev);
	if (err) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return (err);
	}

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_lro", CTLFLAG_RW,
	    &xn_enable_lro, 0, "Large Receive Offload");

	return (0);
}

static int
netfront_suspend(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);
	return (0);
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	info->xn_resume = true;
	netif_disconnect_backend(info);
	return (0);
}

/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
	const char *message;
	struct xs_transaction xst;
	const char *node = xenbus_get_node(dev);
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

 again:
	err = xs_transaction_start(&xst);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}
	err = xs_printf(xst, node, "tx-ring-ref", "%u",
	    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "rx-ring-ref", "%u",
	    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node,
	    "event-channel", "%u",
	    xen_intr_port(info->xen_intr_handle));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xs_transaction_end(xst, 0);
	if (err) {
		if (err == EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xs_transaction_end(xst, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return err;
}
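/*
 * For reference (illustrative values only; the actual reference and port
 * numbers will differ), a successful handshake leaves the frontend's
 * XenStore directory looking roughly like this, as seen from the control
 * domain with xenstore-ls:
 *
 *	/local/domain/<domid>/device/vif/0/tx-ring-ref       = "8"
 *	/local/domain/<domid>/device/vif/0/rx-ring-ref       = "9"
 *	/local/domain/<domid>/device/vif/0/event-channel     = "15"
 *	/local/domain/<domid>/device/vif/0/request-rx-copy   = "1"
 *	/local/domain/<domid>/device/vif/0/feature-rx-notify = "1"
 *	/local/domain/<domid>/device/vif/0/feature-sg        = "1"
 *	/local/domain/<domid>/device/vif/0/feature-gso-tcpv4 = "1"
 */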

static int
setup_device(device_t dev, struct netfront_info *info)
{
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	int error;

	info->tx_ring_ref = GRANT_REF_INVALID;
	info->rx_ring_ref = GRANT_REF_INVALID;
	info->rx.sring = NULL;
	info->tx.sring = NULL;

	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!txs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
	if (error)
		goto fail;

	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!rxs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
	if (error)
		goto fail;

	error = xen_intr_alloc_and_bind_local_port(dev,
	    xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr, info,
	    INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &info->xen_intr_handle);

	if (error) {
		xenbus_dev_fatal(dev, error,
		    "xen_intr_alloc_and_bind_local_port failed");
		goto fail;
	}

	return (0);

 fail:
	netif_free(info);
	return (error);
}

#ifdef INET
/**
 * If this interface has an ipv4 address, send an arp for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	ifp = info->xn_ifp;
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			arp_ifinit(ifp, ifa);
		}
	}
}
#endif

/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (network_connect(sc) != 0)
			break;
		xenbus_set_state(dev, XenbusStateConnected);
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	case XenbusStateConnected:
#ifdef INET
		netfront_send_fake_arp(dev, sc);
#endif
		break;
	}
}

static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		if (sc->xn_cdata.rx_mbufs[i] != NULL) {
			m_freem(sc->rx_mbufs[i]);
			sc->rx_mbufs[i] = NULL;
		}
	}

	sc->rx.rsp_cons = 0;
	sc->xn_rx_if->req_prod = 0;
	sc->xn_rx_if->event = sc->rx.rsp_cons ;
#endif
}

static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		if (sc->tx_mbufs[i] != NULL) {
			m_freem(sc->tx_mbufs[i]);
			sc->xn_cdata.xn_tx_chain[i] = NULL;
		}
	}

	return;
#endif
}

/**
 * \brief Verify that there is sufficient space in the Tx ring
 *        buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_info *np)
{
	return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
}
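/*
 * A minimal sketch (not the driver's actual start routine) of how a transmit
 * path is expected to use the check above: stop pulling packets off the send
 * queue and mark the interface busy whenever a worst-case request might not
 * fit, then let xn_txeof() clear IFF_DRV_OACTIVE once slots are freed.
 */
#if 0
static void
xn_start_sketch(struct netfront_info *np)
{
	struct ifnet *ifp = np->xn_ifp;
	struct mbuf *m_head;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (!xn_tx_slot_available(np)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		if (xn_assemble_tx_request(np, m_head) != 0)
			break;
	}
}
#endif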

static void
netif_release_tx_bufs(struct netfront_info *np)
{
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		struct mbuf *m;

		m = np->tx_mbufs[i];

		/*
		 * We assume that no kernel addresses are
		 * less than NET_TX_RING_SIZE.  Any entry
		 * in the table that is below this number
		 * must be an index from free-list tracking.
		 */
		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
			continue;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
		gnttab_release_grant_reference(&np->gref_tx_head,
		    np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_REF_INVALID;
		add_id_to_freelist(np->tx_mbufs, i);
		np->xn_cdata.xn_tx_chain_cnt--;
		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
			panic("%s: tx_chain_cnt must be >= 0", __func__);
		}
		m_free(m);
	}
}

static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
	unsigned short id;
	struct mbuf *m_new;
	int i, batch_target, notify;
	RING_IDX req_prod;
	grant_ref_t ref;
	netif_rx_request_t *req;
	vm_offset_t vaddr;
	u_long pfn;

	req_prod = sc->rx.req_prod_pvt;

	if (__predict_false(sc->carrier == 0))
		return;

	/*
	 * Allocate mbufs greedily, even though we batch updates to the
	 * receive ring.  This creates a less bursty demand on the memory
	 * allocator, and so should reduce the chance of failed allocation
	 * requests both for ourselves and for other kernel subsystems.
	 *
	 * Here we attempt to maintain rx_target buffers in flight, counting
	 * buffers that we have yet to process in the receive ring.
	 */
	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
		if (m_new == NULL) {
			if (i != 0)
				goto refill;
			/*
			 * XXX set timer
			 */
			break;
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

		/* queue the mbufs allocated */
		(void)mbufq_enqueue(&sc->xn_rx_batch, m_new);
	}

	/*
	 * If we've allocated at least half of our target number of entries,
	 * submit them to the backend - we have enough to make the overhead
	 * of submission worthwhile.  Otherwise wait for more mbufs and
	 * request entries to become available.
	 */
	if (i < (sc->rx_target/2)) {
		if (req_prod > sc->rx.sring->req_prod)
			goto push;
		return;
	}

	/*
	 * Double floating fill target if we risked having the backend
	 * run out of empty buffers for receive traffic.  We define "running
	 * low" as having less than a fourth of our target buffers free
	 * at the time we refilled the queue.
	 */
	if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
		sc->rx_target *= 2;
		if (sc->rx_target > sc->rx_max_target)
			sc->rx_target = sc->rx_max_target;
	}

refill:
	for (i = 0; ; i++) {
		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
			break;

		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
		    vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

		id = xennet_rxidx(req_prod + i);

		KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain"));
		sc->rx_mbufs[id] = m_new;

		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
		KASSERT(ref != GNTTAB_LIST_END,
		    ("reserved grant references exhausted"));
		sc->grant_rx_ref[id] = ref;

		vaddr = mtod(m_new, vm_offset_t);
		pfn = vtophys(vaddr) >> PAGE_SHIFT;
		req = RING_GET_REQUEST(&sc->rx, req_prod + i);

		gnttab_grant_foreign_access_ref(ref, otherend_id, pfn, 0);
		req->id = id;
		req->gref = ref;

		sc->rx_pfn_array[i] =
		    vtophys(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
	}

	KASSERT(i, ("no mbufs processed"));	/* should have returned earlier */
	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
	/*
	 * We may have allocated buffers which have entries outstanding
	 * in the page update queue -- make sure we flush those first!
	 */
	wmb();

	/* Above is a suitable barrier to ensure backend will see requests. */
	sc->rx.req_prod_pvt = req_prod + i;
push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
	if (notify)
		xen_intr_signal(sc->xen_intr_handle);
}
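/*
 * Worked example of the refill accounting above (illustrative numbers only):
 * with rx_target = 32 and 8 requests already outstanding in the ring,
 * batch_target = 32 - 8 = 24 mbufs are allocated.  If, at refill time, fewer
 * than 32 / 4 = 8 empty buffers were still available to the backend, the
 * floating rx_target doubles to 64, saturating at rx_max_target
 * (NET_RX_RING_SIZE).
 */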

static void
xn_rxeof(struct netfront_info *np)
{
	struct ifnet *ifp;
	struct lro_ctrl *lro = &np->xn_lro;
	struct lro_entry *queued;
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	struct mbuf *m;
	struct mbufq rxq, errq;
	int err, work_to_do;

	do {
		XN_RX_LOCK_ASSERT(np);
		if (!netfront_carrier_ok(np))
			return;

		/* XXX: there should be some sane limit. */
		mbufq_init(&errq, INT_MAX);
		mbufq_init(&rxq, INT_MAX);

		ifp = np->xn_ifp;

		rp = np->rx.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = np->rx.rsp_cons;
		while ((i != rp)) {
			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xennet_get_responses(np, &rinfo, rp, &i, &m);

			if (__predict_false(err)) {
				if (m)
					(void)mbufq_enqueue(&errq, m);
				np->stats.rx_errors++;
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if (rx->flags & NETRXF_data_validated) {
				/* Tell the stack the checksums are okay */
				/*
				 * XXX this isn't necessarily the case - need to add
				 * check
				 */

				m->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
				    | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}

			np->stats.rx_packets++;
			np->stats.rx_bytes += m->m_pkthdr.len;

			(void)mbufq_enqueue(&rxq, m);
			np->rx.rsp_cons = i;
		}

		mbufq_drain(&errq);

		/*
		 * Process all the mbufs after the remapping is complete.
		 * Break the mbuf chain first though.
		 */
		while ((m = mbufq_dequeue(&rxq)) != NULL) {
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			/*
			 * Do we really need to drop the rx lock?
			 */
			XN_RX_UNLOCK(np);
#if (defined(INET) || defined(INET6))
			/* Use LRO if possible */
			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
				/*
				 * If LRO fails, pass up to the stack
				 * directly.
				 */
				(*ifp->if_input)(ifp, m);
			}
#else
			(*ifp->if_input)(ifp, m);
#endif
			XN_RX_LOCK(np);
		}

		np->rx.rsp_cons = i;

#if (defined(INET) || defined(INET6))
		/*
		 * Flush any outstanding LRO work
		 */
		while (!SLIST_EMPTY(&lro->lro_active)) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif

#if 0
		/* If we get a callback with very few responses, reduce fill target. */
		/* NB. Note exponential increase, linear decrease. */
		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
		    ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
			np->rx_target = np->rx_min_target;
#endif

		network_alloc_rx_buffers(np);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
	} while (work_to_do);
}

static void
xn_txeof(struct netfront_info *np)
{
	RING_IDX i, prod;
	unsigned short id;
	struct ifnet *ifp;
	netif_tx_response_t *txr;
	struct mbuf *m;

	XN_TX_LOCK_ASSERT(np);

	if (!netfront_carrier_ok(np))
		return;

	ifp = np->xn_ifp;

	do {
		prod = np->tx.sring->rsp_prod;
		rmb();	/* Ensure we see responses up to 'rp'. */

		for (i = np->tx.rsp_cons; i != prod; i++) {
			txr = RING_GET_RESPONSE(&np->tx, i);
			if (txr->status == NETIF_RSP_NULL)
				continue;

			if (txr->status != NETIF_RSP_OKAY) {
				printf("%s: WARNING: response is %d!\n",
				    __func__, txr->status);
			}
			id = txr->id;
			m = np->tx_mbufs[id];
			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
			    ("mbuf already on the free list, but we're "
			    "trying to free it again!"));
			M_ASSERTVALID(m);

			/*
			 * Increment packet count if this is the last
			 * mbuf of the chain.
			 */
			if (!m->m_next)
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			if (__predict_false(gnttab_query_foreign_access(
			    np->grant_tx_ref[id]) != 0)) {
				panic("%s: grant id %u still in use by the "
				    "backend", __func__, id);
			}
			gnttab_end_foreign_access_ref(
			    np->grant_tx_ref[id]);
			gnttab_release_grant_reference(
			    &np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_REF_INVALID;

			np->tx_mbufs[id] = NULL;
			add_id_to_freelist(np->tx_mbufs, id);
			np->xn_cdata.xn_tx_chain_cnt--;
			m_free(m);
			/* Only mark the queue active if we've freed up at least one slot to try */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of
		 * tx_cons.  Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending.  Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		np->tx.sring->rsp_event =
		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;

		mb();
	} while (prod != np->tx.sring->rsp_prod);

	if (np->tx_full &&
	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		np->tx_full = 0;
#if 0
		if (np->user_state == UST_OPEN)
			netif_wake_queue(dev);
#endif
	}
}
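/*
 * Worked example of the rsp_event calculation above (illustrative numbers
 * only): with prod == 100 and req_prod == 108, the backend still owes us 8
 * responses, so rsp_event becomes 100 + (8 >> 1) + 1 = 105 and the backend
 * will raise the next event once it has produced response 105.  This batches
 * completion interrupts while still guaranteeing another kick arrives.
 */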

static void
xn_intr(void *xsc)
{
	struct netfront_info *np = xsc;
	struct ifnet *ifp = np->xn_ifp;

#if 0
	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
	    likely(netfront_carrier_ok(np)) &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;
#endif
	if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
		XN_TX_LOCK(np);
		xn_txeof(np);
		XN_TX_UNLOCK(np);
	}

	XN_RX_LOCK(np);
	xn_rxeof(np);
	XN_RX_UNLOCK(np);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		xn_start(ifp);
}

static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
    grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
	np->rx_mbufs[new] = m;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
	struct netif_extra_info *extra;

	int err = 0;

	do {
		struct mbuf *m;
		grant_ref_t ref;

		if (__predict_false(*cons + 1 == rp)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Missing extra info\n");
#endif
			err = EINVAL;
			break;
		}

		extra = (struct netif_extra_info *)
		    RING_GET_RESPONSE(&np->rx, ++(*cons));

		if (__predict_false(!extra->type ||
		    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Invalid extra type: %d\n",
					extra->type);
#endif
			err = EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
		}

		m = xennet_get_rx_mbuf(np, *cons);
		ref = xennet_get_rx_ref(np, *cons);
		xennet_move_rx_slot(np, m, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return err;
}

static int
xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list)
{
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	struct mbuf *m, *m0, *m_prev;
	grant_ref_t ref = xennet_get_rx_ref(np, *cons);
	RING_IDX ref_cons = *cons;
	int frags = 1;
	int err = 0;
	u_long ret;

	m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp, cons);
	}

	if (m0 != NULL) {
		m0->m_pkthdr.len = 0;
		m0->m_next = NULL;
	}

	for (;;) {
#if 0
		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
			rx->status, rx->offset, frags);
#endif
		if (__predict_false(rx->status < 0 ||
		    rx->offset + rx->status > PAGE_SIZE)) {

#if 0
			if (net_ratelimit())
				WPRINTK("rx->offset: %x, size: %u\n",
					rx->offset, rx->status);
#endif
			xennet_move_rx_slot(np, m, ref);
			if (m0 == m)
				m0 = NULL;
			m = NULL;
			err = EINVAL;
			goto next_skip_queue;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver.  In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_REF_INVALID) {

#if 0
			if (net_ratelimit())
				WPRINTK("Bad rx response id %d.\n", rx->id);
#endif
			printf("%s: Bad rx response id %d.\n", __func__, rx->id);
			err = EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref);
		KASSERT(ret, ("Unable to end access to grant references"));

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

next:
		if (m == NULL)
			break;

		m->m_len = rx->status;
		m->m_data += rx->offset;
		m0->m_pkthdr.len += rx->status;

next_skip_queue:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (*cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = ENOENT;
			printf("%s: cons %u frags %u rp %u, not enough frags\n",
			    __func__, *cons, frags, rp);
			break;
		}
		/*
		 * Note that m can be NULL, if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		m_prev = m;

		rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
		m = xennet_get_rx_mbuf(np, *cons + frags);

		/*
		 * m_prev == NULL can happen if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		if (m_prev != NULL)
			m_prev->m_next = m;

		/*
		 * m0 can be NULL if rx->status < 0 or if rx->offset +
		 * rx->status > PAGE_SIZE above.
		 */
		if (m0 == NULL)
Merry m0 = m; 124383b92f6eSKip Macy m->m_next = NULL; 1244931eeffaSKenneth D. Merry ref = xennet_get_rx_ref(np, *cons + frags); 1245931eeffaSKenneth D. Merry ref_cons = *cons + frags; 124689e0f4d2SKip Macy frags++; 124789e0f4d2SKip Macy } 124883b92f6eSKip Macy *list = m0; 1249931eeffaSKenneth D. Merry *cons += frags; 125089e0f4d2SKip Macy 12518577146eSJustin T. Gibbs return (err); 125289e0f4d2SKip Macy } 125389e0f4d2SKip Macy 125489e0f4d2SKip Macy static void 125589e0f4d2SKip Macy xn_tick_locked(struct netfront_info *sc) 125689e0f4d2SKip Macy { 125789e0f4d2SKip Macy XN_RX_LOCK_ASSERT(sc); 125889e0f4d2SKip Macy callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); 125989e0f4d2SKip Macy 126089e0f4d2SKip Macy /* XXX placeholder for printing debug information */ 126189e0f4d2SKip Macy } 126289e0f4d2SKip Macy 126389e0f4d2SKip Macy static void 126489e0f4d2SKip Macy xn_tick(void *xsc) 126589e0f4d2SKip Macy { 126689e0f4d2SKip Macy struct netfront_info *sc; 126789e0f4d2SKip Macy 126889e0f4d2SKip Macy sc = xsc; 126989e0f4d2SKip Macy XN_RX_LOCK(sc); 127089e0f4d2SKip Macy xn_tick_locked(sc); 127189e0f4d2SKip Macy XN_RX_UNLOCK(sc); 127289e0f4d2SKip Macy } 127389e0f4d2SKip Macy 1274931eeffaSKenneth D. Merry /** 1275931eeffaSKenneth D. Merry * \brief Count the number of fragments in an mbuf chain. 1276931eeffaSKenneth D. Merry * 1277931eeffaSKenneth D. Merry * Surprisingly, there isn't an M* macro for this. 1278c099cafaSAdrian Chadd */ 1279931eeffaSKenneth D. Merry static inline int 1280931eeffaSKenneth D. Merry xn_count_frags(struct mbuf *m) 1281931eeffaSKenneth D. Merry { 1282931eeffaSKenneth D. Merry int nfrags; 1283931eeffaSKenneth D. Merry 1284931eeffaSKenneth D. Merry for (nfrags = 0; m != NULL; m = m->m_next) 1285931eeffaSKenneth D. Merry nfrags++; 1286931eeffaSKenneth D. Merry 1287931eeffaSKenneth D. Merry return (nfrags); 128889e0f4d2SKip Macy } 128989e0f4d2SKip Macy 1290931eeffaSKenneth D. Merry /** 1291931eeffaSKenneth D. Merry * Given an mbuf chain, make sure we have enough room and then push 1292931eeffaSKenneth D. Merry * it onto the transmit ring. 1293931eeffaSKenneth D. Merry */ 1294931eeffaSKenneth D. Merry static int 1295931eeffaSKenneth D. Merry xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head) 1296931eeffaSKenneth D. Merry { 1297931eeffaSKenneth D. Merry struct ifnet *ifp; 1298931eeffaSKenneth D. Merry struct mbuf *m; 1299931eeffaSKenneth D. Merry u_int nfrags; 1300931eeffaSKenneth D. Merry int otherend_id; 1301931eeffaSKenneth D. Merry 1302931eeffaSKenneth D. Merry ifp = sc->xn_ifp; 1303931eeffaSKenneth D. Merry 1304931eeffaSKenneth D. Merry /** 130512678024SDoug Rabson * Defragment the mbuf if necessary. 130612678024SDoug Rabson */ 1307931eeffaSKenneth D. Merry nfrags = xn_count_frags(m_head); 1308931eeffaSKenneth D. Merry 1309931eeffaSKenneth D. Merry /* 1310931eeffaSKenneth D. Merry * Check to see whether this request is longer than netback 1311931eeffaSKenneth D. Merry * can handle, and try to defrag it. 1312931eeffaSKenneth D. Merry */ 1313931eeffaSKenneth D. Merry /** 1314931eeffaSKenneth D. Merry * It is a bit lame, but the netback driver in Linux can't 1315931eeffaSKenneth D. Merry * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of 1316931eeffaSKenneth D. Merry * the Linux network stack. 1317931eeffaSKenneth D. Merry */ 1318578e4bf7SJustin T. Gibbs if (nfrags > sc->maxfrags) { 1319c6499eccSGleb Smirnoff m = m_defrag(m_head, M_NOWAIT); 132012678024SDoug Rabson if (!m) { 1321931eeffaSKenneth D. Merry /* 1322931eeffaSKenneth D. 
Merry * Defrag failed, so free the mbuf and 1323931eeffaSKenneth D. Merry * therefore drop the packet. 1324931eeffaSKenneth D. Merry */ 132512678024SDoug Rabson m_freem(m_head); 1326931eeffaSKenneth D. Merry return (EMSGSIZE); 132712678024SDoug Rabson } 132812678024SDoug Rabson m_head = m; 132912678024SDoug Rabson } 133089e0f4d2SKip Macy 1331a4ec37f5SAdrian Chadd /* Determine how many fragments now exist */ 1332931eeffaSKenneth D. Merry nfrags = xn_count_frags(m_head); 1333a4ec37f5SAdrian Chadd 1334a4ec37f5SAdrian Chadd /* 1335931eeffaSKenneth D. Merry * Check to see whether the defragmented packet has too many 1336931eeffaSKenneth D. Merry * segments for the Linux netback driver. 1337a4ec37f5SAdrian Chadd */ 1338931eeffaSKenneth D. Merry /** 1339931eeffaSKenneth D. Merry * The FreeBSD TCP stack, with TSO enabled, can produce a chain 1340931eeffaSKenneth D. Merry * of mbufs longer than Linux can handle. Make sure we don't 1341931eeffaSKenneth D. Merry * pass a too-long chain over to the other side by dropping the 1342931eeffaSKenneth D. Merry * packet. It doesn't look like there is currently a way to 1343931eeffaSKenneth D. Merry * tell the TCP stack to generate a shorter chain of packets. 13443fb28bbbSAdrian Chadd */ 1345931eeffaSKenneth D. Merry if (nfrags > MAX_TX_REQ_FRAGS) { 1346ff662b5cSJustin T. Gibbs #ifdef DEBUG 1347ff662b5cSJustin T. Gibbs printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback " 1348ff662b5cSJustin T. Gibbs "won't be able to handle it, dropping\n", 1349ff662b5cSJustin T. Gibbs __func__, nfrags, MAX_TX_REQ_FRAGS); 1350ff662b5cSJustin T. Gibbs #endif 1351931eeffaSKenneth D. Merry m_freem(m_head); 1352931eeffaSKenneth D. Merry return (EMSGSIZE); 1353a4ec37f5SAdrian Chadd } 1354a4ec37f5SAdrian Chadd 13553fb28bbbSAdrian Chadd /* 1356931eeffaSKenneth D. Merry * This check should be redundant. We've already verified that we 1357931eeffaSKenneth D. Merry * have enough slots in the ring to handle a packet of maximum 1358931eeffaSKenneth D. Merry * size, and that our packet is less than the maximum size. Keep 1359931eeffaSKenneth D. Merry * it in here as an assert for now just to make certain that 1360931eeffaSKenneth D. Merry * xn_tx_chain_cnt is accurate. 13613fb28bbbSAdrian Chadd */ 1362931eeffaSKenneth D. Merry KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE, 1363931eeffaSKenneth D. Merry ("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE " 1364931eeffaSKenneth D. Merry "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt, 1365931eeffaSKenneth D. Merry (int) nfrags, (int) NET_TX_RING_SIZE)); 1366a4ec37f5SAdrian Chadd 136789e0f4d2SKip Macy /* 136889e0f4d2SKip Macy * Start packing the mbufs in this chain into 136989e0f4d2SKip Macy * the fragment pointers. Stop when we run out 137089e0f4d2SKip Macy * of fragments or hit the end of the mbuf chain. 137189e0f4d2SKip Macy */ 137212678024SDoug Rabson m = m_head; 1373931eeffaSKenneth D. Merry otherend_id = xenbus_get_otherend_id(sc->xbdev); 137412678024SDoug Rabson for (m = m_head; m; m = m->m_next) { 1375931eeffaSKenneth D. Merry netif_tx_request_t *tx; 1376931eeffaSKenneth D. Merry uintptr_t id; 1377931eeffaSKenneth D. Merry grant_ref_t ref; 1378931eeffaSKenneth D. Merry u_long mfn; /* XXX Wrong type? */ 1379931eeffaSKenneth D. Merry 1380931eeffaSKenneth D. Merry tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt); 1381931eeffaSKenneth D. 
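/*
 * The tx_mbufs[] array doubles as a free list: a free entry holds
 * the index of the next free entry (create_netdev() chains every
 * slot this way at attach time), while an entry that is in use
 * holds the mbuf pointer for that request id.  Index 0 is the
 * free-list head itself and must never be handed out as an id,
 * hence the panic below.
 */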
Merry id = get_id_from_freelist(sc->tx_mbufs); 1382a4ec37f5SAdrian Chadd if (id == 0) 13836f9767acSMarius Strobl panic("%s: was allocated the freelist head!\n", 13846f9767acSMarius Strobl __func__); 1385a4ec37f5SAdrian Chadd sc->xn_cdata.xn_tx_chain_cnt++; 1386931eeffaSKenneth D. Merry if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE) 13876f9767acSMarius Strobl panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n", 13886f9767acSMarius Strobl __func__); 1389931eeffaSKenneth D. Merry sc->tx_mbufs[id] = m; 139089e0f4d2SKip Macy tx->id = id; 139189e0f4d2SKip Macy ref = gnttab_claim_grant_reference(&sc->gref_tx_head); 139289e0f4d2SKip Macy KASSERT((short)ref >= 0, ("Negative ref")); 139312678024SDoug Rabson mfn = virt_to_mfn(mtod(m, vm_offset_t)); 139423dc5621SKip Macy gnttab_grant_foreign_access_ref(ref, otherend_id, 139589e0f4d2SKip Macy mfn, GNTMAP_readonly); 139689e0f4d2SKip Macy tx->gref = sc->grant_tx_ref[id] = ref; 139712678024SDoug Rabson tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1); 139889e0f4d2SKip Macy tx->flags = 0; 139912678024SDoug Rabson if (m == m_head) { 140012678024SDoug Rabson /* 140112678024SDoug Rabson * The first fragment has the entire packet 140212678024SDoug Rabson * size, subsequent fragments have just the 140312678024SDoug Rabson * fragment size. The backend works out the 140412678024SDoug Rabson * true size of the first fragment by 140512678024SDoug Rabson * subtracting the sizes of the other 140612678024SDoug Rabson * fragments. 140712678024SDoug Rabson */ 140812678024SDoug Rabson tx->size = m->m_pkthdr.len; 140989e0f4d2SKip Macy 141012678024SDoug Rabson /* 1411931eeffaSKenneth D. Merry * The first fragment contains the checksum flags 1412931eeffaSKenneth D. Merry * and is optionally followed by extra data for 1413931eeffaSKenneth D. Merry * TSO etc. 1414931eeffaSKenneth D. Merry */ 1415931eeffaSKenneth D. Merry /** 1416931eeffaSKenneth D. Merry * CSUM_TSO requires checksum offloading. 1417931eeffaSKenneth D. Merry * Some versions of FreeBSD fail to 1418931eeffaSKenneth D. Merry * set CSUM_TCP in the CSUM_TSO case, 1419931eeffaSKenneth D. Merry * so we have to test for CSUM_TSO 1420931eeffaSKenneth D. Merry * explicitly. 142112678024SDoug Rabson */ 142212678024SDoug Rabson if (m->m_pkthdr.csum_flags 1423931eeffaSKenneth D. Merry & (CSUM_DELAY_DATA | CSUM_TSO)) { 142412678024SDoug Rabson tx->flags |= (NETTXF_csum_blank 142512678024SDoug Rabson | NETTXF_data_validated); 142612678024SDoug Rabson } 142712678024SDoug Rabson if (m->m_pkthdr.csum_flags & CSUM_TSO) { 142812678024SDoug Rabson struct netif_extra_info *gso = 142912678024SDoug Rabson (struct netif_extra_info *) 1430931eeffaSKenneth D. Merry RING_GET_REQUEST(&sc->tx, 1431931eeffaSKenneth D. Merry ++sc->tx.req_prod_pvt); 143289e0f4d2SKip Macy 143312678024SDoug Rabson tx->flags |= NETTXF_extra_info; 143489e0f4d2SKip Macy 143512678024SDoug Rabson gso->u.gso.size = m->m_pkthdr.tso_segsz; 143612678024SDoug Rabson gso->u.gso.type = 143712678024SDoug Rabson XEN_NETIF_GSO_TYPE_TCPV4; 143812678024SDoug Rabson gso->u.gso.pad = 0; 143912678024SDoug Rabson gso->u.gso.features = 0; 144012678024SDoug Rabson 144112678024SDoug Rabson gso->type = XEN_NETIF_EXTRA_TYPE_GSO; 144212678024SDoug Rabson gso->flags = 0; 144312678024SDoug Rabson } 144412678024SDoug Rabson } else { 144512678024SDoug Rabson tx->size = m->m_len; 144612678024SDoug Rabson } 1447931eeffaSKenneth D. Merry if (m->m_next) 144812678024SDoug Rabson tx->flags |= NETTXF_more_data; 144912678024SDoug Rabson 1450931eeffaSKenneth D. 
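/*
 * Worked example of the size convention above: a 3000-byte packet
 * carried in mbufs of 1500, 1000 and 500 bytes is posted with
 * tx->size values of 3000, 1000 and 500, and the backend recovers
 * the first fragment's true length as 3000 - 1000 - 500 = 1500.
 *
 * Advancing req_prod_pvt below only stages the request locally;
 * the ring is pushed to the backend (and the event channel
 * signalled if needed) from xn_start_locked().
 */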
Merry sc->tx.req_prod_pvt++; 1451931eeffaSKenneth D. Merry } 145212678024SDoug Rabson BPF_MTAP(ifp, m_head); 145312678024SDoug Rabson 145412678024SDoug Rabson sc->stats.tx_bytes += m_head->m_pkthdr.len; 145589e0f4d2SKip Macy sc->stats.tx_packets++; 1456931eeffaSKenneth D. Merry 1457931eeffaSKenneth D. Merry return (0); 145889e0f4d2SKip Macy } 145989e0f4d2SKip Macy 1460931eeffaSKenneth D. Merry static void 1461931eeffaSKenneth D. Merry xn_start_locked(struct ifnet *ifp) 1462931eeffaSKenneth D. Merry { 1463931eeffaSKenneth D. Merry struct netfront_info *sc; 1464931eeffaSKenneth D. Merry struct mbuf *m_head; 1465931eeffaSKenneth D. Merry int notify; 1466931eeffaSKenneth D. Merry 1467931eeffaSKenneth D. Merry sc = ifp->if_softc; 1468931eeffaSKenneth D. Merry 1469931eeffaSKenneth D. Merry if (!netfront_carrier_ok(sc)) 1470931eeffaSKenneth D. Merry return; 1471931eeffaSKenneth D. Merry 1472931eeffaSKenneth D. Merry /* 1473931eeffaSKenneth D. Merry * While we have enough transmit slots available for at least one 1474931eeffaSKenneth D. Merry * maximum-sized packet, pull mbufs off the queue and put them on 1475931eeffaSKenneth D. Merry * the transmit ring. 1476931eeffaSKenneth D. Merry */ 1477931eeffaSKenneth D. Merry while (xn_tx_slot_available(sc)) { 1478931eeffaSKenneth D. Merry IF_DEQUEUE(&ifp->if_snd, m_head); 1479931eeffaSKenneth D. Merry if (m_head == NULL) 1480931eeffaSKenneth D. Merry break; 1481931eeffaSKenneth D. Merry 1482931eeffaSKenneth D. Merry if (xn_assemble_tx_request(sc, m_head) != 0) 1483931eeffaSKenneth D. Merry break; 1484931eeffaSKenneth D. Merry } 1485931eeffaSKenneth D. Merry 148689e0f4d2SKip Macy RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify); 148789e0f4d2SKip Macy if (notify) 148876acc41fSJustin T. Gibbs xen_intr_signal(sc->xen_intr_handle); 148989e0f4d2SKip Macy 149089e0f4d2SKip Macy if (RING_FULL(&sc->tx)) { 149189e0f4d2SKip Macy sc->tx_full = 1; 149289e0f4d2SKip Macy #if 0 149389e0f4d2SKip Macy netif_stop_queue(dev); 149489e0f4d2SKip Macy #endif 149589e0f4d2SKip Macy } 149689e0f4d2SKip Macy } 149789e0f4d2SKip Macy 149889e0f4d2SKip Macy static void 149989e0f4d2SKip Macy xn_start(struct ifnet *ifp) 150089e0f4d2SKip Macy { 150189e0f4d2SKip Macy struct netfront_info *sc; 150289e0f4d2SKip Macy sc = ifp->if_softc; 150389e0f4d2SKip Macy XN_TX_LOCK(sc); 150489e0f4d2SKip Macy xn_start_locked(ifp); 150589e0f4d2SKip Macy XN_TX_UNLOCK(sc); 150689e0f4d2SKip Macy } 150789e0f4d2SKip Macy 150889e0f4d2SKip Macy /* equivalent of network_open() in Linux */ 150989e0f4d2SKip Macy static void 151089e0f4d2SKip Macy xn_ifinit_locked(struct netfront_info *sc) 151189e0f4d2SKip Macy { 151289e0f4d2SKip Macy struct ifnet *ifp; 151389e0f4d2SKip Macy 151489e0f4d2SKip Macy XN_LOCK_ASSERT(sc); 151589e0f4d2SKip Macy 151689e0f4d2SKip Macy ifp = sc->xn_ifp; 151789e0f4d2SKip Macy 151889e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING) 151989e0f4d2SKip Macy return; 152089e0f4d2SKip Macy 152189e0f4d2SKip Macy xn_stop(sc); 152289e0f4d2SKip Macy 152389e0f4d2SKip Macy network_alloc_rx_buffers(sc); 152489e0f4d2SKip Macy sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1; 152589e0f4d2SKip Macy 152689e0f4d2SKip Macy ifp->if_drv_flags |= IFF_DRV_RUNNING; 152789e0f4d2SKip Macy ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 15280e509842SJustin T. 
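/*
 * There is no physical carrier to sense on this paravirtualized
 * interface; once the interface is marked running the link is
 * simply reported as up (the ifmedia handlers near the end of this
 * file always report an active manual medium).
 */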
Gibbs if_link_state_change(ifp, LINK_STATE_UP); 152989e0f4d2SKip Macy 153089e0f4d2SKip Macy callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); 153189e0f4d2SKip Macy } 153289e0f4d2SKip Macy 153389e0f4d2SKip Macy static void 153489e0f4d2SKip Macy xn_ifinit(void *xsc) 153589e0f4d2SKip Macy { 153689e0f4d2SKip Macy struct netfront_info *sc = xsc; 153789e0f4d2SKip Macy 153889e0f4d2SKip Macy XN_LOCK(sc); 153989e0f4d2SKip Macy xn_ifinit_locked(sc); 154089e0f4d2SKip Macy XN_UNLOCK(sc); 154189e0f4d2SKip Macy } 154289e0f4d2SKip Macy 154389e0f4d2SKip Macy static int 154489e0f4d2SKip Macy xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 154589e0f4d2SKip Macy { 154689e0f4d2SKip Macy struct netfront_info *sc = ifp->if_softc; 154789e0f4d2SKip Macy struct ifreq *ifr = (struct ifreq *) data; 1548a0ae8f04SBjoern A. Zeeb #ifdef INET 154989e0f4d2SKip Macy struct ifaddr *ifa = (struct ifaddr *)data; 1550a0ae8f04SBjoern A. Zeeb #endif 155189e0f4d2SKip Macy 155289e0f4d2SKip Macy int mask, error = 0; 155389e0f4d2SKip Macy switch(cmd) { 155489e0f4d2SKip Macy case SIOCSIFADDR: 1555a0ae8f04SBjoern A. Zeeb #ifdef INET 155689e0f4d2SKip Macy XN_LOCK(sc); 155789e0f4d2SKip Macy if (ifa->ifa_addr->sa_family == AF_INET) { 155889e0f4d2SKip Macy ifp->if_flags |= IFF_UP; 155989e0f4d2SKip Macy if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 156089e0f4d2SKip Macy xn_ifinit_locked(sc); 156189e0f4d2SKip Macy arp_ifinit(ifp, ifa); 156289e0f4d2SKip Macy XN_UNLOCK(sc); 156349906218SDoug Rabson } else { 156449906218SDoug Rabson XN_UNLOCK(sc); 1565a0ae8f04SBjoern A. Zeeb #endif 156649906218SDoug Rabson error = ether_ioctl(ifp, cmd, data); 1567a0ae8f04SBjoern A. Zeeb #ifdef INET 156849906218SDoug Rabson } 1569a0ae8f04SBjoern A. Zeeb #endif 157089e0f4d2SKip Macy break; 157189e0f4d2SKip Macy case SIOCSIFMTU: 157289e0f4d2SKip Macy /* XXX can we alter the MTU on a VN ?*/ 157389e0f4d2SKip Macy #ifdef notyet 157489e0f4d2SKip Macy if (ifr->ifr_mtu > XN_JUMBO_MTU) 157589e0f4d2SKip Macy error = EINVAL; 157689e0f4d2SKip Macy else 157789e0f4d2SKip Macy #endif 157889e0f4d2SKip Macy { 157989e0f4d2SKip Macy ifp->if_mtu = ifr->ifr_mtu; 158089e0f4d2SKip Macy ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 158189e0f4d2SKip Macy xn_ifinit(sc); 158289e0f4d2SKip Macy } 158389e0f4d2SKip Macy break; 158489e0f4d2SKip Macy case SIOCSIFFLAGS: 158589e0f4d2SKip Macy XN_LOCK(sc); 158689e0f4d2SKip Macy if (ifp->if_flags & IFF_UP) { 158789e0f4d2SKip Macy /* 158889e0f4d2SKip Macy * If only the state of the PROMISC flag changed, 158989e0f4d2SKip Macy * then just use the 'set promisc mode' command 159089e0f4d2SKip Macy * instead of reinitializing the entire NIC. Doing 159189e0f4d2SKip Macy * a full re-init means reloading the firmware and 159289e0f4d2SKip Macy * waiting for it to start up, which may take a 159389e0f4d2SKip Macy * second or two. 
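* Note that this paravirtualized frontend has no firmware to reload,
* and the promiscuous-mode shortcut below is not implemented (see the
* notyet block), so setting IFF_UP simply calls xn_ifinit_locked(),
* which returns early if the interface is already running.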
159489e0f4d2SKip Macy */ 159589e0f4d2SKip Macy #ifdef notyet 159689e0f4d2SKip Macy /* No promiscuous mode with Xen */ 159789e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING && 159889e0f4d2SKip Macy ifp->if_flags & IFF_PROMISC && 159989e0f4d2SKip Macy !(sc->xn_if_flags & IFF_PROMISC)) { 160089e0f4d2SKip Macy XN_SETBIT(sc, XN_RX_MODE, 160189e0f4d2SKip Macy XN_RXMODE_RX_PROMISC); 160289e0f4d2SKip Macy } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 160389e0f4d2SKip Macy !(ifp->if_flags & IFF_PROMISC) && 160489e0f4d2SKip Macy sc->xn_if_flags & IFF_PROMISC) { 160589e0f4d2SKip Macy XN_CLRBIT(sc, XN_RX_MODE, 160689e0f4d2SKip Macy XN_RXMODE_RX_PROMISC); 160789e0f4d2SKip Macy } else 160889e0f4d2SKip Macy #endif 160989e0f4d2SKip Macy xn_ifinit_locked(sc); 161089e0f4d2SKip Macy } else { 161189e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 161289e0f4d2SKip Macy xn_stop(sc); 161389e0f4d2SKip Macy } 161489e0f4d2SKip Macy } 161589e0f4d2SKip Macy sc->xn_if_flags = ifp->if_flags; 161689e0f4d2SKip Macy XN_UNLOCK(sc); 161789e0f4d2SKip Macy error = 0; 161889e0f4d2SKip Macy break; 161989e0f4d2SKip Macy case SIOCSIFCAP: 162089e0f4d2SKip Macy mask = ifr->ifr_reqcap ^ ifp->if_capenable; 162112678024SDoug Rabson if (mask & IFCAP_TXCSUM) { 162212678024SDoug Rabson if (IFCAP_TXCSUM & ifp->if_capenable) { 162312678024SDoug Rabson ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4); 162412678024SDoug Rabson ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP 162512678024SDoug Rabson | CSUM_IP | CSUM_TSO); 162612678024SDoug Rabson } else { 162712678024SDoug Rabson ifp->if_capenable |= IFCAP_TXCSUM; 162812678024SDoug Rabson ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP 162912678024SDoug Rabson | CSUM_IP); 163089e0f4d2SKip Macy } 163112678024SDoug Rabson } 163212678024SDoug Rabson if (mask & IFCAP_RXCSUM) { 163312678024SDoug Rabson ifp->if_capenable ^= IFCAP_RXCSUM; 163412678024SDoug Rabson } 163512678024SDoug Rabson if (mask & IFCAP_TSO4) { 163612678024SDoug Rabson if (IFCAP_TSO4 & ifp->if_capenable) { 163712678024SDoug Rabson ifp->if_capenable &= ~IFCAP_TSO4; 163812678024SDoug Rabson ifp->if_hwassist &= ~CSUM_TSO; 163912678024SDoug Rabson } else if (IFCAP_TXCSUM & ifp->if_capenable) { 164012678024SDoug Rabson ifp->if_capenable |= IFCAP_TSO4; 164112678024SDoug Rabson ifp->if_hwassist |= CSUM_TSO; 164212678024SDoug Rabson } else { 16433552092bSAdrian Chadd IPRINTK("Xen requires tx checksum offload" 164412678024SDoug Rabson " be enabled to use TSO\n"); 164512678024SDoug Rabson error = EINVAL; 164612678024SDoug Rabson } 164712678024SDoug Rabson } 164812678024SDoug Rabson if (mask & IFCAP_LRO) { 164912678024SDoug Rabson ifp->if_capenable ^= IFCAP_LRO; 165012678024SDoug Rabson 165112678024SDoug Rabson } 165289e0f4d2SKip Macy error = 0; 165389e0f4d2SKip Macy break; 165489e0f4d2SKip Macy case SIOCADDMULTI: 165589e0f4d2SKip Macy case SIOCDELMULTI: 165689e0f4d2SKip Macy #ifdef notyet 165789e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 165889e0f4d2SKip Macy XN_LOCK(sc); 165989e0f4d2SKip Macy xn_setmulti(sc); 166089e0f4d2SKip Macy XN_UNLOCK(sc); 166189e0f4d2SKip Macy error = 0; 166289e0f4d2SKip Macy } 166389e0f4d2SKip Macy #endif 166489e0f4d2SKip Macy /* FALLTHROUGH */ 166589e0f4d2SKip Macy case SIOCSIFMEDIA: 166689e0f4d2SKip Macy case SIOCGIFMEDIA: 16670e509842SJustin T. 
Gibbs error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 166889e0f4d2SKip Macy break; 166989e0f4d2SKip Macy default: 167089e0f4d2SKip Macy error = ether_ioctl(ifp, cmd, data); 167189e0f4d2SKip Macy } 167289e0f4d2SKip Macy 167389e0f4d2SKip Macy return (error); 167489e0f4d2SKip Macy } 167589e0f4d2SKip Macy 167689e0f4d2SKip Macy static void 167789e0f4d2SKip Macy xn_stop(struct netfront_info *sc) 167889e0f4d2SKip Macy { 167989e0f4d2SKip Macy struct ifnet *ifp; 168089e0f4d2SKip Macy 168189e0f4d2SKip Macy XN_LOCK_ASSERT(sc); 168289e0f4d2SKip Macy 168389e0f4d2SKip Macy ifp = sc->xn_ifp; 168489e0f4d2SKip Macy 168589e0f4d2SKip Macy callout_stop(&sc->xn_stat_ch); 168689e0f4d2SKip Macy 168789e0f4d2SKip Macy xn_free_rx_ring(sc); 168889e0f4d2SKip Macy xn_free_tx_ring(sc); 168989e0f4d2SKip Macy 169089e0f4d2SKip Macy ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 16910e509842SJustin T. Gibbs if_link_state_change(ifp, LINK_STATE_DOWN); 169289e0f4d2SKip Macy } 169389e0f4d2SKip Macy 169489e0f4d2SKip Macy /* START of Xenolinux helper functions adapted to FreeBSD */ 169523dc5621SKip Macy int 169623dc5621SKip Macy network_connect(struct netfront_info *np) 169789e0f4d2SKip Macy { 16983a6d1fcfSKip Macy int i, requeue_idx, error; 169989e0f4d2SKip Macy grant_ref_t ref; 170089e0f4d2SKip Macy netif_rx_request_t *req; 1701d0f3a8b9SRoger Pau Monné u_int feature_rx_copy; 170289e0f4d2SKip Macy 1703ff662b5cSJustin T. Gibbs error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 17043a6d1fcfSKip Macy "feature-rx-copy", NULL, "%u", &feature_rx_copy); 17053a6d1fcfSKip Macy if (error) 170689e0f4d2SKip Macy feature_rx_copy = 0; 170789e0f4d2SKip Macy 1708d0f3a8b9SRoger Pau Monné /* We only support rx copy. */ 1709d0f3a8b9SRoger Pau Monné if (!feature_rx_copy) 1710d0f3a8b9SRoger Pau Monné return (EPROTONOSUPPORT); 171189e0f4d2SKip Macy 171289e0f4d2SKip Macy /* Recovery procedure: */ 17133a6d1fcfSKip Macy error = talk_to_backend(np->xbdev, np); 17143a6d1fcfSKip Macy if (error) 17153a6d1fcfSKip Macy return (error); 171689e0f4d2SKip Macy 171789e0f4d2SKip Macy /* Step 1: Reinitialise variables. */ 1718578e4bf7SJustin T. Gibbs xn_query_features(np); 1719578e4bf7SJustin T. Gibbs xn_configure_features(np); 172089e0f4d2SKip Macy netif_release_tx_bufs(np); 172189e0f4d2SKip Macy 172289e0f4d2SKip Macy /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 172389e0f4d2SKip Macy for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 172489e0f4d2SKip Macy struct mbuf *m; 17253a6d1fcfSKip Macy u_long pfn; 172689e0f4d2SKip Macy 172789e0f4d2SKip Macy if (np->rx_mbufs[i] == NULL) 172889e0f4d2SKip Macy continue; 172989e0f4d2SKip Macy 173089e0f4d2SKip Macy m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i); 173189e0f4d2SKip Macy ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); 1732931eeffaSKenneth D. Merry 173389e0f4d2SKip Macy req = RING_GET_REQUEST(&np->rx, requeue_idx); 17343a6d1fcfSKip Macy pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT; 173589e0f4d2SKip Macy 173689e0f4d2SKip Macy gnttab_grant_foreign_access_ref(ref, 173723dc5621SKip Macy xenbus_get_otherend_id(np->xbdev), 1738ed95805eSJohn Baldwin pfn, 0); 1739d0f3a8b9SRoger Pau Monné 174089e0f4d2SKip Macy req->gref = ref; 174189e0f4d2SKip Macy req->id = requeue_idx; 174289e0f4d2SKip Macy 174389e0f4d2SKip Macy requeue_idx++; 174489e0f4d2SKip Macy } 174589e0f4d2SKip Macy 174689e0f4d2SKip Macy np->rx.req_prod_pvt = requeue_idx; 174789e0f4d2SKip Macy 174889e0f4d2SKip Macy /* Step 3: All public and private state should now be sane. 
Get 174989e0f4d2SKip Macy * ready to start sending and receiving packets and give the driver 175089e0f4d2SKip Macy * domain a kick because we've probably just requeued some 175189e0f4d2SKip Macy * packets. 175289e0f4d2SKip Macy */ 175389e0f4d2SKip Macy netfront_carrier_on(np); 175476acc41fSJustin T. Gibbs xen_intr_signal(np->xen_intr_handle); 175589e0f4d2SKip Macy XN_TX_LOCK(np); 175689e0f4d2SKip Macy xn_txeof(np); 175789e0f4d2SKip Macy XN_TX_UNLOCK(np); 175889e0f4d2SKip Macy network_alloc_rx_buffers(np); 175989e0f4d2SKip Macy 176089e0f4d2SKip Macy return (0); 176189e0f4d2SKip Macy } 176289e0f4d2SKip Macy 176389e0f4d2SKip Macy static void 1764578e4bf7SJustin T. Gibbs xn_query_features(struct netfront_info *np) 1765578e4bf7SJustin T. Gibbs { 1766578e4bf7SJustin T. Gibbs int val; 1767578e4bf7SJustin T. Gibbs 1768578e4bf7SJustin T. Gibbs device_printf(np->xbdev, "backend features:"); 1769578e4bf7SJustin T. Gibbs 1770578e4bf7SJustin T. Gibbs if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 1771578e4bf7SJustin T. Gibbs "feature-sg", NULL, "%d", &val) < 0) 1772578e4bf7SJustin T. Gibbs val = 0; 1773578e4bf7SJustin T. Gibbs 1774578e4bf7SJustin T. Gibbs np->maxfrags = 1; 1775578e4bf7SJustin T. Gibbs if (val) { 1776578e4bf7SJustin T. Gibbs np->maxfrags = MAX_TX_REQ_FRAGS; 1777578e4bf7SJustin T. Gibbs printf(" feature-sg"); 1778578e4bf7SJustin T. Gibbs } 1779578e4bf7SJustin T. Gibbs 1780578e4bf7SJustin T. Gibbs if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 1781578e4bf7SJustin T. Gibbs "feature-gso-tcpv4", NULL, "%d", &val) < 0) 1782578e4bf7SJustin T. Gibbs val = 0; 1783578e4bf7SJustin T. Gibbs 1784578e4bf7SJustin T. Gibbs np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO); 1785578e4bf7SJustin T. Gibbs if (val) { 1786578e4bf7SJustin T. Gibbs np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO; 1787578e4bf7SJustin T. Gibbs printf(" feature-gso-tcp4"); 1788578e4bf7SJustin T. Gibbs } 1789578e4bf7SJustin T. Gibbs 1790578e4bf7SJustin T. Gibbs printf("\n"); 1791578e4bf7SJustin T. Gibbs } 1792578e4bf7SJustin T. Gibbs 1793cf9c09e1SJustin T. Gibbs static int 1794578e4bf7SJustin T. Gibbs xn_configure_features(struct netfront_info *np) 1795cf9c09e1SJustin T. Gibbs { 17966a8e9695SRoger Pau Monné int err, cap_enabled; 1797cf9c09e1SJustin T. Gibbs 1798cf9c09e1SJustin T. Gibbs err = 0; 17996a8e9695SRoger Pau Monné 18006a8e9695SRoger Pau Monné if (np->xn_resume && 18016a8e9695SRoger Pau Monné ((np->xn_ifp->if_capenable & np->xn_ifp->if_capabilities) 18026a8e9695SRoger Pau Monné == np->xn_ifp->if_capenable)) { 18036a8e9695SRoger Pau Monné /* Current options are available, no need to do anything. */ 18046a8e9695SRoger Pau Monné return (0); 18056a8e9695SRoger Pau Monné } 18066a8e9695SRoger Pau Monné 18076a8e9695SRoger Pau Monné /* Try to preserve as many options as possible. */ 18086a8e9695SRoger Pau Monné if (np->xn_resume) 18096a8e9695SRoger Pau Monné cap_enabled = np->xn_ifp->if_capenable; 18106a8e9695SRoger Pau Monné else 18116a8e9695SRoger Pau Monné cap_enabled = UINT_MAX; 18126a8e9695SRoger Pau Monné 1813*08c9c2e0SRoger Pau Monné #if (defined(INET) || defined(INET6)) 18146a8e9695SRoger Pau Monné if ((np->xn_ifp->if_capenable & IFCAP_LRO) == (cap_enabled & IFCAP_LRO)) 1815cf9c09e1SJustin T. Gibbs tcp_lro_free(&np->xn_lro); 1816578e4bf7SJustin T. Gibbs #endif 1817578e4bf7SJustin T. Gibbs np->xn_ifp->if_capenable = 18186a8e9695SRoger Pau Monné np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4) & cap_enabled; 1819578e4bf7SJustin T. 
Gibbs np->xn_ifp->if_hwassist &= ~CSUM_TSO; 1820*08c9c2e0SRoger Pau Monné #if (defined(INET) || defined(INET6)) 18216a8e9695SRoger Pau Monné if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) == 18226a8e9695SRoger Pau Monné (cap_enabled & IFCAP_LRO)) { 1823cf9c09e1SJustin T. Gibbs err = tcp_lro_init(&np->xn_lro); 1824cf9c09e1SJustin T. Gibbs if (err) { 1825cf9c09e1SJustin T. Gibbs device_printf(np->xbdev, "LRO initialization failed\n"); 1826cf9c09e1SJustin T. Gibbs } else { 1827cf9c09e1SJustin T. Gibbs np->xn_lro.ifp = np->xn_ifp; 1828578e4bf7SJustin T. Gibbs np->xn_ifp->if_capenable |= IFCAP_LRO; 1829cf9c09e1SJustin T. Gibbs } 1830cf9c09e1SJustin T. Gibbs } 18316a8e9695SRoger Pau Monné if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) == 18326a8e9695SRoger Pau Monné (cap_enabled & IFCAP_TSO4)) { 1833578e4bf7SJustin T. Gibbs np->xn_ifp->if_capenable |= IFCAP_TSO4; 1834578e4bf7SJustin T. Gibbs np->xn_ifp->if_hwassist |= CSUM_TSO; 1835578e4bf7SJustin T. Gibbs } 1836cf9c09e1SJustin T. Gibbs #endif 1837cf9c09e1SJustin T. Gibbs return (err); 1838cf9c09e1SJustin T. Gibbs } 1839cf9c09e1SJustin T. Gibbs 184076acc41fSJustin T. Gibbs /** 184176acc41fSJustin T. Gibbs * Create a network device. 184276acc41fSJustin T. Gibbs * @param dev Newbus device representing this virtual NIC. 184389e0f4d2SKip Macy */ 184423dc5621SKip Macy int 184523dc5621SKip Macy create_netdev(device_t dev) 184689e0f4d2SKip Macy { 184789e0f4d2SKip Macy int i; 184889e0f4d2SKip Macy struct netfront_info *np; 184989e0f4d2SKip Macy int err; 185089e0f4d2SKip Macy struct ifnet *ifp; 185189e0f4d2SKip Macy 185223dc5621SKip Macy np = device_get_softc(dev); 185389e0f4d2SKip Macy 185489e0f4d2SKip Macy np->xbdev = dev; 185589e0f4d2SKip Macy 1856177e3f13SRoger Pau Monné mtx_init(&np->tx_lock, "xntx", "network transmit lock", MTX_DEF); 1857177e3f13SRoger Pau Monné mtx_init(&np->rx_lock, "xnrx", "network receive lock", MTX_DEF); 1858177e3f13SRoger Pau Monné mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF); 18590e509842SJustin T. Gibbs 18600e509842SJustin T. Gibbs ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts); 18610e509842SJustin T. Gibbs ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 18620e509842SJustin T. Gibbs ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL); 18630e509842SJustin T. Gibbs 186489e0f4d2SKip Macy np->rx_target = RX_MIN_TARGET; 186589e0f4d2SKip Macy np->rx_min_target = RX_MIN_TARGET; 186689e0f4d2SKip Macy np->rx_max_target = RX_MAX_TARGET; 186789e0f4d2SKip Macy 186889e0f4d2SKip Macy /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */ 186989e0f4d2SKip Macy for (i = 0; i <= NET_TX_RING_SIZE; i++) { 187089e0f4d2SKip Macy np->tx_mbufs[i] = (void *) ((u_long) i+1); 1871ff662b5cSJustin T. Gibbs np->grant_tx_ref[i] = GRANT_REF_INVALID; 187289e0f4d2SKip Macy } 1873931eeffaSKenneth D. Merry np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0; 1874931eeffaSKenneth D. Merry 187589e0f4d2SKip Macy for (i = 0; i <= NET_RX_RING_SIZE; i++) { 1876931eeffaSKenneth D. Merry 187789e0f4d2SKip Macy np->rx_mbufs[i] = NULL; 1878ff662b5cSJustin T. Gibbs np->grant_rx_ref[i] = GRANT_REF_INVALID; 187989e0f4d2SKip Macy } 188049e6be9cSGleb Smirnoff 188149e6be9cSGleb Smirnoff mbufq_init(&np->xn_rx_batch, INT_MAX); 188249e6be9cSGleb Smirnoff 188389e0f4d2SKip Macy /* A grant for every tx ring slot */ 1884931eeffaSKenneth D. Merry if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, 1885931eeffaSKenneth D. 
Merry &np->gref_tx_head) != 0) {
1886227ca257SKip Macy IPRINTK("#### netfront can't alloc tx grant refs\n");
188789e0f4d2SKip Macy err = ENOMEM;
188889e0f4d2SKip Macy goto exit;
188989e0f4d2SKip Macy }
189089e0f4d2SKip Macy /* A grant for every rx ring slot */
189189e0f4d2SKip Macy if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1892931eeffaSKenneth D. Merry &np->gref_rx_head) != 0) {
1893227ca257SKip Macy WPRINTK("#### netfront can't alloc rx grant refs\n");
189489e0f4d2SKip Macy gnttab_free_grant_references(np->gref_tx_head);
189589e0f4d2SKip Macy err = ENOMEM;
189689e0f4d2SKip Macy goto exit;
189789e0f4d2SKip Macy }
189889e0f4d2SKip Macy
189989e0f4d2SKip Macy err = xen_net_read_mac(dev, np->mac);
1900ffa06904SJustin T. Gibbs if (err)
190189e0f4d2SKip Macy goto out;
190289e0f4d2SKip Macy
190389e0f4d2SKip Macy /* Set up ifnet structure */
190423dc5621SKip Macy ifp = np->xn_ifp = if_alloc(IFT_ETHER);
190589e0f4d2SKip Macy ifp->if_softc = np;
190623dc5621SKip Macy if_initname(ifp, "xn", device_get_unit(dev));
19073a6d1fcfSKip Macy ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
190889e0f4d2SKip Macy ifp->if_ioctl = xn_ioctl;
190989e0f4d2SKip Macy ifp->if_output = ether_output;
191089e0f4d2SKip Macy ifp->if_start = xn_start;
1911227ca257SKip Macy #ifdef notyet
1912227ca257SKip Macy ifp->if_watchdog = xn_watchdog;
1913227ca257SKip Macy #endif
191489e0f4d2SKip Macy ifp->if_init = xn_ifinit;
191589e0f4d2SKip Macy ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;
191689e0f4d2SKip Macy
191789e0f4d2SKip Macy ifp->if_hwassist = XN_CSUM_FEATURES;
191889e0f4d2SKip Macy ifp->if_capabilities = IFCAP_HWCSUM;
19199fd573c3SHans Petter Selasky ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
19209fd573c3SHans Petter Selasky ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
19219fd573c3SHans Petter Selasky ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
192289e0f4d2SKip Macy
192389e0f4d2SKip Macy ether_ifattach(ifp, np->mac);
1924fd90e2edSJung-uk Kim callout_init(&np->xn_stat_ch, 1);
192589e0f4d2SKip Macy netfront_carrier_off(np);
192689e0f4d2SKip Macy
192789e0f4d2SKip Macy return (0);
192889e0f4d2SKip Macy
192989e0f4d2SKip Macy exit:
193089e0f4d2SKip Macy gnttab_free_grant_references(np->gref_tx_head);
193189e0f4d2SKip Macy out:
1932ffa06904SJustin T. Gibbs return (err);
193389e0f4d2SKip Macy }
193489e0f4d2SKip Macy
193589e0f4d2SKip Macy /**
193689e0f4d2SKip Macy * Handle the change of state of the backend to Closing. We must delete our
193789e0f4d2SKip Macy * device-layer structures now, to ensure that writes are flushed through to
193889e0f4d2SKip Macy * the backend. Once this is done, we can switch to Closed in
193989e0f4d2SKip Macy * acknowledgement.
194089e0f4d2SKip Macy */
194189e0f4d2SKip Macy #if 0
19420e509842SJustin T. Gibbs static void
19430e509842SJustin T. Gibbs netfront_closing(device_t dev)
194489e0f4d2SKip Macy {
194589e0f4d2SKip Macy #if 0
194689e0f4d2SKip Macy struct netfront_info *info = dev->dev_driver_data;
194789e0f4d2SKip Macy
194889e0f4d2SKip Macy DPRINTK("netfront_closing: %s removed\n", dev->nodename);
194989e0f4d2SKip Macy
195089e0f4d2SKip Macy close_netdev(info);
195189e0f4d2SKip Macy #endif
195289e0f4d2SKip Macy xenbus_switch_state(dev, XenbusStateClosed);
195389e0f4d2SKip Macy }
195489e0f4d2SKip Macy #endif
195589e0f4d2SKip Macy
19560e509842SJustin T. Gibbs static int
19570e509842SJustin T.
Gibbs netfront_detach(device_t dev) 195889e0f4d2SKip Macy { 195923dc5621SKip Macy struct netfront_info *info = device_get_softc(dev); 196089e0f4d2SKip Macy 196123dc5621SKip Macy DPRINTK("%s\n", xenbus_get_node(dev)); 196289e0f4d2SKip Macy 196389e0f4d2SKip Macy netif_free(info); 196489e0f4d2SKip Macy 196589e0f4d2SKip Macy return 0; 196689e0f4d2SKip Macy } 196789e0f4d2SKip Macy 19680e509842SJustin T. Gibbs static void 19690e509842SJustin T. Gibbs netif_free(struct netfront_info *info) 197089e0f4d2SKip Macy { 1971818fe953SJustin T. Gibbs XN_LOCK(info); 1972818fe953SJustin T. Gibbs xn_stop(info); 1973818fe953SJustin T. Gibbs XN_UNLOCK(info); 1974818fe953SJustin T. Gibbs callout_drain(&info->xn_stat_ch); 197589e0f4d2SKip Macy netif_disconnect_backend(info); 1976e3242f9dSJustin T. Gibbs if (info->xn_ifp != NULL) { 1977818fe953SJustin T. Gibbs ether_ifdetach(info->xn_ifp); 1978818fe953SJustin T. Gibbs if_free(info->xn_ifp); 1979e3242f9dSJustin T. Gibbs info->xn_ifp = NULL; 1980e3242f9dSJustin T. Gibbs } 1981d5aeb779SJustin T. Gibbs ifmedia_removeall(&info->sc_media); 198289e0f4d2SKip Macy } 198389e0f4d2SKip Macy 19840e509842SJustin T. Gibbs static void 19850e509842SJustin T. Gibbs netif_disconnect_backend(struct netfront_info *info) 198689e0f4d2SKip Macy { 19873a6d1fcfSKip Macy XN_RX_LOCK(info); 19883a6d1fcfSKip Macy XN_TX_LOCK(info); 19893a6d1fcfSKip Macy netfront_carrier_off(info); 19903a6d1fcfSKip Macy XN_TX_UNLOCK(info); 19913a6d1fcfSKip Macy XN_RX_UNLOCK(info); 19923a6d1fcfSKip Macy 1993cf9c09e1SJustin T. Gibbs free_ring(&info->tx_ring_ref, &info->tx.sring); 1994cf9c09e1SJustin T. Gibbs free_ring(&info->rx_ring_ref, &info->rx.sring); 199589e0f4d2SKip Macy 199676acc41fSJustin T. Gibbs xen_intr_unbind(&info->xen_intr_handle); 199789e0f4d2SKip Macy } 199889e0f4d2SKip Macy 19990e509842SJustin T. Gibbs static void 2000cf9c09e1SJustin T. Gibbs free_ring(int *ref, void *ring_ptr_ref) 200189e0f4d2SKip Macy { 2002cf9c09e1SJustin T. Gibbs void **ring_ptr_ptr = ring_ptr_ref; 2003cf9c09e1SJustin T. Gibbs 2004cf9c09e1SJustin T. Gibbs if (*ref != GRANT_REF_INVALID) { 2005cf9c09e1SJustin T. Gibbs /* This API frees the associated storage. */ 2006cf9c09e1SJustin T. Gibbs gnttab_end_foreign_access(*ref, *ring_ptr_ptr); 2007cf9c09e1SJustin T. Gibbs *ref = GRANT_REF_INVALID; 2008cf9c09e1SJustin T. Gibbs } 2009cf9c09e1SJustin T. Gibbs *ring_ptr_ptr = NULL; 201089e0f4d2SKip Macy } 201189e0f4d2SKip Macy 20120e509842SJustin T. Gibbs static int 20130e509842SJustin T. Gibbs xn_ifmedia_upd(struct ifnet *ifp) 20140e509842SJustin T. Gibbs { 20150e509842SJustin T. Gibbs return (0); 20160e509842SJustin T. Gibbs } 20170e509842SJustin T. Gibbs 20180e509842SJustin T. Gibbs static void 20190e509842SJustin T. Gibbs xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 20200e509842SJustin T. Gibbs { 20210e509842SJustin T. Gibbs ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; 20220e509842SJustin T. Gibbs ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; 20230e509842SJustin T. Gibbs } 20240e509842SJustin T. Gibbs 202589e0f4d2SKip Macy /* ** Driver registration ** */ 202623dc5621SKip Macy static device_method_t netfront_methods[] = { 202723dc5621SKip Macy /* Device interface */ 202823dc5621SKip Macy DEVMETHOD(device_probe, netfront_probe), 202923dc5621SKip Macy DEVMETHOD(device_attach, netfront_attach), 203023dc5621SKip Macy DEVMETHOD(device_detach, netfront_detach), 203123dc5621SKip Macy DEVMETHOD(device_shutdown, bus_generic_shutdown), 2032cf9c09e1SJustin T. 
Gibbs DEVMETHOD(device_suspend, netfront_suspend), 203323dc5621SKip Macy DEVMETHOD(device_resume, netfront_resume), 203489e0f4d2SKip Macy 203523dc5621SKip Macy /* Xenbus interface */ 2036ff662b5cSJustin T. Gibbs DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed), 203789e0f4d2SKip Macy 20386f9767acSMarius Strobl DEVMETHOD_END 203989e0f4d2SKip Macy }; 204089e0f4d2SKip Macy 204123dc5621SKip Macy static driver_t netfront_driver = { 204223dc5621SKip Macy "xn", 204323dc5621SKip Macy netfront_methods, 204423dc5621SKip Macy sizeof(struct netfront_info), 204589e0f4d2SKip Macy }; 204623dc5621SKip Macy devclass_t netfront_devclass; 204789e0f4d2SKip Macy 20486f9767acSMarius Strobl DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL, 20496f9767acSMarius Strobl NULL); 2050
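/*
 * The method table above routes newbus probe/attach/detach and
 * suspend/resume requests to the netfront_* handlers, and backend
 * state transitions arrive through the xenbus_otherend_changed
 * method.  Instances attach beneath the xenbusb_front bus and appear
 * as "xn<unit>" network interfaces (see if_initname() in
 * create_netdev()).
 */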