18e0ad55aSJoel Dahl /*- 289e0f4d2SKip Macy * Copyright (c) 2004-2006 Kip Macy 389e0f4d2SKip Macy * All rights reserved. 489e0f4d2SKip Macy * 58e0ad55aSJoel Dahl * Redistribution and use in source and binary forms, with or without 68e0ad55aSJoel Dahl * modification, are permitted provided that the following conditions 78e0ad55aSJoel Dahl * are met: 88e0ad55aSJoel Dahl * 1. Redistributions of source code must retain the above copyright 98e0ad55aSJoel Dahl * notice, this list of conditions and the following disclaimer. 108e0ad55aSJoel Dahl * 2. Redistributions in binary form must reproduce the above copyright 118e0ad55aSJoel Dahl * notice, this list of conditions and the following disclaimer in the 128e0ad55aSJoel Dahl * documentation and/or other materials provided with the distribution. 1389e0f4d2SKip Macy * 148e0ad55aSJoel Dahl * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 158e0ad55aSJoel Dahl * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 168e0ad55aSJoel Dahl * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 178e0ad55aSJoel Dahl * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 188e0ad55aSJoel Dahl * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 198e0ad55aSJoel Dahl * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 208e0ad55aSJoel Dahl * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 218e0ad55aSJoel Dahl * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 228e0ad55aSJoel Dahl * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 238e0ad55aSJoel Dahl * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 248e0ad55aSJoel Dahl * SUCH DAMAGE. 2589e0f4d2SKip Macy */ 2689e0f4d2SKip Macy 2789e0f4d2SKip Macy 2889e0f4d2SKip Macy #include <sys/cdefs.h> 2989e0f4d2SKip Macy __FBSDID("$FreeBSD$"); 3089e0f4d2SKip Macy 31a0ae8f04SBjoern A. Zeeb #include "opt_inet.h" 32a0ae8f04SBjoern A. 
Zeeb 3389e0f4d2SKip Macy #include <sys/param.h> 3489e0f4d2SKip Macy #include <sys/systm.h> 3589e0f4d2SKip Macy #include <sys/sockio.h> 3689e0f4d2SKip Macy #include <sys/mbuf.h> 3789e0f4d2SKip Macy #include <sys/malloc.h> 3823dc5621SKip Macy #include <sys/module.h> 3989e0f4d2SKip Macy #include <sys/kernel.h> 4089e0f4d2SKip Macy #include <sys/socket.h> 4112678024SDoug Rabson #include <sys/sysctl.h> 4289e0f4d2SKip Macy #include <sys/queue.h> 438cb07992SAdrian Chadd #include <sys/lock.h> 4489e0f4d2SKip Macy #include <sys/sx.h> 4589e0f4d2SKip Macy 4689e0f4d2SKip Macy #include <net/if.h> 4789e0f4d2SKip Macy #include <net/if_arp.h> 4889e0f4d2SKip Macy #include <net/ethernet.h> 4989e0f4d2SKip Macy #include <net/if_dl.h> 5089e0f4d2SKip Macy #include <net/if_media.h> 5189e0f4d2SKip Macy 5289e0f4d2SKip Macy #include <net/bpf.h> 5389e0f4d2SKip Macy 5489e0f4d2SKip Macy #include <net/if_types.h> 5589e0f4d2SKip Macy #include <net/if.h> 5689e0f4d2SKip Macy 5789e0f4d2SKip Macy #include <netinet/in_systm.h> 5889e0f4d2SKip Macy #include <netinet/in.h> 5989e0f4d2SKip Macy #include <netinet/ip.h> 6089e0f4d2SKip Macy #include <netinet/if_ether.h> 6112678024SDoug Rabson #if __FreeBSD_version >= 700000 6212678024SDoug Rabson #include <netinet/tcp.h> 6312678024SDoug Rabson #include <netinet/tcp_lro.h> 6412678024SDoug Rabson #endif 6589e0f4d2SKip Macy 6689e0f4d2SKip Macy #include <vm/vm.h> 6789e0f4d2SKip Macy #include <vm/pmap.h> 6889e0f4d2SKip Macy 6989e0f4d2SKip Macy #include <machine/clock.h> /* for DELAY */ 7089e0f4d2SKip Macy #include <machine/bus.h> 7189e0f4d2SKip Macy #include <machine/resource.h> 7289e0f4d2SKip Macy #include <machine/frame.h> 73980c7178SKip Macy #include <machine/vmparam.h> 7489e0f4d2SKip Macy 7589e0f4d2SKip Macy #include <sys/bus.h> 7689e0f4d2SKip Macy #include <sys/rman.h> 7789e0f4d2SKip Macy 7889e0f4d2SKip Macy #include <machine/intr_machdep.h> 7989e0f4d2SKip Macy 8089e0f4d2SKip Macy #include <machine/xen/xen-os.h> 8112678024SDoug Rabson #include <machine/xen/xenfunc.h> 822913e88cSRobert Watson #include <machine/xen/xenvar.h> 833a6d1fcfSKip Macy #include <xen/hypervisor.h> 843a6d1fcfSKip Macy #include <xen/xen_intr.h> 853a6d1fcfSKip Macy #include <xen/evtchn.h> 8689e0f4d2SKip Macy #include <xen/gnttab.h> 8789e0f4d2SKip Macy #include <xen/interface/memory.h> 8889e0f4d2SKip Macy #include <xen/interface/io/netif.h> 8923dc5621SKip Macy #include <xen/xenbus/xenbusvar.h> 9089e0f4d2SKip Macy 9112678024SDoug Rabson #include <dev/xen/netfront/mbufq.h> 9212678024SDoug Rabson 9323dc5621SKip Macy #include "xenbus_if.h" 9489e0f4d2SKip Macy 95*578e4bf7SJustin T. Gibbs /* Features supported by all backends. TSO and LRO can be negotiated */ 96*578e4bf7SJustin T. Gibbs #define XN_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 9712678024SDoug Rabson 9889e0f4d2SKip Macy #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) 9989e0f4d2SKip Macy #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) 10089e0f4d2SKip Macy 10112678024SDoug Rabson #if __FreeBSD_version >= 700000 10212678024SDoug Rabson /* 10312678024SDoug Rabson * Should the driver do LRO on the RX end 10412678024SDoug Rabson * this can be toggled on the fly, but the 10512678024SDoug Rabson * interface must be reset (down/up) for it 10612678024SDoug Rabson * to take effect. 
10712678024SDoug Rabson */ 10812678024SDoug Rabson static int xn_enable_lro = 1; 10912678024SDoug Rabson TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro); 11012678024SDoug Rabson #else 11112678024SDoug Rabson 11212678024SDoug Rabson #define IFCAP_TSO4 0 11312678024SDoug Rabson #define CSUM_TSO 0 11412678024SDoug Rabson 11512678024SDoug Rabson #endif 11612678024SDoug Rabson 11789e0f4d2SKip Macy #ifdef CONFIG_XEN 11889e0f4d2SKip Macy static int MODPARM_rx_copy = 0; 11989e0f4d2SKip Macy module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); 12089e0f4d2SKip Macy MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); 12189e0f4d2SKip Macy static int MODPARM_rx_flip = 0; 12289e0f4d2SKip Macy module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); 12389e0f4d2SKip Macy MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); 12489e0f4d2SKip Macy #else 12589e0f4d2SKip Macy static const int MODPARM_rx_copy = 1; 12689e0f4d2SKip Macy static const int MODPARM_rx_flip = 0; 12789e0f4d2SKip Macy #endif 12889e0f4d2SKip Macy 129931eeffaSKenneth D. Merry /** 130931eeffaSKenneth D. Merry * \brief The maximum allowed data fragments in a single transmit 131931eeffaSKenneth D. Merry * request. 132931eeffaSKenneth D. Merry * 133931eeffaSKenneth D. Merry * This limit is imposed by the backend driver. We assume here that 134931eeffaSKenneth D. Merry * we are dealing with a Linux driver domain and have set our limit 135931eeffaSKenneth D. Merry * to mirror the Linux MAX_SKB_FRAGS constant. 136931eeffaSKenneth D. Merry */ 137931eeffaSKenneth D. Merry #define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2) 138931eeffaSKenneth D. Merry 13989e0f4d2SKip Macy #define RX_COPY_THRESHOLD 256 14089e0f4d2SKip Macy 14189e0f4d2SKip Macy #define net_ratelimit() 0 14289e0f4d2SKip Macy 14389e0f4d2SKip Macy struct netfront_info; 14489e0f4d2SKip Macy struct netfront_rx_info; 14589e0f4d2SKip Macy 14689e0f4d2SKip Macy static void xn_txeof(struct netfront_info *); 14789e0f4d2SKip Macy static void xn_rxeof(struct netfront_info *); 14889e0f4d2SKip Macy static void network_alloc_rx_buffers(struct netfront_info *); 14989e0f4d2SKip Macy 15089e0f4d2SKip Macy static void xn_tick_locked(struct netfront_info *); 15189e0f4d2SKip Macy static void xn_tick(void *); 15289e0f4d2SKip Macy 15389e0f4d2SKip Macy static void xn_intr(void *); 154931eeffaSKenneth D. Merry static inline int xn_count_frags(struct mbuf *m); 155931eeffaSKenneth D. Merry static int xn_assemble_tx_request(struct netfront_info *sc, 156931eeffaSKenneth D. Merry struct mbuf *m_head); 15789e0f4d2SKip Macy static void xn_start_locked(struct ifnet *); 15889e0f4d2SKip Macy static void xn_start(struct ifnet *); 15989e0f4d2SKip Macy static int xn_ioctl(struct ifnet *, u_long, caddr_t); 16089e0f4d2SKip Macy static void xn_ifinit_locked(struct netfront_info *); 16189e0f4d2SKip Macy static void xn_ifinit(void *); 16289e0f4d2SKip Macy static void xn_stop(struct netfront_info *); 163*578e4bf7SJustin T. Gibbs static void xn_query_features(struct netfront_info *np); 164*578e4bf7SJustin T. 
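/*
 * Worked example for the MAX_TX_REQ_FRAGS limit above (illustrative only,
 * assuming the common 4 KB page size): 65536 / 4096 + 2 = 18 descriptors,
 * i.e. enough slots to cover a 64 KB TSO payload plus two extra entries,
 * mirroring the Linux MAX_SKB_FRAGS value the backend expects.
 */
#if 0
CTASSERT(PAGE_SIZE != 4096 || MAX_TX_REQ_FRAGS == 18);
#endif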
Gibbs static int xn_configure_features(struct netfront_info *np); 16589e0f4d2SKip Macy #ifdef notyet 16689e0f4d2SKip Macy static void xn_watchdog(struct ifnet *); 16789e0f4d2SKip Macy #endif 16889e0f4d2SKip Macy 16989e0f4d2SKip Macy static void show_device(struct netfront_info *sc); 17089e0f4d2SKip Macy #ifdef notyet 17123dc5621SKip Macy static void netfront_closing(device_t dev); 17289e0f4d2SKip Macy #endif 17389e0f4d2SKip Macy static void netif_free(struct netfront_info *info); 17423dc5621SKip Macy static int netfront_detach(device_t dev); 17589e0f4d2SKip Macy 17623dc5621SKip Macy static int talk_to_backend(device_t dev, struct netfront_info *info); 17723dc5621SKip Macy static int create_netdev(device_t dev); 17889e0f4d2SKip Macy static void netif_disconnect_backend(struct netfront_info *info); 17923dc5621SKip Macy static int setup_device(device_t dev, struct netfront_info *info); 180cf9c09e1SJustin T. Gibbs static void free_ring(int *ref, void *ring_ptr_ref); 18189e0f4d2SKip Macy 1820e509842SJustin T. Gibbs static int xn_ifmedia_upd(struct ifnet *ifp); 1830e509842SJustin T. Gibbs static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); 1840e509842SJustin T. Gibbs 18589e0f4d2SKip Macy /* Xenolinux helper functions */ 18623dc5621SKip Macy int network_connect(struct netfront_info *); 18789e0f4d2SKip Macy 18889e0f4d2SKip Macy static void xn_free_rx_ring(struct netfront_info *); 18989e0f4d2SKip Macy 19089e0f4d2SKip Macy static void xn_free_tx_ring(struct netfront_info *); 19189e0f4d2SKip Macy 19289e0f4d2SKip Macy static int xennet_get_responses(struct netfront_info *np, 193931eeffaSKenneth D. Merry struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons, 194931eeffaSKenneth D. Merry struct mbuf **list, int *pages_flipped_p); 19589e0f4d2SKip Macy 19689e0f4d2SKip Macy #define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT) 19789e0f4d2SKip Macy 19889e0f4d2SKip Macy #define INVALID_P2M_ENTRY (~0UL) 19989e0f4d2SKip Macy 20089e0f4d2SKip Macy /* 20189e0f4d2SKip Macy * Mbuf pointers. We need these to keep track of the virtual addresses 20289e0f4d2SKip Macy * of our mbuf chains since we can only convert from virtual to physical, 20389e0f4d2SKip Macy * not the other way around. The size must track the free index arrays. 20489e0f4d2SKip Macy */ 20589e0f4d2SKip Macy struct xn_chain_data { 20689e0f4d2SKip Macy struct mbuf *xn_tx_chain[NET_TX_RING_SIZE+1]; 207a4ec37f5SAdrian Chadd int xn_tx_chain_cnt; 20889e0f4d2SKip Macy struct mbuf *xn_rx_chain[NET_RX_RING_SIZE+1]; 20989e0f4d2SKip Macy }; 21089e0f4d2SKip Macy 211931eeffaSKenneth D. 
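/*
 * Added note (not in the original source): the chain arrays above double as
 * embedded free lists.  Entry 0 is the list head and is never handed out; a
 * slot whose "pointer" value is <= NET_TX_RING_SIZE is really the index of
 * the next free slot, while anything larger is a genuine mbuf pointer (see
 * add_id_to_freelist()/get_id_from_freelist() and netif_release_tx_bufs()
 * below).  A hypothetical caller looks roughly like this:
 */
#if 0
	u_short id;

	id = get_id_from_freelist(sc->tx_mbufs);	/* pop a free index */
	sc->tx_mbufs[id] = m_head;			/* slot now owns the mbuf */
	/* ... hand id to the backend; once the response comes back: */
	sc->tx_mbufs[id] = NULL;
	add_id_to_freelist(sc->tx_mbufs, id);		/* push the index back */
#endif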
Merry #define NUM_ELEMENTS(x) (sizeof(x)/sizeof(*x)) 21289e0f4d2SKip Macy 21389e0f4d2SKip Macy struct net_device_stats 21489e0f4d2SKip Macy { 21589e0f4d2SKip Macy u_long rx_packets; /* total packets received */ 21689e0f4d2SKip Macy u_long tx_packets; /* total packets transmitted */ 21789e0f4d2SKip Macy u_long rx_bytes; /* total bytes received */ 21889e0f4d2SKip Macy u_long tx_bytes; /* total bytes transmitted */ 21989e0f4d2SKip Macy u_long rx_errors; /* bad packets received */ 22089e0f4d2SKip Macy u_long tx_errors; /* packet transmit problems */ 22189e0f4d2SKip Macy u_long rx_dropped; /* no space in linux buffers */ 22289e0f4d2SKip Macy u_long tx_dropped; /* no space available in linux */ 22389e0f4d2SKip Macy u_long multicast; /* multicast packets received */ 22489e0f4d2SKip Macy u_long collisions; 22589e0f4d2SKip Macy 22689e0f4d2SKip Macy /* detailed rx_errors: */ 22789e0f4d2SKip Macy u_long rx_length_errors; 22889e0f4d2SKip Macy u_long rx_over_errors; /* receiver ring buff overflow */ 22989e0f4d2SKip Macy u_long rx_crc_errors; /* recved pkt with crc error */ 23089e0f4d2SKip Macy u_long rx_frame_errors; /* recv'd frame alignment error */ 23189e0f4d2SKip Macy u_long rx_fifo_errors; /* recv'r fifo overrun */ 23289e0f4d2SKip Macy u_long rx_missed_errors; /* receiver missed packet */ 23389e0f4d2SKip Macy 23489e0f4d2SKip Macy /* detailed tx_errors */ 23589e0f4d2SKip Macy u_long tx_aborted_errors; 23689e0f4d2SKip Macy u_long tx_carrier_errors; 23789e0f4d2SKip Macy u_long tx_fifo_errors; 23889e0f4d2SKip Macy u_long tx_heartbeat_errors; 23989e0f4d2SKip Macy u_long tx_window_errors; 24089e0f4d2SKip Macy 24189e0f4d2SKip Macy /* for cslip etc */ 24289e0f4d2SKip Macy u_long rx_compressed; 24389e0f4d2SKip Macy u_long tx_compressed; 24489e0f4d2SKip Macy }; 24589e0f4d2SKip Macy 24689e0f4d2SKip Macy struct netfront_info { 24789e0f4d2SKip Macy 24889e0f4d2SKip Macy struct ifnet *xn_ifp; 24912678024SDoug Rabson #if __FreeBSD_version >= 700000 25012678024SDoug Rabson struct lro_ctrl xn_lro; 25112678024SDoug Rabson #endif 25289e0f4d2SKip Macy 25389e0f4d2SKip Macy struct net_device_stats stats; 25489e0f4d2SKip Macy u_int tx_full; 25589e0f4d2SKip Macy 25689e0f4d2SKip Macy netif_tx_front_ring_t tx; 25789e0f4d2SKip Macy netif_rx_front_ring_t rx; 25889e0f4d2SKip Macy 25989e0f4d2SKip Macy struct mtx tx_lock; 26089e0f4d2SKip Macy struct mtx rx_lock; 261227ca257SKip Macy struct mtx sc_lock; 26289e0f4d2SKip Macy 26389e0f4d2SKip Macy u_int handle; 26489e0f4d2SKip Macy u_int irq; 26589e0f4d2SKip Macy u_int copying_receiver; 26689e0f4d2SKip Macy u_int carrier; 267*578e4bf7SJustin T. Gibbs u_int maxfrags; 26889e0f4d2SKip Macy 26989e0f4d2SKip Macy /* Receive-ring batched refills. */ 27089e0f4d2SKip Macy #define RX_MIN_TARGET 32 27189e0f4d2SKip Macy #define RX_MAX_TARGET NET_RX_RING_SIZE 2720e509842SJustin T. Gibbs int rx_min_target; 2730e509842SJustin T. Gibbs int rx_max_target; 2740e509842SJustin T. 
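	/*
	 * rx_target is the floating refill goal for the receive ring: it sits
	 * between rx_min_target and rx_max_target, and network_alloc_rx_buffers()
	 * doubles it (capped at rx_max_target) whenever the backend risks running
	 * out of posted receive buffers.
	 */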
Gibbs int rx_target; 27589e0f4d2SKip Macy 27689e0f4d2SKip Macy grant_ref_t gref_tx_head; 27789e0f4d2SKip Macy grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; 27889e0f4d2SKip Macy grant_ref_t gref_rx_head; 27989e0f4d2SKip Macy grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1]; 28089e0f4d2SKip Macy 28123dc5621SKip Macy device_t xbdev; 28289e0f4d2SKip Macy int tx_ring_ref; 28389e0f4d2SKip Macy int rx_ring_ref; 28489e0f4d2SKip Macy uint8_t mac[ETHER_ADDR_LEN]; 28589e0f4d2SKip Macy struct xn_chain_data xn_cdata; /* mbufs */ 28689e0f4d2SKip Macy struct mbuf_head xn_rx_batch; /* head of the batch queue */ 28789e0f4d2SKip Macy 28889e0f4d2SKip Macy int xn_if_flags; 28989e0f4d2SKip Macy struct callout xn_stat_ch; 29089e0f4d2SKip Macy 29189e0f4d2SKip Macy u_long rx_pfn_array[NET_RX_RING_SIZE]; 29289e0f4d2SKip Macy multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1]; 29389e0f4d2SKip Macy mmu_update_t rx_mmu[NET_RX_RING_SIZE]; 2940e509842SJustin T. Gibbs struct ifmedia sc_media; 29589e0f4d2SKip Macy }; 29689e0f4d2SKip Macy 29789e0f4d2SKip Macy #define rx_mbufs xn_cdata.xn_rx_chain 29889e0f4d2SKip Macy #define tx_mbufs xn_cdata.xn_tx_chain 29989e0f4d2SKip Macy 30089e0f4d2SKip Macy #define XN_LOCK_INIT(_sc, _name) \ 30189e0f4d2SKip Macy mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \ 30289e0f4d2SKip Macy mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF); \ 303227ca257SKip Macy mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF) 30489e0f4d2SKip Macy 30589e0f4d2SKip Macy #define XN_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_lock) 30689e0f4d2SKip Macy #define XN_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_lock) 30789e0f4d2SKip Macy 30889e0f4d2SKip Macy #define XN_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_lock) 30989e0f4d2SKip Macy #define XN_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_lock) 31089e0f4d2SKip Macy 311227ca257SKip Macy #define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock); 312227ca257SKip Macy #define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock); 31389e0f4d2SKip Macy 314227ca257SKip Macy #define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED); 31589e0f4d2SKip Macy #define XN_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_lock, MA_OWNED); 31689e0f4d2SKip Macy #define XN_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_lock, MA_OWNED); 31789e0f4d2SKip Macy #define XN_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_lock); \ 31889e0f4d2SKip Macy mtx_destroy(&(_sc)->tx_lock); \ 319227ca257SKip Macy mtx_destroy(&(_sc)->sc_lock); 32089e0f4d2SKip Macy 32189e0f4d2SKip Macy struct netfront_rx_info { 32289e0f4d2SKip Macy struct netif_rx_response rx; 32389e0f4d2SKip Macy struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; 32489e0f4d2SKip Macy }; 32589e0f4d2SKip Macy 32689e0f4d2SKip Macy #define netfront_carrier_on(netif) ((netif)->carrier = 1) 32789e0f4d2SKip Macy #define netfront_carrier_off(netif) ((netif)->carrier = 0) 32889e0f4d2SKip Macy #define netfront_carrier_ok(netif) ((netif)->carrier) 32989e0f4d2SKip Macy 33089e0f4d2SKip Macy /* Access macros for acquiring freeing slots in xn_free_{tx,rx}_idxs[]. */ 33189e0f4d2SKip Macy 33289e0f4d2SKip Macy 33389e0f4d2SKip Macy 33489e0f4d2SKip Macy /* 33589e0f4d2SKip Macy * Access macros for acquiring freeing slots in tx_skbs[]. 33689e0f4d2SKip Macy */ 33789e0f4d2SKip Macy 33889e0f4d2SKip Macy static inline void 339931eeffaSKenneth D. Merry add_id_to_freelist(struct mbuf **list, uintptr_t id) 34089e0f4d2SKip Macy { 341931eeffaSKenneth D. Merry KASSERT(id != 0, 342931eeffaSKenneth D. 
Merry ("%s: the head item (0) must always be free.", __func__)); 34389e0f4d2SKip Macy list[id] = list[0]; 344931eeffaSKenneth D. Merry list[0] = (struct mbuf *)id; 34589e0f4d2SKip Macy } 34689e0f4d2SKip Macy 34789e0f4d2SKip Macy static inline unsigned short 34889e0f4d2SKip Macy get_id_from_freelist(struct mbuf **list) 34989e0f4d2SKip Macy { 350931eeffaSKenneth D. Merry uintptr_t id; 351931eeffaSKenneth D. Merry 352931eeffaSKenneth D. Merry id = (uintptr_t)list[0]; 353931eeffaSKenneth D. Merry KASSERT(id != 0, 354931eeffaSKenneth D. Merry ("%s: the head item (0) must always remain free.", __func__)); 35589e0f4d2SKip Macy list[0] = list[id]; 35689e0f4d2SKip Macy return (id); 35789e0f4d2SKip Macy } 35889e0f4d2SKip Macy 35989e0f4d2SKip Macy static inline int 36089e0f4d2SKip Macy xennet_rxidx(RING_IDX idx) 36189e0f4d2SKip Macy { 36289e0f4d2SKip Macy return idx & (NET_RX_RING_SIZE - 1); 36389e0f4d2SKip Macy } 36489e0f4d2SKip Macy 36589e0f4d2SKip Macy static inline struct mbuf * 366931eeffaSKenneth D. Merry xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri) 36789e0f4d2SKip Macy { 36889e0f4d2SKip Macy int i = xennet_rxidx(ri); 36989e0f4d2SKip Macy struct mbuf *m; 37089e0f4d2SKip Macy 37189e0f4d2SKip Macy m = np->rx_mbufs[i]; 37289e0f4d2SKip Macy np->rx_mbufs[i] = NULL; 37389e0f4d2SKip Macy return (m); 37489e0f4d2SKip Macy } 37589e0f4d2SKip Macy 37689e0f4d2SKip Macy static inline grant_ref_t 37789e0f4d2SKip Macy xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) 37889e0f4d2SKip Macy { 37989e0f4d2SKip Macy int i = xennet_rxidx(ri); 38089e0f4d2SKip Macy grant_ref_t ref = np->grant_rx_ref[i]; 381ff662b5cSJustin T. Gibbs KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n")); 382ff662b5cSJustin T. Gibbs np->grant_rx_ref[i] = GRANT_REF_INVALID; 38389e0f4d2SKip Macy return ref; 38489e0f4d2SKip Macy } 38589e0f4d2SKip Macy 38689e0f4d2SKip Macy #define IPRINTK(fmt, args...) \ 38789e0f4d2SKip Macy printf("[XEN] " fmt, ##args) 388227ca257SKip Macy #ifdef INVARIANTS 38989e0f4d2SKip Macy #define WPRINTK(fmt, args...) \ 39089e0f4d2SKip Macy printf("[XEN] " fmt, ##args) 391227ca257SKip Macy #else 392227ca257SKip Macy #define WPRINTK(fmt, args...) 393227ca257SKip Macy #endif 394227ca257SKip Macy #ifdef DEBUG 39589e0f4d2SKip Macy #define DPRINTK(fmt, args...) \ 39623dc5621SKip Macy printf("[XEN] %s: " fmt, __func__, ##args) 39712678024SDoug Rabson #else 39812678024SDoug Rabson #define DPRINTK(fmt, args...) 39912678024SDoug Rabson #endif 40089e0f4d2SKip Macy 40189e0f4d2SKip Macy /** 40289e0f4d2SKip Macy * Read the 'mac' node at the given device's node in the store, and parse that 40389e0f4d2SKip Macy * as colon-separated octets, placing result the given mac array. mac must be 40489e0f4d2SKip Macy * a preallocated array of length ETH_ALEN (as declared in linux/if_ether.h). 40589e0f4d2SKip Macy * Return 0 on success, or errno on error. 40689e0f4d2SKip Macy */ 40789e0f4d2SKip Macy static int 40823dc5621SKip Macy xen_net_read_mac(device_t dev, uint8_t mac[]) 40989e0f4d2SKip Macy { 4103a6d1fcfSKip Macy int error, i; 4113a6d1fcfSKip Macy char *s, *e, *macstr; 412ffa06904SJustin T. Gibbs const char *path; 4133a6d1fcfSKip Macy 414ffa06904SJustin T. Gibbs path = xenbus_get_node(dev); 415ffa06904SJustin T. Gibbs error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr); 416ffa06904SJustin T. Gibbs if (error == ENOENT) { 417ffa06904SJustin T. Gibbs /* 418ffa06904SJustin T. Gibbs * Deal with missing mac XenStore nodes on devices with 419ffa06904SJustin T. 
Gibbs * HVM emulation (the 'ioemu' configuration attribute) 420ffa06904SJustin T. Gibbs * enabled. 421ffa06904SJustin T. Gibbs * 422ffa06904SJustin T. Gibbs * The HVM emulator may execute in a stub device model 423ffa06904SJustin T. Gibbs * domain which lacks the permission, only given to Dom0, 424ffa06904SJustin T. Gibbs * to update the guest's XenStore tree. For this reason, 425ffa06904SJustin T. Gibbs * the HVM emulator doesn't even attempt to write the 426ffa06904SJustin T. Gibbs * front-side mac node, even when operating in Dom0. 427ffa06904SJustin T. Gibbs * However, there should always be a mac listed in the 428ffa06904SJustin T. Gibbs * backend tree. Fallback to this version if our query 429ffa06904SJustin T. Gibbs * of the front side XenStore location doesn't find 430ffa06904SJustin T. Gibbs * anything. 431ffa06904SJustin T. Gibbs */ 432ffa06904SJustin T. Gibbs path = xenbus_get_otherend_path(dev); 433ffa06904SJustin T. Gibbs error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr); 434ffa06904SJustin T. Gibbs } 435ffa06904SJustin T. Gibbs if (error != 0) { 436ffa06904SJustin T. Gibbs xenbus_dev_fatal(dev, error, "parsing %s/mac", path); 4373a6d1fcfSKip Macy return (error); 438ffa06904SJustin T. Gibbs } 4393a6d1fcfSKip Macy 44089e0f4d2SKip Macy s = macstr; 44189e0f4d2SKip Macy for (i = 0; i < ETHER_ADDR_LEN; i++) { 44289e0f4d2SKip Macy mac[i] = strtoul(s, &e, 16); 44389e0f4d2SKip Macy if (s == e || (e[0] != ':' && e[0] != 0)) { 444ff662b5cSJustin T. Gibbs free(macstr, M_XENBUS); 4453a6d1fcfSKip Macy return (ENOENT); 44689e0f4d2SKip Macy } 44789e0f4d2SKip Macy s = &e[1]; 44889e0f4d2SKip Macy } 449ff662b5cSJustin T. Gibbs free(macstr, M_XENBUS); 4503a6d1fcfSKip Macy return (0); 45189e0f4d2SKip Macy } 45289e0f4d2SKip Macy 45389e0f4d2SKip Macy /** 45489e0f4d2SKip Macy * Entry point to this code when a new device is created. Allocate the basic 45589e0f4d2SKip Macy * structures and the ring buffers for communication with the backend, and 45689e0f4d2SKip Macy * inform the backend of the appropriate details for those. Switch to 45789e0f4d2SKip Macy * Connected state. 45889e0f4d2SKip Macy */ 45989e0f4d2SKip Macy static int 46023dc5621SKip Macy netfront_probe(device_t dev) 46123dc5621SKip Macy { 46223dc5621SKip Macy 46323dc5621SKip Macy if (!strcmp(xenbus_get_type(dev), "vif")) { 46423dc5621SKip Macy device_set_desc(dev, "Virtual Network Interface"); 46523dc5621SKip Macy return (0); 46623dc5621SKip Macy } 46723dc5621SKip Macy 46823dc5621SKip Macy return (ENXIO); 46923dc5621SKip Macy } 47023dc5621SKip Macy 47123dc5621SKip Macy static int 47223dc5621SKip Macy netfront_attach(device_t dev) 47389e0f4d2SKip Macy { 47489e0f4d2SKip Macy int err; 47589e0f4d2SKip Macy 47623dc5621SKip Macy err = create_netdev(dev); 47789e0f4d2SKip Macy if (err) { 47889e0f4d2SKip Macy xenbus_dev_fatal(dev, err, "creating netdev"); 479ffa06904SJustin T. Gibbs return (err); 48089e0f4d2SKip Macy } 48189e0f4d2SKip Macy 48212678024SDoug Rabson #if __FreeBSD_version >= 700000 48312678024SDoug Rabson SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 48412678024SDoug Rabson SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 48512678024SDoug Rabson OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW, 48612678024SDoug Rabson &xn_enable_lro, 0, "Large Receive Offload"); 48712678024SDoug Rabson #endif 48812678024SDoug Rabson 489ffa06904SJustin T. Gibbs return (0); 49089e0f4d2SKip Macy } 49189e0f4d2SKip Macy 492cf9c09e1SJustin T. Gibbs static int 493cf9c09e1SJustin T. Gibbs netfront_suspend(device_t dev) 494cf9c09e1SJustin T. 
Gibbs { 495cf9c09e1SJustin T. Gibbs struct netfront_info *info = device_get_softc(dev); 496cf9c09e1SJustin T. Gibbs 497cf9c09e1SJustin T. Gibbs XN_RX_LOCK(info); 498cf9c09e1SJustin T. Gibbs XN_TX_LOCK(info); 499cf9c09e1SJustin T. Gibbs netfront_carrier_off(info); 500cf9c09e1SJustin T. Gibbs XN_TX_UNLOCK(info); 501cf9c09e1SJustin T. Gibbs XN_RX_UNLOCK(info); 502cf9c09e1SJustin T. Gibbs return (0); 503cf9c09e1SJustin T. Gibbs } 50489e0f4d2SKip Macy 50589e0f4d2SKip Macy /** 50689e0f4d2SKip Macy * We are reconnecting to the backend, due to a suspend/resume, or a backend 50789e0f4d2SKip Macy * driver restart. We tear down our netif structure and recreate it, but 50889e0f4d2SKip Macy * leave the device-layer structures intact so that this is transparent to the 50989e0f4d2SKip Macy * rest of the kernel. 51089e0f4d2SKip Macy */ 51189e0f4d2SKip Macy static int 51223dc5621SKip Macy netfront_resume(device_t dev) 51389e0f4d2SKip Macy { 51423dc5621SKip Macy struct netfront_info *info = device_get_softc(dev); 51589e0f4d2SKip Macy 51689e0f4d2SKip Macy netif_disconnect_backend(info); 51789e0f4d2SKip Macy return (0); 51889e0f4d2SKip Macy } 51989e0f4d2SKip Macy 52089e0f4d2SKip Macy 52189e0f4d2SKip Macy /* Common code used when first setting up, and when resuming. */ 52289e0f4d2SKip Macy static int 52323dc5621SKip Macy talk_to_backend(device_t dev, struct netfront_info *info) 52489e0f4d2SKip Macy { 52589e0f4d2SKip Macy const char *message; 526ff662b5cSJustin T. Gibbs struct xs_transaction xst; 52723dc5621SKip Macy const char *node = xenbus_get_node(dev); 52889e0f4d2SKip Macy int err; 52989e0f4d2SKip Macy 53089e0f4d2SKip Macy err = xen_net_read_mac(dev, info->mac); 53189e0f4d2SKip Macy if (err) { 53223dc5621SKip Macy xenbus_dev_fatal(dev, err, "parsing %s/mac", node); 53389e0f4d2SKip Macy goto out; 53489e0f4d2SKip Macy } 53589e0f4d2SKip Macy 53689e0f4d2SKip Macy /* Create shared ring, alloc event channel. */ 53789e0f4d2SKip Macy err = setup_device(dev, info); 53889e0f4d2SKip Macy if (err) 53989e0f4d2SKip Macy goto out; 54089e0f4d2SKip Macy 54189e0f4d2SKip Macy again: 542ff662b5cSJustin T. Gibbs err = xs_transaction_start(&xst); 54389e0f4d2SKip Macy if (err) { 54489e0f4d2SKip Macy xenbus_dev_fatal(dev, err, "starting transaction"); 54589e0f4d2SKip Macy goto destroy_ring; 54689e0f4d2SKip Macy } 547ff662b5cSJustin T. Gibbs err = xs_printf(xst, node, "tx-ring-ref","%u", 54889e0f4d2SKip Macy info->tx_ring_ref); 54989e0f4d2SKip Macy if (err) { 55089e0f4d2SKip Macy message = "writing tx ring-ref"; 55189e0f4d2SKip Macy goto abort_transaction; 55289e0f4d2SKip Macy } 553ff662b5cSJustin T. Gibbs err = xs_printf(xst, node, "rx-ring-ref","%u", 55489e0f4d2SKip Macy info->rx_ring_ref); 55589e0f4d2SKip Macy if (err) { 55689e0f4d2SKip Macy message = "writing rx ring-ref"; 55789e0f4d2SKip Macy goto abort_transaction; 55889e0f4d2SKip Macy } 559ff662b5cSJustin T. Gibbs err = xs_printf(xst, node, 56089e0f4d2SKip Macy "event-channel", "%u", irq_to_evtchn_port(info->irq)); 56189e0f4d2SKip Macy if (err) { 56289e0f4d2SKip Macy message = "writing event-channel"; 56389e0f4d2SKip Macy goto abort_transaction; 56489e0f4d2SKip Macy } 565ff662b5cSJustin T. Gibbs err = xs_printf(xst, node, "request-rx-copy", "%u", 56689e0f4d2SKip Macy info->copying_receiver); 56789e0f4d2SKip Macy if (err) { 56889e0f4d2SKip Macy message = "writing request-rx-copy"; 56989e0f4d2SKip Macy goto abort_transaction; 57089e0f4d2SKip Macy } 571ff662b5cSJustin T. 
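	/*
	 * Advertise frontend capabilities to the backend.  Everything below is
	 * written in the same XenStore transaction as the ring references and
	 * event channel above; if xs_transaction_end() later fails with EAGAIN
	 * the whole transaction is simply replayed from the "again" label.
	 */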
Gibbs err = xs_printf(xst, node, "feature-rx-notify", "%d", 1); 57289e0f4d2SKip Macy if (err) { 57389e0f4d2SKip Macy message = "writing feature-rx-notify"; 57489e0f4d2SKip Macy goto abort_transaction; 57589e0f4d2SKip Macy } 576ff662b5cSJustin T. Gibbs err = xs_printf(xst, node, "feature-sg", "%d", 1); 57789e0f4d2SKip Macy if (err) { 57889e0f4d2SKip Macy message = "writing feature-sg"; 57989e0f4d2SKip Macy goto abort_transaction; 58089e0f4d2SKip Macy } 58112678024SDoug Rabson #if __FreeBSD_version >= 700000 582ff662b5cSJustin T. Gibbs err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1); 58389e0f4d2SKip Macy if (err) { 58489e0f4d2SKip Macy message = "writing feature-gso-tcpv4"; 58589e0f4d2SKip Macy goto abort_transaction; 58689e0f4d2SKip Macy } 58789e0f4d2SKip Macy #endif 58889e0f4d2SKip Macy 589ff662b5cSJustin T. Gibbs err = xs_transaction_end(xst, 0); 59089e0f4d2SKip Macy if (err) { 59189e0f4d2SKip Macy if (err == EAGAIN) 59289e0f4d2SKip Macy goto again; 59389e0f4d2SKip Macy xenbus_dev_fatal(dev, err, "completing transaction"); 59489e0f4d2SKip Macy goto destroy_ring; 59589e0f4d2SKip Macy } 59689e0f4d2SKip Macy 59789e0f4d2SKip Macy return 0; 59889e0f4d2SKip Macy 59989e0f4d2SKip Macy abort_transaction: 600ff662b5cSJustin T. Gibbs xs_transaction_end(xst, 1); 60189e0f4d2SKip Macy xenbus_dev_fatal(dev, err, "%s", message); 60289e0f4d2SKip Macy destroy_ring: 60389e0f4d2SKip Macy netif_free(info); 60489e0f4d2SKip Macy out: 60589e0f4d2SKip Macy return err; 60689e0f4d2SKip Macy } 60789e0f4d2SKip Macy 60889e0f4d2SKip Macy 60989e0f4d2SKip Macy static int 61023dc5621SKip Macy setup_device(device_t dev, struct netfront_info *info) 61189e0f4d2SKip Macy { 61289e0f4d2SKip Macy netif_tx_sring_t *txs; 61389e0f4d2SKip Macy netif_rx_sring_t *rxs; 6143a6d1fcfSKip Macy int error; 61589e0f4d2SKip Macy struct ifnet *ifp; 61689e0f4d2SKip Macy 61789e0f4d2SKip Macy ifp = info->xn_ifp; 61889e0f4d2SKip Macy 619ff662b5cSJustin T. Gibbs info->tx_ring_ref = GRANT_REF_INVALID; 620ff662b5cSJustin T. 
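	/*
	 * Both ring references start out invalid until granted below.  Each ring
	 * occupies a single page: SHARED_RING_INIT() formats the shared structure,
	 * FRONT_RING_INIT() attaches our private producer/consumer indices, and
	 * xenbus_grant_ring() grants the page to the backend, producing the
	 * *-ring-ref values that talk_to_backend() writes to XenStore.
	 */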
Gibbs info->rx_ring_ref = GRANT_REF_INVALID; 62189e0f4d2SKip Macy info->rx.sring = NULL; 62289e0f4d2SKip Macy info->tx.sring = NULL; 62389e0f4d2SKip Macy info->irq = 0; 62489e0f4d2SKip Macy 62589e0f4d2SKip Macy txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO); 62689e0f4d2SKip Macy if (!txs) { 6273a6d1fcfSKip Macy error = ENOMEM; 6283a6d1fcfSKip Macy xenbus_dev_fatal(dev, error, "allocating tx ring page"); 62989e0f4d2SKip Macy goto fail; 63089e0f4d2SKip Macy } 63189e0f4d2SKip Macy SHARED_RING_INIT(txs); 63289e0f4d2SKip Macy FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); 6333a6d1fcfSKip Macy error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref); 6343a6d1fcfSKip Macy if (error) 63589e0f4d2SKip Macy goto fail; 63689e0f4d2SKip Macy 63789e0f4d2SKip Macy rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO); 63889e0f4d2SKip Macy if (!rxs) { 6393a6d1fcfSKip Macy error = ENOMEM; 6403a6d1fcfSKip Macy xenbus_dev_fatal(dev, error, "allocating rx ring page"); 64189e0f4d2SKip Macy goto fail; 64289e0f4d2SKip Macy } 64389e0f4d2SKip Macy SHARED_RING_INIT(rxs); 64489e0f4d2SKip Macy FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); 64589e0f4d2SKip Macy 6463a6d1fcfSKip Macy error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref); 6473a6d1fcfSKip Macy if (error) 64889e0f4d2SKip Macy goto fail; 64989e0f4d2SKip Macy 6503a6d1fcfSKip Macy error = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev), 6513a6d1fcfSKip Macy "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, &info->irq); 65289e0f4d2SKip Macy 6533a6d1fcfSKip Macy if (error) { 6543a6d1fcfSKip Macy xenbus_dev_fatal(dev, error, 65589e0f4d2SKip Macy "bind_evtchn_to_irqhandler failed"); 65689e0f4d2SKip Macy goto fail; 65789e0f4d2SKip Macy } 65889e0f4d2SKip Macy 65989e0f4d2SKip Macy show_device(info); 66089e0f4d2SKip Macy 6613a6d1fcfSKip Macy return (0); 66289e0f4d2SKip Macy 66389e0f4d2SKip Macy fail: 66489e0f4d2SKip Macy netif_free(info); 6653a6d1fcfSKip Macy return (error); 66689e0f4d2SKip Macy } 66789e0f4d2SKip Macy 668a0ae8f04SBjoern A. Zeeb #ifdef INET 66989e0f4d2SKip Macy /** 67012678024SDoug Rabson * If this interface has an ipv4 address, send an arp for it. This 67112678024SDoug Rabson * helps to get the network going again after migrating hosts. 67212678024SDoug Rabson */ 67312678024SDoug Rabson static void 67412678024SDoug Rabson netfront_send_fake_arp(device_t dev, struct netfront_info *info) 67512678024SDoug Rabson { 67612678024SDoug Rabson struct ifnet *ifp; 67712678024SDoug Rabson struct ifaddr *ifa; 67812678024SDoug Rabson 67912678024SDoug Rabson ifp = info->xn_ifp; 68012678024SDoug Rabson TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 68112678024SDoug Rabson if (ifa->ifa_addr->sa_family == AF_INET) { 68212678024SDoug Rabson arp_ifinit(ifp, ifa); 68312678024SDoug Rabson } 68412678024SDoug Rabson } 68512678024SDoug Rabson } 686a0ae8f04SBjoern A. Zeeb #endif 68712678024SDoug Rabson 68812678024SDoug Rabson /** 68989e0f4d2SKip Macy * Callback received when the backend's state changes. 69089e0f4d2SKip Macy */ 691283d6f72SJustin T. 
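/*
 * Summary of the handshake implemented below: most states are ignored.  On
 * XenbusStateInitWait the frontend calls network_connect() and, on success,
 * advances itself to Connected and (with INET) sends a gratuitous ARP so that
 * peers relearn its location after a migration.  On XenbusStateClosing it
 * simply acknowledges the backend by switching to Closed.
 */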
Gibbs static void 69223dc5621SKip Macy netfront_backend_changed(device_t dev, XenbusState newstate) 69389e0f4d2SKip Macy { 69423dc5621SKip Macy struct netfront_info *sc = device_get_softc(dev); 69589e0f4d2SKip Macy 69623dc5621SKip Macy DPRINTK("newstate=%d\n", newstate); 69789e0f4d2SKip Macy 69823dc5621SKip Macy switch (newstate) { 69989e0f4d2SKip Macy case XenbusStateInitialising: 70089e0f4d2SKip Macy case XenbusStateInitialised: 70189e0f4d2SKip Macy case XenbusStateConnected: 70289e0f4d2SKip Macy case XenbusStateUnknown: 70389e0f4d2SKip Macy case XenbusStateClosed: 704920ba15bSKip Macy case XenbusStateReconfigured: 705920ba15bSKip Macy case XenbusStateReconfiguring: 70689e0f4d2SKip Macy break; 70789e0f4d2SKip Macy case XenbusStateInitWait: 70823dc5621SKip Macy if (xenbus_get_state(dev) != XenbusStateInitialising) 70989e0f4d2SKip Macy break; 71023dc5621SKip Macy if (network_connect(sc) != 0) 71189e0f4d2SKip Macy break; 71223dc5621SKip Macy xenbus_set_state(dev, XenbusStateConnected); 713a0ae8f04SBjoern A. Zeeb #ifdef INET 71412678024SDoug Rabson netfront_send_fake_arp(dev, sc); 715a0ae8f04SBjoern A. Zeeb #endif 71623dc5621SKip Macy break; 71789e0f4d2SKip Macy case XenbusStateClosing: 71823dc5621SKip Macy xenbus_set_state(dev, XenbusStateClosed); 71989e0f4d2SKip Macy break; 72089e0f4d2SKip Macy } 72189e0f4d2SKip Macy } 72289e0f4d2SKip Macy 72389e0f4d2SKip Macy static void 72489e0f4d2SKip Macy xn_free_rx_ring(struct netfront_info *sc) 72589e0f4d2SKip Macy { 72689e0f4d2SKip Macy #if 0 72789e0f4d2SKip Macy int i; 72889e0f4d2SKip Macy 72989e0f4d2SKip Macy for (i = 0; i < NET_RX_RING_SIZE; i++) { 730931eeffaSKenneth D. Merry if (sc->xn_cdata.rx_mbufs[i] != NULL) { 731931eeffaSKenneth D. Merry m_freem(sc->rx_mbufs[i]); 732931eeffaSKenneth D. Merry sc->rx_mbufs[i] = NULL; 73389e0f4d2SKip Macy } 73489e0f4d2SKip Macy } 73589e0f4d2SKip Macy 73689e0f4d2SKip Macy sc->rx.rsp_cons = 0; 73789e0f4d2SKip Macy sc->xn_rx_if->req_prod = 0; 73889e0f4d2SKip Macy sc->xn_rx_if->event = sc->rx.rsp_cons ; 73989e0f4d2SKip Macy #endif 74089e0f4d2SKip Macy } 74189e0f4d2SKip Macy 74289e0f4d2SKip Macy static void 74389e0f4d2SKip Macy xn_free_tx_ring(struct netfront_info *sc) 74489e0f4d2SKip Macy { 74589e0f4d2SKip Macy #if 0 74689e0f4d2SKip Macy int i; 74789e0f4d2SKip Macy 74889e0f4d2SKip Macy for (i = 0; i < NET_TX_RING_SIZE; i++) { 749931eeffaSKenneth D. Merry if (sc->tx_mbufs[i] != NULL) { 750931eeffaSKenneth D. Merry m_freem(sc->tx_mbufs[i]); 75189e0f4d2SKip Macy sc->xn_cdata.xn_tx_chain[i] = NULL; 75289e0f4d2SKip Macy } 75389e0f4d2SKip Macy } 75489e0f4d2SKip Macy 75589e0f4d2SKip Macy return; 75689e0f4d2SKip Macy #endif 75789e0f4d2SKip Macy } 75889e0f4d2SKip Macy 759931eeffaSKenneth D. Merry /** 760931eeffaSKenneth D. Merry * \brief Verify that there is sufficient space in the Tx ring 761931eeffaSKenneth D. Merry * buffer for a maximally sized request to be enqueued. 762c099cafaSAdrian Chadd * 763931eeffaSKenneth D. Merry * A transmit request requires a transmit descriptor for each packet 764931eeffaSKenneth D. Merry * fragment, plus up to 2 entries for "options" (e.g. TSO). 765c099cafaSAdrian Chadd */ 76689e0f4d2SKip Macy static inline int 767931eeffaSKenneth D. Merry xn_tx_slot_available(struct netfront_info *np) 76889e0f4d2SKip Macy { 769931eeffaSKenneth D. Merry return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2)); 77089e0f4d2SKip Macy } 771931eeffaSKenneth D. 
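/*
 * Hypothetical use of xn_tx_slot_available() (a sketch only, not the driver's
 * actual transmit path): the start routine is expected to stop dequeuing and
 * mark the interface busy once a maximally fragmented packet might no longer
 * fit in the ring.
 */
#if 0
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (!xn_tx_slot_available(sc)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/* ... enqueue m_head on the shared TX ring ... */
	}
#endif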
Merry 77289e0f4d2SKip Macy static void 77389e0f4d2SKip Macy netif_release_tx_bufs(struct netfront_info *np) 77489e0f4d2SKip Macy { 77589e0f4d2SKip Macy int i; 77689e0f4d2SKip Macy 77789e0f4d2SKip Macy for (i = 1; i <= NET_TX_RING_SIZE; i++) { 778931eeffaSKenneth D. Merry struct mbuf *m; 77989e0f4d2SKip Macy 780931eeffaSKenneth D. Merry m = np->tx_mbufs[i]; 781931eeffaSKenneth D. Merry 782931eeffaSKenneth D. Merry /* 783931eeffaSKenneth D. Merry * We assume that no kernel addresses are 784931eeffaSKenneth D. Merry * less than NET_TX_RING_SIZE. Any entry 785931eeffaSKenneth D. Merry * in the table that is below this number 786931eeffaSKenneth D. Merry * must be an index from free-list tracking. 787931eeffaSKenneth D. Merry */ 788931eeffaSKenneth D. Merry if (((uintptr_t)m) <= NET_TX_RING_SIZE) 78989e0f4d2SKip Macy continue; 790cf9c09e1SJustin T. Gibbs gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); 79189e0f4d2SKip Macy gnttab_release_grant_reference(&np->gref_tx_head, 79289e0f4d2SKip Macy np->grant_tx_ref[i]); 793ff662b5cSJustin T. Gibbs np->grant_tx_ref[i] = GRANT_REF_INVALID; 79489e0f4d2SKip Macy add_id_to_freelist(np->tx_mbufs, i); 795a4ec37f5SAdrian Chadd np->xn_cdata.xn_tx_chain_cnt--; 796a4ec37f5SAdrian Chadd if (np->xn_cdata.xn_tx_chain_cnt < 0) { 797a4ec37f5SAdrian Chadd panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0"); 798a4ec37f5SAdrian Chadd } 799cf9c09e1SJustin T. Gibbs m_free(m); 80089e0f4d2SKip Macy } 80189e0f4d2SKip Macy } 80289e0f4d2SKip Macy 80389e0f4d2SKip Macy static void 80489e0f4d2SKip Macy network_alloc_rx_buffers(struct netfront_info *sc) 80589e0f4d2SKip Macy { 80623dc5621SKip Macy int otherend_id = xenbus_get_otherend_id(sc->xbdev); 80789e0f4d2SKip Macy unsigned short id; 80889e0f4d2SKip Macy struct mbuf *m_new; 80989e0f4d2SKip Macy int i, batch_target, notify; 81089e0f4d2SKip Macy RING_IDX req_prod; 81189e0f4d2SKip Macy struct xen_memory_reservation reservation; 81289e0f4d2SKip Macy grant_ref_t ref; 81389e0f4d2SKip Macy int nr_flips; 81489e0f4d2SKip Macy netif_rx_request_t *req; 81589e0f4d2SKip Macy vm_offset_t vaddr; 81689e0f4d2SKip Macy u_long pfn; 81789e0f4d2SKip Macy 81889e0f4d2SKip Macy req_prod = sc->rx.req_prod_pvt; 81989e0f4d2SKip Macy 82089e0f4d2SKip Macy if (unlikely(sc->carrier == 0)) 82189e0f4d2SKip Macy return; 82289e0f4d2SKip Macy 82389e0f4d2SKip Macy /* 824931eeffaSKenneth D. Merry * Allocate mbufs greedily, even though we batch updates to the 82589e0f4d2SKip Macy * receive ring. This creates a less bursty demand on the memory 826931eeffaSKenneth D. Merry * allocator, and so should reduce the chance of failed allocation 82789e0f4d2SKip Macy * requests both for ourself and for other kernel subsystems. 828931eeffaSKenneth D. Merry * 829931eeffaSKenneth D. Merry * Here we attempt to maintain rx_target buffers in flight, counting 830931eeffaSKenneth D. Merry * buffers that we have yet to process in the receive ring. 83189e0f4d2SKip Macy */ 83289e0f4d2SKip Macy batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons); 83389e0f4d2SKip Macy for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) { 83489e0f4d2SKip Macy MGETHDR(m_new, M_DONTWAIT, MT_DATA); 835931eeffaSKenneth D. Merry if (m_new == NULL) { 836931eeffaSKenneth D. Merry printf("%s: MGETHDR failed\n", __func__); 83789e0f4d2SKip Macy goto no_mbuf; 838931eeffaSKenneth D. Merry } 83989e0f4d2SKip Macy 84089e0f4d2SKip Macy m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE); 84189e0f4d2SKip Macy if ((m_new->m_flags & M_EXT) == 0) { 842931eeffaSKenneth D. 
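			/*
			 * No cluster page could be attached: free the header
			 * and fall into the shared no_mbuf path, which submits
			 * whatever was already allocated (if anything).
			 */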
Merry printf("%s: m_cljget failed\n", __func__); 84389e0f4d2SKip Macy m_freem(m_new); 84489e0f4d2SKip Macy 84589e0f4d2SKip Macy no_mbuf: 84689e0f4d2SKip Macy if (i != 0) 84789e0f4d2SKip Macy goto refill; 84889e0f4d2SKip Macy /* 84989e0f4d2SKip Macy * XXX set timer 85089e0f4d2SKip Macy */ 85189e0f4d2SKip Macy break; 85289e0f4d2SKip Macy } 85389e0f4d2SKip Macy m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE; 85489e0f4d2SKip Macy 85589e0f4d2SKip Macy /* queue the mbufs allocated */ 85689e0f4d2SKip Macy mbufq_tail(&sc->xn_rx_batch, m_new); 85789e0f4d2SKip Macy } 85889e0f4d2SKip Macy 859931eeffaSKenneth D. Merry /* 860931eeffaSKenneth D. Merry * If we've allocated at least half of our target number of entries, 861931eeffaSKenneth D. Merry * submit them to the backend - we have enough to make the overhead 862931eeffaSKenneth D. Merry * of submission worthwhile. Otherwise wait for more mbufs and 863931eeffaSKenneth D. Merry * request entries to become available. 864931eeffaSKenneth D. Merry */ 86589e0f4d2SKip Macy if (i < (sc->rx_target/2)) { 86689e0f4d2SKip Macy if (req_prod >sc->rx.sring->req_prod) 86789e0f4d2SKip Macy goto push; 86889e0f4d2SKip Macy return; 86989e0f4d2SKip Macy } 870931eeffaSKenneth D. Merry 871931eeffaSKenneth D. Merry /* 872931eeffaSKenneth D. Merry * Double floating fill target if we risked having the backend 873931eeffaSKenneth D. Merry * run out of empty buffers for receive traffic. We define "running 874931eeffaSKenneth D. Merry * low" as having less than a fourth of our target buffers free 875931eeffaSKenneth D. Merry * at the time we refilled the queue. 876931eeffaSKenneth D. Merry */ 877931eeffaSKenneth D. Merry if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) { 878931eeffaSKenneth D. Merry sc->rx_target *= 2; 879931eeffaSKenneth D. Merry if (sc->rx_target > sc->rx_max_target) 88089e0f4d2SKip Macy sc->rx_target = sc->rx_max_target; 881931eeffaSKenneth D. Merry } 88289e0f4d2SKip Macy 88389e0f4d2SKip Macy refill: 88489e0f4d2SKip Macy for (nr_flips = i = 0; ; i++) { 88589e0f4d2SKip Macy if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL) 88689e0f4d2SKip Macy break; 88789e0f4d2SKip Macy 88889e0f4d2SKip Macy m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)( 88989e0f4d2SKip Macy vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT); 89089e0f4d2SKip Macy 89189e0f4d2SKip Macy id = xennet_rxidx(req_prod + i); 89289e0f4d2SKip Macy 893931eeffaSKenneth D. Merry KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain")); 894931eeffaSKenneth D. Merry sc->rx_mbufs[id] = m_new; 89589e0f4d2SKip Macy 89689e0f4d2SKip Macy ref = gnttab_claim_grant_reference(&sc->gref_rx_head); 897ff662b5cSJustin T. Gibbs KASSERT(ref != GNTTAB_LIST_END, 898ff662b5cSJustin T. Gibbs ("reserved grant references exhuasted")); 89989e0f4d2SKip Macy sc->grant_rx_ref[id] = ref; 90089e0f4d2SKip Macy 90189e0f4d2SKip Macy vaddr = mtod(m_new, vm_offset_t); 90289e0f4d2SKip Macy pfn = vtophys(vaddr) >> PAGE_SHIFT; 90389e0f4d2SKip Macy req = RING_GET_REQUEST(&sc->rx, req_prod + i); 90489e0f4d2SKip Macy 90589e0f4d2SKip Macy if (sc->copying_receiver == 0) { 90689e0f4d2SKip Macy gnttab_grant_foreign_transfer_ref(ref, 90723dc5621SKip Macy otherend_id, pfn); 90889e0f4d2SKip Macy sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn); 90989e0f4d2SKip Macy if (!xen_feature(XENFEAT_auto_translated_physmap)) { 91089e0f4d2SKip Macy /* Remove this page before passing 91189e0f4d2SKip Macy * back to Xen. 
91289e0f4d2SKip Macy */ 91389e0f4d2SKip Macy set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 91489e0f4d2SKip Macy MULTI_update_va_mapping(&sc->rx_mcl[i], 91589e0f4d2SKip Macy vaddr, 0, 0); 91689e0f4d2SKip Macy } 91789e0f4d2SKip Macy nr_flips++; 91889e0f4d2SKip Macy } else { 91989e0f4d2SKip Macy gnttab_grant_foreign_access_ref(ref, 92023dc5621SKip Macy otherend_id, 92189e0f4d2SKip Macy PFNTOMFN(pfn), 0); 92289e0f4d2SKip Macy } 92389e0f4d2SKip Macy req->id = id; 92489e0f4d2SKip Macy req->gref = ref; 92589e0f4d2SKip Macy 92689e0f4d2SKip Macy sc->rx_pfn_array[i] = 92789e0f4d2SKip Macy vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT; 92889e0f4d2SKip Macy } 92989e0f4d2SKip Macy 93089e0f4d2SKip Macy KASSERT(i, ("no mbufs processed")); /* should have returned earlier */ 93189e0f4d2SKip Macy KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed")); 93289e0f4d2SKip Macy /* 93389e0f4d2SKip Macy * We may have allocated buffers which have entries outstanding 93489e0f4d2SKip Macy * in the page * update queue -- make sure we flush those first! 93589e0f4d2SKip Macy */ 93689e0f4d2SKip Macy PT_UPDATES_FLUSH(); 93789e0f4d2SKip Macy if (nr_flips != 0) { 93889e0f4d2SKip Macy #ifdef notyet 93989e0f4d2SKip Macy /* Tell the ballon driver what is going on. */ 94089e0f4d2SKip Macy balloon_update_driver_allowance(i); 94189e0f4d2SKip Macy #endif 942920ba15bSKip Macy set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array); 94389e0f4d2SKip Macy reservation.nr_extents = i; 94489e0f4d2SKip Macy reservation.extent_order = 0; 94589e0f4d2SKip Macy reservation.address_bits = 0; 94689e0f4d2SKip Macy reservation.domid = DOMID_SELF; 94789e0f4d2SKip Macy 94889e0f4d2SKip Macy if (!xen_feature(XENFEAT_auto_translated_physmap)) { 94989e0f4d2SKip Macy 95089e0f4d2SKip Macy /* After all PTEs have been zapped, flush the TLB. */ 95189e0f4d2SKip Macy sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = 95289e0f4d2SKip Macy UVMF_TLB_FLUSH|UVMF_ALL; 95389e0f4d2SKip Macy 95489e0f4d2SKip Macy /* Give away a batch of pages. */ 95589e0f4d2SKip Macy sc->rx_mcl[i].op = __HYPERVISOR_memory_op; 95689e0f4d2SKip Macy sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation; 95789e0f4d2SKip Macy sc->rx_mcl[i].args[1] = (u_long)&reservation; 95889e0f4d2SKip Macy /* Zap PTEs and give away pages in one big multicall. */ 95989e0f4d2SKip Macy (void)HYPERVISOR_multicall(sc->rx_mcl, i+1); 96089e0f4d2SKip Macy 96189e0f4d2SKip Macy /* Check return status of HYPERVISOR_dom_mem_op(). */ 96289e0f4d2SKip Macy if (unlikely(sc->rx_mcl[i].result != i)) 96389e0f4d2SKip Macy panic("Unable to reduce memory reservation\n"); 96489e0f4d2SKip Macy } else { 96589e0f4d2SKip Macy if (HYPERVISOR_memory_op( 96689e0f4d2SKip Macy XENMEM_decrease_reservation, &reservation) 96789e0f4d2SKip Macy != i) 96889e0f4d2SKip Macy panic("Unable to reduce memory " 96989e0f4d2SKip Macy "reservation\n"); 97089e0f4d2SKip Macy } 97189e0f4d2SKip Macy } else { 97289e0f4d2SKip Macy wmb(); 97389e0f4d2SKip Macy } 97489e0f4d2SKip Macy 97589e0f4d2SKip Macy /* Above is a suitable barrier to ensure backend will see requests. 
*/ 97689e0f4d2SKip Macy sc->rx.req_prod_pvt = req_prod + i; 97789e0f4d2SKip Macy push: 97889e0f4d2SKip Macy RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify); 97989e0f4d2SKip Macy if (notify) 98089e0f4d2SKip Macy notify_remote_via_irq(sc->irq); 98189e0f4d2SKip Macy } 98289e0f4d2SKip Macy 98389e0f4d2SKip Macy static void 98489e0f4d2SKip Macy xn_rxeof(struct netfront_info *np) 98589e0f4d2SKip Macy { 98689e0f4d2SKip Macy struct ifnet *ifp; 98712678024SDoug Rabson #if __FreeBSD_version >= 700000 98812678024SDoug Rabson struct lro_ctrl *lro = &np->xn_lro; 98912678024SDoug Rabson struct lro_entry *queued; 99012678024SDoug Rabson #endif 99189e0f4d2SKip Macy struct netfront_rx_info rinfo; 99289e0f4d2SKip Macy struct netif_rx_response *rx = &rinfo.rx; 99389e0f4d2SKip Macy struct netif_extra_info *extras = rinfo.extras; 99489e0f4d2SKip Macy RING_IDX i, rp; 99589e0f4d2SKip Macy multicall_entry_t *mcl; 99689e0f4d2SKip Macy struct mbuf *m; 99783b92f6eSKip Macy struct mbuf_head rxq, errq; 99849906218SDoug Rabson int err, pages_flipped = 0, work_to_do; 99989e0f4d2SKip Macy 100049906218SDoug Rabson do { 100189e0f4d2SKip Macy XN_RX_LOCK_ASSERT(np); 100289e0f4d2SKip Macy if (!netfront_carrier_ok(np)) 100389e0f4d2SKip Macy return; 100489e0f4d2SKip Macy 100589e0f4d2SKip Macy mbufq_init(&errq); 100689e0f4d2SKip Macy mbufq_init(&rxq); 100789e0f4d2SKip Macy 100889e0f4d2SKip Macy ifp = np->xn_ifp; 100989e0f4d2SKip Macy 101089e0f4d2SKip Macy rp = np->rx.sring->rsp_prod; 101189e0f4d2SKip Macy rmb(); /* Ensure we see queued responses up to 'rp'. */ 101289e0f4d2SKip Macy 101389e0f4d2SKip Macy i = np->rx.rsp_cons; 101489e0f4d2SKip Macy while ((i != rp)) { 101589e0f4d2SKip Macy memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); 101689e0f4d2SKip Macy memset(extras, 0, sizeof(rinfo.extras)); 101789e0f4d2SKip Macy 101883b92f6eSKip Macy m = NULL; 1019931eeffaSKenneth D. Merry err = xennet_get_responses(np, &rinfo, rp, &i, &m, 102089e0f4d2SKip Macy &pages_flipped); 102189e0f4d2SKip Macy 102289e0f4d2SKip Macy if (unlikely(err)) { 102383b92f6eSKip Macy if (m) 102489e0f4d2SKip Macy mbufq_tail(&errq, m); 102589e0f4d2SKip Macy np->stats.rx_errors++; 102689e0f4d2SKip Macy continue; 102789e0f4d2SKip Macy } 102889e0f4d2SKip Macy 102989e0f4d2SKip Macy m->m_pkthdr.rcvif = ifp; 103089e0f4d2SKip Macy if ( rx->flags & NETRXF_data_validated ) { 103189e0f4d2SKip Macy /* Tell the stack the checksums are okay */ 103289e0f4d2SKip Macy /* 103389e0f4d2SKip Macy * XXX this isn't necessarily the case - need to add 103489e0f4d2SKip Macy * check 103589e0f4d2SKip Macy */ 103689e0f4d2SKip Macy 103789e0f4d2SKip Macy m->m_pkthdr.csum_flags |= 103889e0f4d2SKip Macy (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID 103989e0f4d2SKip Macy | CSUM_PSEUDO_HDR); 104089e0f4d2SKip Macy m->m_pkthdr.csum_data = 0xffff; 104189e0f4d2SKip Macy } 104289e0f4d2SKip Macy 104389e0f4d2SKip Macy np->stats.rx_packets++; 104483b92f6eSKip Macy np->stats.rx_bytes += m->m_pkthdr.len; 104589e0f4d2SKip Macy 104689e0f4d2SKip Macy mbufq_tail(&rxq, m); 1047931eeffaSKenneth D. Merry np->rx.rsp_cons = i; 104889e0f4d2SKip Macy } 104989e0f4d2SKip Macy 105089e0f4d2SKip Macy if (pages_flipped) { 105189e0f4d2SKip Macy /* Some pages are no longer absent... */ 105289e0f4d2SKip Macy #ifdef notyet 105389e0f4d2SKip Macy balloon_update_driver_allowance(-pages_flipped); 105489e0f4d2SKip Macy #endif 105589e0f4d2SKip Macy /* Do all the remapping work, and M->P updates, in one big 105689e0f4d2SKip Macy * hypercall. 
105789e0f4d2SKip Macy */ 105889e0f4d2SKip Macy if (!!xen_feature(XENFEAT_auto_translated_physmap)) { 105989e0f4d2SKip Macy mcl = np->rx_mcl + pages_flipped; 106089e0f4d2SKip Macy mcl->op = __HYPERVISOR_mmu_update; 106189e0f4d2SKip Macy mcl->args[0] = (u_long)np->rx_mmu; 106289e0f4d2SKip Macy mcl->args[1] = pages_flipped; 106389e0f4d2SKip Macy mcl->args[2] = 0; 106489e0f4d2SKip Macy mcl->args[3] = DOMID_SELF; 106589e0f4d2SKip Macy (void)HYPERVISOR_multicall(np->rx_mcl, 106689e0f4d2SKip Macy pages_flipped + 1); 106789e0f4d2SKip Macy } 106889e0f4d2SKip Macy } 106989e0f4d2SKip Macy 107089e0f4d2SKip Macy while ((m = mbufq_dequeue(&errq))) 107189e0f4d2SKip Macy m_freem(m); 107289e0f4d2SKip Macy 107389e0f4d2SKip Macy /* 107489e0f4d2SKip Macy * Process all the mbufs after the remapping is complete. 107589e0f4d2SKip Macy * Break the mbuf chain first though. 107689e0f4d2SKip Macy */ 107789e0f4d2SKip Macy while ((m = mbufq_dequeue(&rxq)) != NULL) { 107889e0f4d2SKip Macy ifp->if_ipackets++; 107989e0f4d2SKip Macy 108089e0f4d2SKip Macy /* 108189e0f4d2SKip Macy * Do we really need to drop the rx lock? 108289e0f4d2SKip Macy */ 108389e0f4d2SKip Macy XN_RX_UNLOCK(np); 108412678024SDoug Rabson #if __FreeBSD_version >= 700000 108512678024SDoug Rabson /* Use LRO if possible */ 108612678024SDoug Rabson if ((ifp->if_capenable & IFCAP_LRO) == 0 || 108712678024SDoug Rabson lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) { 108812678024SDoug Rabson /* 108912678024SDoug Rabson * If LRO fails, pass up to the stack 109012678024SDoug Rabson * directly. 109112678024SDoug Rabson */ 109289e0f4d2SKip Macy (*ifp->if_input)(ifp, m); 109312678024SDoug Rabson } 109412678024SDoug Rabson #else 109512678024SDoug Rabson (*ifp->if_input)(ifp, m); 109612678024SDoug Rabson #endif 109789e0f4d2SKip Macy XN_RX_LOCK(np); 109889e0f4d2SKip Macy } 109989e0f4d2SKip Macy 110089e0f4d2SKip Macy np->rx.rsp_cons = i; 110189e0f4d2SKip Macy 110212678024SDoug Rabson #if __FreeBSD_version >= 700000 110312678024SDoug Rabson /* 110412678024SDoug Rabson * Flush any outstanding LRO work 110512678024SDoug Rabson */ 110612678024SDoug Rabson while (!SLIST_EMPTY(&lro->lro_active)) { 110712678024SDoug Rabson queued = SLIST_FIRST(&lro->lro_active); 110812678024SDoug Rabson SLIST_REMOVE_HEAD(&lro->lro_active, next); 110912678024SDoug Rabson tcp_lro_flush(lro, queued); 111012678024SDoug Rabson } 111112678024SDoug Rabson #endif 111212678024SDoug Rabson 111389e0f4d2SKip Macy #if 0 111489e0f4d2SKip Macy /* If we get a callback with very few responses, reduce fill target. */ 111589e0f4d2SKip Macy /* NB. Note exponential increase, linear decrease. 
*/ 111689e0f4d2SKip Macy if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > 111789e0f4d2SKip Macy ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) 111889e0f4d2SKip Macy np->rx_target = np->rx_min_target; 111989e0f4d2SKip Macy #endif 112089e0f4d2SKip Macy 112189e0f4d2SKip Macy network_alloc_rx_buffers(np); 112289e0f4d2SKip Macy 112349906218SDoug Rabson RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do); 112449906218SDoug Rabson } while (work_to_do); 112589e0f4d2SKip Macy } 112689e0f4d2SKip Macy 112789e0f4d2SKip Macy static void 112889e0f4d2SKip Macy xn_txeof(struct netfront_info *np) 112989e0f4d2SKip Macy { 113089e0f4d2SKip Macy RING_IDX i, prod; 113189e0f4d2SKip Macy unsigned short id; 113289e0f4d2SKip Macy struct ifnet *ifp; 113312678024SDoug Rabson netif_tx_response_t *txr; 113489e0f4d2SKip Macy struct mbuf *m; 113589e0f4d2SKip Macy 113689e0f4d2SKip Macy XN_TX_LOCK_ASSERT(np); 113789e0f4d2SKip Macy 113889e0f4d2SKip Macy if (!netfront_carrier_ok(np)) 113989e0f4d2SKip Macy return; 114089e0f4d2SKip Macy 114189e0f4d2SKip Macy ifp = np->xn_ifp; 114289e0f4d2SKip Macy 114389e0f4d2SKip Macy do { 114489e0f4d2SKip Macy prod = np->tx.sring->rsp_prod; 114589e0f4d2SKip Macy rmb(); /* Ensure we see responses up to 'rp'. */ 114689e0f4d2SKip Macy 114789e0f4d2SKip Macy for (i = np->tx.rsp_cons; i != prod; i++) { 114812678024SDoug Rabson txr = RING_GET_RESPONSE(&np->tx, i); 114912678024SDoug Rabson if (txr->status == NETIF_RSP_NULL) 115012678024SDoug Rabson continue; 115112678024SDoug Rabson 1152931eeffaSKenneth D. Merry if (txr->status != NETIF_RSP_OKAY) { 1153931eeffaSKenneth D. Merry printf("%s: WARNING: response is %d!\n", 1154931eeffaSKenneth D. Merry __func__, txr->status); 1155931eeffaSKenneth D. Merry } 115612678024SDoug Rabson id = txr->id; 1157931eeffaSKenneth D. Merry m = np->tx_mbufs[id]; 11582d8fae98SAdrian Chadd KASSERT(m != NULL, ("mbuf not found in xn_tx_chain")); 1159931eeffaSKenneth D. Merry KASSERT((uintptr_t)m > NET_TX_RING_SIZE, 1160931eeffaSKenneth D. Merry ("mbuf already on the free list, but we're " 1161931eeffaSKenneth D. Merry "trying to free it again!")); 11622d8fae98SAdrian Chadd M_ASSERTVALID(m); 116389e0f4d2SKip Macy 116412678024SDoug Rabson /* 116512678024SDoug Rabson * Increment packet count if this is the last 116612678024SDoug Rabson * mbuf of the chain. 116712678024SDoug Rabson */ 116812678024SDoug Rabson if (!m->m_next) 116989e0f4d2SKip Macy ifp->if_opackets++; 117089e0f4d2SKip Macy if (unlikely(gnttab_query_foreign_access( 117189e0f4d2SKip Macy np->grant_tx_ref[id]) != 0)) { 1172931eeffaSKenneth D. Merry panic("grant id %u still in use by the backend", 1173931eeffaSKenneth D. Merry id); 117489e0f4d2SKip Macy } 117589e0f4d2SKip Macy gnttab_end_foreign_access_ref( 1176920ba15bSKip Macy np->grant_tx_ref[id]); 117789e0f4d2SKip Macy gnttab_release_grant_reference( 117889e0f4d2SKip Macy &np->gref_tx_head, np->grant_tx_ref[id]); 1179ff662b5cSJustin T. Gibbs np->grant_tx_ref[id] = GRANT_REF_INVALID; 118089e0f4d2SKip Macy 1181931eeffaSKenneth D. Merry np->tx_mbufs[id] = NULL; 1182931eeffaSKenneth D. 
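			/*
			 * The slot is idle again: thread its index back onto the
			 * embedded free list and drop the outstanding chain count
			 * before freeing the mbuf itself.
			 */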
Merry add_id_to_freelist(np->tx_mbufs, id); 1183a4ec37f5SAdrian Chadd np->xn_cdata.xn_tx_chain_cnt--; 118412678024SDoug Rabson m_free(m); 1185d76e4550SAdrian Chadd /* Only mark the queue active if we've freed up at least one slot to try */ 1186d76e4550SAdrian Chadd ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 118789e0f4d2SKip Macy } 118889e0f4d2SKip Macy np->tx.rsp_cons = prod; 118989e0f4d2SKip Macy 119089e0f4d2SKip Macy /* 119189e0f4d2SKip Macy * Set a new event, then check for race with update of 119289e0f4d2SKip Macy * tx_cons. Note that it is essential to schedule a 119389e0f4d2SKip Macy * callback, no matter how few buffers are pending. Even if 119489e0f4d2SKip Macy * there is space in the transmit ring, higher layers may 119589e0f4d2SKip Macy * be blocked because too much data is outstanding: in such 119689e0f4d2SKip Macy * cases notification from Xen is likely to be the only kick 119789e0f4d2SKip Macy * that we'll get. 119889e0f4d2SKip Macy */ 119989e0f4d2SKip Macy np->tx.sring->rsp_event = 120089e0f4d2SKip Macy prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; 120189e0f4d2SKip Macy 120289e0f4d2SKip Macy mb(); 120389e0f4d2SKip Macy } while (prod != np->tx.sring->rsp_prod); 120489e0f4d2SKip Macy 120589e0f4d2SKip Macy if (np->tx_full && 120689e0f4d2SKip Macy ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) { 120789e0f4d2SKip Macy np->tx_full = 0; 120889e0f4d2SKip Macy #if 0 120989e0f4d2SKip Macy if (np->user_state == UST_OPEN) 121089e0f4d2SKip Macy netif_wake_queue(dev); 121189e0f4d2SKip Macy #endif 121289e0f4d2SKip Macy } 121389e0f4d2SKip Macy 121489e0f4d2SKip Macy } 121589e0f4d2SKip Macy 121689e0f4d2SKip Macy static void 121789e0f4d2SKip Macy xn_intr(void *xsc) 121889e0f4d2SKip Macy { 121989e0f4d2SKip Macy struct netfront_info *np = xsc; 122089e0f4d2SKip Macy struct ifnet *ifp = np->xn_ifp; 122189e0f4d2SKip Macy 122289e0f4d2SKip Macy #if 0 122389e0f4d2SKip Macy if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod && 122489e0f4d2SKip Macy likely(netfront_carrier_ok(np)) && 122589e0f4d2SKip Macy ifp->if_drv_flags & IFF_DRV_RUNNING)) 122689e0f4d2SKip Macy return; 122789e0f4d2SKip Macy #endif 1228931eeffaSKenneth D. Merry if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) { 122989e0f4d2SKip Macy XN_TX_LOCK(np); 123089e0f4d2SKip Macy xn_txeof(np); 123189e0f4d2SKip Macy XN_TX_UNLOCK(np); 123289e0f4d2SKip Macy } 123389e0f4d2SKip Macy 123489e0f4d2SKip Macy XN_RX_LOCK(np); 123589e0f4d2SKip Macy xn_rxeof(np); 123689e0f4d2SKip Macy XN_RX_UNLOCK(np); 123789e0f4d2SKip Macy 123889e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING && 123989e0f4d2SKip Macy !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 124089e0f4d2SKip Macy xn_start(ifp); 124189e0f4d2SKip Macy } 124289e0f4d2SKip Macy 124389e0f4d2SKip Macy 124489e0f4d2SKip Macy static void 124589e0f4d2SKip Macy xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m, 124689e0f4d2SKip Macy grant_ref_t ref) 124789e0f4d2SKip Macy { 124889e0f4d2SKip Macy int new = xennet_rxidx(np->rx.req_prod_pvt); 124989e0f4d2SKip Macy 125089e0f4d2SKip Macy KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL")); 125189e0f4d2SKip Macy np->rx_mbufs[new] = m; 125289e0f4d2SKip Macy np->grant_rx_ref[new] = ref; 125389e0f4d2SKip Macy RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; 125489e0f4d2SKip Macy RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; 125589e0f4d2SKip Macy np->rx.req_prod_pvt++; 125689e0f4d2SKip Macy } 125789e0f4d2SKip Macy 125889e0f4d2SKip Macy static int 125989e0f4d2SKip Macy xennet_get_extras(struct netfront_info *np, 1260931eeffaSKenneth D. 
Merry struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons) 126189e0f4d2SKip Macy { 126289e0f4d2SKip Macy struct netif_extra_info *extra; 126389e0f4d2SKip Macy 126489e0f4d2SKip Macy int err = 0; 126589e0f4d2SKip Macy 126689e0f4d2SKip Macy do { 126789e0f4d2SKip Macy struct mbuf *m; 126889e0f4d2SKip Macy grant_ref_t ref; 126989e0f4d2SKip Macy 1270931eeffaSKenneth D. Merry if (unlikely(*cons + 1 == rp)) { 127189e0f4d2SKip Macy #if 0 127289e0f4d2SKip Macy if (net_ratelimit()) 127389e0f4d2SKip Macy WPRINTK("Missing extra info\n"); 127489e0f4d2SKip Macy #endif 1275931eeffaSKenneth D. Merry err = EINVAL; 127689e0f4d2SKip Macy break; 127789e0f4d2SKip Macy } 127889e0f4d2SKip Macy 127989e0f4d2SKip Macy extra = (struct netif_extra_info *) 1280931eeffaSKenneth D. Merry RING_GET_RESPONSE(&np->rx, ++(*cons)); 128189e0f4d2SKip Macy 128289e0f4d2SKip Macy if (unlikely(!extra->type || 128389e0f4d2SKip Macy extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 128489e0f4d2SKip Macy #if 0 128589e0f4d2SKip Macy if (net_ratelimit()) 128689e0f4d2SKip Macy WPRINTK("Invalid extra type: %d\n", 128789e0f4d2SKip Macy extra->type); 128889e0f4d2SKip Macy #endif 1289931eeffaSKenneth D. Merry err = EINVAL; 129089e0f4d2SKip Macy } else { 129189e0f4d2SKip Macy memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); 129289e0f4d2SKip Macy } 129389e0f4d2SKip Macy 1294931eeffaSKenneth D. Merry m = xennet_get_rx_mbuf(np, *cons); 1295931eeffaSKenneth D. Merry ref = xennet_get_rx_ref(np, *cons); 129689e0f4d2SKip Macy xennet_move_rx_slot(np, m, ref); 129789e0f4d2SKip Macy } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); 129889e0f4d2SKip Macy 129989e0f4d2SKip Macy return err; 130089e0f4d2SKip Macy } 130189e0f4d2SKip Macy 130289e0f4d2SKip Macy static int 130389e0f4d2SKip Macy xennet_get_responses(struct netfront_info *np, 1304931eeffaSKenneth D. Merry struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons, 130583b92f6eSKip Macy struct mbuf **list, 130689e0f4d2SKip Macy int *pages_flipped_p) 130789e0f4d2SKip Macy { 130889e0f4d2SKip Macy int pages_flipped = *pages_flipped_p; 130989e0f4d2SKip Macy struct mmu_update *mmu; 131089e0f4d2SKip Macy struct multicall_entry *mcl; 131189e0f4d2SKip Macy struct netif_rx_response *rx = &rinfo->rx; 131289e0f4d2SKip Macy struct netif_extra_info *extras = rinfo->extras; 131383b92f6eSKip Macy struct mbuf *m, *m0, *m_prev; 1314931eeffaSKenneth D. Merry grant_ref_t ref = xennet_get_rx_ref(np, *cons); 1315931eeffaSKenneth D. Merry RING_IDX ref_cons = *cons; 131689e0f4d2SKip Macy int frags = 1; 131789e0f4d2SKip Macy int err = 0; 131889e0f4d2SKip Macy u_long ret; 131989e0f4d2SKip Macy 1320931eeffaSKenneth D. Merry m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons); 132183b92f6eSKip Macy 132283b92f6eSKip Macy 132389e0f4d2SKip Macy if (rx->flags & NETRXF_extra_info) { 1324931eeffaSKenneth D. Merry err = xennet_get_extras(np, extras, rp, cons); 132589e0f4d2SKip Macy } 132689e0f4d2SKip Macy 132783b92f6eSKip Macy 132883b92f6eSKip Macy if (m0 != NULL) { 132983b92f6eSKip Macy m0->m_pkthdr.len = 0; 133083b92f6eSKip Macy m0->m_next = NULL; 133183b92f6eSKip Macy } 133283b92f6eSKip Macy 133389e0f4d2SKip Macy for (;;) { 133489e0f4d2SKip Macy u_long mfn; 133589e0f4d2SKip Macy 133683b92f6eSKip Macy #if 0 1337227ca257SKip Macy DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n", 133883b92f6eSKip Macy rx->status, rx->offset, frags); 133983b92f6eSKip Macy #endif 134089e0f4d2SKip Macy if (unlikely(rx->status < 0 || 134189e0f4d2SKip Macy rx->offset + rx->status > PAGE_SIZE)) { 1342931eeffaSKenneth D. 
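			/*
			 * For a valid response, rx->status is the length of the
			 * data the backend placed in this buffer; a negative
			 * value is an error code.  Either way, an offset/length
			 * pair that does not fit in one page cannot be trusted,
			 * so recycle the slot and drop the frame.
			 */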
Merry 134389e0f4d2SKip Macy #if 0 134489e0f4d2SKip Macy if (net_ratelimit()) 134589e0f4d2SKip Macy WPRINTK("rx->offset: %x, size: %u\n", 134689e0f4d2SKip Macy rx->offset, rx->status); 134789e0f4d2SKip Macy #endif 134889e0f4d2SKip Macy xennet_move_rx_slot(np, m, ref); 1349931eeffaSKenneth D. Merry if (m0 == m) 1350931eeffaSKenneth D. Merry m0 = NULL; 1351931eeffaSKenneth D. Merry m = NULL; 1352931eeffaSKenneth D. Merry err = EINVAL; 1353931eeffaSKenneth D. Merry goto next_skip_queue; 135489e0f4d2SKip Macy } 135589e0f4d2SKip Macy 135689e0f4d2SKip Macy /* 135789e0f4d2SKip Macy * This definitely indicates a bug, either in this driver or in 135889e0f4d2SKip Macy * the backend driver. In future this should flag the bad 135989e0f4d2SKip Macy * situation to the system controller to reboot the backed. 136089e0f4d2SKip Macy */ 1361ff662b5cSJustin T. Gibbs if (ref == GRANT_REF_INVALID) { 1362931eeffaSKenneth D. Merry 136389e0f4d2SKip Macy #if 0 136489e0f4d2SKip Macy if (net_ratelimit()) 136589e0f4d2SKip Macy WPRINTK("Bad rx response id %d.\n", rx->id); 136689e0f4d2SKip Macy #endif 1367ff662b5cSJustin T. Gibbs printf("%s: Bad rx response id %d.\n", __func__,rx->id); 1368931eeffaSKenneth D. Merry err = EINVAL; 136989e0f4d2SKip Macy goto next; 137089e0f4d2SKip Macy } 137189e0f4d2SKip Macy 137289e0f4d2SKip Macy if (!np->copying_receiver) { 137389e0f4d2SKip Macy /* Memory pressure, insufficient buffer 137489e0f4d2SKip Macy * headroom, ... 137589e0f4d2SKip Macy */ 137689e0f4d2SKip Macy if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { 1377931eeffaSKenneth D. Merry WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n", 137889e0f4d2SKip Macy rx->id, rx->status); 137989e0f4d2SKip Macy xennet_move_rx_slot(np, m, ref); 1380931eeffaSKenneth D. Merry err = ENOMEM; 138189e0f4d2SKip Macy goto next; 138289e0f4d2SKip Macy } 138389e0f4d2SKip Macy 138489e0f4d2SKip Macy if (!xen_feature( XENFEAT_auto_translated_physmap)) { 138589e0f4d2SKip Macy /* Remap the page. */ 138689e0f4d2SKip Macy void *vaddr = mtod(m, void *); 138789e0f4d2SKip Macy uint32_t pfn; 138889e0f4d2SKip Macy 138989e0f4d2SKip Macy mcl = np->rx_mcl + pages_flipped; 139089e0f4d2SKip Macy mmu = np->rx_mmu + pages_flipped; 139189e0f4d2SKip Macy 139289e0f4d2SKip Macy MULTI_update_va_mapping(mcl, (u_long)vaddr, 13936ae0e31bSKip Macy (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW | 139489e0f4d2SKip Macy PG_V | PG_M | PG_A, 0); 13953a6d1fcfSKip Macy pfn = (uintptr_t)m->m_ext.ext_arg1; 139689e0f4d2SKip Macy mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) | 139789e0f4d2SKip Macy MMU_MACHPHYS_UPDATE; 139889e0f4d2SKip Macy mmu->val = pfn; 139989e0f4d2SKip Macy 140089e0f4d2SKip Macy set_phys_to_machine(pfn, mfn); 140189e0f4d2SKip Macy } 140289e0f4d2SKip Macy pages_flipped++; 140389e0f4d2SKip Macy } else { 1404920ba15bSKip Macy ret = gnttab_end_foreign_access_ref(ref); 140589e0f4d2SKip Macy KASSERT(ret, ("ret != 0")); 140689e0f4d2SKip Macy } 140789e0f4d2SKip Macy 140889e0f4d2SKip Macy gnttab_release_grant_reference(&np->gref_rx_head, ref); 140989e0f4d2SKip Macy 141089e0f4d2SKip Macy next: 14113a539122SAdrian Chadd if (m == NULL) 14123a539122SAdrian Chadd break; 14133a539122SAdrian Chadd 141483b92f6eSKip Macy m->m_len = rx->status; 141583b92f6eSKip Macy m->m_data += rx->offset; 141683b92f6eSKip Macy m0->m_pkthdr.len += rx->status; 141783b92f6eSKip Macy 1418931eeffaSKenneth D. Merry next_skip_queue: 141989e0f4d2SKip Macy if (!(rx->flags & NETRXF_more_data)) 142089e0f4d2SKip Macy break; 142189e0f4d2SKip Macy 1422931eeffaSKenneth D. 
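		/*
		 * NETRXF_more_data means the packet continues in the next ring
		 * slot, so before chaining another fragment make sure the
		 * backend has actually produced a response for it.
		 */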
Merry if (*cons + frags == rp) { 142389e0f4d2SKip Macy if (net_ratelimit()) 142489e0f4d2SKip Macy WPRINTK("Need more frags\n"); 1425931eeffaSKenneth D. Merry err = ENOENT; 1426931eeffaSKenneth D. Merry printf("%s: cons %u frags %u rp %u, not enough frags\n", 1427931eeffaSKenneth D. Merry __func__, *cons, frags, rp); 142889e0f4d2SKip Macy break; 142989e0f4d2SKip Macy } 1430931eeffaSKenneth D. Merry /* 1431931eeffaSKenneth D. Merry * Note that m can be NULL, if rx->status < 0 or if 1432931eeffaSKenneth D. Merry * rx->offset + rx->status > PAGE_SIZE above. 1433931eeffaSKenneth D. Merry */ 143483b92f6eSKip Macy m_prev = m; 143589e0f4d2SKip Macy 1436931eeffaSKenneth D. Merry rx = RING_GET_RESPONSE(&np->rx, *cons + frags); 1437931eeffaSKenneth D. Merry m = xennet_get_rx_mbuf(np, *cons + frags); 143883b92f6eSKip Macy 1439931eeffaSKenneth D. Merry /* 1440931eeffaSKenneth D. Merry * m_prev == NULL can happen if rx->status < 0 or if 1441931eeffaSKenneth D. Merry * rx->offset + * rx->status > PAGE_SIZE above. 1442931eeffaSKenneth D. Merry */ 1443931eeffaSKenneth D. Merry if (m_prev != NULL) 144483b92f6eSKip Macy m_prev->m_next = m; 1445931eeffaSKenneth D. Merry 1446931eeffaSKenneth D. Merry /* 1447931eeffaSKenneth D. Merry * m0 can be NULL if rx->status < 0 or if * rx->offset + 1448931eeffaSKenneth D. Merry * rx->status > PAGE_SIZE above. 1449931eeffaSKenneth D. Merry */ 1450931eeffaSKenneth D. Merry if (m0 == NULL) 1451931eeffaSKenneth D. Merry m0 = m; 145283b92f6eSKip Macy m->m_next = NULL; 1453931eeffaSKenneth D. Merry ref = xennet_get_rx_ref(np, *cons + frags); 1454931eeffaSKenneth D. Merry ref_cons = *cons + frags; 145589e0f4d2SKip Macy frags++; 145689e0f4d2SKip Macy } 145783b92f6eSKip Macy *list = m0; 1458931eeffaSKenneth D. Merry *cons += frags; 145989e0f4d2SKip Macy *pages_flipped_p = pages_flipped; 146089e0f4d2SKip Macy 14618577146eSJustin T. Gibbs return (err); 146289e0f4d2SKip Macy } 146389e0f4d2SKip Macy 146489e0f4d2SKip Macy static void 146589e0f4d2SKip Macy xn_tick_locked(struct netfront_info *sc) 146689e0f4d2SKip Macy { 146789e0f4d2SKip Macy XN_RX_LOCK_ASSERT(sc); 146889e0f4d2SKip Macy callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); 146989e0f4d2SKip Macy 147089e0f4d2SKip Macy /* XXX placeholder for printing debug information */ 147189e0f4d2SKip Macy 147289e0f4d2SKip Macy } 147389e0f4d2SKip Macy 147489e0f4d2SKip Macy 147589e0f4d2SKip Macy static void 147689e0f4d2SKip Macy xn_tick(void *xsc) 147789e0f4d2SKip Macy { 147889e0f4d2SKip Macy struct netfront_info *sc; 147989e0f4d2SKip Macy 148089e0f4d2SKip Macy sc = xsc; 148189e0f4d2SKip Macy XN_RX_LOCK(sc); 148289e0f4d2SKip Macy xn_tick_locked(sc); 148389e0f4d2SKip Macy XN_RX_UNLOCK(sc); 148489e0f4d2SKip Macy 148589e0f4d2SKip Macy } 148689e0f4d2SKip Macy 1487931eeffaSKenneth D. Merry /** 1488931eeffaSKenneth D. Merry * \brief Count the number of fragments in an mbuf chain. 1489931eeffaSKenneth D. Merry * 1490931eeffaSKenneth D. Merry * Surprisingly, there isn't an M* macro for this. 1491c099cafaSAdrian Chadd */ 1492931eeffaSKenneth D. Merry static inline int 1493931eeffaSKenneth D. Merry xn_count_frags(struct mbuf *m) 1494931eeffaSKenneth D. Merry { 1495931eeffaSKenneth D. Merry int nfrags; 1496931eeffaSKenneth D. Merry 1497931eeffaSKenneth D. Merry for (nfrags = 0; m != NULL; m = m->m_next) 1498931eeffaSKenneth D. Merry nfrags++; 1499931eeffaSKenneth D. Merry 1500931eeffaSKenneth D. Merry return (nfrags); 150189e0f4d2SKip Macy } 150289e0f4d2SKip Macy 1503931eeffaSKenneth D. Merry /** 1504931eeffaSKenneth D. 
Merry * Given an mbuf chain, make sure we have enough room and then push 1505931eeffaSKenneth D. Merry * it onto the transmit ring. 1506931eeffaSKenneth D. Merry */ 1507931eeffaSKenneth D. Merry static int 1508931eeffaSKenneth D. Merry xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head) 1509931eeffaSKenneth D. Merry { 1510931eeffaSKenneth D. Merry struct ifnet *ifp; 1511931eeffaSKenneth D. Merry struct mbuf *m; 1512931eeffaSKenneth D. Merry u_int nfrags; 1513931eeffaSKenneth D. Merry netif_extra_info_t *extra; 1514931eeffaSKenneth D. Merry int otherend_id; 1515931eeffaSKenneth D. Merry 1516931eeffaSKenneth D. Merry ifp = sc->xn_ifp; 1517931eeffaSKenneth D. Merry 1518931eeffaSKenneth D. Merry /** 151912678024SDoug Rabson * Defragment the mbuf if necessary. 152012678024SDoug Rabson */ 1521931eeffaSKenneth D. Merry nfrags = xn_count_frags(m_head); 1522931eeffaSKenneth D. Merry 1523931eeffaSKenneth D. Merry /* 1524931eeffaSKenneth D. Merry * Check to see whether this request is longer than netback 1525931eeffaSKenneth D. Merry * can handle, and try to defrag it. 1526931eeffaSKenneth D. Merry */ 1527931eeffaSKenneth D. Merry /** 1528931eeffaSKenneth D. Merry * It is a bit lame, but the netback driver in Linux can't 1529931eeffaSKenneth D. Merry * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of 1530931eeffaSKenneth D. Merry * the Linux network stack. 1531931eeffaSKenneth D. Merry */ 1532*578e4bf7SJustin T. Gibbs if (nfrags > sc->maxfrags) { 153312678024SDoug Rabson m = m_defrag(m_head, M_DONTWAIT); 153412678024SDoug Rabson if (!m) { 1535931eeffaSKenneth D. Merry /* 1536931eeffaSKenneth D. Merry * Defrag failed, so free the mbuf and 1537931eeffaSKenneth D. Merry * therefore drop the packet. 1538931eeffaSKenneth D. Merry */ 153912678024SDoug Rabson m_freem(m_head); 1540931eeffaSKenneth D. Merry return (EMSGSIZE); 154112678024SDoug Rabson } 154212678024SDoug Rabson m_head = m; 154312678024SDoug Rabson } 154489e0f4d2SKip Macy 1545a4ec37f5SAdrian Chadd /* Determine how many fragments now exist */ 1546931eeffaSKenneth D. Merry nfrags = xn_count_frags(m_head); 1547a4ec37f5SAdrian Chadd 1548a4ec37f5SAdrian Chadd /* 1549931eeffaSKenneth D. Merry * Check to see whether the defragmented packet has too many 1550931eeffaSKenneth D. Merry * segments for the Linux netback driver. 1551a4ec37f5SAdrian Chadd */ 1552931eeffaSKenneth D. Merry /** 1553931eeffaSKenneth D. Merry * The FreeBSD TCP stack, with TSO enabled, can produce a chain 1554931eeffaSKenneth D. Merry * of mbufs longer than Linux can handle. Make sure we don't 1555931eeffaSKenneth D. Merry * pass a too-long chain over to the other side by dropping the 1556931eeffaSKenneth D. Merry * packet. It doesn't look like there is currently a way to 1557931eeffaSKenneth D. Merry * tell the TCP stack to generate a shorter chain of packets. 15583fb28bbbSAdrian Chadd */ 1559931eeffaSKenneth D. Merry if (nfrags > MAX_TX_REQ_FRAGS) { 1560ff662b5cSJustin T. Gibbs #ifdef DEBUG 1561ff662b5cSJustin T. Gibbs printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback " 1562ff662b5cSJustin T. Gibbs "won't be able to handle it, dropping\n", 1563ff662b5cSJustin T. Gibbs __func__, nfrags, MAX_TX_REQ_FRAGS); 1564ff662b5cSJustin T. Gibbs #endif 1565931eeffaSKenneth D. Merry m_freem(m_head); 1566931eeffaSKenneth D. Merry return (EMSGSIZE); 1567a4ec37f5SAdrian Chadd } 1568a4ec37f5SAdrian Chadd 15693fb28bbbSAdrian Chadd /* 1570931eeffaSKenneth D. Merry * This check should be redundant. We've already verified that we 1571931eeffaSKenneth D. 
Merry * have enough slots in the ring to handle a packet of maximum 1572931eeffaSKenneth D. Merry * size, and that our packet is less than the maximum size. Keep 1573931eeffaSKenneth D. Merry * it in here as an assert for now just to make certain that 1574931eeffaSKenneth D. Merry * xn_tx_chain_cnt is accurate. 15753fb28bbbSAdrian Chadd */ 1576931eeffaSKenneth D. Merry KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE, 1577931eeffaSKenneth D. Merry ("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE " 1578931eeffaSKenneth D. Merry "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt, 1579931eeffaSKenneth D. Merry (int) nfrags, (int) NET_TX_RING_SIZE)); 1580a4ec37f5SAdrian Chadd 158189e0f4d2SKip Macy /* 158289e0f4d2SKip Macy * Start packing the mbufs in this chain into 158389e0f4d2SKip Macy * the fragment pointers. Stop when we run out 158489e0f4d2SKip Macy * of fragments or hit the end of the mbuf chain. 158589e0f4d2SKip Macy */ 158612678024SDoug Rabson m = m_head; 158712678024SDoug Rabson extra = NULL; 1588931eeffaSKenneth D. Merry otherend_id = xenbus_get_otherend_id(sc->xbdev); 158912678024SDoug Rabson for (m = m_head; m; m = m->m_next) { 1590931eeffaSKenneth D. Merry netif_tx_request_t *tx; 1591931eeffaSKenneth D. Merry uintptr_t id; 1592931eeffaSKenneth D. Merry grant_ref_t ref; 1593931eeffaSKenneth D. Merry u_long mfn; /* XXX Wrong type? */ 1594931eeffaSKenneth D. Merry 1595931eeffaSKenneth D. Merry tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt); 1596931eeffaSKenneth D. Merry id = get_id_from_freelist(sc->tx_mbufs); 1597a4ec37f5SAdrian Chadd if (id == 0) 1598a4ec37f5SAdrian Chadd panic("xn_start_locked: was allocated the freelist head!\n"); 1599a4ec37f5SAdrian Chadd sc->xn_cdata.xn_tx_chain_cnt++; 1600931eeffaSKenneth D. Merry if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE) 1601931eeffaSKenneth D. Merry panic("xn_start_locked: tx_chain_cnt must be <= NET_TX_RING_SIZE\n"); 1602931eeffaSKenneth D. Merry sc->tx_mbufs[id] = m; 160389e0f4d2SKip Macy tx->id = id; 160489e0f4d2SKip Macy ref = gnttab_claim_grant_reference(&sc->gref_tx_head); 160589e0f4d2SKip Macy KASSERT((short)ref >= 0, ("Negative ref")); 160612678024SDoug Rabson mfn = virt_to_mfn(mtod(m, vm_offset_t)); 160723dc5621SKip Macy gnttab_grant_foreign_access_ref(ref, otherend_id, 160889e0f4d2SKip Macy mfn, GNTMAP_readonly); 160989e0f4d2SKip Macy tx->gref = sc->grant_tx_ref[id] = ref; 161012678024SDoug Rabson tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1); 161189e0f4d2SKip Macy tx->flags = 0; 161212678024SDoug Rabson if (m == m_head) { 161312678024SDoug Rabson /* 161412678024SDoug Rabson * The first fragment has the entire packet 161512678024SDoug Rabson * size, subsequent fragments have just the 161612678024SDoug Rabson * fragment size. The backend works out the 161712678024SDoug Rabson * true size of the first fragment by 161812678024SDoug Rabson * subtracting the sizes of the other 161912678024SDoug Rabson * fragments. 162012678024SDoug Rabson */ 162112678024SDoug Rabson tx->size = m->m_pkthdr.len; 162289e0f4d2SKip Macy 162312678024SDoug Rabson /* 1624931eeffaSKenneth D. Merry * The first fragment contains the checksum flags 1625931eeffaSKenneth D. Merry * and is optionally followed by extra data for 1626931eeffaSKenneth D. Merry * TSO etc. 1627931eeffaSKenneth D. Merry */ 1628931eeffaSKenneth D. Merry /** 1629931eeffaSKenneth D. Merry * CSUM_TSO requires checksum offloading. 1630931eeffaSKenneth D. Merry * Some versions of FreeBSD fail to 1631931eeffaSKenneth D. 
Merry * set CSUM_TCP in the CSUM_TSO case, 1632931eeffaSKenneth D. Merry * so we have to test for CSUM_TSO 1633931eeffaSKenneth D. Merry * explicitly. 163412678024SDoug Rabson */ 163512678024SDoug Rabson if (m->m_pkthdr.csum_flags 1636931eeffaSKenneth D. Merry & (CSUM_DELAY_DATA | CSUM_TSO)) { 163712678024SDoug Rabson tx->flags |= (NETTXF_csum_blank 163812678024SDoug Rabson | NETTXF_data_validated); 163912678024SDoug Rabson } 164012678024SDoug Rabson #if __FreeBSD_version >= 700000 164112678024SDoug Rabson if (m->m_pkthdr.csum_flags & CSUM_TSO) { 164212678024SDoug Rabson struct netif_extra_info *gso = 164312678024SDoug Rabson (struct netif_extra_info *) 1644931eeffaSKenneth D. Merry RING_GET_REQUEST(&sc->tx, 1645931eeffaSKenneth D. Merry ++sc->tx.req_prod_pvt); 164689e0f4d2SKip Macy 164712678024SDoug Rabson tx->flags |= NETTXF_extra_info; 164889e0f4d2SKip Macy 164912678024SDoug Rabson gso->u.gso.size = m->m_pkthdr.tso_segsz; 165012678024SDoug Rabson gso->u.gso.type = 165112678024SDoug Rabson XEN_NETIF_GSO_TYPE_TCPV4; 165212678024SDoug Rabson gso->u.gso.pad = 0; 165312678024SDoug Rabson gso->u.gso.features = 0; 165412678024SDoug Rabson 165512678024SDoug Rabson gso->type = XEN_NETIF_EXTRA_TYPE_GSO; 165612678024SDoug Rabson gso->flags = 0; 165712678024SDoug Rabson } 165812678024SDoug Rabson #endif 165912678024SDoug Rabson } else { 166012678024SDoug Rabson tx->size = m->m_len; 166112678024SDoug Rabson } 1662931eeffaSKenneth D. Merry if (m->m_next) 166312678024SDoug Rabson tx->flags |= NETTXF_more_data; 166412678024SDoug Rabson 1665931eeffaSKenneth D. Merry sc->tx.req_prod_pvt++; 1666931eeffaSKenneth D. Merry } 166712678024SDoug Rabson BPF_MTAP(ifp, m_head); 166812678024SDoug Rabson 166912678024SDoug Rabson sc->stats.tx_bytes += m_head->m_pkthdr.len; 167089e0f4d2SKip Macy sc->stats.tx_packets++; 1671931eeffaSKenneth D. Merry 1672931eeffaSKenneth D. Merry return (0); 167389e0f4d2SKip Macy } 167489e0f4d2SKip Macy 1675931eeffaSKenneth D. Merry static void 1676931eeffaSKenneth D. Merry xn_start_locked(struct ifnet *ifp) 1677931eeffaSKenneth D. Merry { 1678931eeffaSKenneth D. Merry struct netfront_info *sc; 1679931eeffaSKenneth D. Merry struct mbuf *m_head; 1680931eeffaSKenneth D. Merry int notify; 1681931eeffaSKenneth D. Merry 1682931eeffaSKenneth D. Merry sc = ifp->if_softc; 1683931eeffaSKenneth D. Merry 1684931eeffaSKenneth D. Merry if (!netfront_carrier_ok(sc)) 1685931eeffaSKenneth D. Merry return; 1686931eeffaSKenneth D. Merry 1687931eeffaSKenneth D. Merry /* 1688931eeffaSKenneth D. Merry * While we have enough transmit slots available for at least one 1689931eeffaSKenneth D. Merry * maximum-sized packet, pull mbufs off the queue and put them on 1690931eeffaSKenneth D. Merry * the transmit ring. 1691931eeffaSKenneth D. Merry */ 1692931eeffaSKenneth D. Merry while (xn_tx_slot_available(sc)) { 1693931eeffaSKenneth D. Merry IF_DEQUEUE(&ifp->if_snd, m_head); 1694931eeffaSKenneth D. Merry if (m_head == NULL) 1695931eeffaSKenneth D. Merry break; 1696931eeffaSKenneth D. Merry 1697931eeffaSKenneth D. Merry if (xn_assemble_tx_request(sc, m_head) != 0) 1698931eeffaSKenneth D. Merry break; 1699931eeffaSKenneth D. Merry } 1700931eeffaSKenneth D. 
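	/*
	 * Publish the queued requests to the backend.  The macro updates the
	 * shared ring's req_prod from req_prod_pvt and sets 'notify' only
	 * when the backend still needs an explicit event, so the
	 * event-channel kick below can often be skipped.
	 */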
Merry 170189e0f4d2SKip Macy RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify); 170289e0f4d2SKip Macy if (notify) 170389e0f4d2SKip Macy notify_remote_via_irq(sc->irq); 170489e0f4d2SKip Macy 170589e0f4d2SKip Macy if (RING_FULL(&sc->tx)) { 170689e0f4d2SKip Macy sc->tx_full = 1; 170789e0f4d2SKip Macy #if 0 170889e0f4d2SKip Macy netif_stop_queue(dev); 170989e0f4d2SKip Macy #endif 171089e0f4d2SKip Macy } 171189e0f4d2SKip Macy } 171289e0f4d2SKip Macy 1713931eeffaSKenneth D. Merry 171489e0f4d2SKip Macy static void 171589e0f4d2SKip Macy xn_start(struct ifnet *ifp) 171689e0f4d2SKip Macy { 171789e0f4d2SKip Macy struct netfront_info *sc; 171889e0f4d2SKip Macy sc = ifp->if_softc; 171989e0f4d2SKip Macy XN_TX_LOCK(sc); 172089e0f4d2SKip Macy xn_start_locked(ifp); 172189e0f4d2SKip Macy XN_TX_UNLOCK(sc); 172289e0f4d2SKip Macy } 172389e0f4d2SKip Macy 172489e0f4d2SKip Macy /* equivalent of network_open() in Linux */ 172589e0f4d2SKip Macy static void 172689e0f4d2SKip Macy xn_ifinit_locked(struct netfront_info *sc) 172789e0f4d2SKip Macy { 172889e0f4d2SKip Macy struct ifnet *ifp; 172989e0f4d2SKip Macy 173089e0f4d2SKip Macy XN_LOCK_ASSERT(sc); 173189e0f4d2SKip Macy 173289e0f4d2SKip Macy ifp = sc->xn_ifp; 173389e0f4d2SKip Macy 173489e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING) 173589e0f4d2SKip Macy return; 173689e0f4d2SKip Macy 173789e0f4d2SKip Macy xn_stop(sc); 173889e0f4d2SKip Macy 173989e0f4d2SKip Macy network_alloc_rx_buffers(sc); 174089e0f4d2SKip Macy sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1; 174189e0f4d2SKip Macy 174289e0f4d2SKip Macy ifp->if_drv_flags |= IFF_DRV_RUNNING; 174389e0f4d2SKip Macy ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 17440e509842SJustin T. Gibbs if_link_state_change(ifp, LINK_STATE_UP); 174589e0f4d2SKip Macy 174689e0f4d2SKip Macy callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); 174789e0f4d2SKip Macy 174889e0f4d2SKip Macy } 174989e0f4d2SKip Macy 175089e0f4d2SKip Macy 175189e0f4d2SKip Macy static void 175289e0f4d2SKip Macy xn_ifinit(void *xsc) 175389e0f4d2SKip Macy { 175489e0f4d2SKip Macy struct netfront_info *sc = xsc; 175589e0f4d2SKip Macy 175689e0f4d2SKip Macy XN_LOCK(sc); 175789e0f4d2SKip Macy xn_ifinit_locked(sc); 175889e0f4d2SKip Macy XN_UNLOCK(sc); 175989e0f4d2SKip Macy 176089e0f4d2SKip Macy } 176189e0f4d2SKip Macy 176289e0f4d2SKip Macy 176389e0f4d2SKip Macy static int 176489e0f4d2SKip Macy xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 176589e0f4d2SKip Macy { 176689e0f4d2SKip Macy struct netfront_info *sc = ifp->if_softc; 176789e0f4d2SKip Macy struct ifreq *ifr = (struct ifreq *) data; 1768a0ae8f04SBjoern A. Zeeb #ifdef INET 176989e0f4d2SKip Macy struct ifaddr *ifa = (struct ifaddr *)data; 1770a0ae8f04SBjoern A. Zeeb #endif 177189e0f4d2SKip Macy 177289e0f4d2SKip Macy int mask, error = 0; 177389e0f4d2SKip Macy switch(cmd) { 177489e0f4d2SKip Macy case SIOCSIFADDR: 177589e0f4d2SKip Macy case SIOCGIFADDR: 1776a0ae8f04SBjoern A. Zeeb #ifdef INET 177789e0f4d2SKip Macy XN_LOCK(sc); 177889e0f4d2SKip Macy if (ifa->ifa_addr->sa_family == AF_INET) { 177989e0f4d2SKip Macy ifp->if_flags |= IFF_UP; 178089e0f4d2SKip Macy if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 178189e0f4d2SKip Macy xn_ifinit_locked(sc); 178289e0f4d2SKip Macy arp_ifinit(ifp, ifa); 178389e0f4d2SKip Macy XN_UNLOCK(sc); 178449906218SDoug Rabson } else { 178549906218SDoug Rabson XN_UNLOCK(sc); 1786a0ae8f04SBjoern A. Zeeb #endif 178749906218SDoug Rabson error = ether_ioctl(ifp, cmd, data); 1788a0ae8f04SBjoern A. Zeeb #ifdef INET 178949906218SDoug Rabson } 1790a0ae8f04SBjoern A. 
Zeeb #endif 179189e0f4d2SKip Macy break; 179289e0f4d2SKip Macy case SIOCSIFMTU: 179389e0f4d2SKip Macy /* XXX can we alter the MTU on a VN ?*/ 179489e0f4d2SKip Macy #ifdef notyet 179589e0f4d2SKip Macy if (ifr->ifr_mtu > XN_JUMBO_MTU) 179689e0f4d2SKip Macy error = EINVAL; 179789e0f4d2SKip Macy else 179889e0f4d2SKip Macy #endif 179989e0f4d2SKip Macy { 180089e0f4d2SKip Macy ifp->if_mtu = ifr->ifr_mtu; 180189e0f4d2SKip Macy ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 180289e0f4d2SKip Macy xn_ifinit(sc); 180389e0f4d2SKip Macy } 180489e0f4d2SKip Macy break; 180589e0f4d2SKip Macy case SIOCSIFFLAGS: 180689e0f4d2SKip Macy XN_LOCK(sc); 180789e0f4d2SKip Macy if (ifp->if_flags & IFF_UP) { 180889e0f4d2SKip Macy /* 180989e0f4d2SKip Macy * If only the state of the PROMISC flag changed, 181089e0f4d2SKip Macy * then just use the 'set promisc mode' command 181189e0f4d2SKip Macy * instead of reinitializing the entire NIC. Doing 181289e0f4d2SKip Macy * a full re-init means reloading the firmware and 181389e0f4d2SKip Macy * waiting for it to start up, which may take a 181489e0f4d2SKip Macy * second or two. 181589e0f4d2SKip Macy */ 181689e0f4d2SKip Macy #ifdef notyet 181789e0f4d2SKip Macy /* No promiscuous mode with Xen */ 181889e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING && 181989e0f4d2SKip Macy ifp->if_flags & IFF_PROMISC && 182089e0f4d2SKip Macy !(sc->xn_if_flags & IFF_PROMISC)) { 182189e0f4d2SKip Macy XN_SETBIT(sc, XN_RX_MODE, 182289e0f4d2SKip Macy XN_RXMODE_RX_PROMISC); 182389e0f4d2SKip Macy } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 182489e0f4d2SKip Macy !(ifp->if_flags & IFF_PROMISC) && 182589e0f4d2SKip Macy sc->xn_if_flags & IFF_PROMISC) { 182689e0f4d2SKip Macy XN_CLRBIT(sc, XN_RX_MODE, 182789e0f4d2SKip Macy XN_RXMODE_RX_PROMISC); 182889e0f4d2SKip Macy } else 182989e0f4d2SKip Macy #endif 183089e0f4d2SKip Macy xn_ifinit_locked(sc); 183189e0f4d2SKip Macy } else { 183289e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 183389e0f4d2SKip Macy xn_stop(sc); 183489e0f4d2SKip Macy } 183589e0f4d2SKip Macy } 183689e0f4d2SKip Macy sc->xn_if_flags = ifp->if_flags; 183789e0f4d2SKip Macy XN_UNLOCK(sc); 183889e0f4d2SKip Macy error = 0; 183989e0f4d2SKip Macy break; 184089e0f4d2SKip Macy case SIOCSIFCAP: 184189e0f4d2SKip Macy mask = ifr->ifr_reqcap ^ ifp->if_capenable; 184212678024SDoug Rabson if (mask & IFCAP_TXCSUM) { 184312678024SDoug Rabson if (IFCAP_TXCSUM & ifp->if_capenable) { 184412678024SDoug Rabson ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4); 184512678024SDoug Rabson ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP 184612678024SDoug Rabson | CSUM_IP | CSUM_TSO); 184712678024SDoug Rabson } else { 184812678024SDoug Rabson ifp->if_capenable |= IFCAP_TXCSUM; 184912678024SDoug Rabson ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP 185012678024SDoug Rabson | CSUM_IP); 185189e0f4d2SKip Macy } 185212678024SDoug Rabson } 185312678024SDoug Rabson if (mask & IFCAP_RXCSUM) { 185412678024SDoug Rabson ifp->if_capenable ^= IFCAP_RXCSUM; 185512678024SDoug Rabson } 185612678024SDoug Rabson #if __FreeBSD_version >= 700000 185712678024SDoug Rabson if (mask & IFCAP_TSO4) { 185812678024SDoug Rabson if (IFCAP_TSO4 & ifp->if_capenable) { 185912678024SDoug Rabson ifp->if_capenable &= ~IFCAP_TSO4; 186012678024SDoug Rabson ifp->if_hwassist &= ~CSUM_TSO; 186112678024SDoug Rabson } else if (IFCAP_TXCSUM & ifp->if_capenable) { 186212678024SDoug Rabson ifp->if_capenable |= IFCAP_TSO4; 186312678024SDoug Rabson ifp->if_hwassist |= CSUM_TSO; 186412678024SDoug Rabson } else { 18653552092bSAdrian Chadd IPRINTK("Xen requires tx 
checksum offload" 186612678024SDoug Rabson " be enabled to use TSO\n"); 186712678024SDoug Rabson error = EINVAL; 186812678024SDoug Rabson } 186912678024SDoug Rabson } 187012678024SDoug Rabson if (mask & IFCAP_LRO) { 187112678024SDoug Rabson ifp->if_capenable ^= IFCAP_LRO; 187212678024SDoug Rabson 187312678024SDoug Rabson } 187412678024SDoug Rabson #endif 187589e0f4d2SKip Macy error = 0; 187689e0f4d2SKip Macy break; 187789e0f4d2SKip Macy case SIOCADDMULTI: 187889e0f4d2SKip Macy case SIOCDELMULTI: 187989e0f4d2SKip Macy #ifdef notyet 188089e0f4d2SKip Macy if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 188189e0f4d2SKip Macy XN_LOCK(sc); 188289e0f4d2SKip Macy xn_setmulti(sc); 188389e0f4d2SKip Macy XN_UNLOCK(sc); 188489e0f4d2SKip Macy error = 0; 188589e0f4d2SKip Macy } 188689e0f4d2SKip Macy #endif 188789e0f4d2SKip Macy /* FALLTHROUGH */ 188889e0f4d2SKip Macy case SIOCSIFMEDIA: 188989e0f4d2SKip Macy case SIOCGIFMEDIA: 18900e509842SJustin T. Gibbs error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 189189e0f4d2SKip Macy break; 189289e0f4d2SKip Macy default: 189389e0f4d2SKip Macy error = ether_ioctl(ifp, cmd, data); 189489e0f4d2SKip Macy } 189589e0f4d2SKip Macy 189689e0f4d2SKip Macy return (error); 189789e0f4d2SKip Macy } 189889e0f4d2SKip Macy 189989e0f4d2SKip Macy static void 190089e0f4d2SKip Macy xn_stop(struct netfront_info *sc) 190189e0f4d2SKip Macy { 190289e0f4d2SKip Macy struct ifnet *ifp; 190389e0f4d2SKip Macy 190489e0f4d2SKip Macy XN_LOCK_ASSERT(sc); 190589e0f4d2SKip Macy 190689e0f4d2SKip Macy ifp = sc->xn_ifp; 190789e0f4d2SKip Macy 190889e0f4d2SKip Macy callout_stop(&sc->xn_stat_ch); 190989e0f4d2SKip Macy 191089e0f4d2SKip Macy xn_free_rx_ring(sc); 191189e0f4d2SKip Macy xn_free_tx_ring(sc); 191289e0f4d2SKip Macy 191389e0f4d2SKip Macy ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 19140e509842SJustin T. Gibbs if_link_state_change(ifp, LINK_STATE_DOWN); 191589e0f4d2SKip Macy } 191689e0f4d2SKip Macy 191789e0f4d2SKip Macy /* START of Xenolinux helper functions adapted to FreeBSD */ 191823dc5621SKip Macy int 191923dc5621SKip Macy network_connect(struct netfront_info *np) 192089e0f4d2SKip Macy { 19213a6d1fcfSKip Macy int i, requeue_idx, error; 192289e0f4d2SKip Macy grant_ref_t ref; 192389e0f4d2SKip Macy netif_rx_request_t *req; 192489e0f4d2SKip Macy u_int feature_rx_copy, feature_rx_flip; 192589e0f4d2SKip Macy 1926ff662b5cSJustin T. Gibbs error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 19273a6d1fcfSKip Macy "feature-rx-copy", NULL, "%u", &feature_rx_copy); 19283a6d1fcfSKip Macy if (error) 192989e0f4d2SKip Macy feature_rx_copy = 0; 1930ff662b5cSJustin T. Gibbs error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 19313a6d1fcfSKip Macy "feature-rx-flip", NULL, "%u", &feature_rx_flip); 19323a6d1fcfSKip Macy if (error) 193389e0f4d2SKip Macy feature_rx_flip = 1; 193489e0f4d2SKip Macy 193589e0f4d2SKip Macy /* 193689e0f4d2SKip Macy * Copy packets on receive path if: 193789e0f4d2SKip Macy * (a) This was requested by user, and the backend supports it; or 193889e0f4d2SKip Macy * (b) Flipping was requested, but this is unsupported by the backend. 
193989e0f4d2SKip Macy */ 194089e0f4d2SKip Macy np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || 194189e0f4d2SKip Macy (MODPARM_rx_flip && !feature_rx_flip)); 194289e0f4d2SKip Macy 194389e0f4d2SKip Macy /* Recovery procedure: */ 19443a6d1fcfSKip Macy error = talk_to_backend(np->xbdev, np); 19453a6d1fcfSKip Macy if (error) 19463a6d1fcfSKip Macy return (error); 194789e0f4d2SKip Macy 194889e0f4d2SKip Macy /* Step 1: Reinitialise variables. */ 1949*578e4bf7SJustin T. Gibbs xn_query_features(np); 1950*578e4bf7SJustin T. Gibbs xn_configure_features(np); 195189e0f4d2SKip Macy netif_release_tx_bufs(np); 195289e0f4d2SKip Macy 195389e0f4d2SKip Macy /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 195489e0f4d2SKip Macy for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 195589e0f4d2SKip Macy struct mbuf *m; 19563a6d1fcfSKip Macy u_long pfn; 195789e0f4d2SKip Macy 195889e0f4d2SKip Macy if (np->rx_mbufs[i] == NULL) 195989e0f4d2SKip Macy continue; 196089e0f4d2SKip Macy 196189e0f4d2SKip Macy m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i); 196289e0f4d2SKip Macy ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); 1963931eeffaSKenneth D. Merry 196489e0f4d2SKip Macy req = RING_GET_REQUEST(&np->rx, requeue_idx); 19653a6d1fcfSKip Macy pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT; 196689e0f4d2SKip Macy 196789e0f4d2SKip Macy if (!np->copying_receiver) { 196889e0f4d2SKip Macy gnttab_grant_foreign_transfer_ref(ref, 196923dc5621SKip Macy xenbus_get_otherend_id(np->xbdev), 19703a6d1fcfSKip Macy pfn); 197189e0f4d2SKip Macy } else { 197289e0f4d2SKip Macy gnttab_grant_foreign_access_ref(ref, 197323dc5621SKip Macy xenbus_get_otherend_id(np->xbdev), 19743a6d1fcfSKip Macy PFNTOMFN(pfn), 0); 197589e0f4d2SKip Macy } 197689e0f4d2SKip Macy req->gref = ref; 197789e0f4d2SKip Macy req->id = requeue_idx; 197889e0f4d2SKip Macy 197989e0f4d2SKip Macy requeue_idx++; 198089e0f4d2SKip Macy } 198189e0f4d2SKip Macy 198289e0f4d2SKip Macy np->rx.req_prod_pvt = requeue_idx; 198389e0f4d2SKip Macy 198489e0f4d2SKip Macy /* Step 3: All public and private state should now be sane. Get 198589e0f4d2SKip Macy * ready to start sending and receiving packets and give the driver 198689e0f4d2SKip Macy * domain a kick because we've probably just requeued some 198789e0f4d2SKip Macy * packets. 198889e0f4d2SKip Macy */ 198989e0f4d2SKip Macy netfront_carrier_on(np); 199089e0f4d2SKip Macy notify_remote_via_irq(np->irq); 199189e0f4d2SKip Macy XN_TX_LOCK(np); 199289e0f4d2SKip Macy xn_txeof(np); 199389e0f4d2SKip Macy XN_TX_UNLOCK(np); 199489e0f4d2SKip Macy network_alloc_rx_buffers(np); 199589e0f4d2SKip Macy 199689e0f4d2SKip Macy return (0); 199789e0f4d2SKip Macy } 199889e0f4d2SKip Macy 199989e0f4d2SKip Macy static void 200089e0f4d2SKip Macy show_device(struct netfront_info *sc) 200189e0f4d2SKip Macy { 200289e0f4d2SKip Macy #ifdef DEBUG 200389e0f4d2SKip Macy if (sc) { 200489e0f4d2SKip Macy IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n", 200589e0f4d2SKip Macy sc->xn_ifno, 200689e0f4d2SKip Macy be_state_name[sc->xn_backend_state], 200789e0f4d2SKip Macy sc->xn_user_state ? "open" : "closed", 200889e0f4d2SKip Macy sc->xn_evtchn, 200989e0f4d2SKip Macy sc->xn_irq, 201089e0f4d2SKip Macy sc->xn_tx_if, 201189e0f4d2SKip Macy sc->xn_rx_if); 201289e0f4d2SKip Macy } else { 201389e0f4d2SKip Macy IPRINTK("<vif NULL>\n"); 201489e0f4d2SKip Macy } 201589e0f4d2SKip Macy #endif 201689e0f4d2SKip Macy } 201789e0f4d2SKip Macy 2018*578e4bf7SJustin T. Gibbs static void 2019*578e4bf7SJustin T. 
Gibbs xn_query_features(struct netfront_info *np) 2020*578e4bf7SJustin T. Gibbs { 2021*578e4bf7SJustin T. Gibbs int val; 2022*578e4bf7SJustin T. Gibbs 2023*578e4bf7SJustin T. Gibbs device_printf(np->xbdev, "backend features:"); 2024*578e4bf7SJustin T. Gibbs 2025*578e4bf7SJustin T. Gibbs if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 2026*578e4bf7SJustin T. Gibbs "feature-sg", NULL, "%d", &val) < 0) 2027*578e4bf7SJustin T. Gibbs val = 0; 2028*578e4bf7SJustin T. Gibbs 2029*578e4bf7SJustin T. Gibbs np->maxfrags = 1; 2030*578e4bf7SJustin T. Gibbs if (val) { 2031*578e4bf7SJustin T. Gibbs np->maxfrags = MAX_TX_REQ_FRAGS; 2032*578e4bf7SJustin T. Gibbs printf(" feature-sg"); 2033*578e4bf7SJustin T. Gibbs } 2034*578e4bf7SJustin T. Gibbs 2035*578e4bf7SJustin T. Gibbs if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 2036*578e4bf7SJustin T. Gibbs "feature-gso-tcpv4", NULL, "%d", &val) < 0) 2037*578e4bf7SJustin T. Gibbs val = 0; 2038*578e4bf7SJustin T. Gibbs 2039*578e4bf7SJustin T. Gibbs np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO); 2040*578e4bf7SJustin T. Gibbs if (val) { 2041*578e4bf7SJustin T. Gibbs np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO; 2042*578e4bf7SJustin T. Gibbs printf(" feature-gso-tcp4"); 2043*578e4bf7SJustin T. Gibbs } 2044*578e4bf7SJustin T. Gibbs 2045*578e4bf7SJustin T. Gibbs printf("\n"); 2046*578e4bf7SJustin T. Gibbs } 2047*578e4bf7SJustin T. Gibbs 2048cf9c09e1SJustin T. Gibbs static int 2049*578e4bf7SJustin T. Gibbs xn_configure_features(struct netfront_info *np) 2050cf9c09e1SJustin T. Gibbs { 2051cf9c09e1SJustin T. Gibbs int err; 2052cf9c09e1SJustin T. Gibbs 2053cf9c09e1SJustin T. Gibbs err = 0; 2054cf9c09e1SJustin T. Gibbs #if __FreeBSD_version >= 700000 2055*578e4bf7SJustin T. Gibbs if ((np->xn_ifp->if_capenable & IFCAP_LRO) != 0) 2056cf9c09e1SJustin T. Gibbs tcp_lro_free(&np->xn_lro); 2057*578e4bf7SJustin T. Gibbs #endif 2058*578e4bf7SJustin T. Gibbs np->xn_ifp->if_capenable = 2059*578e4bf7SJustin T. Gibbs np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4); 2060*578e4bf7SJustin T. Gibbs np->xn_ifp->if_hwassist &= ~CSUM_TSO; 2061*578e4bf7SJustin T. Gibbs #if __FreeBSD_version >= 700000 2062*578e4bf7SJustin T. Gibbs if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) != 0) { 2063cf9c09e1SJustin T. Gibbs err = tcp_lro_init(&np->xn_lro); 2064cf9c09e1SJustin T. Gibbs if (err) { 2065cf9c09e1SJustin T. Gibbs device_printf(np->xbdev, "LRO initialization failed\n"); 2066cf9c09e1SJustin T. Gibbs } else { 2067cf9c09e1SJustin T. Gibbs np->xn_lro.ifp = np->xn_ifp; 2068*578e4bf7SJustin T. Gibbs np->xn_ifp->if_capenable |= IFCAP_LRO; 2069cf9c09e1SJustin T. Gibbs } 2070cf9c09e1SJustin T. Gibbs } 2071*578e4bf7SJustin T. Gibbs if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) != 0) { 2072*578e4bf7SJustin T. Gibbs np->xn_ifp->if_capenable |= IFCAP_TSO4; 2073*578e4bf7SJustin T. Gibbs np->xn_ifp->if_hwassist |= CSUM_TSO; 2074*578e4bf7SJustin T. Gibbs } 2075cf9c09e1SJustin T. Gibbs #endif 2076cf9c09e1SJustin T. Gibbs return (err); 2077cf9c09e1SJustin T. Gibbs } 2078cf9c09e1SJustin T. Gibbs 207989e0f4d2SKip Macy /** Create a network device. 
208089e0f4d2SKip Macy * @param handle device handle 208189e0f4d2SKip Macy */ 208223dc5621SKip Macy int 208323dc5621SKip Macy create_netdev(device_t dev) 208489e0f4d2SKip Macy { 208589e0f4d2SKip Macy int i; 208689e0f4d2SKip Macy struct netfront_info *np; 208789e0f4d2SKip Macy int err; 208889e0f4d2SKip Macy struct ifnet *ifp; 208989e0f4d2SKip Macy 209023dc5621SKip Macy np = device_get_softc(dev); 209189e0f4d2SKip Macy 209289e0f4d2SKip Macy np->xbdev = dev; 209389e0f4d2SKip Macy 209489e0f4d2SKip Macy XN_LOCK_INIT(np, xennetif); 20950e509842SJustin T. Gibbs 20960e509842SJustin T. Gibbs ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts); 20970e509842SJustin T. Gibbs ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 20980e509842SJustin T. Gibbs ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL); 20990e509842SJustin T. Gibbs 210089e0f4d2SKip Macy np->rx_target = RX_MIN_TARGET; 210189e0f4d2SKip Macy np->rx_min_target = RX_MIN_TARGET; 210289e0f4d2SKip Macy np->rx_max_target = RX_MAX_TARGET; 210389e0f4d2SKip Macy 210489e0f4d2SKip Macy /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */ 210589e0f4d2SKip Macy for (i = 0; i <= NET_TX_RING_SIZE; i++) { 210689e0f4d2SKip Macy np->tx_mbufs[i] = (void *) ((u_long) i+1); 2107ff662b5cSJustin T. Gibbs np->grant_tx_ref[i] = GRANT_REF_INVALID; 210889e0f4d2SKip Macy } 2109931eeffaSKenneth D. Merry np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0; 2110931eeffaSKenneth D. Merry 211189e0f4d2SKip Macy for (i = 0; i <= NET_RX_RING_SIZE; i++) { 2112931eeffaSKenneth D. Merry 211389e0f4d2SKip Macy np->rx_mbufs[i] = NULL; 2114ff662b5cSJustin T. Gibbs np->grant_rx_ref[i] = GRANT_REF_INVALID; 211589e0f4d2SKip Macy } 211689e0f4d2SKip Macy /* A grant for every tx ring slot */ 2117931eeffaSKenneth D. Merry if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, 2118931eeffaSKenneth D. Merry &np->gref_tx_head) != 0) { 2119227ca257SKip Macy IPRINTK("#### netfront can't alloc tx grant refs\n"); 212089e0f4d2SKip Macy err = ENOMEM; 212189e0f4d2SKip Macy goto exit; 212289e0f4d2SKip Macy } 212389e0f4d2SKip Macy /* A grant for every rx ring slot */ 212489e0f4d2SKip Macy if (gnttab_alloc_grant_references(RX_MAX_TARGET, 2125931eeffaSKenneth D. Merry &np->gref_rx_head) != 0) { 2126227ca257SKip Macy WPRINTK("#### netfront can't alloc rx grant refs\n"); 212789e0f4d2SKip Macy gnttab_free_grant_references(np->gref_tx_head); 212889e0f4d2SKip Macy err = ENOMEM; 212989e0f4d2SKip Macy goto exit; 213089e0f4d2SKip Macy } 213189e0f4d2SKip Macy 213289e0f4d2SKip Macy err = xen_net_read_mac(dev, np->mac); 2133ffa06904SJustin T. 
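	/*
	 * xen_net_read_mac() fetches the MAC address that the toolstack
	 * publishes in this device's xenstore directory; without it the
	 * interface cannot be attached, so fail the whole create.
	 */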
Gibbs if (err) 213489e0f4d2SKip Macy goto out; 213589e0f4d2SKip Macy 213689e0f4d2SKip Macy /* Set up ifnet structure */ 213723dc5621SKip Macy ifp = np->xn_ifp = if_alloc(IFT_ETHER); 213889e0f4d2SKip Macy ifp->if_softc = np; 213923dc5621SKip Macy if_initname(ifp, "xn", device_get_unit(dev)); 21403a6d1fcfSKip Macy ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 214189e0f4d2SKip Macy ifp->if_ioctl = xn_ioctl; 214289e0f4d2SKip Macy ifp->if_output = ether_output; 214389e0f4d2SKip Macy ifp->if_start = xn_start; 2144227ca257SKip Macy #ifdef notyet 2145227ca257SKip Macy ifp->if_watchdog = xn_watchdog; 2146227ca257SKip Macy #endif 214789e0f4d2SKip Macy ifp->if_init = xn_ifinit; 214889e0f4d2SKip Macy ifp->if_mtu = ETHERMTU; 214989e0f4d2SKip Macy ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1; 215089e0f4d2SKip Macy 215189e0f4d2SKip Macy ifp->if_hwassist = XN_CSUM_FEATURES; 215289e0f4d2SKip Macy ifp->if_capabilities = IFCAP_HWCSUM; 215389e0f4d2SKip Macy 215489e0f4d2SKip Macy ether_ifattach(ifp, np->mac); 215589e0f4d2SKip Macy callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE); 215689e0f4d2SKip Macy netfront_carrier_off(np); 215789e0f4d2SKip Macy 215889e0f4d2SKip Macy return (0); 215989e0f4d2SKip Macy 216089e0f4d2SKip Macy exit: 216189e0f4d2SKip Macy gnttab_free_grant_references(np->gref_tx_head); 216289e0f4d2SKip Macy out: 2163ffa06904SJustin T. Gibbs return (err); 216489e0f4d2SKip Macy } 216589e0f4d2SKip Macy 216689e0f4d2SKip Macy /** 216789e0f4d2SKip Macy * Handle the change of state of the backend to Closing. We must delete our 216889e0f4d2SKip Macy * device-layer structures now, to ensure that writes are flushed through to 216989e0f4d2SKip Macy * the backend. Once is this done, we can switch to Closed in 217089e0f4d2SKip Macy * acknowledgement. 217189e0f4d2SKip Macy */ 217289e0f4d2SKip Macy #if 0 21730e509842SJustin T. Gibbs static void 21740e509842SJustin T. Gibbs netfront_closing(device_t dev) 217589e0f4d2SKip Macy { 217689e0f4d2SKip Macy #if 0 217789e0f4d2SKip Macy struct netfront_info *info = dev->dev_driver_data; 217889e0f4d2SKip Macy 217989e0f4d2SKip Macy DPRINTK("netfront_closing: %s removed\n", dev->nodename); 218089e0f4d2SKip Macy 218189e0f4d2SKip Macy close_netdev(info); 218289e0f4d2SKip Macy #endif 218389e0f4d2SKip Macy xenbus_switch_state(dev, XenbusStateClosed); 218489e0f4d2SKip Macy } 218589e0f4d2SKip Macy #endif 218689e0f4d2SKip Macy 21870e509842SJustin T. Gibbs static int 21880e509842SJustin T. Gibbs netfront_detach(device_t dev) 218989e0f4d2SKip Macy { 219023dc5621SKip Macy struct netfront_info *info = device_get_softc(dev); 219189e0f4d2SKip Macy 219223dc5621SKip Macy DPRINTK("%s\n", xenbus_get_node(dev)); 219389e0f4d2SKip Macy 219489e0f4d2SKip Macy netif_free(info); 219589e0f4d2SKip Macy 219689e0f4d2SKip Macy return 0; 219789e0f4d2SKip Macy } 219889e0f4d2SKip Macy 21990e509842SJustin T. Gibbs static void 22000e509842SJustin T. Gibbs netif_free(struct netfront_info *info) 220189e0f4d2SKip Macy { 220289e0f4d2SKip Macy netif_disconnect_backend(info); 220389e0f4d2SKip Macy #if 0 220489e0f4d2SKip Macy close_netdev(info); 220589e0f4d2SKip Macy #endif 220689e0f4d2SKip Macy } 220789e0f4d2SKip Macy 22080e509842SJustin T. Gibbs static void 22090e509842SJustin T. 
Gibbs netif_disconnect_backend(struct netfront_info *info) 221089e0f4d2SKip Macy { 22113a6d1fcfSKip Macy XN_RX_LOCK(info); 22123a6d1fcfSKip Macy XN_TX_LOCK(info); 22133a6d1fcfSKip Macy netfront_carrier_off(info); 22143a6d1fcfSKip Macy XN_TX_UNLOCK(info); 22153a6d1fcfSKip Macy XN_RX_UNLOCK(info); 22163a6d1fcfSKip Macy 2217cf9c09e1SJustin T. Gibbs free_ring(&info->tx_ring_ref, &info->tx.sring); 2218cf9c09e1SJustin T. Gibbs free_ring(&info->rx_ring_ref, &info->rx.sring); 221989e0f4d2SKip Macy 222089e0f4d2SKip Macy if (info->irq) 22213a6d1fcfSKip Macy unbind_from_irqhandler(info->irq); 22223a6d1fcfSKip Macy 222389e0f4d2SKip Macy info->irq = 0; 222489e0f4d2SKip Macy } 222589e0f4d2SKip Macy 22260e509842SJustin T. Gibbs static void 2227cf9c09e1SJustin T. Gibbs free_ring(int *ref, void *ring_ptr_ref) 222889e0f4d2SKip Macy { 2229cf9c09e1SJustin T. Gibbs void **ring_ptr_ptr = ring_ptr_ref; 2230cf9c09e1SJustin T. Gibbs 2231cf9c09e1SJustin T. Gibbs if (*ref != GRANT_REF_INVALID) { 2232cf9c09e1SJustin T. Gibbs /* This API frees the associated storage. */ 2233cf9c09e1SJustin T. Gibbs gnttab_end_foreign_access(*ref, *ring_ptr_ptr); 2234cf9c09e1SJustin T. Gibbs *ref = GRANT_REF_INVALID; 2235cf9c09e1SJustin T. Gibbs } 2236cf9c09e1SJustin T. Gibbs *ring_ptr_ptr = NULL; 223789e0f4d2SKip Macy } 223889e0f4d2SKip Macy 22390e509842SJustin T. Gibbs static int 22400e509842SJustin T. Gibbs xn_ifmedia_upd(struct ifnet *ifp) 22410e509842SJustin T. Gibbs { 22420e509842SJustin T. Gibbs return (0); 22430e509842SJustin T. Gibbs } 22440e509842SJustin T. Gibbs 22450e509842SJustin T. Gibbs static void 22460e509842SJustin T. Gibbs xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 22470e509842SJustin T. Gibbs { 22480e509842SJustin T. Gibbs ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; 22490e509842SJustin T. Gibbs ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; 22500e509842SJustin T. Gibbs } 22510e509842SJustin T. Gibbs 225289e0f4d2SKip Macy /* ** Driver registration ** */ 225323dc5621SKip Macy static device_method_t netfront_methods[] = { 225423dc5621SKip Macy /* Device interface */ 225523dc5621SKip Macy DEVMETHOD(device_probe, netfront_probe), 225623dc5621SKip Macy DEVMETHOD(device_attach, netfront_attach), 225723dc5621SKip Macy DEVMETHOD(device_detach, netfront_detach), 225823dc5621SKip Macy DEVMETHOD(device_shutdown, bus_generic_shutdown), 2259cf9c09e1SJustin T. Gibbs DEVMETHOD(device_suspend, netfront_suspend), 226023dc5621SKip Macy DEVMETHOD(device_resume, netfront_resume), 226189e0f4d2SKip Macy 226223dc5621SKip Macy /* Xenbus interface */ 2263ff662b5cSJustin T. Gibbs DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed), 226489e0f4d2SKip Macy 226523dc5621SKip Macy { 0, 0 } 226689e0f4d2SKip Macy }; 226789e0f4d2SKip Macy 226823dc5621SKip Macy static driver_t netfront_driver = { 226923dc5621SKip Macy "xn", 227023dc5621SKip Macy netfront_methods, 227123dc5621SKip Macy sizeof(struct netfront_info), 227289e0f4d2SKip Macy }; 227323dc5621SKip Macy devclass_t netfront_devclass; 227489e0f4d2SKip Macy 2275ff662b5cSJustin T. Gibbs DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, 0, 0); 2276
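/*
 * Illustrative sketch only, not part of the driver: one common way to
 * implement the index free list that tx_mbufs doubles as (see xn_txeof()
 * and xn_assemble_tx_request() above).  Slot 0 acts as the list head and
 * is never handed out, which is why a returned id of 0 is treated as a
 * bug.  The driver's real add_id_to_freelist()/get_id_from_freelist()
 * helpers are defined elsewhere in this source and may differ in detail;
 * the example_* names below are hypothetical.
 */
#if 0
static inline void
example_add_id_to_freelist(void **list, uintptr_t id)
{
	/* A freed slot stores the previous head as a small integer. */
	list[id] = list[0];
	list[0] = (void *)id;
}

static inline uintptr_t
example_get_id_from_freelist(void **list)
{
	uintptr_t id;

	/* An id of 0 means only the head is left, i.e. the list is empty. */
	id = (uintptr_t)list[0];
	list[0] = list[id];
	return (id);
}
#endif

/*
 * Worked example for the interrupt-moderation formula in xn_txeof(): with
 * rsp_prod == prod == 10 and req_prod == 18 (eight requests outstanding),
 * rsp_event becomes 10 + ((18 - 10) >> 1) + 1 = 15, i.e. the backend is
 * asked to send the next completion event once roughly half of the
 * outstanding transmits have been retired.
 */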