1b6e66be2SVincenzo Maffione /* 2b6e66be2SVincenzo Maffione * Copyright (C) 2016-2018 Vincenzo Maffione 3b6e66be2SVincenzo Maffione * Copyright (C) 2015 Stefano Garzarella 4b6e66be2SVincenzo Maffione * All rights reserved. 5b6e66be2SVincenzo Maffione * 6b6e66be2SVincenzo Maffione * Redistribution and use in source and binary forms, with or without 7b6e66be2SVincenzo Maffione * modification, are permitted provided that the following conditions 8b6e66be2SVincenzo Maffione * are met: 9b6e66be2SVincenzo Maffione * 1. Redistributions of source code must retain the above copyright 10b6e66be2SVincenzo Maffione * notice, this list of conditions and the following disclaimer. 11b6e66be2SVincenzo Maffione * 2. Redistributions in binary form must reproduce the above copyright 12b6e66be2SVincenzo Maffione * notice, this list of conditions and the following disclaimer in the 13b6e66be2SVincenzo Maffione * documentation and/or other materials provided with the distribution. 14b6e66be2SVincenzo Maffione * 15b6e66be2SVincenzo Maffione * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16b6e66be2SVincenzo Maffione * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17b6e66be2SVincenzo Maffione * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18b6e66be2SVincenzo Maffione * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19b6e66be2SVincenzo Maffione * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20b6e66be2SVincenzo Maffione * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21b6e66be2SVincenzo Maffione * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22b6e66be2SVincenzo Maffione * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23b6e66be2SVincenzo Maffione * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24b6e66be2SVincenzo Maffione * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25b6e66be2SVincenzo Maffione * SUCH DAMAGE. 26b6e66be2SVincenzo Maffione * 27b6e66be2SVincenzo Maffione * $FreeBSD$ 28b6e66be2SVincenzo Maffione */ 29b6e66be2SVincenzo Maffione 30b6e66be2SVincenzo Maffione /* 31b6e66be2SVincenzo Maffione * common headers 32b6e66be2SVincenzo Maffione */ 33b6e66be2SVincenzo Maffione #if defined(__FreeBSD__) 34b6e66be2SVincenzo Maffione #include <sys/cdefs.h> 35b6e66be2SVincenzo Maffione #include <sys/param.h> 36b6e66be2SVincenzo Maffione #include <sys/kernel.h> 37b6e66be2SVincenzo Maffione #include <sys/types.h> 38b6e66be2SVincenzo Maffione #include <sys/selinfo.h> 39b6e66be2SVincenzo Maffione #include <sys/socket.h> 40b6e66be2SVincenzo Maffione #include <net/if.h> 41b6e66be2SVincenzo Maffione #include <net/if_var.h> 42b6e66be2SVincenzo Maffione #include <machine/bus.h> 43b6e66be2SVincenzo Maffione 44b6e66be2SVincenzo Maffione #define usleep_range(_1, _2) \ 45b6e66be2SVincenzo Maffione pause_sbt("sync-kloop-sleep", SBT_1US * _1, SBT_1US * 1, C_ABSOLUTE) 46b6e66be2SVincenzo Maffione 47b6e66be2SVincenzo Maffione #elif defined(linux) 48b6e66be2SVincenzo Maffione #include <bsd_glue.h> 49b6e66be2SVincenzo Maffione #include <linux/file.h> 50b6e66be2SVincenzo Maffione #include <linux/eventfd.h> 51b6e66be2SVincenzo Maffione #endif 52b6e66be2SVincenzo Maffione 
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>

/* Support for eventfd-based notifications. */
#if defined(linux)
#define SYNC_KLOOP_POLL
#endif

/* Write kring pointers (hwcur, hwtail) to the CSB.
 * This routine is coupled with ptnetmap_guest_read_kring_csb(). */
static inline void
sync_kloop_kernel_write(struct nm_csb_ktoa __user *ptr, uint32_t hwcur,
		uint32_t hwtail)
{
	/* Issue a first store-store barrier to make sure writes to the
	 * netmap ring do not overcome updates on ktoa->hwcur and
	 * ktoa->hwtail. */
	nm_stst_barrier();

	/*
	 * The same scheme used in nm_sync_kloop_appl_write() applies here.
	 * We allow the application to read a value of hwcur more recent than
	 * the value of hwtail, since this would anyway result in a consistent
	 * view of the ring state (and hwcur can never wraparound hwtail,
	 * since hwcur must be behind head).
	 *
	 * The following memory barrier scheme is used to make this happen:
	 *
	 *          Application            Kernel
	 *
	 *          STORE(hwcur)           LOAD(hwtail)
	 *          wmb() <------------->  rmb()
	 *          STORE(hwtail)          LOAD(hwcur)
	 */
	CSB_WRITE(ptr, hwcur, hwcur);
	nm_stst_barrier();
	CSB_WRITE(ptr, hwtail, hwtail);
}

/* Read kring pointers (head, cur, sync_flags) from the CSB into a shadow
 * copy of the netmap ring. This routine is coupled with
 * ptnetmap_guest_write_kring_csb().
 * Note: num_slots is not referenced by this routine's body. */
static inline void
sync_kloop_kernel_read(struct nm_csb_atok __user *ptr,
		       struct netmap_ring *shadow_ring,
		       uint32_t num_slots)
{
	/*
	 * We place a memory barrier to make sure that the update of head
	 * never overtakes the update of cur.
	 * (see explanation in sync_kloop_kernel_write).
	 */
	CSB_READ(ptr, head, shadow_ring->head);
	nm_ldld_barrier();
	CSB_READ(ptr, cur, shadow_ring->cur);
	CSB_READ(ptr, sync_flags, shadow_ring->flags);

	/* Make sure that loads from atok->head and atok->cur are not delayed
	 * after the loads from the netmap ring. */
	nm_ldld_barrier();
}

/* Enable (val != 0) or disable (val == 0) application --> kernel kicks,
 * by writing the kern_need_kick flag in the kernel-to-application CSB. */
static inline void
csb_ktoa_kick_enable(struct nm_csb_ktoa __user *csb_ktoa, uint32_t val)
{
	CSB_WRITE(csb_ktoa, kern_need_kick, val);
}

#ifdef SYNC_KLOOP_POLL
/* Are application interrupts enabled or disabled? Returns the
 * appl_need_kick flag read from the application-to-kernel CSB. */
static inline uint32_t
csb_atok_intr_enabled(struct nm_csb_atok __user *csb_atok)
{
	uint32_t v;

	CSB_READ(csb_atok, appl_need_kick, v);

	return v;
}
#endif /* SYNC_KLOOP_POLL */

/* Debug helper: log the current kring pointers, prefixed by title. */
static inline void
sync_kloop_kring_dump(const char *title, const struct netmap_kring *kring)
{
	nm_prinf("%s, kring %s, hwcur %d, rhead %d, "
		"rcur %d, rtail %d, hwtail %d",
		title, kring->name, kring->nr_hwcur, kring->rhead,
		kring->rcur, kring->rtail, kring->nr_hwtail);
}

/* Arguments for netmap_sync_kloop_tx_ring() and
 * netmap_sync_kloop_rx_ring().
 */
struct sync_kloop_ring_args {
	struct netmap_kring *kring;
	struct nm_csb_atok *csb_atok;
	struct nm_csb_ktoa *csb_ktoa;
#ifdef SYNC_KLOOP_POLL
	/* eventfd context used to interrupt (kick) the application. */
	struct eventfd_ctx *irq_ctx;
#endif /* SYNC_KLOOP_POLL */
	/* Are we busy waiting rather than using a schedule() loop ? */
	bool busy_wait;
	/* Are we processing in the context of VM exit ? */
	bool direct;
};

/* Process a TX kring on behalf of the application: run txsync cycles for
 * the slots published through the CSB, publish updated hwcur/hwtail back
 * to the CSB, and (if SYNC_KLOOP_POLL) kick the application via eventfd
 * when more TX space becomes available. Returns silently if the kring
 * cannot be acquired. */
static void
netmap_sync_kloop_tx_ring(const struct sync_kloop_ring_args *a)
{
	struct netmap_kring *kring = a->kring;
	struct nm_csb_atok *csb_atok = a->csb_atok;
	struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa;
	struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
#ifdef SYNC_KLOOP_POLL
	bool more_txspace = false;
#endif /* SYNC_KLOOP_POLL */
	uint32_t num_slots;
	int batch;

	if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
		return;
	}

	num_slots = kring->nkr_num_slots;

	/* Disable application --> kernel notifications. */
	if (!a->direct) {
		csb_ktoa_kick_enable(csb_ktoa, 0);
	}
	/* Copy the application kring pointers from the CSB */
	sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);

	for (;;) {
		/* Number of new slots published by the application
		 * (head may have wrapped around hwcur). */
		batch = shadow_ring.head - kring->nr_hwcur;
		if (batch < 0)
			batch += num_slots;

#ifdef PTN_TX_BATCH_LIM
		if (batch > PTN_TX_BATCH_LIM(num_slots)) {
			/* If application moves ahead too fast, let's cut the
			 * move so that we don't exceed our batch limit. */
			uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots);

			if (head_lim >= num_slots)
				head_lim -= num_slots;
			nm_prdis(1, "batch: %d head: %d head_lim: %d", batch, shadow_ring.head,
					head_lim);
			shadow_ring.head = head_lim;
			batch = PTN_TX_BATCH_LIM(num_slots);
		}
#endif /* PTN_TX_BATCH_LIM */

		/* If less than half of the slots are free, force a reclaim
		 * of completed slots during the next sync. */
		if (nm_kr_txspace(kring) <= (num_slots >> 1)) {
			shadow_ring.flags |= NAF_FORCE_RECLAIM;
		}

		/* Netmap prologue */
		shadow_ring.tail = kring->rtail;
		if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) {
			/* Reinit ring and enable notifications. */
			netmap_ring_reinit(kring);
			if (!a->busy_wait) {
				csb_ktoa_kick_enable(csb_ktoa, 1);
			}
			break;
		}

		if (unlikely(netmap_debug & NM_DEBUG_TXSYNC)) {
			sync_kloop_kring_dump("pre txsync", kring);
		}

		if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
			if (!a->busy_wait) {
				/* Re-enable notifications. */
				csb_ktoa_kick_enable(csb_ktoa, 1);
			}
			nm_prerr("txsync() failed");
			break;
		}

		/*
		 * Finalize
		 * Copy kernel hwcur and hwtail into the CSB for the
		 * application sync(), and do the nm_sync_finalize.
		 */
		sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur,
				kring->nr_hwtail);
		if (kring->rtail != kring->nr_hwtail) {
			/* Some more room available in the parent adapter. */
			kring->rtail = kring->nr_hwtail;
#ifdef SYNC_KLOOP_POLL
			more_txspace = true;
#endif /* SYNC_KLOOP_POLL */
		}

		if (unlikely(netmap_debug & NM_DEBUG_TXSYNC)) {
			sync_kloop_kring_dump("post txsync", kring);
		}

		/* Interrupt the application if needed. */
#ifdef SYNC_KLOOP_POLL
		if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) {
			/* We could disable kernel --> application kicks here,
			 * to avoid spurious interrupts. */
			eventfd_signal(a->irq_ctx, 1);
			more_txspace = false;
		}
#endif /* SYNC_KLOOP_POLL */

		/* Read CSB to see if there is more work to do. */
		sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
		if (shadow_ring.head == kring->rhead) {
			if (a->busy_wait) {
				break;
			}
			/*
			 * No more packets to transmit. We enable
			 * notifications and go to sleep, waiting for a kick
			 * from the application when new slots are ready for
			 * transmission.
			 */
			/* Re-enable notifications. */
			csb_ktoa_kick_enable(csb_ktoa, 1);
			/* Double check, with store-load memory barrier.
			 * The store is the kick-enable above; the loads are
			 * the CSB reads below. This closes the race with an
			 * application that published new slots right before
			 * notifications were re-enabled. */
			nm_stld_barrier();
			sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
			if (shadow_ring.head != kring->rhead) {
				/* We won the race condition, there are more
				 * packets to transmit. Disable notifications
				 * and do another cycle */
				csb_ktoa_kick_enable(csb_ktoa, 0);
				continue;
			}
			break;
		}

		if (nm_kr_txempty(kring)) {
			/* No more available TX slots. We stop waiting for a
			 * notification from the backend (netmap_tx_irq). */
			nm_prdis(1, "TX ring");
			break;
		}
	}

	nm_kr_put(kring);

#ifdef SYNC_KLOOP_POLL
	/* Final kick, in case we broke out of the loop with TX space
	 * gained but not yet signalled. */
	if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) {
		eventfd_signal(a->irq_ctx, 1);
	}
#endif /* SYNC_KLOOP_POLL */
}

/* Max number of consecutive RX cycles without receiving any packet. */
#define SYNC_LOOP_RX_DRY_CYCLES_MAX	2

/* Return true if the RX kring has no free slots for reception, i.e.
 * hwtail has caught up with the slot right before the application head. */
static inline int
sync_kloop_norxslots(struct netmap_kring *kring, uint32_t g_head)
{
	return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head,
				kring->nkr_num_slots - 1));
}

/* Process an RX kring on behalf of the application: run rxsync cycles,
 * publish updated hwcur/hwtail to the CSB, and (if SYNC_KLOOP_POLL) kick
 * the application via eventfd when packets have been received. Returns
 * silently if the kring cannot be acquired. */
static void
netmap_sync_kloop_rx_ring(const struct sync_kloop_ring_args *a)
{

	struct netmap_kring *kring = a->kring;
	struct nm_csb_atok *csb_atok = a->csb_atok;
	struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa;
	struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
	int dry_cycles = 0;
#ifdef SYNC_KLOOP_POLL
	bool some_recvd = false;
#endif /* SYNC_KLOOP_POLL */
	uint32_t num_slots;

	if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
		return;
	}

	num_slots = kring->nkr_num_slots;

	/* Get RX csb_atok and csb_ktoa pointers from the CSB. */
	/* NOTE(review): duplicated assignment (num_slots was already set
	 * just above) and the comment does not match the statement;
	 * harmless, but a candidate for cleanup. */
	num_slots = kring->nkr_num_slots;

	/* Disable application --> kernel notifications. */
	if (!a->direct) {
		csb_ktoa_kick_enable(csb_ktoa, 0);
	}
	/* Copy the application kring pointers from the CSB */
	sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);

	for (;;) {
		uint32_t hwtail;

		/* Netmap prologue */
		shadow_ring.tail = kring->rtail;
		if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) {
			/* Reinit ring and enable notifications. */
			netmap_ring_reinit(kring);
			if (!a->busy_wait) {
				csb_ktoa_kick_enable(csb_ktoa, 1);
			}
			break;
		}

		if (unlikely(netmap_debug & NM_DEBUG_RXSYNC)) {
			sync_kloop_kring_dump("pre rxsync", kring);
		}

		if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
			if (!a->busy_wait) {
				/* Re-enable notifications. */
				csb_ktoa_kick_enable(csb_ktoa, 1);
			}
			nm_prerr("rxsync() failed");
			break;
		}

		/*
		 * Finalize
		 * Copy kernel hwcur and hwtail into the CSB for the
		 * application sync()
		 */
		hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
		sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur, hwtail);
		if (kring->rtail != hwtail) {
			/* Something was received: reset the dry-cycle
			 * counter and remember to kick the application. */
			kring->rtail = hwtail;
#ifdef SYNC_KLOOP_POLL
			some_recvd = true;
#endif /* SYNC_KLOOP_POLL */
			dry_cycles = 0;
		} else {
			dry_cycles++;
		}

		if (unlikely(netmap_debug & NM_DEBUG_RXSYNC)) {
			sync_kloop_kring_dump("post rxsync", kring);
		}

#ifdef SYNC_KLOOP_POLL
		/* Interrupt the application if needed. */
		if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) {
			/* We could disable kernel --> application kicks here,
			 * to avoid spurious interrupts. */
			eventfd_signal(a->irq_ctx, 1);
			some_recvd = false;
		}
#endif /* SYNC_KLOOP_POLL */

		/* Read CSB to see if there is more work to do. */
		sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
		if (sync_kloop_norxslots(kring, shadow_ring.head)) {
			if (a->busy_wait) {
				break;
			}
			/*
			 * No more slots available for reception. We enable
			 * notification and go to sleep, waiting for a kick
			 * from the application when new receive slots are
			 * available.
			 */
			/* Re-enable notifications. */
			csb_ktoa_kick_enable(csb_ktoa, 1);
			/* Double check, with store-load memory barrier.
			 * Closes the race with an application that freed RX
			 * slots right before notifications were
			 * re-enabled. */
			nm_stld_barrier();
			sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
			if (!sync_kloop_norxslots(kring, shadow_ring.head)) {
				/* We won the race condition, more slots are
				 * available. Disable notifications and do
				 * another cycle. */
				csb_ktoa_kick_enable(csb_ktoa, 0);
				continue;
			}
			break;
		}

		hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
		if (unlikely(hwtail == kring->rhead ||
					dry_cycles >= SYNC_LOOP_RX_DRY_CYCLES_MAX)) {
			/* No more packets to be read from the backend. We
			 * stop and wait for a notification from the backend
			 * (netmap_rx_irq). */
			nm_prdis(1, "nr_hwtail: %d rhead: %d dry_cycles: %d",
					hwtail, kring->rhead, dry_cycles);
			break;
		}
	}

	nm_kr_put(kring);

#ifdef SYNC_KLOOP_POLL
	/* Interrupt the application if needed (final kick, in case we
	 * broke out of the loop with unsignalled receptions). */
	if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) {
		eventfd_signal(a->irq_ctx, 1);
	}
#endif /* SYNC_KLOOP_POLL */
}

#ifdef SYNC_KLOOP_POLL
struct sync_kloop_poll_ctx;
struct sync_kloop_poll_entry {
	/* Support for receiving notifications from
	 * a netmap ring or from the application. */
	struct file *filp;
	wait_queue_t wait;
	wait_queue_head_t *wqh;

	/* Support for sending notifications to the application. */
	struct eventfd_ctx *irq_ctx;
	struct file *irq_filp;

	/* Arguments for the ring processing function. Useful
	 * in case of custom wake-up function.
 */
	struct sync_kloop_ring_args *args;
	/* Back-pointer to the enclosing poll context. */
	struct sync_kloop_poll_ctx *parent;

};

struct sync_kloop_poll_ctx {
	poll_table wait_table;
	/* Index of the next entry to be filled by the queue proc. */
	unsigned int next_entry;
	/* Custom wake-up function for the next registered entry, or NULL
	 * to use the default (wake up the kloop thread). */
	int (*next_wake_fun)(wait_queue_t *, unsigned, int, void *);
	unsigned int num_entries;
	unsigned int num_tx_rings;
	unsigned int num_rings;
	/* First num_tx_rings entries are for the TX kicks.
	 * Then the RX kicks entries follow. The last two
	 * entries are for TX irq, and RX irq. */
	/* NOTE(review): GNU zero-length array; a C99 flexible array member
	 * (entries[]) would be the modern equivalent — confirm against the
	 * codebase convention before changing. */
	struct sync_kloop_poll_entry entries[0];
};

/* poll_table queue proc: called back by vfs_poll()-style machinery to
 * register the kloop on a wait queue. Records the file and wait queue
 * head in the next free entry, installing either the default wake-up
 * (wake the current thread) or the custom wake function selected in
 * poll_ctx->next_wake_fun. */
static void
sync_kloop_poll_table_queue_proc(struct file *file, wait_queue_head_t *wqh,
		poll_table *pt)
{
	struct sync_kloop_poll_ctx *poll_ctx =
		container_of(pt, struct sync_kloop_poll_ctx, wait_table);
	struct sync_kloop_poll_entry *entry = poll_ctx->entries +
		poll_ctx->next_entry;

	BUG_ON(poll_ctx->next_entry >= poll_ctx->num_entries);
	entry->wqh = wqh;
	entry->filp = file;
	/* Use the default wake up function. */
	if (poll_ctx->next_wake_fun == NULL) {
		init_waitqueue_entry(&entry->wait, current);
	} else {
		init_waitqueue_func_entry(&entry->wait,
		    poll_ctx->next_wake_fun);
	}
	add_wait_queue(wqh, &entry->wait);
}

/* Custom wake-up: a TX kick from the application arrived; process the
 * corresponding TX ring directly in wake-up context. */
static int
sync_kloop_tx_kick_wake_fun(wait_queue_t *wait, unsigned mode,
		int wake_flags, void *key)
{
	struct sync_kloop_poll_entry *entry =
		container_of(wait, struct sync_kloop_poll_entry, wait);

	netmap_sync_kloop_tx_ring(entry->args);

	return 0;
}

/* Custom wake-up: a TX interrupt from the backend arrived; forward the
 * notification to the application on every TX ring's eventfd. */
static int
sync_kloop_tx_irq_wake_fun(wait_queue_t *wait, unsigned mode,
		int wake_flags, void *key)
{
	struct sync_kloop_poll_entry *entry =
		container_of(wait, struct sync_kloop_poll_entry, wait);
	struct sync_kloop_poll_ctx *poll_ctx = entry->parent;
	int i;

	/* TX entries occupy indices [0, num_tx_rings). */
	for (i = 0; i < poll_ctx->num_tx_rings; i++) {
		struct eventfd_ctx *irq_ctx = poll_ctx->entries[i].irq_ctx;

		if (irq_ctx) {
			eventfd_signal(irq_ctx, 1);
		}
	}

	return 0;
}

/* Custom wake-up: an RX kick from the application arrived; process the
 * corresponding RX ring directly in wake-up context. */
static int
sync_kloop_rx_kick_wake_fun(wait_queue_t *wait, unsigned mode,
		int wake_flags, void *key)
{
	struct sync_kloop_poll_entry *entry =
		container_of(wait, struct sync_kloop_poll_entry, wait);

	netmap_sync_kloop_rx_ring(entry->args);

	return 0;
}

/* Custom wake-up: an RX interrupt from the backend arrived; forward the
 * notification to the application on every RX ring's eventfd. */
static int
sync_kloop_rx_irq_wake_fun(wait_queue_t *wait, unsigned mode,
		int wake_flags, void *key)
{
	struct sync_kloop_poll_entry *entry =
		container_of(wait, struct sync_kloop_poll_entry, wait);
	struct sync_kloop_poll_ctx *poll_ctx = entry->parent;
	int i;

	/* RX entries occupy indices [num_tx_rings, num_rings). */
	for (i = poll_ctx->num_tx_rings; i < poll_ctx->num_rings; i++) {
		struct eventfd_ctx *irq_ctx = poll_ctx->entries[i].irq_ctx;

		if (irq_ctx) {
			eventfd_signal(irq_ctx, 1);
		}
	}

	return 0;
}
#endif /* SYNC_KLOOP_POLL */

int
netmap_sync_kloop(struct netmap_priv_d *priv, struct nmreq_header *hdr)
{
571b6e66be2SVincenzo Maffione struct nmreq_sync_kloop_start *req = 572b6e66be2SVincenzo Maffione (struct nmreq_sync_kloop_start *)(uintptr_t)hdr->nr_body; 573b6e66be2SVincenzo Maffione struct nmreq_opt_sync_kloop_eventfds *eventfds_opt = NULL; 574b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL 575b6e66be2SVincenzo Maffione struct sync_kloop_poll_ctx *poll_ctx = NULL; 576b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */ 577b6e66be2SVincenzo Maffione int num_rx_rings, num_tx_rings, num_rings; 578f79ba6d7SVincenzo Maffione struct sync_kloop_ring_args *args = NULL; 579b6e66be2SVincenzo Maffione uint32_t sleep_us = req->sleep_us; 580b6e66be2SVincenzo Maffione struct nm_csb_atok* csb_atok_base; 581b6e66be2SVincenzo Maffione struct nm_csb_ktoa* csb_ktoa_base; 582b6e66be2SVincenzo Maffione struct netmap_adapter *na; 583b6e66be2SVincenzo Maffione struct nmreq_option *opt; 5845faab778SVincenzo Maffione bool na_could_sleep = false; 5855faab778SVincenzo Maffione bool busy_wait = true; 5865faab778SVincenzo Maffione bool direct_tx = false; 5875faab778SVincenzo Maffione bool direct_rx = false; 588b6e66be2SVincenzo Maffione int err = 0; 589b6e66be2SVincenzo Maffione int i; 590b6e66be2SVincenzo Maffione 591b6e66be2SVincenzo Maffione if (sleep_us > 1000000) { 592b6e66be2SVincenzo Maffione /* We do not accept sleeping for more than a second. 
*/ 593b6e66be2SVincenzo Maffione return EINVAL; 594b6e66be2SVincenzo Maffione } 595b6e66be2SVincenzo Maffione 596b6e66be2SVincenzo Maffione if (priv->np_nifp == NULL) { 597b6e66be2SVincenzo Maffione return ENXIO; 598b6e66be2SVincenzo Maffione } 599b6e66be2SVincenzo Maffione mb(); /* make sure following reads are not from cache */ 600b6e66be2SVincenzo Maffione 601b6e66be2SVincenzo Maffione na = priv->np_na; 602b6e66be2SVincenzo Maffione if (!nm_netmap_on(na)) { 603b6e66be2SVincenzo Maffione return ENXIO; 604b6e66be2SVincenzo Maffione } 605b6e66be2SVincenzo Maffione 606b6e66be2SVincenzo Maffione NMG_LOCK(); 607b6e66be2SVincenzo Maffione /* Make sure the application is working in CSB mode. */ 608b6e66be2SVincenzo Maffione if (!priv->np_csb_atok_base || !priv->np_csb_ktoa_base) { 609b6e66be2SVincenzo Maffione NMG_UNLOCK(); 610b6e66be2SVincenzo Maffione nm_prerr("sync-kloop on %s requires " 611b6e66be2SVincenzo Maffione "NETMAP_REQ_OPT_CSB option", na->name); 612b6e66be2SVincenzo Maffione return EINVAL; 613b6e66be2SVincenzo Maffione } 614b6e66be2SVincenzo Maffione 615b6e66be2SVincenzo Maffione csb_atok_base = priv->np_csb_atok_base; 616b6e66be2SVincenzo Maffione csb_ktoa_base = priv->np_csb_ktoa_base; 617b6e66be2SVincenzo Maffione 618b6e66be2SVincenzo Maffione /* Make sure that no kloop is currently running. 
*/ 619b6e66be2SVincenzo Maffione if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) { 620b6e66be2SVincenzo Maffione err = EBUSY; 621b6e66be2SVincenzo Maffione } 622b6e66be2SVincenzo Maffione priv->np_kloop_state |= NM_SYNC_KLOOP_RUNNING; 623b6e66be2SVincenzo Maffione NMG_UNLOCK(); 624b6e66be2SVincenzo Maffione if (err) { 625b6e66be2SVincenzo Maffione return err; 626b6e66be2SVincenzo Maffione } 627b6e66be2SVincenzo Maffione 628b6e66be2SVincenzo Maffione num_rx_rings = priv->np_qlast[NR_RX] - priv->np_qfirst[NR_RX]; 629b6e66be2SVincenzo Maffione num_tx_rings = priv->np_qlast[NR_TX] - priv->np_qfirst[NR_TX]; 630b6e66be2SVincenzo Maffione num_rings = num_tx_rings + num_rx_rings; 631b6e66be2SVincenzo Maffione 632f79ba6d7SVincenzo Maffione args = nm_os_malloc(num_rings * sizeof(args[0])); 633f79ba6d7SVincenzo Maffione if (!args) { 634f79ba6d7SVincenzo Maffione err = ENOMEM; 635f79ba6d7SVincenzo Maffione goto out; 636f79ba6d7SVincenzo Maffione } 637f79ba6d7SVincenzo Maffione 6385faab778SVincenzo Maffione /* Prepare the arguments for netmap_sync_kloop_tx_ring() 6395faab778SVincenzo Maffione * and netmap_sync_kloop_rx_ring(). 
*/ 6405faab778SVincenzo Maffione for (i = 0; i < num_tx_rings; i++) { 6415faab778SVincenzo Maffione struct sync_kloop_ring_args *a = args + i; 6425faab778SVincenzo Maffione 6435faab778SVincenzo Maffione a->kring = NMR(na, NR_TX)[i + priv->np_qfirst[NR_TX]]; 6445faab778SVincenzo Maffione a->csb_atok = csb_atok_base + i; 6455faab778SVincenzo Maffione a->csb_ktoa = csb_ktoa_base + i; 6465faab778SVincenzo Maffione a->busy_wait = busy_wait; 6475faab778SVincenzo Maffione a->direct = direct_tx; 6485faab778SVincenzo Maffione } 6495faab778SVincenzo Maffione for (i = 0; i < num_rx_rings; i++) { 6505faab778SVincenzo Maffione struct sync_kloop_ring_args *a = args + num_tx_rings + i; 6515faab778SVincenzo Maffione 6525faab778SVincenzo Maffione a->kring = NMR(na, NR_RX)[i + priv->np_qfirst[NR_RX]]; 6535faab778SVincenzo Maffione a->csb_atok = csb_atok_base + num_tx_rings + i; 6545faab778SVincenzo Maffione a->csb_ktoa = csb_ktoa_base + num_tx_rings + i; 6555faab778SVincenzo Maffione a->busy_wait = busy_wait; 6565faab778SVincenzo Maffione a->direct = direct_rx; 6575faab778SVincenzo Maffione } 6585faab778SVincenzo Maffione 659b6e66be2SVincenzo Maffione /* Validate notification options. 
*/ 660253b2ec1SVincenzo Maffione opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_SYNC_KLOOP_MODE); 6615faab778SVincenzo Maffione if (opt != NULL) { 6625faab778SVincenzo Maffione struct nmreq_opt_sync_kloop_mode *mode_opt = 6635faab778SVincenzo Maffione (struct nmreq_opt_sync_kloop_mode *)opt; 6645faab778SVincenzo Maffione 6655faab778SVincenzo Maffione direct_tx = !!(mode_opt->mode & NM_OPT_SYNC_KLOOP_DIRECT_TX); 6665faab778SVincenzo Maffione direct_rx = !!(mode_opt->mode & NM_OPT_SYNC_KLOOP_DIRECT_RX); 6675faab778SVincenzo Maffione if (mode_opt->mode & ~(NM_OPT_SYNC_KLOOP_DIRECT_TX | 6685faab778SVincenzo Maffione NM_OPT_SYNC_KLOOP_DIRECT_RX)) { 6695faab778SVincenzo Maffione opt->nro_status = err = EINVAL; 6705faab778SVincenzo Maffione goto out; 6715faab778SVincenzo Maffione } 6725faab778SVincenzo Maffione opt->nro_status = 0; 6735faab778SVincenzo Maffione } 674253b2ec1SVincenzo Maffione opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS); 675b6e66be2SVincenzo Maffione if (opt != NULL) { 676b6e66be2SVincenzo Maffione if (opt->nro_size != sizeof(*eventfds_opt) + 677b6e66be2SVincenzo Maffione sizeof(eventfds_opt->eventfds[0]) * num_rings) { 678b6e66be2SVincenzo Maffione /* Option size not consistent with the number of 679b6e66be2SVincenzo Maffione * entries. */ 680b6e66be2SVincenzo Maffione opt->nro_status = err = EINVAL; 681b6e66be2SVincenzo Maffione goto out; 682b6e66be2SVincenzo Maffione } 683b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL 684b6e66be2SVincenzo Maffione eventfds_opt = (struct nmreq_opt_sync_kloop_eventfds *)opt; 685b6e66be2SVincenzo Maffione opt->nro_status = 0; 6865faab778SVincenzo Maffione 6875faab778SVincenzo Maffione /* Check if some ioeventfd entry is not defined, and force sleep 6885faab778SVincenzo Maffione * synchronization in that case. 
*/ 6895faab778SVincenzo Maffione busy_wait = false; 6905faab778SVincenzo Maffione for (i = 0; i < num_rings; i++) { 6915faab778SVincenzo Maffione if (eventfds_opt->eventfds[i].ioeventfd < 0) { 6925faab778SVincenzo Maffione busy_wait = true; 6935faab778SVincenzo Maffione break; 6945faab778SVincenzo Maffione } 6955faab778SVincenzo Maffione } 6965faab778SVincenzo Maffione 6975faab778SVincenzo Maffione if (busy_wait && (direct_tx || direct_rx)) { 6985faab778SVincenzo Maffione /* For direct processing we need all the 6995faab778SVincenzo Maffione * ioeventfds to be valid. */ 7005faab778SVincenzo Maffione opt->nro_status = err = EINVAL; 7015faab778SVincenzo Maffione goto out; 7025faab778SVincenzo Maffione } 7035faab778SVincenzo Maffione 704b6e66be2SVincenzo Maffione /* We need 2 poll entries for TX and RX notifications coming 705b6e66be2SVincenzo Maffione * from the netmap adapter, plus one entries per ring for the 706b6e66be2SVincenzo Maffione * notifications coming from the application. */ 707b6e66be2SVincenzo Maffione poll_ctx = nm_os_malloc(sizeof(*poll_ctx) + 7085faab778SVincenzo Maffione (num_rings + 2) * sizeof(poll_ctx->entries[0])); 709b6e66be2SVincenzo Maffione init_poll_funcptr(&poll_ctx->wait_table, 710b6e66be2SVincenzo Maffione sync_kloop_poll_table_queue_proc); 711b6e66be2SVincenzo Maffione poll_ctx->num_entries = 2 + num_rings; 7125faab778SVincenzo Maffione poll_ctx->num_tx_rings = num_tx_rings; 7135faab778SVincenzo Maffione poll_ctx->num_rings = num_rings; 714b6e66be2SVincenzo Maffione poll_ctx->next_entry = 0; 7155faab778SVincenzo Maffione poll_ctx->next_wake_fun = NULL; 7165faab778SVincenzo Maffione 7175faab778SVincenzo Maffione if (direct_tx && (na->na_flags & NAF_BDG_MAYSLEEP)) { 7185faab778SVincenzo Maffione /* In direct mode, VALE txsync is called from 7195faab778SVincenzo Maffione * wake-up context, where it is not possible 7205faab778SVincenzo Maffione * to sleep. 
7215faab778SVincenzo Maffione */ 7225faab778SVincenzo Maffione na->na_flags &= ~NAF_BDG_MAYSLEEP; 7235faab778SVincenzo Maffione na_could_sleep = true; 7245faab778SVincenzo Maffione } 7255faab778SVincenzo Maffione 7265faab778SVincenzo Maffione for (i = 0; i < num_rings + 2; i++) { 7275faab778SVincenzo Maffione poll_ctx->entries[i].args = args + i; 7285faab778SVincenzo Maffione poll_ctx->entries[i].parent = poll_ctx; 7295faab778SVincenzo Maffione } 7305faab778SVincenzo Maffione 731b6e66be2SVincenzo Maffione /* Poll for notifications coming from the applications through 732b6e66be2SVincenzo Maffione * eventfds. */ 7335faab778SVincenzo Maffione for (i = 0; i < num_rings; i++, poll_ctx->next_entry++) { 7345faab778SVincenzo Maffione struct eventfd_ctx *irq = NULL; 7355faab778SVincenzo Maffione struct file *filp = NULL; 736b6e66be2SVincenzo Maffione unsigned long mask; 7375faab778SVincenzo Maffione bool tx_ring = (i < num_tx_rings); 738b6e66be2SVincenzo Maffione 7395faab778SVincenzo Maffione if (eventfds_opt->eventfds[i].irqfd >= 0) { 7405faab778SVincenzo Maffione filp = eventfd_fget( 7415faab778SVincenzo Maffione eventfds_opt->eventfds[i].irqfd); 742b6e66be2SVincenzo Maffione if (IS_ERR(filp)) { 743b6e66be2SVincenzo Maffione err = PTR_ERR(filp); 744b6e66be2SVincenzo Maffione goto out; 745b6e66be2SVincenzo Maffione } 746b6e66be2SVincenzo Maffione irq = eventfd_ctx_fileget(filp); 747b6e66be2SVincenzo Maffione if (IS_ERR(irq)) { 748b6e66be2SVincenzo Maffione err = PTR_ERR(irq); 749b6e66be2SVincenzo Maffione goto out; 750b6e66be2SVincenzo Maffione } 751b6e66be2SVincenzo Maffione } 7525faab778SVincenzo Maffione poll_ctx->entries[i].irq_filp = filp; 7535faab778SVincenzo Maffione poll_ctx->entries[i].irq_ctx = irq; 7545faab778SVincenzo Maffione poll_ctx->entries[i].args->busy_wait = busy_wait; 7555faab778SVincenzo Maffione /* Don't let netmap_sync_kloop_*x_ring() use 7565faab778SVincenzo Maffione * IRQs in direct mode. 
*/ 7575faab778SVincenzo Maffione poll_ctx->entries[i].args->irq_ctx = 7585faab778SVincenzo Maffione ((tx_ring && direct_tx) || 7595faab778SVincenzo Maffione (!tx_ring && direct_rx)) ? NULL : 7605faab778SVincenzo Maffione poll_ctx->entries[i].irq_ctx; 7615faab778SVincenzo Maffione poll_ctx->entries[i].args->direct = 7625faab778SVincenzo Maffione (tx_ring ? direct_tx : direct_rx); 7635faab778SVincenzo Maffione 7645faab778SVincenzo Maffione if (!busy_wait) { 7655faab778SVincenzo Maffione filp = eventfd_fget( 7665faab778SVincenzo Maffione eventfds_opt->eventfds[i].ioeventfd); 7675faab778SVincenzo Maffione if (IS_ERR(filp)) { 7685faab778SVincenzo Maffione err = PTR_ERR(filp); 7695faab778SVincenzo Maffione goto out; 7705faab778SVincenzo Maffione } 7715faab778SVincenzo Maffione if (tx_ring && direct_tx) { 7725faab778SVincenzo Maffione /* Override the wake up function 7735faab778SVincenzo Maffione * so that it can directly call 7745faab778SVincenzo Maffione * netmap_sync_kloop_tx_ring(). 7755faab778SVincenzo Maffione */ 7765faab778SVincenzo Maffione poll_ctx->next_wake_fun = 7775faab778SVincenzo Maffione sync_kloop_tx_kick_wake_fun; 7785faab778SVincenzo Maffione } else if (!tx_ring && direct_rx) { 7795faab778SVincenzo Maffione /* Same for direct RX. 
*/ 7805faab778SVincenzo Maffione poll_ctx->next_wake_fun = 7815faab778SVincenzo Maffione sync_kloop_rx_kick_wake_fun; 7825faab778SVincenzo Maffione } else { 7835faab778SVincenzo Maffione poll_ctx->next_wake_fun = NULL; 7845faab778SVincenzo Maffione } 7855faab778SVincenzo Maffione mask = filp->f_op->poll(filp, 7865faab778SVincenzo Maffione &poll_ctx->wait_table); 7875faab778SVincenzo Maffione if (mask & POLLERR) { 7885faab778SVincenzo Maffione err = EINVAL; 7895faab778SVincenzo Maffione goto out; 7905faab778SVincenzo Maffione } 7915faab778SVincenzo Maffione } 7925faab778SVincenzo Maffione } 7935faab778SVincenzo Maffione 794b6e66be2SVincenzo Maffione /* Poll for notifications coming from the netmap rings bound to 795b6e66be2SVincenzo Maffione * this file descriptor. */ 7965faab778SVincenzo Maffione if (!busy_wait) { 797b6e66be2SVincenzo Maffione NMG_LOCK(); 7985faab778SVincenzo Maffione /* In direct mode, override the wake up function so 7995faab778SVincenzo Maffione * that it can forward the netmap_tx_irq() to the 8005faab778SVincenzo Maffione * guest. */ 8015faab778SVincenzo Maffione poll_ctx->next_wake_fun = direct_tx ? 8025faab778SVincenzo Maffione sync_kloop_tx_irq_wake_fun : NULL; 803a56136a1SVincenzo Maffione poll_wait(priv->np_filp, priv->np_si[NR_TX], 804a56136a1SVincenzo Maffione &poll_ctx->wait_table); 8055faab778SVincenzo Maffione poll_ctx->next_entry++; 8065faab778SVincenzo Maffione 8075faab778SVincenzo Maffione poll_ctx->next_wake_fun = direct_rx ? 
8085faab778SVincenzo Maffione sync_kloop_rx_irq_wake_fun : NULL; 809a56136a1SVincenzo Maffione poll_wait(priv->np_filp, priv->np_si[NR_RX], 810a56136a1SVincenzo Maffione &poll_ctx->wait_table); 8115faab778SVincenzo Maffione poll_ctx->next_entry++; 812b6e66be2SVincenzo Maffione NMG_UNLOCK(); 813b6e66be2SVincenzo Maffione } 814b6e66be2SVincenzo Maffione #else /* SYNC_KLOOP_POLL */ 815b6e66be2SVincenzo Maffione opt->nro_status = EOPNOTSUPP; 816b6e66be2SVincenzo Maffione goto out; 817b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */ 818b6e66be2SVincenzo Maffione } 819b6e66be2SVincenzo Maffione 8205faab778SVincenzo Maffione nm_prinf("kloop busy_wait %u, direct_tx %u, direct_rx %u, " 8215faab778SVincenzo Maffione "na_could_sleep %u", busy_wait, direct_tx, direct_rx, 8225faab778SVincenzo Maffione na_could_sleep); 823f79ba6d7SVincenzo Maffione 824b6e66be2SVincenzo Maffione /* Main loop. */ 825b6e66be2SVincenzo Maffione for (;;) { 826b6e66be2SVincenzo Maffione if (unlikely(NM_ACCESS_ONCE(priv->np_kloop_state) & NM_SYNC_KLOOP_STOPPING)) { 827b6e66be2SVincenzo Maffione break; 828b6e66be2SVincenzo Maffione } 829b6e66be2SVincenzo Maffione 830b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL 8315faab778SVincenzo Maffione if (!busy_wait) { 832f79ba6d7SVincenzo Maffione /* It is important to set the task state as 833f79ba6d7SVincenzo Maffione * interruptible before processing any TX/RX ring, 834f79ba6d7SVincenzo Maffione * so that if a notification on ring Y comes after 835f79ba6d7SVincenzo Maffione * we have processed ring Y, but before we call 836f79ba6d7SVincenzo Maffione * schedule(), we don't miss it. This is true because 837*591a9b5eSGordon Bergling * the wake up function will change the task state, 838f79ba6d7SVincenzo Maffione * and therefore the schedule_timeout() call below 839f79ba6d7SVincenzo Maffione * will observe the change). 
840f79ba6d7SVincenzo Maffione */ 841f79ba6d7SVincenzo Maffione set_current_state(TASK_INTERRUPTIBLE); 842f79ba6d7SVincenzo Maffione } 843b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */ 844b6e66be2SVincenzo Maffione 845b6e66be2SVincenzo Maffione /* Process all the TX rings bound to this file descriptor. */ 8465faab778SVincenzo Maffione for (i = 0; !direct_tx && i < num_tx_rings; i++) { 847f79ba6d7SVincenzo Maffione struct sync_kloop_ring_args *a = args + i; 848f79ba6d7SVincenzo Maffione netmap_sync_kloop_tx_ring(a); 849b6e66be2SVincenzo Maffione } 850b6e66be2SVincenzo Maffione 851b6e66be2SVincenzo Maffione /* Process all the RX rings bound to this file descriptor. */ 8525faab778SVincenzo Maffione for (i = 0; !direct_rx && i < num_rx_rings; i++) { 853f79ba6d7SVincenzo Maffione struct sync_kloop_ring_args *a = args + num_tx_rings + i; 854f79ba6d7SVincenzo Maffione netmap_sync_kloop_rx_ring(a); 855b6e66be2SVincenzo Maffione } 856b6e66be2SVincenzo Maffione 8575faab778SVincenzo Maffione if (busy_wait) { 858b6e66be2SVincenzo Maffione /* Default synchronization method: sleep for a while. */ 859b6e66be2SVincenzo Maffione usleep_range(sleep_us, sleep_us); 860b6e66be2SVincenzo Maffione } 8615faab778SVincenzo Maffione #ifdef SYNC_KLOOP_POLL 8625faab778SVincenzo Maffione else { 8635faab778SVincenzo Maffione /* Yield to the scheduler waiting for a notification 8645faab778SVincenzo Maffione * to come either from netmap or the application. */ 8655faab778SVincenzo Maffione schedule_timeout(msecs_to_jiffies(3000)); 8665faab778SVincenzo Maffione } 8675faab778SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */ 868b6e66be2SVincenzo Maffione } 869b6e66be2SVincenzo Maffione out: 870b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL 871b6e66be2SVincenzo Maffione if (poll_ctx) { 872b6e66be2SVincenzo Maffione /* Stop polling from netmap and the eventfds, and deallocate 873b6e66be2SVincenzo Maffione * the poll context. 
*/ 8745faab778SVincenzo Maffione if (!busy_wait) { 875b6e66be2SVincenzo Maffione __set_current_state(TASK_RUNNING); 8765faab778SVincenzo Maffione } 877b6e66be2SVincenzo Maffione for (i = 0; i < poll_ctx->next_entry; i++) { 878b6e66be2SVincenzo Maffione struct sync_kloop_poll_entry *entry = 879b6e66be2SVincenzo Maffione poll_ctx->entries + i; 880b6e66be2SVincenzo Maffione 881b6e66be2SVincenzo Maffione if (entry->wqh) 882b6e66be2SVincenzo Maffione remove_wait_queue(entry->wqh, &entry->wait); 883b6e66be2SVincenzo Maffione /* We did not get a reference to the eventfds, but 884b6e66be2SVincenzo Maffione * don't do that on netmap file descriptors (since 885b6e66be2SVincenzo Maffione * a reference was not taken. */ 886b6e66be2SVincenzo Maffione if (entry->filp && entry->filp != priv->np_filp) 887b6e66be2SVincenzo Maffione fput(entry->filp); 888b6e66be2SVincenzo Maffione if (entry->irq_ctx) 889b6e66be2SVincenzo Maffione eventfd_ctx_put(entry->irq_ctx); 890b6e66be2SVincenzo Maffione if (entry->irq_filp) 891b6e66be2SVincenzo Maffione fput(entry->irq_filp); 892b6e66be2SVincenzo Maffione } 893b6e66be2SVincenzo Maffione nm_os_free(poll_ctx); 894b6e66be2SVincenzo Maffione poll_ctx = NULL; 895b6e66be2SVincenzo Maffione } 896b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */ 897b6e66be2SVincenzo Maffione 898f79ba6d7SVincenzo Maffione if (args) { 899f79ba6d7SVincenzo Maffione nm_os_free(args); 900f79ba6d7SVincenzo Maffione args = NULL; 901f79ba6d7SVincenzo Maffione } 902f79ba6d7SVincenzo Maffione 903b6e66be2SVincenzo Maffione /* Reset the kloop state. 
*/ 904b6e66be2SVincenzo Maffione NMG_LOCK(); 905b6e66be2SVincenzo Maffione priv->np_kloop_state = 0; 9065faab778SVincenzo Maffione if (na_could_sleep) { 9075faab778SVincenzo Maffione na->na_flags |= NAF_BDG_MAYSLEEP; 9085faab778SVincenzo Maffione } 909b6e66be2SVincenzo Maffione NMG_UNLOCK(); 910b6e66be2SVincenzo Maffione 911b6e66be2SVincenzo Maffione return err; 912b6e66be2SVincenzo Maffione } 913b6e66be2SVincenzo Maffione 914b6e66be2SVincenzo Maffione int 915b6e66be2SVincenzo Maffione netmap_sync_kloop_stop(struct netmap_priv_d *priv) 916b6e66be2SVincenzo Maffione { 917a56136a1SVincenzo Maffione struct netmap_adapter *na; 918b6e66be2SVincenzo Maffione bool running = true; 919b6e66be2SVincenzo Maffione int err = 0; 920b6e66be2SVincenzo Maffione 921a56136a1SVincenzo Maffione if (priv->np_nifp == NULL) { 922a56136a1SVincenzo Maffione return ENXIO; 923a56136a1SVincenzo Maffione } 924a56136a1SVincenzo Maffione mb(); /* make sure following reads are not from cache */ 925a56136a1SVincenzo Maffione 926a56136a1SVincenzo Maffione na = priv->np_na; 927a56136a1SVincenzo Maffione if (!nm_netmap_on(na)) { 928a56136a1SVincenzo Maffione return ENXIO; 929a56136a1SVincenzo Maffione } 930a56136a1SVincenzo Maffione 931a56136a1SVincenzo Maffione /* Set the kloop stopping flag. */ 932b6e66be2SVincenzo Maffione NMG_LOCK(); 933b6e66be2SVincenzo Maffione priv->np_kloop_state |= NM_SYNC_KLOOP_STOPPING; 934b6e66be2SVincenzo Maffione NMG_UNLOCK(); 935a56136a1SVincenzo Maffione 936a56136a1SVincenzo Maffione /* Send a notification to the kloop, in case it is blocked in 937a56136a1SVincenzo Maffione * schedule_timeout(). We can use either RX or TX, because the 938a56136a1SVincenzo Maffione * kloop is waiting on both. */ 939a56136a1SVincenzo Maffione nm_os_selwakeup(priv->np_si[NR_RX]); 940a56136a1SVincenzo Maffione 941a56136a1SVincenzo Maffione /* Wait for the kloop to actually terminate. 
	 */
	while (running) {
		/* Poll the RUNNING flag under the global lock until the
		 * kloop thread clears it on its way out. */
		usleep_range(1000, 1500);
		NMG_LOCK();
		running = (NM_ACCESS_ONCE(priv->np_kloop_state)
				& NM_SYNC_KLOOP_RUNNING);
		NMG_UNLOCK();
	}

	return err;
}

#ifdef WITH_PTNETMAP
/*
 * Guest ptnetmap txsync()/rxsync() routines, used in ptnet device drivers.
 * These routines are reused across the different operating systems supported
 * by netmap.
 */

/*
 * Reconcile host and guest views of the transmit ring.
 *
 * Guest user wants to transmit packets up to the one before ring->head,
 * and guest kernel knows tx_ring->hwcur is the first packet unsent
 * by the host kernel.
 *
 * We push out as many packets as possible, and possibly
 * reclaim buffers from previously completed transmission.
 *
 * Notifications from the host are enabled only if the user guest would
 * block (no space in the ring).
 *
 * Returns true if the caller should kick (notify) the host.
 */
bool
netmap_pt_guest_txsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
	struct netmap_kring *kring, int flags)
{
	bool notify = false;

	/* Disable notifications */
	atok->appl_need_kick = 0;

	/*
	 * First part: tell the host to process the new packets,
	 * updating the CSB.
	 */
	kring->nr_hwcur = ktoa->hwcur;
	nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);

	/* Ask for a kick from a guest to the host if needed. */
	if (((kring->rhead != kring->nr_hwcur || nm_kr_wouldblock(kring))
		&& NM_ACCESS_ONCE(ktoa->kern_need_kick)) ||
			(flags & NAF_FORCE_RECLAIM)) {
		atok->sync_flags = flags;
		notify = true;
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (nm_kr_wouldblock(kring) || (flags & NAF_FORCE_RECLAIM)) {
		nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
					&kring->nr_hwcur);
	}

	/*
	 * No more room in the ring for new transmissions. The user thread will
	 * go to sleep and we need to be notified by the host when more free
	 * space is available.
	 */
	if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
		/* Re-enable notifications. */
		atok->appl_need_kick = 1;
		/* Double check, with store-load memory barrier. */
		nm_stld_barrier();
		nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
					&kring->nr_hwcur);
		/* If there is new free space, disable notifications */
		if (unlikely(!nm_kr_wouldblock(kring))) {
			atok->appl_need_kick = 0;
		}
	}

	nm_prdis(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
		kring->name, atok->head, atok->cur, ktoa->hwtail,
		kring->rhead, kring->rcur, kring->nr_hwtail);

	return notify;
}

/*
 * Reconcile host and guest view of the receive ring.
 *
 * Update hwcur/hwtail from host (reading from CSB).
 *
 * If guest user has released buffers up to the one before ring->head, we
 * also give them to the host.
 *
 * Notifications from the host are enabled only if the user guest would
 * block (no more completed slots in the ring).
 *
 * Returns true if the caller should kick (notify) the host.
 */
bool
netmap_pt_guest_rxsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
	struct netmap_kring *kring, int flags)
{
	bool notify = false;

	/* Disable notifications */
	atok->appl_need_kick = 0;

	/*
	 * First part: import newly received packets, by updating the kring
	 * hwtail to the hwtail known from the host (read from the CSB).
	 * This also updates the kring hwcur.
	 */
	nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail, &kring->nr_hwcur);
	kring->nr_kflags &= ~NKR_PENDINTR;

	/*
	 * Second part: tell the host about the slots that guest user has
	 * released, by updating cur and head in the CSB.
	 */
	if (kring->rhead != kring->nr_hwcur) {
		nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);
	}

	/*
	 * No more completed RX slots. The user thread will go to sleep and
	 * we need to be notified by the host when more RX slots have been
	 * completed.
	 */
	if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
		/* Re-enable notifications. */
		atok->appl_need_kick = 1;
		/* Double check, with store-load memory barrier. */
		nm_stld_barrier();
		nm_sync_kloop_appl_read(ktoa, &kring->nr_hwtail,
					&kring->nr_hwcur);
		/* If there are new slots, disable notifications. */
		if (!nm_kr_wouldblock(kring)) {
			atok->appl_need_kick = 0;
		}
	}

	/* Ask for a kick from the guest to the host if needed. */
	if ((kring->rhead != kring->nr_hwcur || nm_kr_wouldblock(kring))
		&& NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
		atok->sync_flags = flags;
		notify = true;
	}

	nm_prdis(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
		kring->name, atok->head, atok->cur, ktoa->hwtail,
		kring->rhead, kring->rcur, kring->nr_hwtail);

	return notify;
}

/*
 * Callbacks for ptnet drivers: nm_krings_create, nm_krings_delete, nm_dtor.
 */

/* nm_krings_create callback for ptnet adapters: create the hardware
 * krings and share them with the driver-private adapter. Returns 0 or
 * the error from netmap_hw_krings_create(). */
int
ptnet_nm_krings_create(struct netmap_adapter *na)
{
	struct netmap_pt_guest_adapter *ptna =
		(struct netmap_pt_guest_adapter *)na; /* Upcast. */
	struct netmap_adapter *na_nm = &ptna->hwup.up;
	struct netmap_adapter *na_dr = &ptna->dr.up;
	int ret;

	/* Nothing to do if the backend is already in use. */
	if (ptna->backend_users) {
		return 0;
	}

	/* Create krings on the public netmap adapter. */
	ret = netmap_hw_krings_create(na_nm);
	if (ret) {
		return ret;
	}

	/* Copy krings into the netmap adapter private to the driver. */
	na_dr->tx_rings = na_nm->tx_rings;
	na_dr->rx_rings = na_nm->rx_rings;

	return 0;
}

/* nm_krings_delete callback for ptnet adapters: detach the shared
 * krings from the driver-private adapter and delete them. */
void
ptnet_nm_krings_delete(struct netmap_adapter *na)
{
	struct netmap_pt_guest_adapter *ptna =
		(struct netmap_pt_guest_adapter *)na; /* Upcast. */
	struct netmap_adapter *na_nm = &ptna->hwup.up;
	struct netmap_adapter *na_dr = &ptna->dr.up;

	/* Do not delete while the backend is still in use. */
	if (ptna->backend_users) {
		return;
	}

	na_dr->tx_rings = NULL;
	na_dr->rx_rings = NULL;

	netmap_hw_krings_delete(na_nm);
}

/* nm_dtor callback for ptnet adapters: release the memory allocator
 * reference held by the driver-private adapter and clean up. */
void
ptnet_nm_dtor(struct netmap_adapter *na)
{
	struct netmap_pt_guest_adapter *ptna =
		(struct netmap_pt_guest_adapter *)na;

	netmap_mem_put(ptna->dr.up.nm_mem);
	memset(&ptna->dr, 0, sizeof(ptna->dr));
	netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp);
}

/* Attach a pass-through guest adapter: set up the pt-guest memory
 * allocator, attach the public adapter, and initialize the private
 * adapter used only by the ptnet driver. Returns 0 or an errno value. */
int
netmap_pt_guest_attach(struct netmap_adapter *arg,
		unsigned int nifp_offset, unsigned int memid)
{
	struct netmap_pt_guest_adapter *ptna;
	struct ifnet *ifp = arg ? arg->ifp : NULL;
	int error;

	/* get allocator */
	arg->nm_mem = netmap_mem_pt_guest_new(ifp, nifp_offset, memid);
	if (arg->nm_mem == NULL)
		return ENOMEM;
	arg->na_flags |= NAF_MEM_OWNER;
	error = netmap_attach_ext(arg, sizeof(struct netmap_pt_guest_adapter), 1);
	if (error)
		return error;

	/* get the netmap_pt_guest_adapter */
	ptna = (struct netmap_pt_guest_adapter *) NA(ifp);

	/* Initialize a separate pass-through netmap adapter that is going to
	 * be used by the ptnet driver only, and so never exposed to netmap
	 * applications. We only need a subset of the available fields. */
	memset(&ptna->dr, 0, sizeof(ptna->dr));
	ptna->dr.up.ifp = ifp;
	ptna->dr.up.nm_mem = netmap_mem_get(ptna->hwup.up.nm_mem);
	ptna->dr.up.nm_config = ptna->hwup.up.nm_config;

	ptna->backend_users = 0;

	return 0;
}

#endif /* WITH_PTNETMAP */