xref: /freebsd/sys/dev/netmap/netmap_kloop.c (revision b6e66be22bdce2aadcf52ee6230faa1e6cd3f805)
1*b6e66be2SVincenzo Maffione /*
2*b6e66be2SVincenzo Maffione  * Copyright (C) 2016-2018 Vincenzo Maffione
3*b6e66be2SVincenzo Maffione  * Copyright (C) 2015 Stefano Garzarella
4*b6e66be2SVincenzo Maffione  * All rights reserved.
5*b6e66be2SVincenzo Maffione  *
6*b6e66be2SVincenzo Maffione  * Redistribution and use in source and binary forms, with or without
7*b6e66be2SVincenzo Maffione  * modification, are permitted provided that the following conditions
8*b6e66be2SVincenzo Maffione  * are met:
9*b6e66be2SVincenzo Maffione  *   1. Redistributions of source code must retain the above copyright
10*b6e66be2SVincenzo Maffione  *      notice, this list of conditions and the following disclaimer.
11*b6e66be2SVincenzo Maffione  *   2. Redistributions in binary form must reproduce the above copyright
12*b6e66be2SVincenzo Maffione  *      notice, this list of conditions and the following disclaimer in the
13*b6e66be2SVincenzo Maffione  *      documentation and/or other materials provided with the distribution.
14*b6e66be2SVincenzo Maffione  *
15*b6e66be2SVincenzo Maffione  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16*b6e66be2SVincenzo Maffione  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17*b6e66be2SVincenzo Maffione  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18*b6e66be2SVincenzo Maffione  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19*b6e66be2SVincenzo Maffione  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20*b6e66be2SVincenzo Maffione  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21*b6e66be2SVincenzo Maffione  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22*b6e66be2SVincenzo Maffione  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23*b6e66be2SVincenzo Maffione  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24*b6e66be2SVincenzo Maffione  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25*b6e66be2SVincenzo Maffione  * SUCH DAMAGE.
26*b6e66be2SVincenzo Maffione  *
27*b6e66be2SVincenzo Maffione  * $FreeBSD$
28*b6e66be2SVincenzo Maffione  */
29*b6e66be2SVincenzo Maffione 
30*b6e66be2SVincenzo Maffione /*
31*b6e66be2SVincenzo Maffione  * common headers
32*b6e66be2SVincenzo Maffione  */
33*b6e66be2SVincenzo Maffione #if defined(__FreeBSD__)
34*b6e66be2SVincenzo Maffione #include <sys/cdefs.h>
35*b6e66be2SVincenzo Maffione #include <sys/param.h>
36*b6e66be2SVincenzo Maffione #include <sys/kernel.h>
37*b6e66be2SVincenzo Maffione #include <sys/types.h>
38*b6e66be2SVincenzo Maffione #include <sys/selinfo.h>
39*b6e66be2SVincenzo Maffione #include <sys/socket.h>
40*b6e66be2SVincenzo Maffione #include <net/if.h>
41*b6e66be2SVincenzo Maffione #include <net/if_var.h>
42*b6e66be2SVincenzo Maffione #include <machine/bus.h>
43*b6e66be2SVincenzo Maffione 
44*b6e66be2SVincenzo Maffione #define usleep_range(_1, _2) \
45*b6e66be2SVincenzo Maffione         pause_sbt("sync-kloop-sleep", SBT_1US * _1, SBT_1US * 1, C_ABSOLUTE)
46*b6e66be2SVincenzo Maffione 
47*b6e66be2SVincenzo Maffione #elif defined(linux)
48*b6e66be2SVincenzo Maffione #include <bsd_glue.h>
49*b6e66be2SVincenzo Maffione #include <linux/file.h>
50*b6e66be2SVincenzo Maffione #include <linux/eventfd.h>
51*b6e66be2SVincenzo Maffione #endif
52*b6e66be2SVincenzo Maffione 
53*b6e66be2SVincenzo Maffione #include <net/netmap.h>
54*b6e66be2SVincenzo Maffione #include <dev/netmap/netmap_kern.h>
55*b6e66be2SVincenzo Maffione #include <net/netmap_virt.h>
56*b6e66be2SVincenzo Maffione #include <dev/netmap/netmap_mem2.h>
57*b6e66be2SVincenzo Maffione 
58*b6e66be2SVincenzo Maffione /* Support for eventfd-based notifications. */
59*b6e66be2SVincenzo Maffione #if defined(linux)
60*b6e66be2SVincenzo Maffione #define SYNC_KLOOP_POLL
61*b6e66be2SVincenzo Maffione #endif
62*b6e66be2SVincenzo Maffione 
/* Write kring pointers (hwcur, hwtail) to the CSB (Communication Status
 * Block) shared with the application.
 * This routine is coupled with ptnetmap_guest_read_kring_csb(). */
static inline void
sync_kloop_kernel_write(struct nm_csb_ktoa __user *ptr, uint32_t hwcur,
			   uint32_t hwtail)
{
	/*
	 * The same scheme used in ptnetmap_guest_write_kring_csb() applies here.
	 * We allow the application to read a value of hwcur more recent than the value
	 * of hwtail, since this would anyway result in a consistent view of the
	 * ring state (and hwcur can never wraparound hwtail, since hwcur must be
	 * behind head).
	 *
	 * The following memory barrier scheme is used to make this happen:
	 *
	 *          Application          Kernel
	 *
	 *          STORE(hwcur)         LOAD(hwtail)
	 *          mb() <-------------> mb()
	 *          STORE(hwtail)        LOAD(hwcur)
	 */
	CSB_WRITE(ptr, hwcur, hwcur);
	nm_stst_barrier();	/* order the two stores as described above */
	CSB_WRITE(ptr, hwtail, hwtail);
}
88*b6e66be2SVincenzo Maffione 
/* Read kring pointers (head, cur, sync_flags) from the CSB into a local
 * shadow copy of the netmap ring. Only the head, cur and flags fields of
 * shadow_ring are filled in; num_slots is currently unused here.
 * This routine is coupled with ptnetmap_guest_write_kring_csb(). */
static inline void
sync_kloop_kernel_read(struct nm_csb_atok __user *ptr,
			  struct netmap_ring *shadow_ring,
			  uint32_t num_slots)
{
	/*
	 * We place a memory barrier to make sure that the update of head never
	 * overtakes the update of cur.
	 * (see explanation in ptnetmap_guest_write_kring_csb).
	 */
	CSB_READ(ptr, head, shadow_ring->head);
	nm_stst_barrier();
	CSB_READ(ptr, cur, shadow_ring->cur);
	CSB_READ(ptr, sync_flags, shadow_ring->flags);
}
106*b6e66be2SVincenzo Maffione 
/* Enable (val != 0) or disable (val == 0) application --> kernel kicks,
 * by publishing the kern_need_kick flag in the kernel-to-application CSB. */
static inline void
csb_ktoa_kick_enable(struct nm_csb_ktoa __user *csb_ktoa, uint32_t val)
{
	CSB_WRITE(csb_ktoa, kern_need_kick, val);
}
113*b6e66be2SVincenzo Maffione 
/* Are application interrupts (kernel --> application notifications)
 * enabled or disabled? Returns the appl_need_kick flag read from the
 * application-to-kernel CSB. */
static inline uint32_t
csb_atok_intr_enabled(struct nm_csb_atok __user *csb_atok)
{
	uint32_t v;

	CSB_READ(csb_atok, appl_need_kick, v);

	return v;
}
124*b6e66be2SVincenzo Maffione 
/* Debug helper: log the current state of a kring (kernel pointers and
 * application-requested pointers), prefixed by the given title. */
static inline void
sync_kloop_kring_dump(const char *title, const struct netmap_kring *kring)
{
	nm_prinf("%s - name: %s hwcur: %d hwtail: %d "
		"rhead: %d rcur: %d rtail: %d",
		title, kring->name, kring->nr_hwcur, kring->nr_hwtail,
		kring->rhead, kring->rcur, kring->rtail);
}
133*b6e66be2SVincenzo Maffione 
/* Arguments for one TX or RX ring processed by the sync kloop. */
struct sync_kloop_ring_args {
	struct netmap_kring *kring;	/* kring bound to this loop iteration */
	struct nm_csb_atok *csb_atok;	/* application-to-kernel CSB entry */
	struct nm_csb_ktoa *csb_ktoa;	/* kernel-to-application CSB entry */
#ifdef SYNC_KLOOP_POLL
	/* eventfd used to notify the application, or NULL. */
	struct eventfd_ctx *irq_ctx;
#endif /* SYNC_KLOOP_POLL */
};
142*b6e66be2SVincenzo Maffione 
/*
 * Process the TX ring bound to the sync kloop: repeatedly run txsync on
 * behalf of the application, publishing kernel pointers back to the CSB,
 * until there is no more work to do or an error occurs.
 *
 * Application-to-kernel notifications are disabled while the loop runs
 * and re-enabled before sleeping; the "Doublecheck" below closes the
 * race between re-enabling notifications and new work being posted.
 */
static void
netmap_sync_kloop_tx_ring(const struct sync_kloop_ring_args *a)
{
	struct netmap_kring *kring = a->kring;
	struct nm_csb_atok *csb_atok = a->csb_atok;
	struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa;
	struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
	bool more_txspace = false;	/* tail advanced since last notify */
	uint32_t num_slots;
	int batch;			/* new slots exposed by the app */

	num_slots = kring->nkr_num_slots;

	/* Disable application --> kernel notifications. */
	csb_ktoa_kick_enable(csb_ktoa, 0);
	/* Copy the application kring pointers from the CSB */
	sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);

	for (;;) {
		/* Slots made available by the application since the last
		 * txsync, modulo the ring size. */
		batch = shadow_ring.head - kring->nr_hwcur;
		if (batch < 0)
			batch += num_slots;

#ifdef PTN_TX_BATCH_LIM
		if (batch > PTN_TX_BATCH_LIM(num_slots)) {
			/* If application moves ahead too fast, let's cut the move so
			 * that we don't exceed our batch limit. */
			uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots);

			if (head_lim >= num_slots)
				head_lim -= num_slots;
			nm_prdis(1, "batch: %d head: %d head_lim: %d", batch, shadow_ring.head,
					head_lim);
			shadow_ring.head = head_lim;
			batch = PTN_TX_BATCH_LIM(num_slots);
		}
#endif /* PTN_TX_BATCH_LIM */

		/* If less than half of the ring is still available, ask
		 * txsync to also reclaim completed slots. */
		if (nm_kr_txspace(kring) <= (num_slots >> 1)) {
			shadow_ring.flags |= NAF_FORCE_RECLAIM;
		}

		/* Netmap prologue: validate the application pointers. */
		shadow_ring.tail = kring->rtail;
		if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) {
			/* Reinit ring and enable notifications. */
			netmap_ring_reinit(kring);
			csb_ktoa_kick_enable(csb_ktoa, 1);
			break;
		}

		if (unlikely(netmap_debug & NM_DEBUG_TXSYNC)) {
			sync_kloop_kring_dump("pre txsync", kring);
		}

		if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
			/* Reenable notifications. */
			csb_ktoa_kick_enable(csb_ktoa, 1);
			nm_prerr("txsync() failed");
			break;
		}

		/*
		 * Finalize
		 * Copy kernel hwcur and hwtail into the CSB for the application sync(), and
		 * do the nm_sync_finalize.
		 */
		sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur,
				kring->nr_hwtail);
		if (kring->rtail != kring->nr_hwtail) {
			/* Some more room available in the parent adapter. */
			kring->rtail = kring->nr_hwtail;
			more_txspace = true;
		}

		if (unlikely(netmap_debug & NM_DEBUG_TXSYNC)) {
			sync_kloop_kring_dump("post txsync", kring);
		}

		/* Interrupt the application if needed. */
#ifdef SYNC_KLOOP_POLL
		if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) {
			/* Signal the application eventfd and clear the local
			 * flag, so we do not notify again for the same room. */
			eventfd_signal(a->irq_ctx, 1);
			more_txspace = false;
		}
#endif /* SYNC_KLOOP_POLL */

		/* Read CSB to see if there is more work to do. */
		sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
		if (shadow_ring.head == kring->rhead) {
			/*
			 * No more packets to transmit. We enable notifications and
			 * go to sleep, waiting for a kick from the application when
			 * new slots are ready for transmission.
			 */
			/* Reenable notifications. */
			csb_ktoa_kick_enable(csb_ktoa, 1);
			/* Doublecheck. */
			sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
			if (shadow_ring.head != kring->rhead) {
				/* We won the race condition, there are more packets to
				 * transmit. Disable notifications and do another cycle */
				csb_ktoa_kick_enable(csb_ktoa, 0);
				continue;
			}
			break;
		}

		if (nm_kr_txempty(kring)) {
			/* No more available TX slots. We stop waiting for a notification
			 * from the backend (netmap_tx_irq). */
			nm_prdis(1, "TX ring");
			break;
		}
	}

#ifdef SYNC_KLOOP_POLL
	/* Final notification, in case room became available after the last
	 * in-loop signal. */
	if (a->irq_ctx && more_txspace && csb_atok_intr_enabled(csb_atok)) {
		eventfd_signal(a->irq_ctx, 1);
	}
#endif /* SYNC_KLOOP_POLL */
}
266*b6e66be2SVincenzo Maffione 
/* Maximum number of consecutive RX cycles allowed without receiving any packet. */
268*b6e66be2SVincenzo Maffione #define SYNC_LOOP_RX_DRY_CYCLES_MAX	2
269*b6e66be2SVincenzo Maffione 
/* Return nonzero if the RX kring has no slots available for reception,
 * i.e. the kernel hwtail has reached the slot right behind the
 * application head (g_head). */
static inline int
sync_kloop_norxslots(struct netmap_kring *kring, uint32_t g_head)
{
	return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head,
				kring->nkr_num_slots - 1));
}
276*b6e66be2SVincenzo Maffione 
277*b6e66be2SVincenzo Maffione static void
278*b6e66be2SVincenzo Maffione netmap_sync_kloop_rx_ring(const struct sync_kloop_ring_args *a)
279*b6e66be2SVincenzo Maffione {
280*b6e66be2SVincenzo Maffione 
281*b6e66be2SVincenzo Maffione 	struct netmap_kring *kring = a->kring;
282*b6e66be2SVincenzo Maffione 	struct nm_csb_atok *csb_atok = a->csb_atok;
283*b6e66be2SVincenzo Maffione 	struct nm_csb_ktoa *csb_ktoa = a->csb_ktoa;
284*b6e66be2SVincenzo Maffione 	struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
285*b6e66be2SVincenzo Maffione 	int dry_cycles = 0;
286*b6e66be2SVincenzo Maffione 	bool some_recvd = false;
287*b6e66be2SVincenzo Maffione 	uint32_t num_slots;
288*b6e66be2SVincenzo Maffione 
289*b6e66be2SVincenzo Maffione 	num_slots = kring->nkr_num_slots;
290*b6e66be2SVincenzo Maffione 
291*b6e66be2SVincenzo Maffione 	/* Get RX csb_atok and csb_ktoa pointers from the CSB. */
292*b6e66be2SVincenzo Maffione 	num_slots = kring->nkr_num_slots;
293*b6e66be2SVincenzo Maffione 
294*b6e66be2SVincenzo Maffione 	/* Disable notifications. */
295*b6e66be2SVincenzo Maffione 	csb_ktoa_kick_enable(csb_ktoa, 0);
296*b6e66be2SVincenzo Maffione 	/* Copy the application kring pointers from the CSB */
297*b6e66be2SVincenzo Maffione 	sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
298*b6e66be2SVincenzo Maffione 
299*b6e66be2SVincenzo Maffione 	for (;;) {
300*b6e66be2SVincenzo Maffione 		uint32_t hwtail;
301*b6e66be2SVincenzo Maffione 
302*b6e66be2SVincenzo Maffione 		/* Netmap prologue */
303*b6e66be2SVincenzo Maffione 		shadow_ring.tail = kring->rtail;
304*b6e66be2SVincenzo Maffione 		if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) {
305*b6e66be2SVincenzo Maffione 			/* Reinit ring and enable notifications. */
306*b6e66be2SVincenzo Maffione 			netmap_ring_reinit(kring);
307*b6e66be2SVincenzo Maffione 			csb_ktoa_kick_enable(csb_ktoa, 1);
308*b6e66be2SVincenzo Maffione 			break;
309*b6e66be2SVincenzo Maffione 		}
310*b6e66be2SVincenzo Maffione 
311*b6e66be2SVincenzo Maffione 		if (unlikely(netmap_debug & NM_DEBUG_RXSYNC)) {
312*b6e66be2SVincenzo Maffione 			sync_kloop_kring_dump("pre rxsync", kring);
313*b6e66be2SVincenzo Maffione 		}
314*b6e66be2SVincenzo Maffione 
315*b6e66be2SVincenzo Maffione 		if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
316*b6e66be2SVincenzo Maffione 			/* Reenable notifications. */
317*b6e66be2SVincenzo Maffione 			csb_ktoa_kick_enable(csb_ktoa, 1);
318*b6e66be2SVincenzo Maffione 			nm_prerr("rxsync() failed");
319*b6e66be2SVincenzo Maffione 			break;
320*b6e66be2SVincenzo Maffione 		}
321*b6e66be2SVincenzo Maffione 
322*b6e66be2SVincenzo Maffione 		/*
323*b6e66be2SVincenzo Maffione 		 * Finalize
324*b6e66be2SVincenzo Maffione 		 * Copy kernel hwcur and hwtail into the CSB for the application sync()
325*b6e66be2SVincenzo Maffione 		 */
326*b6e66be2SVincenzo Maffione 		hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
327*b6e66be2SVincenzo Maffione 		sync_kloop_kernel_write(csb_ktoa, kring->nr_hwcur, hwtail);
328*b6e66be2SVincenzo Maffione 		if (kring->rtail != hwtail) {
329*b6e66be2SVincenzo Maffione 			kring->rtail = hwtail;
330*b6e66be2SVincenzo Maffione 			some_recvd = true;
331*b6e66be2SVincenzo Maffione 			dry_cycles = 0;
332*b6e66be2SVincenzo Maffione 		} else {
333*b6e66be2SVincenzo Maffione 			dry_cycles++;
334*b6e66be2SVincenzo Maffione 		}
335*b6e66be2SVincenzo Maffione 
336*b6e66be2SVincenzo Maffione 		if (unlikely(netmap_debug & NM_DEBUG_RXSYNC)) {
337*b6e66be2SVincenzo Maffione 			sync_kloop_kring_dump("post rxsync", kring);
338*b6e66be2SVincenzo Maffione 		}
339*b6e66be2SVincenzo Maffione 
340*b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
341*b6e66be2SVincenzo Maffione 		/* Interrupt the application if needed. */
342*b6e66be2SVincenzo Maffione 		if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) {
343*b6e66be2SVincenzo Maffione 			/* Disable application kick to avoid sending unnecessary kicks */
344*b6e66be2SVincenzo Maffione 			eventfd_signal(a->irq_ctx, 1);
345*b6e66be2SVincenzo Maffione 			some_recvd = false;
346*b6e66be2SVincenzo Maffione 		}
347*b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
348*b6e66be2SVincenzo Maffione 
349*b6e66be2SVincenzo Maffione 		/* Read CSB to see if there is more work to do. */
350*b6e66be2SVincenzo Maffione 		sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
351*b6e66be2SVincenzo Maffione 		if (sync_kloop_norxslots(kring, shadow_ring.head)) {
352*b6e66be2SVincenzo Maffione 			/*
353*b6e66be2SVincenzo Maffione 			 * No more slots available for reception. We enable notification and
354*b6e66be2SVincenzo Maffione 			 * go to sleep, waiting for a kick from the application when new receive
355*b6e66be2SVincenzo Maffione 			 * slots are available.
356*b6e66be2SVincenzo Maffione 			 */
357*b6e66be2SVincenzo Maffione 			/* Reenable notifications. */
358*b6e66be2SVincenzo Maffione 			csb_ktoa_kick_enable(csb_ktoa, 1);
359*b6e66be2SVincenzo Maffione 			/* Doublecheck. */
360*b6e66be2SVincenzo Maffione 			sync_kloop_kernel_read(csb_atok, &shadow_ring, num_slots);
361*b6e66be2SVincenzo Maffione 			if (!sync_kloop_norxslots(kring, shadow_ring.head)) {
362*b6e66be2SVincenzo Maffione 				/* We won the race condition, more slots are available. Disable
363*b6e66be2SVincenzo Maffione 				 * notifications and do another cycle. */
364*b6e66be2SVincenzo Maffione 				csb_ktoa_kick_enable(csb_ktoa, 0);
365*b6e66be2SVincenzo Maffione 				continue;
366*b6e66be2SVincenzo Maffione 			}
367*b6e66be2SVincenzo Maffione 			break;
368*b6e66be2SVincenzo Maffione 		}
369*b6e66be2SVincenzo Maffione 
370*b6e66be2SVincenzo Maffione 		hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
371*b6e66be2SVincenzo Maffione 		if (unlikely(hwtail == kring->rhead ||
372*b6e66be2SVincenzo Maffione 					dry_cycles >= SYNC_LOOP_RX_DRY_CYCLES_MAX)) {
373*b6e66be2SVincenzo Maffione 			/* No more packets to be read from the backend. We stop and
374*b6e66be2SVincenzo Maffione 			 * wait for a notification from the backend (netmap_rx_irq). */
375*b6e66be2SVincenzo Maffione 			nm_prdis(1, "nr_hwtail: %d rhead: %d dry_cycles: %d",
376*b6e66be2SVincenzo Maffione 					hwtail, kring->rhead, dry_cycles);
377*b6e66be2SVincenzo Maffione 			break;
378*b6e66be2SVincenzo Maffione 		}
379*b6e66be2SVincenzo Maffione 	}
380*b6e66be2SVincenzo Maffione 
381*b6e66be2SVincenzo Maffione 	nm_kr_put(kring);
382*b6e66be2SVincenzo Maffione 
383*b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
384*b6e66be2SVincenzo Maffione 	/* Interrupt the application if needed. */
385*b6e66be2SVincenzo Maffione 	if (a->irq_ctx && some_recvd && csb_atok_intr_enabled(csb_atok)) {
386*b6e66be2SVincenzo Maffione 		eventfd_signal(a->irq_ctx, 1);
387*b6e66be2SVincenzo Maffione 	}
388*b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
389*b6e66be2SVincenzo Maffione }
390*b6e66be2SVincenzo Maffione 
391*b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
/* One polled notification source for the sync kloop. */
struct sync_kloop_poll_entry {
	/* Support for receiving notifications from
	 * a netmap ring or from the application. */
	struct file *filp;		/* polled file (eventfd or netmap fd) */
	wait_queue_t wait;		/* our entry on the file's wait queue */
	wait_queue_head_t *wqh;		/* wait queue we are registered on */

	/* Support for sending notifications to the application. */
	struct eventfd_ctx *irq_ctx;	/* eventfd context used to signal */
	struct file *irq_filp;		/* file backing irq_ctx */
};
403*b6e66be2SVincenzo Maffione 
404*b6e66be2SVincenzo Maffione struct sync_kloop_poll_ctx {
405*b6e66be2SVincenzo Maffione 	poll_table wait_table;
406*b6e66be2SVincenzo Maffione 	unsigned int next_entry;
407*b6e66be2SVincenzo Maffione 	unsigned int num_entries;
408*b6e66be2SVincenzo Maffione 	struct sync_kloop_poll_entry entries[0];
409*b6e66be2SVincenzo Maffione };
410*b6e66be2SVincenzo Maffione 
/* Callback invoked by the kernel poll machinery (through the poll_table
 * stored in sync_kloop_poll_ctx) for each wait queue that a polled file
 * registers. It records the file and wait queue in the next free poll
 * entry and adds the current task to that wait queue. */
static void
sync_kloop_poll_table_queue_proc(struct file *file, wait_queue_head_t *wqh,
				poll_table *pt)
{
	struct sync_kloop_poll_ctx *poll_ctx =
		container_of(pt, struct sync_kloop_poll_ctx, wait_table);
	struct sync_kloop_poll_entry *entry = poll_ctx->entries +
						poll_ctx->next_entry;

	/* The caller must have sized entries[] for all polled files. */
	BUG_ON(poll_ctx->next_entry >= poll_ctx->num_entries);
	entry->wqh = wqh;
	entry->filp = file;
	/* Use the default wake up function. */
	init_waitqueue_entry(&entry->wait, current);
	add_wait_queue(wqh, &entry->wait);
	poll_ctx->next_entry++;
}
428*b6e66be2SVincenzo Maffione #endif  /* SYNC_KLOOP_POLL */
429*b6e66be2SVincenzo Maffione 
430*b6e66be2SVincenzo Maffione int
431*b6e66be2SVincenzo Maffione netmap_sync_kloop(struct netmap_priv_d *priv, struct nmreq_header *hdr)
432*b6e66be2SVincenzo Maffione {
433*b6e66be2SVincenzo Maffione 	struct nmreq_sync_kloop_start *req =
434*b6e66be2SVincenzo Maffione 		(struct nmreq_sync_kloop_start *)(uintptr_t)hdr->nr_body;
435*b6e66be2SVincenzo Maffione 	struct nmreq_opt_sync_kloop_eventfds *eventfds_opt = NULL;
436*b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
437*b6e66be2SVincenzo Maffione 	struct sync_kloop_poll_ctx *poll_ctx = NULL;
438*b6e66be2SVincenzo Maffione #endif  /* SYNC_KLOOP_POLL */
439*b6e66be2SVincenzo Maffione 	int num_rx_rings, num_tx_rings, num_rings;
440*b6e66be2SVincenzo Maffione 	uint32_t sleep_us = req->sleep_us;
441*b6e66be2SVincenzo Maffione 	struct nm_csb_atok* csb_atok_base;
442*b6e66be2SVincenzo Maffione 	struct nm_csb_ktoa* csb_ktoa_base;
443*b6e66be2SVincenzo Maffione 	struct netmap_adapter *na;
444*b6e66be2SVincenzo Maffione 	struct nmreq_option *opt;
445*b6e66be2SVincenzo Maffione 	int err = 0;
446*b6e66be2SVincenzo Maffione 	int i;
447*b6e66be2SVincenzo Maffione 
448*b6e66be2SVincenzo Maffione 	if (sleep_us > 1000000) {
449*b6e66be2SVincenzo Maffione 		/* We do not accept sleeping for more than a second. */
450*b6e66be2SVincenzo Maffione 		return EINVAL;
451*b6e66be2SVincenzo Maffione 	}
452*b6e66be2SVincenzo Maffione 
453*b6e66be2SVincenzo Maffione 	if (priv->np_nifp == NULL) {
454*b6e66be2SVincenzo Maffione 		return ENXIO;
455*b6e66be2SVincenzo Maffione 	}
456*b6e66be2SVincenzo Maffione 	mb(); /* make sure following reads are not from cache */
457*b6e66be2SVincenzo Maffione 
458*b6e66be2SVincenzo Maffione 	na = priv->np_na;
459*b6e66be2SVincenzo Maffione 	if (!nm_netmap_on(na)) {
460*b6e66be2SVincenzo Maffione 		return ENXIO;
461*b6e66be2SVincenzo Maffione 	}
462*b6e66be2SVincenzo Maffione 
463*b6e66be2SVincenzo Maffione 	NMG_LOCK();
464*b6e66be2SVincenzo Maffione 	/* Make sure the application is working in CSB mode. */
465*b6e66be2SVincenzo Maffione 	if (!priv->np_csb_atok_base || !priv->np_csb_ktoa_base) {
466*b6e66be2SVincenzo Maffione 		NMG_UNLOCK();
467*b6e66be2SVincenzo Maffione 		nm_prerr("sync-kloop on %s requires "
468*b6e66be2SVincenzo Maffione 				"NETMAP_REQ_OPT_CSB option", na->name);
469*b6e66be2SVincenzo Maffione 		return EINVAL;
470*b6e66be2SVincenzo Maffione 	}
471*b6e66be2SVincenzo Maffione 
472*b6e66be2SVincenzo Maffione 	csb_atok_base = priv->np_csb_atok_base;
473*b6e66be2SVincenzo Maffione 	csb_ktoa_base = priv->np_csb_ktoa_base;
474*b6e66be2SVincenzo Maffione 
475*b6e66be2SVincenzo Maffione 	/* Make sure that no kloop is currently running. */
476*b6e66be2SVincenzo Maffione 	if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
477*b6e66be2SVincenzo Maffione 		err = EBUSY;
478*b6e66be2SVincenzo Maffione 	}
479*b6e66be2SVincenzo Maffione 	priv->np_kloop_state |= NM_SYNC_KLOOP_RUNNING;
480*b6e66be2SVincenzo Maffione 	NMG_UNLOCK();
481*b6e66be2SVincenzo Maffione 	if (err) {
482*b6e66be2SVincenzo Maffione 		return err;
483*b6e66be2SVincenzo Maffione 	}
484*b6e66be2SVincenzo Maffione 
485*b6e66be2SVincenzo Maffione 	num_rx_rings = priv->np_qlast[NR_RX] - priv->np_qfirst[NR_RX];
486*b6e66be2SVincenzo Maffione 	num_tx_rings = priv->np_qlast[NR_TX] - priv->np_qfirst[NR_TX];
487*b6e66be2SVincenzo Maffione 	num_rings = num_tx_rings + num_rx_rings;
488*b6e66be2SVincenzo Maffione 
489*b6e66be2SVincenzo Maffione 	/* Validate notification options. */
490*b6e66be2SVincenzo Maffione 	opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options,
491*b6e66be2SVincenzo Maffione 				NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS);
492*b6e66be2SVincenzo Maffione 	if (opt != NULL) {
493*b6e66be2SVincenzo Maffione 		err = nmreq_checkduplicate(opt);
494*b6e66be2SVincenzo Maffione 		if (err) {
495*b6e66be2SVincenzo Maffione 			opt->nro_status = err;
496*b6e66be2SVincenzo Maffione 			goto out;
497*b6e66be2SVincenzo Maffione 		}
498*b6e66be2SVincenzo Maffione 		if (opt->nro_size != sizeof(*eventfds_opt) +
499*b6e66be2SVincenzo Maffione 			sizeof(eventfds_opt->eventfds[0]) * num_rings) {
500*b6e66be2SVincenzo Maffione 			/* Option size not consistent with the number of
501*b6e66be2SVincenzo Maffione 			 * entries. */
502*b6e66be2SVincenzo Maffione 			opt->nro_status = err = EINVAL;
503*b6e66be2SVincenzo Maffione 			goto out;
504*b6e66be2SVincenzo Maffione 		}
505*b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
506*b6e66be2SVincenzo Maffione 		eventfds_opt = (struct nmreq_opt_sync_kloop_eventfds *)opt;
507*b6e66be2SVincenzo Maffione 		opt->nro_status = 0;
508*b6e66be2SVincenzo Maffione 		/* We need 2 poll entries for TX and RX notifications coming
509*b6e66be2SVincenzo Maffione 		 * from the netmap adapter, plus one entries per ring for the
510*b6e66be2SVincenzo Maffione 		 * notifications coming from the application. */
511*b6e66be2SVincenzo Maffione 		poll_ctx = nm_os_malloc(sizeof(*poll_ctx) +
512*b6e66be2SVincenzo Maffione 				(2 + num_rings) * sizeof(poll_ctx->entries[0]));
513*b6e66be2SVincenzo Maffione 		init_poll_funcptr(&poll_ctx->wait_table,
514*b6e66be2SVincenzo Maffione 					sync_kloop_poll_table_queue_proc);
515*b6e66be2SVincenzo Maffione 		poll_ctx->num_entries = 2 + num_rings;
516*b6e66be2SVincenzo Maffione 		poll_ctx->next_entry = 0;
517*b6e66be2SVincenzo Maffione 		/* Poll for notifications coming from the applications through
518*b6e66be2SVincenzo Maffione 		 * eventfds . */
519*b6e66be2SVincenzo Maffione 		for (i = 0; i < num_rings; i++) {
520*b6e66be2SVincenzo Maffione 			struct eventfd_ctx *irq;
521*b6e66be2SVincenzo Maffione 			struct file *filp;
522*b6e66be2SVincenzo Maffione 			unsigned long mask;
523*b6e66be2SVincenzo Maffione 
524*b6e66be2SVincenzo Maffione 			filp = eventfd_fget(eventfds_opt->eventfds[i].ioeventfd);
525*b6e66be2SVincenzo Maffione 			if (IS_ERR(filp)) {
526*b6e66be2SVincenzo Maffione 				err = PTR_ERR(filp);
527*b6e66be2SVincenzo Maffione 				goto out;
528*b6e66be2SVincenzo Maffione 			}
529*b6e66be2SVincenzo Maffione 			mask = filp->f_op->poll(filp, &poll_ctx->wait_table);
530*b6e66be2SVincenzo Maffione 			if (mask & POLLERR) {
531*b6e66be2SVincenzo Maffione 				err = EINVAL;
532*b6e66be2SVincenzo Maffione 				goto out;
533*b6e66be2SVincenzo Maffione 			}
534*b6e66be2SVincenzo Maffione 
535*b6e66be2SVincenzo Maffione 			filp = eventfd_fget(eventfds_opt->eventfds[i].irqfd);
536*b6e66be2SVincenzo Maffione 			if (IS_ERR(filp)) {
537*b6e66be2SVincenzo Maffione 				err = PTR_ERR(filp);
538*b6e66be2SVincenzo Maffione 				goto out;
539*b6e66be2SVincenzo Maffione 			}
540*b6e66be2SVincenzo Maffione 			poll_ctx->entries[i].irq_filp = filp;
541*b6e66be2SVincenzo Maffione 			irq = eventfd_ctx_fileget(filp);
542*b6e66be2SVincenzo Maffione 			if (IS_ERR(irq)) {
543*b6e66be2SVincenzo Maffione 				err = PTR_ERR(irq);
544*b6e66be2SVincenzo Maffione 				goto out;
545*b6e66be2SVincenzo Maffione 			}
546*b6e66be2SVincenzo Maffione 			poll_ctx->entries[i].irq_ctx = irq;
547*b6e66be2SVincenzo Maffione 		}
548*b6e66be2SVincenzo Maffione 		/* Poll for notifications coming from the netmap rings bound to
549*b6e66be2SVincenzo Maffione 		 * this file descriptor. */
550*b6e66be2SVincenzo Maffione 		{
551*b6e66be2SVincenzo Maffione 			NM_SELINFO_T *si[NR_TXRX];
552*b6e66be2SVincenzo Maffione 
553*b6e66be2SVincenzo Maffione 			NMG_LOCK();
554*b6e66be2SVincenzo Maffione 			si[NR_RX] = nm_si_user(priv, NR_RX) ? &na->si[NR_RX] :
555*b6e66be2SVincenzo Maffione 				&na->rx_rings[priv->np_qfirst[NR_RX]]->si;
556*b6e66be2SVincenzo Maffione 			si[NR_TX] = nm_si_user(priv, NR_TX) ? &na->si[NR_TX] :
557*b6e66be2SVincenzo Maffione 				&na->tx_rings[priv->np_qfirst[NR_TX]]->si;
558*b6e66be2SVincenzo Maffione 			NMG_UNLOCK();
559*b6e66be2SVincenzo Maffione 			poll_wait(priv->np_filp, si[NR_RX], &poll_ctx->wait_table);
560*b6e66be2SVincenzo Maffione 			poll_wait(priv->np_filp, si[NR_TX], &poll_ctx->wait_table);
561*b6e66be2SVincenzo Maffione 		}
562*b6e66be2SVincenzo Maffione #else   /* SYNC_KLOOP_POLL */
563*b6e66be2SVincenzo Maffione 		opt->nro_status = EOPNOTSUPP;
564*b6e66be2SVincenzo Maffione 		goto out;
565*b6e66be2SVincenzo Maffione #endif  /* SYNC_KLOOP_POLL */
566*b6e66be2SVincenzo Maffione 	}
567*b6e66be2SVincenzo Maffione 
568*b6e66be2SVincenzo Maffione 	/* Main loop. */
569*b6e66be2SVincenzo Maffione 	for (;;) {
570*b6e66be2SVincenzo Maffione 		if (unlikely(NM_ACCESS_ONCE(priv->np_kloop_state) & NM_SYNC_KLOOP_STOPPING)) {
571*b6e66be2SVincenzo Maffione 			break;
572*b6e66be2SVincenzo Maffione 		}
573*b6e66be2SVincenzo Maffione 
574*b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
575*b6e66be2SVincenzo Maffione 		if (poll_ctx)
576*b6e66be2SVincenzo Maffione 			__set_current_state(TASK_INTERRUPTIBLE);
577*b6e66be2SVincenzo Maffione #endif  /* SYNC_KLOOP_POLL */
578*b6e66be2SVincenzo Maffione 
579*b6e66be2SVincenzo Maffione 		/* Process all the TX rings bound to this file descriptor. */
580*b6e66be2SVincenzo Maffione 		for (i = 0; i < num_tx_rings; i++) {
581*b6e66be2SVincenzo Maffione 			struct sync_kloop_ring_args a = {
582*b6e66be2SVincenzo Maffione 				.kring = NMR(na, NR_TX)[i + priv->np_qfirst[NR_TX]],
583*b6e66be2SVincenzo Maffione 				.csb_atok = csb_atok_base + i,
584*b6e66be2SVincenzo Maffione 				.csb_ktoa = csb_ktoa_base + i,
585*b6e66be2SVincenzo Maffione 			};
586*b6e66be2SVincenzo Maffione 
587*b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
588*b6e66be2SVincenzo Maffione 			if (poll_ctx)
589*b6e66be2SVincenzo Maffione 				a.irq_ctx = poll_ctx->entries[i].irq_ctx;
590*b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
591*b6e66be2SVincenzo Maffione 			if (unlikely(nm_kr_tryget(a.kring, 1, NULL))) {
592*b6e66be2SVincenzo Maffione 				continue;
593*b6e66be2SVincenzo Maffione 			}
594*b6e66be2SVincenzo Maffione 			netmap_sync_kloop_tx_ring(&a);
595*b6e66be2SVincenzo Maffione 			nm_kr_put(a.kring);
596*b6e66be2SVincenzo Maffione 		}
597*b6e66be2SVincenzo Maffione 
598*b6e66be2SVincenzo Maffione 		/* Process all the RX rings bound to this file descriptor. */
599*b6e66be2SVincenzo Maffione 		for (i = 0; i < num_rx_rings; i++) {
600*b6e66be2SVincenzo Maffione 			struct sync_kloop_ring_args a = {
601*b6e66be2SVincenzo Maffione 				.kring = NMR(na, NR_RX)[i + priv->np_qfirst[NR_RX]],
602*b6e66be2SVincenzo Maffione 				.csb_atok = csb_atok_base + num_tx_rings + i,
603*b6e66be2SVincenzo Maffione 				.csb_ktoa = csb_ktoa_base + num_tx_rings + i,
604*b6e66be2SVincenzo Maffione 			};
605*b6e66be2SVincenzo Maffione 
606*b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
607*b6e66be2SVincenzo Maffione 			if (poll_ctx)
608*b6e66be2SVincenzo Maffione 				a.irq_ctx = poll_ctx->entries[num_tx_rings + i].irq_ctx;
609*b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
610*b6e66be2SVincenzo Maffione 
611*b6e66be2SVincenzo Maffione 			if (unlikely(nm_kr_tryget(a.kring, 1, NULL))) {
612*b6e66be2SVincenzo Maffione 				continue;
613*b6e66be2SVincenzo Maffione 			}
614*b6e66be2SVincenzo Maffione 			netmap_sync_kloop_rx_ring(&a);
615*b6e66be2SVincenzo Maffione 			nm_kr_put(a.kring);
616*b6e66be2SVincenzo Maffione 		}
617*b6e66be2SVincenzo Maffione 
618*b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
619*b6e66be2SVincenzo Maffione 		if (poll_ctx) {
620*b6e66be2SVincenzo Maffione 			/* If a poll context is present, yield to the scheduler
621*b6e66be2SVincenzo Maffione 			 * waiting for a notification to come either from
622*b6e66be2SVincenzo Maffione 			 * netmap or the application. */
623*b6e66be2SVincenzo Maffione 			schedule_timeout_interruptible(msecs_to_jiffies(1000));
624*b6e66be2SVincenzo Maffione 		} else
625*b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
626*b6e66be2SVincenzo Maffione 		{
627*b6e66be2SVincenzo Maffione 			/* Default synchronization method: sleep for a while. */
628*b6e66be2SVincenzo Maffione 			usleep_range(sleep_us, sleep_us);
629*b6e66be2SVincenzo Maffione 		}
630*b6e66be2SVincenzo Maffione 	}
631*b6e66be2SVincenzo Maffione out:
632*b6e66be2SVincenzo Maffione #ifdef SYNC_KLOOP_POLL
633*b6e66be2SVincenzo Maffione 	if (poll_ctx) {
634*b6e66be2SVincenzo Maffione 		/* Stop polling from netmap and the eventfds, and deallocate
635*b6e66be2SVincenzo Maffione 		 * the poll context. */
636*b6e66be2SVincenzo Maffione 		__set_current_state(TASK_RUNNING);
637*b6e66be2SVincenzo Maffione 		for (i = 0; i < poll_ctx->next_entry; i++) {
638*b6e66be2SVincenzo Maffione 			struct sync_kloop_poll_entry *entry =
639*b6e66be2SVincenzo Maffione 						poll_ctx->entries + i;
640*b6e66be2SVincenzo Maffione 
641*b6e66be2SVincenzo Maffione 			if (entry->wqh)
642*b6e66be2SVincenzo Maffione 				remove_wait_queue(entry->wqh, &entry->wait);
643*b6e66be2SVincenzo Maffione 			/* We did not get a reference to the eventfds, but
644*b6e66be2SVincenzo Maffione 			 * don't do that on netmap file descriptors (since
645*b6e66be2SVincenzo Maffione 			 * a reference was not taken. */
646*b6e66be2SVincenzo Maffione 			if (entry->filp && entry->filp != priv->np_filp)
647*b6e66be2SVincenzo Maffione 				fput(entry->filp);
648*b6e66be2SVincenzo Maffione 			if (entry->irq_ctx)
649*b6e66be2SVincenzo Maffione 				eventfd_ctx_put(entry->irq_ctx);
650*b6e66be2SVincenzo Maffione 			if (entry->irq_filp)
651*b6e66be2SVincenzo Maffione 				fput(entry->irq_filp);
652*b6e66be2SVincenzo Maffione 		}
653*b6e66be2SVincenzo Maffione 		nm_os_free(poll_ctx);
654*b6e66be2SVincenzo Maffione 		poll_ctx = NULL;
655*b6e66be2SVincenzo Maffione 	}
656*b6e66be2SVincenzo Maffione #endif /* SYNC_KLOOP_POLL */
657*b6e66be2SVincenzo Maffione 
658*b6e66be2SVincenzo Maffione 	/* Reset the kloop state. */
659*b6e66be2SVincenzo Maffione 	NMG_LOCK();
660*b6e66be2SVincenzo Maffione 	priv->np_kloop_state = 0;
661*b6e66be2SVincenzo Maffione 	NMG_UNLOCK();
662*b6e66be2SVincenzo Maffione 
663*b6e66be2SVincenzo Maffione 	return err;
664*b6e66be2SVincenzo Maffione }
665*b6e66be2SVincenzo Maffione 
666*b6e66be2SVincenzo Maffione int
667*b6e66be2SVincenzo Maffione netmap_sync_kloop_stop(struct netmap_priv_d *priv)
668*b6e66be2SVincenzo Maffione {
669*b6e66be2SVincenzo Maffione 	bool running = true;
670*b6e66be2SVincenzo Maffione 	int err = 0;
671*b6e66be2SVincenzo Maffione 
672*b6e66be2SVincenzo Maffione 	NMG_LOCK();
673*b6e66be2SVincenzo Maffione 	priv->np_kloop_state |= NM_SYNC_KLOOP_STOPPING;
674*b6e66be2SVincenzo Maffione 	NMG_UNLOCK();
675*b6e66be2SVincenzo Maffione 	while (running) {
676*b6e66be2SVincenzo Maffione 		usleep_range(1000, 1500);
677*b6e66be2SVincenzo Maffione 		NMG_LOCK();
678*b6e66be2SVincenzo Maffione 		running = (NM_ACCESS_ONCE(priv->np_kloop_state)
679*b6e66be2SVincenzo Maffione 				& NM_SYNC_KLOOP_RUNNING);
680*b6e66be2SVincenzo Maffione 		NMG_UNLOCK();
681*b6e66be2SVincenzo Maffione 	}
682*b6e66be2SVincenzo Maffione 
683*b6e66be2SVincenzo Maffione 	return err;
684*b6e66be2SVincenzo Maffione }
685*b6e66be2SVincenzo Maffione 
686*b6e66be2SVincenzo Maffione #ifdef WITH_PTNETMAP
687*b6e66be2SVincenzo Maffione /*
688*b6e66be2SVincenzo Maffione  * Guest ptnetmap txsync()/rxsync() routines, used in ptnet device drivers.
689*b6e66be2SVincenzo Maffione  * These routines are reused across the different operating systems supported
690*b6e66be2SVincenzo Maffione  * by netmap.
691*b6e66be2SVincenzo Maffione  */
692*b6e66be2SVincenzo Maffione 
693*b6e66be2SVincenzo Maffione /*
694*b6e66be2SVincenzo Maffione  * Reconcile host and guest views of the transmit ring.
695*b6e66be2SVincenzo Maffione  *
696*b6e66be2SVincenzo Maffione  * Guest user wants to transmit packets up to the one before ring->head,
697*b6e66be2SVincenzo Maffione  * and guest kernel knows tx_ring->hwcur is the first packet unsent
698*b6e66be2SVincenzo Maffione  * by the host kernel.
699*b6e66be2SVincenzo Maffione  *
700*b6e66be2SVincenzo Maffione  * We push out as many packets as possible, and possibly
701*b6e66be2SVincenzo Maffione  * reclaim buffers from previously completed transmission.
702*b6e66be2SVincenzo Maffione  *
703*b6e66be2SVincenzo Maffione  * Notifications from the host are enabled only if the user guest would
704*b6e66be2SVincenzo Maffione  * block (no space in the ring).
705*b6e66be2SVincenzo Maffione  */
706*b6e66be2SVincenzo Maffione bool
707*b6e66be2SVincenzo Maffione netmap_pt_guest_txsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
708*b6e66be2SVincenzo Maffione 			struct netmap_kring *kring, int flags)
709*b6e66be2SVincenzo Maffione {
710*b6e66be2SVincenzo Maffione 	bool notify = false;
711*b6e66be2SVincenzo Maffione 
712*b6e66be2SVincenzo Maffione 	/* Disable notifications */
713*b6e66be2SVincenzo Maffione 	atok->appl_need_kick = 0;
714*b6e66be2SVincenzo Maffione 
715*b6e66be2SVincenzo Maffione 	/*
716*b6e66be2SVincenzo Maffione 	 * First part: tell the host (updating the CSB) to process the new
717*b6e66be2SVincenzo Maffione 	 * packets.
718*b6e66be2SVincenzo Maffione 	 */
719*b6e66be2SVincenzo Maffione 	kring->nr_hwcur = ktoa->hwcur;
720*b6e66be2SVincenzo Maffione 	ptnetmap_guest_write_kring_csb(atok, kring->rcur, kring->rhead);
721*b6e66be2SVincenzo Maffione 
722*b6e66be2SVincenzo Maffione         /* Ask for a kick from a guest to the host if needed. */
723*b6e66be2SVincenzo Maffione 	if (((kring->rhead != kring->nr_hwcur || nm_kr_txempty(kring))
724*b6e66be2SVincenzo Maffione 		&& NM_ACCESS_ONCE(ktoa->kern_need_kick)) ||
725*b6e66be2SVincenzo Maffione 			(flags & NAF_FORCE_RECLAIM)) {
726*b6e66be2SVincenzo Maffione 		atok->sync_flags = flags;
727*b6e66be2SVincenzo Maffione 		notify = true;
728*b6e66be2SVincenzo Maffione 	}
729*b6e66be2SVincenzo Maffione 
730*b6e66be2SVincenzo Maffione 	/*
731*b6e66be2SVincenzo Maffione 	 * Second part: reclaim buffers for completed transmissions.
732*b6e66be2SVincenzo Maffione 	 */
733*b6e66be2SVincenzo Maffione 	if (nm_kr_txempty(kring) || (flags & NAF_FORCE_RECLAIM)) {
734*b6e66be2SVincenzo Maffione                 ptnetmap_guest_read_kring_csb(ktoa, kring);
735*b6e66be2SVincenzo Maffione 	}
736*b6e66be2SVincenzo Maffione 
737*b6e66be2SVincenzo Maffione         /*
738*b6e66be2SVincenzo Maffione          * No more room in the ring for new transmissions. The user thread will
739*b6e66be2SVincenzo Maffione 	 * go to sleep and we need to be notified by the host when more free
740*b6e66be2SVincenzo Maffione 	 * space is available.
741*b6e66be2SVincenzo Maffione          */
742*b6e66be2SVincenzo Maffione 	if (nm_kr_txempty(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
743*b6e66be2SVincenzo Maffione 		/* Reenable notifications. */
744*b6e66be2SVincenzo Maffione 		atok->appl_need_kick = 1;
745*b6e66be2SVincenzo Maffione                 /* Double check */
746*b6e66be2SVincenzo Maffione                 ptnetmap_guest_read_kring_csb(ktoa, kring);
747*b6e66be2SVincenzo Maffione                 /* If there is new free space, disable notifications */
748*b6e66be2SVincenzo Maffione 		if (unlikely(!nm_kr_txempty(kring))) {
749*b6e66be2SVincenzo Maffione 			atok->appl_need_kick = 0;
750*b6e66be2SVincenzo Maffione 		}
751*b6e66be2SVincenzo Maffione 	}
752*b6e66be2SVincenzo Maffione 
753*b6e66be2SVincenzo Maffione 	nm_prdis(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
754*b6e66be2SVincenzo Maffione 		kring->name, atok->head, atok->cur, ktoa->hwtail,
755*b6e66be2SVincenzo Maffione 		kring->rhead, kring->rcur, kring->nr_hwtail);
756*b6e66be2SVincenzo Maffione 
757*b6e66be2SVincenzo Maffione 	return notify;
758*b6e66be2SVincenzo Maffione }
759*b6e66be2SVincenzo Maffione 
760*b6e66be2SVincenzo Maffione /*
761*b6e66be2SVincenzo Maffione  * Reconcile host and guest view of the receive ring.
762*b6e66be2SVincenzo Maffione  *
763*b6e66be2SVincenzo Maffione  * Update hwcur/hwtail from host (reading from CSB).
764*b6e66be2SVincenzo Maffione  *
765*b6e66be2SVincenzo Maffione  * If guest user has released buffers up to the one before ring->head, we
766*b6e66be2SVincenzo Maffione  * also give them to the host.
767*b6e66be2SVincenzo Maffione  *
768*b6e66be2SVincenzo Maffione  * Notifications from the host are enabled only if the user guest would
769*b6e66be2SVincenzo Maffione  * block (no more completed slots in the ring).
770*b6e66be2SVincenzo Maffione  */
771*b6e66be2SVincenzo Maffione bool
772*b6e66be2SVincenzo Maffione netmap_pt_guest_rxsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
773*b6e66be2SVincenzo Maffione 			struct netmap_kring *kring, int flags)
774*b6e66be2SVincenzo Maffione {
775*b6e66be2SVincenzo Maffione 	bool notify = false;
776*b6e66be2SVincenzo Maffione 
777*b6e66be2SVincenzo Maffione         /* Disable notifications */
778*b6e66be2SVincenzo Maffione 	atok->appl_need_kick = 0;
779*b6e66be2SVincenzo Maffione 
780*b6e66be2SVincenzo Maffione 	/*
781*b6e66be2SVincenzo Maffione 	 * First part: import newly received packets, by updating the kring
782*b6e66be2SVincenzo Maffione 	 * hwtail to the hwtail known from the host (read from the CSB).
783*b6e66be2SVincenzo Maffione 	 * This also updates the kring hwcur.
784*b6e66be2SVincenzo Maffione 	 */
785*b6e66be2SVincenzo Maffione         ptnetmap_guest_read_kring_csb(ktoa, kring);
786*b6e66be2SVincenzo Maffione 	kring->nr_kflags &= ~NKR_PENDINTR;
787*b6e66be2SVincenzo Maffione 
788*b6e66be2SVincenzo Maffione 	/*
789*b6e66be2SVincenzo Maffione 	 * Second part: tell the host about the slots that guest user has
790*b6e66be2SVincenzo Maffione 	 * released, by updating cur and head in the CSB.
791*b6e66be2SVincenzo Maffione 	 */
792*b6e66be2SVincenzo Maffione 	if (kring->rhead != kring->nr_hwcur) {
793*b6e66be2SVincenzo Maffione 		ptnetmap_guest_write_kring_csb(atok, kring->rcur,
794*b6e66be2SVincenzo Maffione 					       kring->rhead);
795*b6e66be2SVincenzo Maffione                 /* Ask for a kick from the guest to the host if needed. */
796*b6e66be2SVincenzo Maffione 		if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
797*b6e66be2SVincenzo Maffione 			atok->sync_flags = flags;
798*b6e66be2SVincenzo Maffione 			notify = true;
799*b6e66be2SVincenzo Maffione 		}
800*b6e66be2SVincenzo Maffione 	}
801*b6e66be2SVincenzo Maffione 
802*b6e66be2SVincenzo Maffione         /*
803*b6e66be2SVincenzo Maffione          * No more completed RX slots. The user thread will go to sleep and
804*b6e66be2SVincenzo Maffione 	 * we need to be notified by the host when more RX slots have been
805*b6e66be2SVincenzo Maffione 	 * completed.
806*b6e66be2SVincenzo Maffione          */
807*b6e66be2SVincenzo Maffione 	if (nm_kr_rxempty(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
808*b6e66be2SVincenzo Maffione 		/* Reenable notifications. */
809*b6e66be2SVincenzo Maffione                 atok->appl_need_kick = 1;
810*b6e66be2SVincenzo Maffione                 /* Double check */
811*b6e66be2SVincenzo Maffione                 ptnetmap_guest_read_kring_csb(ktoa, kring);
812*b6e66be2SVincenzo Maffione                 /* If there are new slots, disable notifications. */
813*b6e66be2SVincenzo Maffione 		if (!nm_kr_rxempty(kring)) {
814*b6e66be2SVincenzo Maffione                         atok->appl_need_kick = 0;
815*b6e66be2SVincenzo Maffione                 }
816*b6e66be2SVincenzo Maffione         }
817*b6e66be2SVincenzo Maffione 
818*b6e66be2SVincenzo Maffione 	nm_prdis(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
819*b6e66be2SVincenzo Maffione 		kring->name, atok->head, atok->cur, ktoa->hwtail,
820*b6e66be2SVincenzo Maffione 		kring->rhead, kring->rcur, kring->nr_hwtail);
821*b6e66be2SVincenzo Maffione 
822*b6e66be2SVincenzo Maffione 	return notify;
823*b6e66be2SVincenzo Maffione }
824*b6e66be2SVincenzo Maffione 
825*b6e66be2SVincenzo Maffione /*
826*b6e66be2SVincenzo Maffione  * Callbacks for ptnet drivers: nm_krings_create, nm_krings_delete, nm_dtor.
827*b6e66be2SVincenzo Maffione  */
828*b6e66be2SVincenzo Maffione int
829*b6e66be2SVincenzo Maffione ptnet_nm_krings_create(struct netmap_adapter *na)
830*b6e66be2SVincenzo Maffione {
831*b6e66be2SVincenzo Maffione 	struct netmap_pt_guest_adapter *ptna =
832*b6e66be2SVincenzo Maffione 			(struct netmap_pt_guest_adapter *)na; /* Upcast. */
833*b6e66be2SVincenzo Maffione 	struct netmap_adapter *na_nm = &ptna->hwup.up;
834*b6e66be2SVincenzo Maffione 	struct netmap_adapter *na_dr = &ptna->dr.up;
835*b6e66be2SVincenzo Maffione 	int ret;
836*b6e66be2SVincenzo Maffione 
837*b6e66be2SVincenzo Maffione 	if (ptna->backend_users) {
838*b6e66be2SVincenzo Maffione 		return 0;
839*b6e66be2SVincenzo Maffione 	}
840*b6e66be2SVincenzo Maffione 
841*b6e66be2SVincenzo Maffione 	/* Create krings on the public netmap adapter. */
842*b6e66be2SVincenzo Maffione 	ret = netmap_hw_krings_create(na_nm);
843*b6e66be2SVincenzo Maffione 	if (ret) {
844*b6e66be2SVincenzo Maffione 		return ret;
845*b6e66be2SVincenzo Maffione 	}
846*b6e66be2SVincenzo Maffione 
847*b6e66be2SVincenzo Maffione 	/* Copy krings into the netmap adapter private to the driver. */
848*b6e66be2SVincenzo Maffione 	na_dr->tx_rings = na_nm->tx_rings;
849*b6e66be2SVincenzo Maffione 	na_dr->rx_rings = na_nm->rx_rings;
850*b6e66be2SVincenzo Maffione 
851*b6e66be2SVincenzo Maffione 	return 0;
852*b6e66be2SVincenzo Maffione }
853*b6e66be2SVincenzo Maffione 
854*b6e66be2SVincenzo Maffione void
855*b6e66be2SVincenzo Maffione ptnet_nm_krings_delete(struct netmap_adapter *na)
856*b6e66be2SVincenzo Maffione {
857*b6e66be2SVincenzo Maffione 	struct netmap_pt_guest_adapter *ptna =
858*b6e66be2SVincenzo Maffione 			(struct netmap_pt_guest_adapter *)na; /* Upcast. */
859*b6e66be2SVincenzo Maffione 	struct netmap_adapter *na_nm = &ptna->hwup.up;
860*b6e66be2SVincenzo Maffione 	struct netmap_adapter *na_dr = &ptna->dr.up;
861*b6e66be2SVincenzo Maffione 
862*b6e66be2SVincenzo Maffione 	if (ptna->backend_users) {
863*b6e66be2SVincenzo Maffione 		return;
864*b6e66be2SVincenzo Maffione 	}
865*b6e66be2SVincenzo Maffione 
866*b6e66be2SVincenzo Maffione 	na_dr->tx_rings = NULL;
867*b6e66be2SVincenzo Maffione 	na_dr->rx_rings = NULL;
868*b6e66be2SVincenzo Maffione 
869*b6e66be2SVincenzo Maffione 	netmap_hw_krings_delete(na_nm);
870*b6e66be2SVincenzo Maffione }
871*b6e66be2SVincenzo Maffione 
872*b6e66be2SVincenzo Maffione void
873*b6e66be2SVincenzo Maffione ptnet_nm_dtor(struct netmap_adapter *na)
874*b6e66be2SVincenzo Maffione {
875*b6e66be2SVincenzo Maffione 	struct netmap_pt_guest_adapter *ptna =
876*b6e66be2SVincenzo Maffione 			(struct netmap_pt_guest_adapter *)na;
877*b6e66be2SVincenzo Maffione 
878*b6e66be2SVincenzo Maffione 	netmap_mem_put(ptna->dr.up.nm_mem);
879*b6e66be2SVincenzo Maffione 	memset(&ptna->dr, 0, sizeof(ptna->dr));
880*b6e66be2SVincenzo Maffione 	netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp);
881*b6e66be2SVincenzo Maffione }
882*b6e66be2SVincenzo Maffione 
883*b6e66be2SVincenzo Maffione int
884*b6e66be2SVincenzo Maffione netmap_pt_guest_attach(struct netmap_adapter *arg,
885*b6e66be2SVincenzo Maffione 		       unsigned int nifp_offset, unsigned int memid)
886*b6e66be2SVincenzo Maffione {
887*b6e66be2SVincenzo Maffione 	struct netmap_pt_guest_adapter *ptna;
888*b6e66be2SVincenzo Maffione 	struct ifnet *ifp = arg ? arg->ifp : NULL;
889*b6e66be2SVincenzo Maffione 	int error;
890*b6e66be2SVincenzo Maffione 
891*b6e66be2SVincenzo Maffione 	/* get allocator */
892*b6e66be2SVincenzo Maffione 	arg->nm_mem = netmap_mem_pt_guest_new(ifp, nifp_offset, memid);
893*b6e66be2SVincenzo Maffione 	if (arg->nm_mem == NULL)
894*b6e66be2SVincenzo Maffione 		return ENOMEM;
895*b6e66be2SVincenzo Maffione 	arg->na_flags |= NAF_MEM_OWNER;
896*b6e66be2SVincenzo Maffione 	error = netmap_attach_ext(arg, sizeof(struct netmap_pt_guest_adapter), 1);
897*b6e66be2SVincenzo Maffione 	if (error)
898*b6e66be2SVincenzo Maffione 		return error;
899*b6e66be2SVincenzo Maffione 
900*b6e66be2SVincenzo Maffione 	/* get the netmap_pt_guest_adapter */
901*b6e66be2SVincenzo Maffione 	ptna = (struct netmap_pt_guest_adapter *) NA(ifp);
902*b6e66be2SVincenzo Maffione 
903*b6e66be2SVincenzo Maffione 	/* Initialize a separate pass-through netmap adapter that is going to
904*b6e66be2SVincenzo Maffione 	 * be used by the ptnet driver only, and so never exposed to netmap
905*b6e66be2SVincenzo Maffione          * applications. We only need a subset of the available fields. */
906*b6e66be2SVincenzo Maffione 	memset(&ptna->dr, 0, sizeof(ptna->dr));
907*b6e66be2SVincenzo Maffione 	ptna->dr.up.ifp = ifp;
908*b6e66be2SVincenzo Maffione 	ptna->dr.up.nm_mem = netmap_mem_get(ptna->hwup.up.nm_mem);
909*b6e66be2SVincenzo Maffione         ptna->dr.up.nm_config = ptna->hwup.up.nm_config;
910*b6e66be2SVincenzo Maffione 
911*b6e66be2SVincenzo Maffione 	ptna->backend_users = 0;
912*b6e66be2SVincenzo Maffione 
913*b6e66be2SVincenzo Maffione 	return 0;
914*b6e66be2SVincenzo Maffione }
915*b6e66be2SVincenzo Maffione 
916*b6e66be2SVincenzo Maffione #endif /* WITH_PTNETMAP */
917