netmap_kern.h: diff between revision d1d015864103b253b3fcb2f72a0da5b0cfeb31b6 (old) and revision ce3ee1e7c4cac5b86bbc15daac68f2129aa42187 (new). Unchanged code appears once; in changed hunks the old lines come first, followed by the new ones. The embedded numbers are each revision's own source line numbers.
1/*
2 * Copyright (C) 2011-2013 Matteo Landi, Luigi Rizzo. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.

--- 21 unchanged lines hidden ---

30 * prototypes used only in kernelspace.
31 */
32
33#ifndef _NET_NETMAP_KERN_H_
34#define _NET_NETMAP_KERN_H_
35
36#if defined(__FreeBSD__)
37
38#define likely(x) __builtin_expect(!!(x), 1)
39#define unlikely(x) __builtin_expect(!!(x), 0)
38#define likely(x) __builtin_expect((long)!!(x), 1L)
39#define unlikely(x) __builtin_expect((long)!!(x), 0L)
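/*
 * [Editorial illustration, not part of either revision.] likely()/unlikely()
 * are branch-prediction hints; __builtin_expect() takes and returns long,
 * which is why the newer revision casts the normalized !!(x) to long.
 * A hypothetical use, with the rare path marked unlikely:
 */
static inline int
example_consume(void *p)		/* hypothetical helper */
{
	if (unlikely(p == NULL))
		return (-1);		/* rarely taken error path */
	return (0);
}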
40
41#define NM_LOCK_T struct mtx
42#define NM_RWLOCK_T struct rwlock
43#define NM_SELINFO_T struct selinfo
44#define MBUF_LEN(m) ((m)->m_pkthdr.len)
45#define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m)
46
46#define NM_ATOMIC_T volatile int
47
47#elif defined (linux)
48
49#define NM_LOCK_T safe_spinlock_t // see bsd_glue.h
50#define NM_RWLOCK_T safe_spinlock_t // see bsd_glue.h
51#define NM_SELINFO_T wait_queue_head_t
52#define MBUF_LEN(m) ((m)->len)
53#define NM_SEND_UP(ifp, m) netif_rx(m)
54
55#define NM_ATOMIC_T volatile long unsigned int
56
55#ifndef DEV_NETMAP
56#define DEV_NETMAP
57#endif
59#endif /* DEV_NETMAP */
58
59/*
60 * IFCAP_NETMAP goes into net_device's priv_flags (if_capenable).
61 * This was 16 bits up to linux 2.6.36, so we need a 16 bit value on older
62 * platforms and tolerate the clash with IFF_DYNAMIC and IFF_BRIDGE_PORT.
63 * For the 32-bit value, 0x100000 has no clashes until at least 3.5.1
64 */
65#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)

--- 40 unchanged lines hidden ---

106 D(format, ##__VA_ARGS__); \
107 } while (0)
108
109struct netmap_adapter;
110struct nm_bdg_fwd;
111struct nm_bridge;
112struct netmap_priv_d;
113
116const char *nm_dump_buf(char *p, int len, int lim, char *dst);
117
114/*
115 * private, kernel view of a ring. Keeps track of the status of
116 * a ring across system calls.
117 *
118 * nr_hwcur index of the next buffer to refill.
119 * It corresponds to ring->cur - ring->reserved
120 *
121 * nr_hwavail the number of slots "owned" by userspace.
122 * nr_hwavail =:= ring->avail + ring->reserved
123 *
124 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
125 * This is so that, on a reset, buffers owned by userspace are not
126 * modified by the kernel. In particular:
127 * RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides with
128 * the next empty buffer as known by the hardware (next_to_check or so).
129 * TX rings: hwcur + hwofs coincides with next_to_send
130 *
135 * Clients cannot issue concurrent syscalls on a ring. The system
136 * detects this and reports an error using two flags,
137 * NKR_WBUSY and NKR_RBUSY
131 * For received packets, slot->flags is set to nkr_slot_flags
132 * so we can provide a proper initial value (e.g. set NS_FORWARD
133 * when operating in 'transparent' mode).
141 *
142 * The following fields are used to implement lock-free copy of packets
143 * from input to output ports in VALE switch:
144 * nkr_hwlease buffer after the last one being copied.
145 * A writer in nm_bdg_flush reserves N buffers
146 * from nr_hwlease, advances it, then does the
147 * copy outside the lock.
148 * In RX rings (used for VALE ports),
149 * nkr_hwcur + nkr_hwavail <= nkr_hwlease < nkr_hwcur+N-1
150 * In TX rings (used for NIC or host stack ports)
151 * nkr_hwcur <= nkr_hwlease < nkr_hwcur+ nkr_hwavail
152 * nkr_leases array of nkr_num_slots where writers can report
153 * completion of their block. NR_NOSLOT (~0) indicates
154 * that the writer has not finished yet
155 * nkr_lease_idx index of next free slot in nr_leases, to be assigned
156 *
157 * The kring is manipulated by txsync/rxsync and generic netmap function.
158 * q_lock is used to arbitrate access to the kring from within the netmap
159 * code, and this and other protections guarantee that there is never
160 * more than 1 concurrent call to txsync or rxsync. So we are free
161 * to manipulate the kring from within txsync/rxsync without any extra
162 * locks.
134 */
135struct netmap_kring {
136 struct netmap_ring *ring;
137 u_int nr_hwcur;
138 int nr_hwavail;
139 u_int nr_kflags; /* private driver flags */
166 uint32_t nr_hwcur;
167 uint32_t nr_hwavail;
168 uint32_t nr_kflags; /* private driver flags */
140#define NKR_PENDINTR 0x1 // Pending interrupt.
141 u_int nkr_num_slots;
170 uint32_t nkr_num_slots;
171 int32_t nkr_hwofs; /* offset between NIC and netmap ring */
142
143 uint16_t nkr_slot_flags; /* initial value for flags */
144 int nkr_hwofs; /* offset between NIC and netmap ring */
145 struct netmap_adapter *na;
146 struct nm_bdg_fwd *nkr_ft;
176 uint32_t *nkr_leases;
177#define NR_NOSLOT ((uint32_t)~0)
178 uint32_t nkr_hwlease;
179 uint32_t nkr_lease_idx;
180
147 NM_SELINFO_T si; /* poll/select wait queue */
148 NM_LOCK_T q_lock; /* used if no device lock available */
182 NM_LOCK_T q_lock; /* protects kring and ring. */
183 NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */
184
185 volatile int nkr_stopped;
149} __attribute__((__aligned__(64)));
150
189/* return the next index, with wraparound */
190static inline uint32_t
191nm_next(uint32_t i, uint32_t lim)
192{
193 return unlikely (i == lim) ? 0 : i + 1;
194}
195
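/*
 * [Editorial illustration, not part of either revision.] A sketch of how
 * the kring fields fit together: walk the nr_hwavail slots that userspace
 * currently owns, starting at nr_hwcur, using nm_next() for the wraparound.
 * example_scan_rx() and the slot inspection are hypothetical; the kring
 * fields, nm_next() and struct netmap_slot come from the netmap headers.
 */
static inline void
example_scan_rx(struct netmap_kring *kring)
{
	uint32_t lim = kring->nkr_num_slots - 1;
	uint32_t i = kring->nr_hwcur;
	uint32_t n;

	for (n = 0; n < kring->nr_hwavail; n++) {
		struct netmap_slot *slot = &kring->ring->slot[i];

		/* ... inspect slot->buf_idx, slot->len, slot->flags ... */
		(void)slot;
		i = nm_next(i, lim);
	}
}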
196/*
197 *
198 * Here is the layout for the Rx and Tx rings.
199
200 RxRING TxRING
201
202 +-----------------+ +-----------------+
203 | | | |
204 |XXX free slot XXX| |XXX free slot XXX|
205 +-----------------+ +-----------------+
206 | |<-hwcur | |<-hwcur
207 | reserved h | | (ready |
208 +----------- w -+ | to be |
209 cur->| a | | sent) h |
210 | v | +---------- w |
211 | a | cur->| (being a |
212 | i | | prepared) v |
213 | avail l | | a |
214 +-----------------+ + a ------ i +
215 | | ... | v l |<-hwlease
216 | (being | ... | a | ...
217 | prepared) | ... | i | ...
218 +-----------------+ ... | l | ...
219 | |<-hwlease +-----------------+
220 | | | |
221 | | | |
222 | | | |
223 | | | |
224 +-----------------+ +-----------------+
225
226 * The cur/avail (user view) and hwcur/hwavail (kernel view)
227 * are used in the normal operation of the card.
228 *
229 * When a ring is the output of a switch port (Rx ring for
230 * a VALE port, Tx ring for the host stack or NIC), slots
231 * are reserved in blocks through 'hwlease' which points
232 * to the next unused slot.
233 * On an Rx ring, hwlease is always after hwavail,
234 * and completions cause avail to advance.
235 * On a Tx ring, hwlease is always between cur and hwavail,
236 * and completions cause cur to advance.
237 *
238 * nm_kr_space() returns the maximum number of slots that
239 * can be assigned.
240 * nm_kr_lease() reserves the required number of buffers,
241 * advances nkr_hwlease and also returns an entry in
242 * a circular array where completions should be reported.
243 */
244
245
246
247
248
249/*
250 * This struct extends the 'struct adapter' (or
251 * equivalent) device descriptor. It contains all fields needed to
252 * support netmap operation.
253 */
254struct netmap_adapter {
255 /*
256 * On linux we do not have a good way to tell if an interface
257 * is netmap-capable. So we use the following trick:
258 * NA(ifp) points here, and the first entry (which hopefully
259 * always exists and is at least 32 bits) contains a magic
260 * value which we can use to detect that the interface is good.
261 */
262 uint32_t magic;
263 uint32_t na_flags; /* future place for IFCAP_NETMAP */
264#define NAF_SKIP_INTR 1 /* use the regular interrupt handler.
265 * useful during initialization
266 */
267#define NAF_SW_ONLY 2 /* forward packets only to sw adapter */
268#define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when
269 * forwarding packets coming from this
270 * interface
271 */
272#define NAF_MEM_OWNER 8 /* the adapter is responsible for the
273 * deallocation of the memory allocator
274 */
170 int refcount; /* number of user-space descriptors using this
171 interface, which is equal to the number of
172 struct netmap_if objs in the mapped region. */
173 /*
174 * The selwakeup in the interrupt thread can use per-ring
175 * and/or global wait queues. We track how many clients
176 * of each type we have so we can optimize the drivers,
177 * and especially avoid huge contention on the locks.
178 */
179 int na_single; /* threads attached to a single hw queue */
180 int na_multi; /* threads attached to multiple hw queues */
181
182 int separate_locks; /* set if the interface supports different
183 locks for rx, tx and core. */
184
185 u_int num_rx_rings; /* number of adapter receive rings */
186 u_int num_tx_rings; /* number of adapter transmit rings */
187
188 u_int num_tx_desc; /* number of descriptors in each queue */
189 u_int num_rx_desc;
190
191 /* tx_rings and rx_rings are private but allocated
192 * as a contiguous chunk of memory. Each array has

--- 12 unchanged lines hidden ---

205 /* references to the ifnet and device routines, used by
206 * the generic netmap functions.
207 */
208 struct ifnet *ifp; /* adapter is ifp->if_softc */
209
210 NM_LOCK_T core_lock; /* used if no device lock available */
211
212 int (*nm_register)(struct ifnet *, int onoff);
213 void (*nm_lock)(struct ifnet *, int what, u_int ringid);
214 int (*nm_txsync)(struct ifnet *, u_int ring, int lock);
215 int (*nm_rxsync)(struct ifnet *, u_int ring, int lock);
315
316 int (*nm_txsync)(struct ifnet *, u_int ring, int flags);
317 int (*nm_rxsync)(struct ifnet *, u_int ring, int flags);
318#define NAF_FORCE_READ 1
319#define NAF_FORCE_RECLAIM 2
216 /* return configuration information */
217 int (*nm_config)(struct ifnet *, u_int *txr, u_int *txd,
218 u_int *rxr, u_int *rxd);
219
220 /*
221 * Bridge support:
222 *
223 * bdg_port is the port number used in the bridge;

--- 7 unchanged lines hidden ---

231 int na_bdg_refcount;
232 struct nm_bridge *na_bdg;
233 /* When we attach a physical interface to the bridge, we
234 * allow the controlling process to terminate, so we need
235 * a place to store the netmap_priv_d data structure.
236 * This is only done when physical interfaces are attached to a bridge.
237 */
238 struct netmap_priv_d *na_kpriv;
343
344 /* memory allocator */
345 struct netmap_mem_d *nm_mem;
346#ifdef linux
347 struct net_device_ops nm_ndo;
348#endif /* linux */
349};
350
351/*
352 * Available space in the ring.
353 */
354static inline uint32_t
355nm_kr_space(struct netmap_kring *k, int is_rx)
356{
357 int space;
358
359 if (is_rx) {
360 int busy = k->nkr_hwlease - k->nr_hwcur;
361 if (busy < 0)
362 busy += k->nkr_num_slots;
363 space = k->nkr_num_slots - 1 - busy;
364 } else {
365 space = k->nr_hwcur + k->nr_hwavail - k->nkr_hwlease;
366 if (space < 0)
367 space += k->nkr_num_slots;
368 }
369#if 0
370 // sanity check
371 if (k->nkr_hwlease >= k->nkr_num_slots ||
372 k->nr_hwcur >= k->nkr_num_slots ||
373 k->nr_hwavail >= k->nkr_num_slots ||
374 busy < 0 ||
375 busy >= k->nkr_num_slots) {
376 D("invalid kring, cur %d avail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
377 k->nkr_lease_idx, k->nkr_num_slots);
378 }
379#endif
380 return space;
381}
382
383
384/* return update position */
385static inline uint32_t
386nm_kr_rxpos(struct netmap_kring *k)
387{
388 uint32_t pos = k->nr_hwcur + k->nr_hwavail;
389 if (pos >= k->nkr_num_slots)
390 pos -= k->nkr_num_slots;
391#if 0
392 if (pos >= k->nkr_num_slots ||
393 k->nkr_hwlease >= k->nkr_num_slots ||
394 k->nr_hwcur >= k->nkr_num_slots ||
395 k->nr_hwavail >= k->nkr_num_slots ||
396 k->nkr_lease_idx >= k->nkr_num_slots) {
397 D("invalid kring, cur %d avail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
398 k->nkr_lease_idx, k->nkr_num_slots);
399 }
400#endif
401 return pos;
402}
403
404
405/* make a lease on the kring for N positions. return the
406 * lease index
407 */
408static inline uint32_t
409nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)
410{
411 uint32_t lim = k->nkr_num_slots - 1;
412 uint32_t lease_idx = k->nkr_lease_idx;
413
414 k->nkr_leases[lease_idx] = NR_NOSLOT;
415 k->nkr_lease_idx = nm_next(lease_idx, lim);
416
417 if (n > nm_kr_space(k, is_rx)) {
418 D("invalid request for %d slots", n);
419 panic("x");
420 }
421 /* XXX verify that there are n slots */
422 k->nkr_hwlease += n;
423 if (k->nkr_hwlease > lim)
424 k->nkr_hwlease -= lim + 1;
425
426 if (k->nkr_hwlease >= k->nkr_num_slots ||
427 k->nr_hwcur >= k->nkr_num_slots ||
428 k->nr_hwavail >= k->nkr_num_slots ||
429 k->nkr_lease_idx >= k->nkr_num_slots) {
430 D("invalid kring %s, cur %d avail %d lease %d lease_idx %d lim %d",
431 k->na->ifp->if_xname,
432 k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
433 k->nkr_lease_idx, k->nkr_num_slots);
434 }
435 return lease_idx;
436}
437
438
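/*
 * [Editorial illustration, not part of either revision.] A sketch of the
 * lease protocol documented above, for the RX ring of a VALE port. The
 * start-index capture and the nm_kr_lease() call are assumed to run under
 * the ring lock, the packet copy outside it. The copy itself and the final
 * bookkeeping that advances nr_hwavail over completed leases are elided;
 * the function name and 'want' are hypothetical.
 */
static inline void
example_bdg_writer(struct netmap_kring *kring, u_int want)
{
	uint32_t n, start, lease_idx;

	n = nm_kr_space(kring, 1);		/* is_rx = 1 */
	if (n > want)
		n = want;
	if (n == 0)
		return;				/* no room: drop */
	start = kring->nkr_hwlease;		/* first slot of our block */
	lease_idx = nm_kr_lease(kring, n, 1);	/* advances nkr_hwlease */

	/* ... copy n packets into slots start .. start+n-1 (mod ring
	 * size), outside the lock ... */

	/* report completion: store the first index past our block */
	start += n;
	if (start >= kring->nkr_num_slots)
		start -= kring->nkr_num_slots;
	kring->nkr_leases[lease_idx] = start;
}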
439/*
440 * XXX NETMAP_DELETING() is unused
441 *
442 * The combination of "enable" (ifp->if_capenable & IFCAP_NETMAP)
443 * and refcount gives the status of the interface, namely:
444 *
445 * enable refcount Status
446 *
447 * FALSE 0 normal operation
448 * FALSE != 0 -- (impossible)
449 * TRUE 1 netmap mode
450 * TRUE 0 being deleted.
451 */
452
453#define NETMAP_DELETING(_na) ( ((_na)->refcount == 0) && \
454 ( (_na)->ifp->if_capenable & IFCAP_NETMAP) )
455
259/*
260 * parameters for (*nm_lock)(adapter, what, index)
261 */
262enum {
263 NETMAP_NO_LOCK = 0,
264 NETMAP_CORE_LOCK, NETMAP_CORE_UNLOCK,
265 NETMAP_TX_LOCK, NETMAP_TX_UNLOCK,
266 NETMAP_RX_LOCK, NETMAP_RX_UNLOCK,
267#ifdef __FreeBSD__
268#define NETMAP_REG_LOCK NETMAP_CORE_LOCK
269#define NETMAP_REG_UNLOCK NETMAP_CORE_UNLOCK
270#else
271 NETMAP_REG_LOCK, NETMAP_REG_UNLOCK
272#endif
273};
274
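/*
 * [Editorial illustration, not part of either revision.] In the older
 * API the netmap core brackets ring manipulation with the driver's
 * nm_lock callback using the codes above, e.g.:
 *
 *	na->nm_lock(ifp, NETMAP_TX_LOCK, ring_nr);
 *	... manipulate tx ring ring_nr ...
 *	na->nm_lock(ifp, NETMAP_TX_UNLOCK, ring_nr);
 *
 * The newer revision removes this callback; protection moves to the
 * per-ring q_lock and the adapter core_lock taken inside the core.
 */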
456
275/* How to handle locking support in netmap_rx_irq/netmap_tx_irq */
276#define NETMAP_LOCKED_ENTER 0x10000000 /* already locked on enter */
277#define NETMAP_LOCKED_EXIT 0x20000000 /* keep locked on exit */
278
279/*
280 * The following are support routines used by individual drivers to
281 * support netmap operation.
282 *
283 * netmap_attach() initializes a struct netmap_adapter, allocating the
284 * struct netmap_ring's and the struct selinfo.
285 *
286 * netmap_detach() frees the memory allocated by netmap_attach().
287 *
288 * netmap_start() replaces the if_transmit routine of the interface,
466 * netmap_transmit() replaces the if_transmit routine of the interface,
289 * and is used to intercept packets coming from the stack.
290 *
291 * netmap_load_map/netmap_reload_map are helper routines to set/reset
292 * the dmamap for a packet buffer
293 *
294 * netmap_reset() is a helper routine to be called in the driver
295 * when reinitializing a ring.
296 */
297int netmap_attach(struct netmap_adapter *, int);
475int netmap_attach(struct netmap_adapter *, u_int);
298void netmap_detach(struct ifnet *);
299int netmap_start(struct ifnet *, struct mbuf *);
477int netmap_transmit(struct ifnet *, struct mbuf *);
300enum txrx { NR_RX = 0, NR_TX = 1 };
301struct netmap_slot *netmap_reset(struct netmap_adapter *na,
302 enum txrx tx, int n, u_int new_cur);
480 enum txrx tx, u_int n, u_int new_cur);
303int netmap_ring_reinit(struct netmap_kring *);
304
483u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
484
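/*
 * [Editorial illustration, not part of either revision.] The driver-side
 * attach sequence sketched from the comment above. The example_* callbacks
 * and the ring sizes are hypothetical; the netmap_adapter fields, the
 * callback signatures and netmap_attach() come from this header (the
 * second argument, the number of tx/rx ring pairs, is int in the old
 * revision and u_int in the new one).
 */
static int example_reg(struct ifnet *, int);
static int example_txsync(struct ifnet *, u_int, int);
static int example_rxsync(struct ifnet *, u_int, int);

static int
example_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = ifp;
	na.num_tx_desc = 1024;		/* hypothetical ring sizes */
	na.num_rx_desc = 1024;
	na.nm_register = example_reg;
	na.nm_txsync = example_txsync;
	na.nm_rxsync = example_rxsync;
	return (netmap_attach(&na, 1));	/* one tx/rx ring pair */
}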
305/*
306 * The following bridge-related interfaces are used by other kernel modules
307 * In the version that only supports unicast or broadcast, the lookup
308 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
309 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
310 * XXX in practice "unknown" might be handled same as broadcast.
311 */
312typedef u_int (*bdg_lookup_fn_t)(char *buf, u_int len, uint8_t *ring_nr,

--- 133 unchanged lines hidden ---

446
447/*
448 * The bus_dmamap_sync() can be one of wmb() or rmb() depending on direction.
449 */
450#define bus_dmamap_sync(_a, _b, _c)
451
452#endif /* linux */
453
454/*
455 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
456 */
457static inline int
458netmap_idx_n2k(struct netmap_kring *kr, int idx)
459{
460 int n = kr->nkr_num_slots;
461 idx += kr->nkr_hwofs;

--- 48 unchanged lines hidden ---

510 uint32_t i = slot->buf_idx;
511 void *ret = (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);
512
513 *pp = (i >= netmap_total_buffers) ? NMB_PA(0) : NMB_PA(i);
514 return ret;
515}
516
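/*
 * [Editorial illustration, not part of either revision; the full function
 * bodies sit in the hidden region above.] netmap_idx_n2k() adds nkr_hwofs
 * to a NIC ring index and wraps the result into [0, nkr_num_slots); a
 * sketch of such a wrap, with a hypothetical helper name:
 */
static inline int
example_idx_wrap(int idx, int n)
{
	if (idx < 0)
		return (idx + n);
	else if (idx >= n)
		return (idx - n);
	return (idx);
}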
517/* default functions to handle rx/tx interrupts */
518int netmap_rx_irq(struct ifnet *, int, int *);
699int netmap_rx_irq(struct ifnet *, u_int, u_int *);
519#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
520
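/*
 * [Editorial illustration, not part of either revision.] How a driver's
 * receive-interrupt path typically hands a queue to netmap: if
 * netmap_rx_irq() returns nonzero, netmap consumed the event and the
 * regular mbuf path is skipped. example_rxeof() is hypothetical; the
 * work-done argument follows the newer u_int signature.
 */
static int
example_rxeof(struct ifnet *ifp, u_int ring_nr)
{
	u_int work_done;

	if (netmap_rx_irq(ifp, ring_nr, &work_done))
		return (0);		/* netmap handled the interrupt */
	/* ... regular mbuf-based receive processing ... */
	return (1);
}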
702#ifdef __FreeBSD__
703MALLOC_DECLARE(M_NETMAP);
704#endif /* __FreeBSD__ */
705
706
707void netmap_disable_all_rings(struct ifnet *);
708void netmap_enable_all_rings(struct ifnet *);
709
521#endif /* _NET_NETMAP_KERN_H_ */