/*-
 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/debugnet.h>
#include <net/pfil.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <net/iflib.h>

#include "ifdi_if.h"

#ifdef PCI_IOV
#include <dev/pci/pci_iov.h>
#endif

#include <sys/bitstring.h>
/*
 * Enable accounting of every mbuf as it comes into and goes out of
 * iflib's software descriptor references.
 */
#define MEMORY_LOGGING 0
/*
 * Enable mbuf vectors for compressing long mbuf chains
 */

/*
 * NB:
 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
 *   we prefetch needs to be determined by the time spent in m_free vis-a-vis
 *   the cost of a prefetch. This will of course vary based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis-a-vis the cost of a memory
 *        access.
 */

/*
 * File organization:
 *  - private structures
 *  - iflib private utility functions
 *  - ifnet functions
 *  - vlan registry and other exported functions
 *  - iflib public core functions
 */
static MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

#define IFLIB_RXEOF_MORE	(1U << 0)
#define IFLIB_RXEOF_EMPTY	(2U << 0)
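
/*
 * These flags are returned by the RX end-of-frame path: MORE indicates the
 * budget ran out while descriptors were still pending, and EMPTY indicates
 * a free list could not be replenished; the RX task uses them to reschedule
 * itself or arm the queue watchdog (see the rxeof/task functions later in
 * this file).
 */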

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);
static void iflib_tqg_detach(if_ctx_t ctx);
#ifndef ALTQ
static int iflib_simple_transmit(if_t ifp, struct mbuf *m);
#endif

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct sx ifc_ctx_sx;
	struct mtx ifc_state_mtx;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	uint32_t ifc_rx_mbuf_sz;

	int ifc_link_state;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct task ifc_admin_task;
	struct task ifc_vflr_task;
	struct taskqueue *ifc_tq;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia ifc_media;
	struct ifmedia *ifc_mediap;

	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;
	uint16_t ifc_sysctl_tx_abdicate;
	uint16_t ifc_sysctl_core_offset;
#define CORE_OFFSET_UNSPECIFIED	0xffff
	uint8_t ifc_sysctl_separate_txrx;
	uint8_t ifc_sysctl_use_logical_cores;
	uint16_t ifc_sysctl_extra_msix_vectors;
	bool ifc_cpus_are_physical_cores;
	bool ifc_sysctl_simple_tx;
	uint16_t ifc_sysctl_tx_reclaim_thresh;
	uint16_t ifc_sysctl_tx_reclaim_ticks;

	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define isc_txd_encap		ifc_txrx.ift_txd_encap
#define isc_txd_flush		ifc_txrx.ift_txd_flush
#define isc_txd_credits_update	ifc_txrx.ift_txd_credits_update
#define isc_rxd_available	ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get		ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill		ifc_txrx.ift_rxd_refill
#define isc_rxd_flush		ifc_txrx.ift_rxd_flush
#define isc_legacy_intr		ifc_txrx.ift_legacy_intr
#define isc_txq_select		ifc_txrx.ift_txq_select
#define isc_txq_select_v2	ifc_txrx.ift_txq_select_v2

	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	struct ether_addr ifc_mac;
};

void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (ctx->ifc_mediap);
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}

uint16_t
iflib_get_extra_msix_vectors_sysctl(if_ctx_t ctx)
{

	return (ctx->ifc_sysctl_extra_msix_vectors);
}

#define IP_ALIGNED(m)		((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT	(CACHE_LINE_SIZE / sizeof(void *))
#define CACHE_PTR_NEXT(ptr)	((void *)(roundup2(ptr, CACHE_LINE_SIZE)))

#define LINK_ACTIVE(ctx)	((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx)		((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
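
/*
 * Illustrative note on IP_ALIGNED(): with the usual 2-byte ETHER_ALIGN pad
 * in front of the 14-byte Ethernet header, m_data sits at an address that
 * is 2 (mod 4), which places the IP header that follows on a 4-byte
 * boundary -- exactly the condition the macro tests.
 */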

typedef struct iflib_sw_rx_desc_array {
	bus_dmamap_t *ifsd_map;	/* bus_dma maps for packet */
	struct mbuf **ifsd_m;	/* pkthdr mbufs */
	caddr_t *ifsd_cl;	/* direct cluster pointer for rx */
	bus_addr_t *ifsd_ba;	/* bus addr of cluster for rx */
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
	bus_dmamap_t *ifsd_map;		/* bus_dma maps for packet */
	bus_dmamap_t *ifsd_tso_map;	/* bus_dma maps for TSO packet */
	struct mbuf **ifsd_m;		/* pkthdr mbufs */
} if_txsd_vec_t;

/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS	128
#define IFLIB_RX_COPY_THRESH	128
#define IFLIB_MAX_RX_REFRESH	32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC	16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
#define IFLIB_QUEUE_IDLE	0
#define IFLIB_QUEUE_HUNG	1
#define IFLIB_QUEUE_WORKING	2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR	4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE		32

#define IFLIB_RESTART_BUDGET	8

#define IFC_LEGACY		0x001
#define IFC_QFLUSH		0x002
#define IFC_MULTISEG		0x004
#define IFC_SPARE1		0x008
#define IFC_SC_ALLOCATED	0x010
#define IFC_INIT_DONE		0x020
#define IFC_PREFETCH		0x040
#define IFC_DO_RESET		0x080
#define IFC_DO_WATCHDOG		0x100
#define IFC_SPARE0		0x200
#define IFC_SPARE2		0x400
#define IFC_IN_DETACH		0x800

#define IFC_NETMAP_TX_IRQ	0x80000000

#define CSUM_OFFLOAD		(CSUM_IP_TSO | CSUM_IP6_TSO | CSUM_IP | \
				 CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_SCTP | \
				 CSUM_IP6_UDP | CSUM_IP6_TCP | CSUM_IP6_SCTP)

struct iflib_txq {
	qidx_t ift_in_use;
	qidx_t ift_cidx;
	qidx_t ift_cidx_processed;
	qidx_t ift_pidx;
	uint8_t ift_gen;
	uint8_t ift_br_offset;
	uint16_t ift_npending;
	uint16_t ift_db_pending;
	uint16_t ift_rs_pending;
	uint32_t ift_last_reclaim;
	uint16_t ift_reclaim_thresh;
	uint16_t ift_reclaim_ticks;
	uint8_t ift_txd_size[8];
	uint64_t ift_processed;
	uint64_t ift_cleaned;
	uint64_t ift_cleaned_prev;
#if MEMORY_LOGGING
	uint64_t ift_enqueued;
	uint64_t ift_dequeued;
#endif
	uint64_t ift_no_tx_dma_setup;
	uint64_t ift_no_desc_avail;
	uint64_t ift_mbuf_defrag_failed;
	uint64_t ift_mbuf_defrag;
	uint64_t ift_map_failed;
	uint64_t ift_txd_encap_efbig;
	uint64_t ift_pullups;
	uint64_t ift_last_timer_tick;

	struct mtx ift_mtx;
	struct mtx ift_db_mtx;

	/* constant values */
	if_ctx_t ift_ctx;
	struct ifmp_ring *ift_br;
	struct grouptask ift_task;
	qidx_t ift_size;
	uint16_t ift_id;
	struct callout ift_timer;
#ifdef DEV_NETMAP
	struct callout ift_netmap_timer;
#endif /* DEV_NETMAP */

	if_txsd_vec_t ift_sds;
	uint8_t ift_qstatus;
	uint8_t ift_closed;
	uint8_t ift_update_freq;
	struct iflib_filter_info ift_filter_info;
	bus_dma_tag_t ift_buf_tag;
	bus_dma_tag_t ift_tso_buf_tag;
	iflib_dma_info_t ift_ifdi;
#define MTX_NAME_LEN 32
	char ift_mtx_name[MTX_NAME_LEN];
	bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
	qidx_t ifl_cidx;
	qidx_t ifl_pidx;
	qidx_t ifl_credits;
	uint8_t ifl_gen;
	uint8_t ifl_rxd_size;
#if MEMORY_LOGGING
	uint64_t ifl_m_enqueued;
	uint64_t ifl_m_dequeued;
	uint64_t ifl_cl_enqueued;
	uint64_t ifl_cl_dequeued;
#endif
	/* implicit pad */
	bitstr_t *ifl_rx_bitmap;
	qidx_t ifl_fragidx;
	/* constant */
	qidx_t ifl_size;
	uint16_t ifl_buf_size;
	uint16_t ifl_cltype;
	uma_zone_t ifl_zone;
	iflib_rxsd_array_t ifl_sds;
	iflib_rxq_t ifl_rxq;
	uint8_t ifl_id;
	bus_dma_tag_t ifl_buf_tag;
	iflib_dma_info_t ifl_ifdi;
	uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
	qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);

static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	qidx_t used;

	if (pidx > cidx)
		used = pidx - cidx;
	else if (pidx < cidx)
		used = size - cidx + pidx;
	else if (gen == 0 && pidx == cidx)
		used = 0;
	else if (gen == 1 && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}
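
/*
 * Worked example: with size = 1024 and cidx == pidx == 100, the ring is
 * either empty (gen == 0, 0 entries in use) or completely full (gen == 1,
 * 1024 in use); the generation bit is what disambiguates the two wrapped
 * states that share the same index pair.
 */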

#define TXQ_AVAIL(txq)	(txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

struct iflib_rxq {
	if_ctx_t ifr_ctx;
	iflib_fl_t ifr_fl;
	uint64_t ifr_rx_irq;
	struct pfil_head *pfil;
	/*
	 * If there is a separate completion queue (IFLIB_HAS_RXCQ), this is
	 * the completion queue consumer index. Otherwise it's unused.
	 */
	qidx_t ifr_cq_cidx;
	uint16_t ifr_id;
	uint8_t ifr_nfl;
	uint8_t ifr_ntxqirq;
	uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	uint8_t ifr_fl_offset;
	struct lro_ctrl ifr_lc;
	struct grouptask ifr_task;
	struct callout ifr_watchdog;
	struct iflib_filter_info ifr_filter_info;
	iflib_dma_info_t ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	iflib_fl_t ifsd_fl;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE	uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE	uint32_t
#endif
#define PKT_LOOP_BOUND	((PKT_INFO_SIZE / 3) * 3)
#define RXD_LOOP_BOUND	((RXD_INFO_SIZE / 4) * 4)

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));

static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i + 1] = 0;
		ri_pad->rxd_val[i + 2] = 0;
		ri_pad->rxd_val[i + 3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE - 1] = 0;
#endif
}
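
/*
 * The word-wise stores above, rather than a bzero()/memset() call, are
 * presumably intended to give the compiler a fixed-count, branch-free
 * sequence it can emit as straight-line stores; the CTASSERTs guarantee
 * that the pad structs exactly overlay the real if_pkt_info/if_rxd_info.
 */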

/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION	12
#define IF_BAD_DMA			((bus_addr_t)-1)

#define CTX_ACTIVE(ctx)	((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc)	sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx)		sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx)		sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx)	sx_destroy(&(ctx)->ifc_ctx_sx)

#define STATE_LOCK_INIT(_sc, _name)	mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx)		mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx)	mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx)	mtx_destroy(&(ctx)->ifc_state_mtx)
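
/*
 * Note on the two-lock scheme: the sx-based CTX lock serializes the slow,
 * potentially sleeping configuration paths (init/stop/ioctl), while the
 * state mutex protects short updates to flags and link state that may be
 * made from contexts where sleeping is not allowed.
 */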

#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)

/* Our boot-time initialization hook */
static int iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
	"iflib",
	iflib_module_event_handler,
	NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
    &iflib_min_tx_latency, 0,
    "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
    &iflib_no_tx_batch, 0,
    "disable transmit batching at the possible expense of throughput");
static int iflib_timer_default = 1000;
SYSCTL_INT(_net_iflib, OID_AUTO, timer_default, CTLFLAG_RW,
    &iflib_timer_default, 0, "number of ticks between iflib_timer calls");
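
/*
 * These knobs live under net.iflib and can be adjusted at runtime, e.g.
 * (illustrative):
 *	sysctl net.iflib.min_tx_latency=1
 *	sysctl net.iflib.timer_default=500
 */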

#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD, &iflib_tx_seen, 0,
    "# TX mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD, &iflib_tx_sent, 0,
    "# TX mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD, &iflib_tx_encap, 0,
    "# TX mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD, &iflib_tx_frees, 0,
    "# TX frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD, &iflib_rx_allocs, 0,
    "# RX allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD, &iflib_fl_refills, 0,
    "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
    &iflib_fl_refills_large, 0, "# large refills");

static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
    &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
    &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
    &iflib_txq_drain_notready, 0, "# drain notready");

static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
    &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
    &iflib_encap_txd_encap_fail, 0, "# driver encap failures");

static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_if_input;
static int iflib_rxd_flush;

static int iflib_verbose_debug;

SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD, &iflib_task_fn_rxs, 0,
    "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
    &iflib_rx_intr_enables, 0, "# RX intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD, &iflib_fast_intrs, 0,
    "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD, &iflib_rx_unavail, 0,
    "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
    &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD, &iflib_rx_if_input,
    0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD, &iflib_rxd_flush, 0,
    "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
    &iflib_verbose_debug, 0, "enable verbose debugging");

#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
	    iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
	    iflib_txq_drain_flushing = iflib_txq_drain_oactive =
	    iflib_txq_drain_notready =
	    iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
	    iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
	    iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
	    iflib_rx_unavail =
	    iflib_rx_ctx_inactive = iflib_rx_if_input =
	    iflib_rxd_flush = 0;
}

#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif

#define IFLIB_DEBUG 0

static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
#ifdef ALTQ
static void iflib_altq_if_start(if_t ifp);
static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
#endif
static void iflib_register(if_ctx_t);
static void iflib_deregister(if_ctx_t);
static void iflib_unregister_vlan_handlers(if_ctx_t ctx);
static uint16_t iflib_get_mbuf_size_for(unsigned int size);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_stop(if_ctx_t ctx);
static void iflib_if_init_locked(if_ctx_t ctx);
static void iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *iflib_fixup_rx(struct mbuf *m);
#endif
static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq);

static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
    SLIST_HEAD_INITIALIZER(cpu_offsets);
struct cpu_offset {
	SLIST_ENTRY(cpu_offset) entries;
	cpuset_t	set;
	unsigned int	refcount;
	uint16_t	next_cpuid;
};
static struct mtx cpu_offset_mtx;
MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock",
    MTX_DEF);

DEBUGNET_DEFINE(iflib);

static int
iflib_num_rx_descs(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	uint16_t first_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;

	return (scctx->isc_nrxd[first_rxq]);
}

static int
iflib_num_tx_descs(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	uint16_t first_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;

	return (scctx->isc_ntxd[first_txq]);
}
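
/*
 * Note: when a driver sets IFLIB_HAS_RXCQ/IFLIB_HAS_TXCQ, slot 0 of the
 * isc_nrxd/isc_ntxd arrays describes the completion queue, so the counts
 * for the actual descriptor rings start at index 1 -- that is what the
 * first_rxq/first_txq offsets above select.
 */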

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init);
static void iflib_netmap_timer(void *arg);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 1: strip CRC on rx frames (default), 0: keep it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames that are not a multiple of 64 bytes
 *	is slower, so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on RX frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed RX intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed RX intr bufs");
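
/*
 * Example (illustrative): when benchmarking, the CRC can be kept with
 *	sysctl dev.netmap.iflib_crcstrip=0
 * while dev.netmap.iflib_rx_miss / iflib_rx_miss_bufs give a rough count
 * of packets that may have been missed due to lost RX interrupts.
 */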

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
	if_t ifp = na->ifp;
	if_ctx_t ctx = if_getsoftc(ifp);
	int status;

	CTX_LOCK(ctx);
	if (!CTX_IS_VF(ctx))
		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

	iflib_stop(ctx);

	/*
	 * Enable (or disable) netmap flags, and intercept (or restore)
	 * ifp->if_transmit. This is done once the device has been stopped
	 * to prevent race conditions. Also, this must be done after
	 * calling netmap_disable_all_rings() and before calling
	 * netmap_enable_all_rings(), so that these two functions see the
	 * updated state of the NAF_NETMAP_ON bit.
	 */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}

	iflib_init_locked(ctx);
	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); /* XXX why twice? */
	status = if_getdrvflags(ifp) & IFF_DRV_RUNNING ? 0 : 1;
	if (status)
		nm_clear_native_flags(na);
	CTX_UNLOCK(ctx);
	return (status);
}

static int
iflib_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	if_t ifp = na->ifp;
	if_ctx_t ctx = if_getsoftc(ifp);
	iflib_rxq_t rxq = &ctx->ifc_rxqs[0];
	iflib_fl_t fl = &rxq->ifr_fl[0];

	info->num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	info->num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	info->num_tx_descs = iflib_num_tx_descs(ctx);
	info->num_rx_descs = iflib_num_rx_descs(ctx);
	info->rx_buf_maxsize = fl->ifl_buf_size;
	nm_prinf("txr %u rxr %u txd %u rxd %u rbufsz %u",
	    info->num_tx_rings, info->num_rx_rings, info->num_tx_descs,
	    info->num_rx_descs, info->rx_buf_maxsize);

	return (0);
}

static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	struct netmap_ring *ring = kring->ring;
	bus_dmamap_t *map;
	struct if_rxd_update iru;
	if_ctx_t ctx = rxq->ifr_ctx;
	iflib_fl_t fl = &rxq->ifr_fl[0];
	u_int nic_i_first, nic_i;
	u_int nm_i;
	int i, n;
#if IFLIB_DEBUG_COUNTERS
	int rf_count = 0;
#endif

	/*
	 * This function is used both at initialization and in rxsync.
	 * At initialization we need to prepare (with isc_rxd_refill())
	 * all the netmap buffers currently owned by the kernel, in
	 * such a way to keep fl->ifl_pidx and kring->nr_hwcur in sync
	 * (except for kring->nkr_hwofs). These may be less than
	 * kring->nkr_num_slots if netmap_reset() was called while an
	 * application using the kring still owned some buffers.
	 * At rxsync time, both indexes point to the next buffer to be
	 * refilled.
	 * In any case we publish (with isc_rxd_flush()) up to
	 * (fl->ifl_pidx - 1) % N (included), to avoid the NIC tail/prod
	 * pointer overrunning the head/cons pointer, although this is
	 * not necessary for some NICs (e.g. vmx).
	 */
	if (__predict_false(init)) {
		n = kring->nkr_num_slots - nm_kr_rxspace(kring);
	} else {
		n = kring->rhead - kring->nr_hwcur;
		if (n == 0)
			return (0); /* Nothing to do. */
		if (n < 0)
			n += kring->nkr_num_slots;
	}

	iru_init(&iru, rxq, 0 /* flid */);
	map = fl->ifl_sds.ifsd_map;
	nic_i = fl->ifl_pidx;
	nm_i = netmap_idx_n2k(kring, nic_i);
	if (__predict_false(init)) {
		/*
		 * On init/reset, nic_i must be 0, and we must
		 * start to refill from hwtail (see netmap_reset()).
		 */
		MPASS(nic_i == 0);
		MPASS(nm_i == kring->nr_hwtail);
	} else
		MPASS(nm_i == kring->nr_hwcur);
	DBG_COUNTER_INC(fl_refills);
	while (n > 0) {
#if IFLIB_DEBUG_COUNTERS
		if (++rf_count == 9)
			DBG_COUNTER_INC(fl_refills_large);
#endif
		nic_i_first = nic_i;
		for (i = 0; n > 0 && i < IFLIB_MAX_RX_REFRESH; n--, i++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			MPASS(i < IFLIB_MAX_RX_REFRESH);

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				return (netmap_ring_reinit(kring));

			fl->ifl_bus_addrs[i] = paddr +
			    nm_get_offset(kring, slot);
			fl->ifl_rxd_idxs[i] = nic_i;

			if (__predict_false(init)) {
				netmap_load_map(na, fl->ifl_buf_tag,
				    map[nic_i], addr);
			} else if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, fl->ifl_buf_tag,
				    map[nic_i], addr);
			}
			bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i],
			    BUS_DMASYNC_PREREAD);
			slot->flags &= ~NS_BUF_CHANGED;

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}

		iru.iru_pidx = nic_i_first;
		iru.iru_count = i;
		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
	}
	fl->ifl_pidx = nic_i;
	/*
	 * At the end of the loop we must have refilled everything
	 * we could possibly refill.
	 */
	MPASS(nm_i == kring->rhead);
	kring->nr_hwcur = nm_i;

	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id,
	    nm_prev(nic_i, lim));
	DBG_COUNTER_INC(rxd_flush);

	return (0);
}

#define NETMAP_TX_TIMER_US	90

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	if_t ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap kring */
	u_int nic_i;	/* index into the NIC ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;
	int tx_pkts = 0, tx_bytes = 0;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = if_getsoftc(ifp);
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap kring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so only do it
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		uint32_t pkt_len = 0, seg_idx = 0;
		int nic_i_start = -1, flags = 0;
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t offset = nm_get_offset(kring, slot);
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			flags |= (slot->flags & NS_REPORT ||
			    nic_i == 0 || nic_i == report_frequency) ?
			    IPI_TX_INTR : 0;

			/*
			 * If this is the first packet fragment, save the
			 * index of the first NIC slot for later.
			 */
			if (nic_i_start < 0)
				nic_i_start = nic_i;

			pi.ipi_segs[seg_idx].ds_addr = paddr + offset;
			pi.ipi_segs[seg_idx].ds_len = len;
			if (len) {
				pkt_len += len;
				seg_idx++;
			}

			if (!(slot->flags & NS_MOREFRAG)) {
				pi.ipi_len = pkt_len;
				pi.ipi_nsegs = seg_idx;
				pi.ipi_pidx = nic_i_start;
				pi.ipi_ndescs = 0;
				pi.ipi_flags = flags;

				/* Prepare the NIC TX ring. */
				ctx->isc_txd_encap(ctx->ifc_softc, &pi);
				DBG_COUNTER_INC(tx_encap);

				/* Update transmit counters */
				tx_bytes += pi.ipi_len;
				tx_pkts++;

				/* Reinit per-packet info for the next one. */
				flags = seg_idx = pkt_len = 0;
				nic_i_start = -1;
			}

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

			NM_CHECK_ADDR_LEN_OFF(na, len, offset);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, txq->ift_buf_tag,
				    txq->ift_sds.ifsd_map[nic_i], addr);
			}
			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txq->ift_buf_tag,
			    txq->ift_sds.ifsd_map[nic_i],
			    BUS_DMASYNC_PREWRITE);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED | NS_MOREFRAG);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = nm_i;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If we don't manage to reclaim them all, and TX IRQs are not in use,
	 * trigger a per-tx-queue timer to try again later.
	 */
	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}

	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
			callout_reset_sbt_on(&txq->ift_netmap_timer,
			    NETMAP_TX_TIMER_US * SBT_1US, SBT_1US,
			    iflib_netmap_timer, txq,
			    txq->ift_netmap_timer.c_cpu, 0);
		}

	if_inc_counter(ifp, IFCOUNTER_OBYTES, tx_bytes);
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, tx_pkts);

	return (0);
}

/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	if_t ifp = na->ifp;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	int i = 0, rx_bytes = 0, rx_pkts = 0;

	if_ctx_t ctx = if_getsoftc(ifp);
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = &rxq->ifr_fl[0];
	struct if_rxd_info ri;
	qidx_t *cidxp;

	/*
	 * netmap only uses free list 0, to avoid out of order consumption
	 * of receive buffers
	 */

	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring
	 * (or in the free list 0 if IFLIB_HAS_RXCQ is set), and they may
	 * differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = fl->ifl_cidx;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * fl->ifl_cidx is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		bool have_rxcq = sctx->isc_flags & IFLIB_HAS_RXCQ;
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		/*
		 * For the free list consumer index, we use the same
		 * logic as in iflib_rxeof().
		 */
		if (have_rxcq)
			cidxp = &rxq->ifr_cq_cidx;
		else
			cidxp = &fl->ifl_cidx;
		avail = ctx->isc_rxd_available(ctx->ifc_softc,
		    rxq->ifr_id, *cidxp, USHRT_MAX);

		nic_i = fl->ifl_cidx;
		nm_i = netmap_idx_n2k(kring, nic_i);
		MPASS(nm_i == kring->nr_hwtail);
		for (n = 0; avail > 0 && nm_i != hwtail_lim; n++, avail--) {
			rxd_info_zero(&ri);
			ri.iri_frags = rxq->ifr_frags;
			ri.iri_qsidx = kring->ring_id;
			ri.iri_ifp = ctx->ifc_ifp;
			ri.iri_cidx = *cidxp;

			error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
			for (i = 0; i < ri.iri_nfrags; i++) {
				if (error) {
					ring->slot[nm_i].len = 0;
					ring->slot[nm_i].flags = 0;
				} else {
					ring->slot[nm_i].len = ri.iri_frags[i].irf_len;
					if (i == (ri.iri_nfrags - 1)) {
						ring->slot[nm_i].len -= crclen;
						ring->slot[nm_i].flags = 0;

						/* Update receive counters */
						rx_bytes += ri.iri_len;
						rx_pkts++;
					} else
						ring->slot[nm_i].flags = NS_MOREFRAG;
				}

				bus_dmamap_sync(fl->ifl_buf_tag,
				    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
				nm_i = nm_next(nm_i, lim);
				fl->ifl_cidx = nic_i = nm_next(nic_i, lim);
			}

			if (have_rxcq) {
				*cidxp = ri.iri_cidx;
				while (*cidxp >= scctx->isc_nrxd[0])
					*cidxp -= scctx->isc_nrxd[0];
			}
		}
		if (n) { /* update the state variables */
			if (netmap_no_pendintr && !force_update) {
				/* diagnostics */
				iflib_rx_miss++;
				iflib_rx_miss_bufs += n;
			}
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	/*
	 * Second part: skip past packets that userspace has released.
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	netmap_fl_refill(rxq, kring, false);

	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);

	return (0);
}

static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
	if_ctx_t ctx = if_getsoftc(na->ifp);

	CTX_LOCK(ctx);
	if (onoff) {
		IFDI_INTR_ENABLE(ctx);
	} else {
		IFDI_INTR_DISABLE(ctx);
	}
	CTX_UNLOCK(ctx);
}

static int
iflib_netmap_attach(if_ctx_t ctx)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP | NAF_MOREFRAG | NAF_OFFSETS;
	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);

	na.num_tx_desc = iflib_num_tx_descs(ctx);
	na.num_rx_desc = iflib_num_rx_descs(ctx);
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.nm_intr = iflib_netmap_intr;
	na.nm_config = iflib_netmap_config;
	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	return (netmap_attach(&na));
}

static int
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return (0);
	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
		    NMB(na, slot + si));
	}
	return (1);
}

static int
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_kring *kring;
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return (0);
	kring = na->rx_rings[rxq->ifr_id];
	netmap_fl_refill(rxq, kring, true);
	return (1);
}

static void
iflib_netmap_timer(void *arg)
{
	iflib_txq_t txq = arg;
	if_ctx_t ctx = txq->ift_ctx;

	/*
	 * Wake up the netmap application, to give it a chance to
	 * call txsync and reclaim more completed TX buffers.
	 */
	netmap_tx_irq(ctx->ifc_ifp, txq->ift_id);
}

#define iflib_netmap_detach(ifp) netmap_detach(ifp)

#else
#define iflib_netmap_txq_init(ctx, txq) (0)
#define iflib_netmap_rxq_init(ctx, rxq) (0)
#define iflib_netmap_detach(ifp)
#define netmap_enable_all_rings(ifp)
#define netmap_disable_all_rings(ifp)

#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#endif

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}

static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x) + CACHE_LINE_SIZE / (sizeof(unsigned long)))));
#endif
}
#else
static __inline void
prefetch(void *x)
{
}

static __inline void
prefetch2cachelines(void *x)
{
}
#endif

static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl;

	fl = &rxq->ifr_fl[flid];
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_idxs = fl->ifl_rxd_idxs;
	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_flidx = fl->ifl_id;
}

static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	if (err)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

#define DMA_WIDTH_TO_BUS_LOWADDR(width)				\
	(((width) == 0) || (width) == flsll(BUS_SPACE_MAXADDR) ?	\
	    BUS_SPACE_MAXADDR : (1ULL << (width)) - 1ULL)
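
/*
 * Worked example: a device reporting a 32-bit DMA width yields a lowaddr
 * of (1ULL << 32) - 1 = 0xffffffff, while a width of 0 (unspecified) or
 * one equal to the full bus width maps to BUS_SPACE_MAXADDR, i.e. no
 * restriction at all.
 */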

int
iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags)
{
	int err;
	device_t dev = ctx->ifc_dev;
	bus_addr_t lowaddr;

	lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(ctx->ifc_softc_ctx.isc_dma_width);

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    align, 0,			/* alignment, bounds */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d (size=%d, align=%d)\n",
		    __func__, err, size, align);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void **)&dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}

int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
}
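
/*
 * Typical driver usage, as a minimal sketch (the 4096-byte size here is
 * hypothetical):
 *
 *	struct iflib_dma_info dma;
 *
 *	if (iflib_dma_alloc(ctx, 4096, &dma, 0) != 0)
 *		return (ENOMEM);
 *	... dma.idi_vaddr / dma.idi_paddr now describe zeroed,
 *	    device-visible memory; release it with iflib_dma_free(&dma) ...
 */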

int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err;
	iflib_dma_info_t *dmaiter;

	err = 0;	/* don't return stack garbage when count == 0 */
	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}

void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}

static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	int result;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}
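
/*
 * The pattern above is the usual filter/ithread split: the driver's filter
 * runs in interrupt context, and only when it asks for
 * FILTER_SCHEDULE_THREAD (or when there is no filter at all) does iflib
 * enqueue the grouptask to do the deferred work in thread context,
 * reporting FILTER_HANDLED back to the interrupt layer.
 */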

static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	if_ctx_t ctx;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	iflib_txq_t txq;
	void *sc;
	int i, cidx, result;
	qidx_t txqid;
	bool intr_enable, intr_legacy;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	ctx = rxq->ifr_ctx;
	sc = ctx->ifc_softc;
	intr_enable = false;
	intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY);
	MPASS(rxq->ifr_ntxqirq);
	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		txqid = rxq->ifr_txqid[i];
		txq = &ctx->ifc_txqs[txqid];
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD);
		if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
			if (intr_legacy)
				intr_enable = true;
			else
				IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&txq->ift_task);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else {
		if (intr_legacy)
			intr_enable = true;
		else
			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
		DBG_COUNTER_INC(rx_intr_enables);
	}
	if (intr_enable)
		IFDI_INTR_ENABLE(ctx);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	if_ctx_t ctx = info->ifi_ctx;
	int result;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_admin_task);
	return (FILTER_HANDLED);
}

static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    const char *name)
{
	struct resource *res;
	void *tag = NULL;
	device_t dev = ctx->ifc_dev;
	int flags, i, rc;

	flags = RF_ACTIVE;
	if (ctx->ifc_flags & IFC_LEGACY)
		flags |= RF_SHAREABLE;
	MPASS(rid < 512);
	i = rid;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, flags);
	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}
	irq->ii_res = res;
	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
	    filter, handler, arg, &tag);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name ? name : "unknown", rc);
		return (rc);
	} else if (name)
		bus_describe_intr(dev, res, tag, "%s", name);

	irq->ii_tag = tag;
	return (0);
}
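
/*
 * RF_SHAREABLE is requested only for INTx (IFC_LEGACY) interrupts because
 * legacy PCI interrupt lines can be shared between devices; an MSI-X
 * vector is exclusively owned and need not be marked shareable.
 */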
1704
1705 /*********************************************************************
1706 *
1707 * Allocate DMA resources for TX buffers as well as memory for the TX
1708 * mbuf map. TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in a
1709 * iflib_sw_tx_desc_array structure, storing all the information that
1710 * is needed to transmit a packet on the wire. This is called only
1711 * once at attach, setup is done every reset.
1712 *
1713 **********************************************************************/
1714 static int
iflib_txsd_alloc(iflib_txq_t txq)1715 iflib_txsd_alloc(iflib_txq_t txq)
1716 {
1717 if_ctx_t ctx = txq->ift_ctx;
1718 if_shared_ctx_t sctx = ctx->ifc_sctx;
1719 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1720 device_t dev = ctx->ifc_dev;
1721 bus_size_t tsomaxsize;
1722 bus_addr_t lowaddr;
1723 int err, nsegments, ntsosegments;
1724 bool tso;
1725
1726 nsegments = scctx->isc_tx_nsegments;
1727 ntsosegments = scctx->isc_tx_tso_segments_max;
1728 tsomaxsize = scctx->isc_tx_tso_size_max;
1729 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
1730 tsomaxsize += sizeof(struct ether_vlan_header);
1731 MPASS(scctx->isc_ntxd[0] > 0);
1732 MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
1733 MPASS(nsegments > 0);
1734 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
1735 MPASS(ntsosegments > 0);
1736 MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
1737 }
1738
1739 lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width);
1740
1741 /*
1742 * Set up DMA tags for TX buffers.
1743 */
1744 if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1745 1, 0, /* alignment, bounds */
1746 lowaddr, /* lowaddr */
1747 BUS_SPACE_MAXADDR, /* highaddr */
1748 NULL, NULL, /* filter, filterarg */
1749 sctx->isc_tx_maxsize, /* maxsize */
1750 nsegments, /* nsegments */
1751 sctx->isc_tx_maxsegsize, /* maxsegsize */
1752 0, /* flags */
1753 NULL, /* lockfunc */
1754 NULL, /* lockfuncarg */
1755 &txq->ift_buf_tag))) {
1756 device_printf(dev, "Unable to allocate TX DMA tag: %d\n", err);
1757 device_printf(dev, "maxsize: %ju nsegments: %d maxsegsize: %ju\n",
1758 (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
1759 goto fail;
1760 }
1761 tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
1762 if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev),
1763 1, 0, /* alignment, bounds */
1764 lowaddr, /* lowaddr */
1765 BUS_SPACE_MAXADDR, /* highaddr */
1766 NULL, NULL, /* filter, filterarg */
1767 tsomaxsize, /* maxsize */
1768 ntsosegments, /* nsegments */
1769 sctx->isc_tso_maxsegsize, /* maxsegsize */
1770 0, /* flags */
1771 NULL, /* lockfunc */
1772 NULL, /* lockfuncarg */
1773 &txq->ift_tso_buf_tag))) {
1774 device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
1775 err);
1776 goto fail;
1777 }
1778
1779 /* Allocate memory for the TX mbuf map. */
1780 if (!(txq->ift_sds.ifsd_m =
1781 (struct mbuf **) malloc(sizeof(struct mbuf *) *
1782 scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1783 device_printf(dev, "Unable to allocate TX mbuf map memory\n");
1784 err = ENOMEM;
1785 goto fail;
1786 }
1787
1788 /*
1789 * Create the DMA maps for TX buffers.
1790 */
1791 if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
1792 sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
1793 M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1794 device_printf(dev,
1795 "Unable to allocate TX buffer DMA map memory\n");
1796 err = ENOMEM;
1797 goto fail;
1798 }
1799 if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
1800 sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
1801 M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1802 device_printf(dev,
1803 "Unable to allocate TSO TX buffer map memory\n");
1804 err = ENOMEM;
1805 goto fail;
1806 }
1807 for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1808 err = bus_dmamap_create(txq->ift_buf_tag, 0,
1809 &txq->ift_sds.ifsd_map[i]);
1810 if (err != 0) {
1811 device_printf(dev, "Unable to create TX DMA map\n");
1812 goto fail;
1813 }
1814 if (!tso)
1815 continue;
1816 err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
1817 &txq->ift_sds.ifsd_tso_map[i]);
1818 if (err != 0) {
1819 device_printf(dev, "Unable to create TSO TX DMA map\n");
1820 goto fail;
1821 }
1822 }
1823 return (0);
1824 fail:
1825 /* We free all, it handles case where we are in the middle */
1826 iflib_tx_structures_free(ctx);
1827 return (err);
1828 }
1829
1830 static void
1831 iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
1832 {
1833 bus_dmamap_t map;
1834
1835 if (txq->ift_sds.ifsd_map != NULL) {
1836 map = txq->ift_sds.ifsd_map[i];
1837 bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
1838 bus_dmamap_unload(txq->ift_buf_tag, map);
1839 bus_dmamap_destroy(txq->ift_buf_tag, map);
1840 txq->ift_sds.ifsd_map[i] = NULL;
1841 }
1842
1843 if (txq->ift_sds.ifsd_tso_map != NULL) {
1844 map = txq->ift_sds.ifsd_tso_map[i];
1845 bus_dmamap_sync(txq->ift_tso_buf_tag, map,
1846 BUS_DMASYNC_POSTWRITE);
1847 bus_dmamap_unload(txq->ift_tso_buf_tag, map);
1848 bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
1849 txq->ift_sds.ifsd_tso_map[i] = NULL;
1850 }
1851 }
1852
1853 static void
1854 iflib_txq_destroy(iflib_txq_t txq)
1855 {
1856 if_ctx_t ctx = txq->ift_ctx;
1857
1858 for (int i = 0; i < txq->ift_size; i++)
1859 iflib_txsd_destroy(ctx, txq, i);
1860
1861 if (txq->ift_br != NULL) {
1862 ifmp_ring_free(txq->ift_br);
1863 txq->ift_br = NULL;
1864 }
1865
1866 mtx_destroy(&txq->ift_mtx);
1867
1868 if (txq->ift_sds.ifsd_map != NULL) {
1869 free(txq->ift_sds.ifsd_map, M_IFLIB);
1870 txq->ift_sds.ifsd_map = NULL;
1871 }
1872 if (txq->ift_sds.ifsd_tso_map != NULL) {
1873 free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
1874 txq->ift_sds.ifsd_tso_map = NULL;
1875 }
1876 if (txq->ift_sds.ifsd_m != NULL) {
1877 free(txq->ift_sds.ifsd_m, M_IFLIB);
1878 txq->ift_sds.ifsd_m = NULL;
1879 }
1880 if (txq->ift_buf_tag != NULL) {
1881 bus_dma_tag_destroy(txq->ift_buf_tag);
1882 txq->ift_buf_tag = NULL;
1883 }
1884 if (txq->ift_tso_buf_tag != NULL) {
1885 bus_dma_tag_destroy(txq->ift_tso_buf_tag);
1886 txq->ift_tso_buf_tag = NULL;
1887 }
1888 if (txq->ift_ifdi != NULL) {
1889 free(txq->ift_ifdi, M_IFLIB);
1890 }
1891 }
1892
1893 static void
1894 iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
1895 {
1896 struct mbuf **mp;
1897
1898 mp = &txq->ift_sds.ifsd_m[i];
1899 if (*mp == NULL)
1900 return;
1901
1902 if (txq->ift_sds.ifsd_map != NULL) {
1903 bus_dmamap_sync(txq->ift_buf_tag,
1904 txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
1905 bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
1906 }
1907 if (txq->ift_sds.ifsd_tso_map != NULL) {
1908 bus_dmamap_sync(txq->ift_tso_buf_tag,
1909 txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
1910 bus_dmamap_unload(txq->ift_tso_buf_tag,
1911 txq->ift_sds.ifsd_tso_map[i]);
1912 }
1913 m_freem(*mp);
1914 DBG_COUNTER_INC(tx_frees);
1915 *mp = NULL;
1916 }
1917
1918 static int
1919 iflib_txq_setup(iflib_txq_t txq)
1920 {
1921 if_ctx_t ctx = txq->ift_ctx;
1922 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1923 if_shared_ctx_t sctx = ctx->ifc_sctx;
1924 iflib_dma_info_t di;
1925 int i;
1926
1927 /* Set number of descriptors available */
1928 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
1929 /* XXX make configurable */
1930 txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
1931
1932 /* Reset indices */
1933 txq->ift_cidx_processed = 0;
1934 txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
1935 txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
1936
1937 for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
1938 bzero((void *)di->idi_vaddr, di->idi_size);
1939
1940 IFDI_TXQ_SETUP(ctx, txq->ift_id);
1941 for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
1942 bus_dmamap_sync(di->idi_tag, di->idi_map,
1943 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1944 return (0);
1945 }
1946
1947 /*********************************************************************
1948 *
1949 * Allocate DMA resources for RX buffers as well as memory for the RX
1950 * mbuf map, direct RX cluster pointer map and RX cluster bus address
1951 * map. RX DMA map, RX mbuf map, direct RX cluster pointer map and
1952 * RX cluster map are kept in an iflib_sw_rx_desc_array structure.
1953 * Since we use one entry in iflib_sw_rx_desc_array per received
1954 * packet, the maximum number of entries we'll need is equal to the
1955 * number of hardware receive descriptors that we've allocated.
1956 *
1957 **********************************************************************/
1958 static int
1959 iflib_rxsd_alloc(iflib_rxq_t rxq)
1960 {
1961 if_ctx_t ctx = rxq->ifr_ctx;
1962 if_shared_ctx_t sctx = ctx->ifc_sctx;
1963 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1964 device_t dev = ctx->ifc_dev;
1965 iflib_fl_t fl;
1966 bus_addr_t lowaddr;
1967 int err;
1968
1969 MPASS(scctx->isc_nrxd[0] > 0);
1970 MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
1971
1972 lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width);
1973
1974 fl = rxq->ifr_fl;
1975 for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
1976 fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* not necessarily the same as isc_nrxd[0] */
1977 /* Set up DMA tag for RX buffers. */
1978 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1979 1, 0, /* alignment, bounds */
1980 lowaddr, /* lowaddr */
1981 BUS_SPACE_MAXADDR, /* highaddr */
1982 NULL, NULL, /* filter, filterarg */
1983 sctx->isc_rx_maxsize, /* maxsize */
1984 sctx->isc_rx_nsegments, /* nsegments */
1985 sctx->isc_rx_maxsegsize, /* maxsegsize */
1986 0, /* flags */
1987 NULL, /* lockfunc */
1988 NULL, /* lockarg */
1989 &fl->ifl_buf_tag);
1990 if (err) {
1991 device_printf(dev,
1992 "Unable to allocate RX DMA tag: %d\n", err);
1993 goto fail;
1994 }
1995
1996 /* Allocate memory for the RX mbuf map. */
1997 if (!(fl->ifl_sds.ifsd_m =
1998 (struct mbuf **) malloc(sizeof(struct mbuf *) *
1999 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2000 device_printf(dev,
2001 "Unable to allocate RX mbuf map memory\n");
2002 err = ENOMEM;
2003 goto fail;
2004 }
2005
2006 /* Allocate memory for the direct RX cluster pointer map. */
2007 if (!(fl->ifl_sds.ifsd_cl =
2008 (caddr_t *) malloc(sizeof(caddr_t) *
2009 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2010 device_printf(dev,
2011 "Unable to allocate RX cluster map memory\n");
2012 err = ENOMEM;
2013 goto fail;
2014 }
2015
2016 /* Allocate memory for the RX cluster bus address map. */
2017 if (!(fl->ifl_sds.ifsd_ba =
2018 (bus_addr_t *) malloc(sizeof(bus_addr_t) *
2019 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2020 device_printf(dev,
2021 "Unable to allocate RX bus address map memory\n");
2022 err = ENOMEM;
2023 goto fail;
2024 }
2025
2026 /*
2027 * Create the DMA maps for RX buffers.
2028 */
2029 if (!(fl->ifl_sds.ifsd_map =
2030 (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2031 device_printf(dev,
2032 "Unable to allocate RX buffer DMA map memory\n");
2033 err = ENOMEM;
2034 goto fail;
2035 }
2036 for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
2037 err = bus_dmamap_create(fl->ifl_buf_tag, 0,
2038 &fl->ifl_sds.ifsd_map[i]);
2039 if (err != 0) {
2040 device_printf(dev, "Unable to create RX buffer DMA map\n");
2041 goto fail;
2042 }
2043 }
2044 }
2045 return (0);
2046
2047 fail:
2048 iflib_rx_structures_free(ctx);
2049 return (err);
2050 }
2051
2052 /*
2053 * Internal service routines
2054 */
2055
2056 struct rxq_refill_cb_arg {
2057 int error;
2058 bus_dma_segment_t seg;
2059 int nseg;
2060 };
2061
2062 static void
2063 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2064 {
2065 struct rxq_refill_cb_arg *cb_arg = arg;
2066
2067 cb_arg->error = error;
2068 cb_arg->seg = segs[0];
2069 cb_arg->nseg = nseg;
2070 }
2071
2072 /**
2073 * iflib_fl_refill - refill an rxq free-buffer list
2074 * @ctx: the iflib context
2075 * @fl: the free list to refill
2076 * @count: the number of new buffers to allocate
2077 *
2078 * (Re)populate an rxq free-buffer list with up to @count new packet buffers.
2079 * The caller must ensure that @count does not exceed the queue's capacity
2080 * minus one (since we always leave a descriptor unavailable).
2081 */
2082 static uint8_t
2083 iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
2084 {
2085 struct if_rxd_update iru;
2086 struct rxq_refill_cb_arg cb_arg;
2087 struct mbuf *m;
2088 caddr_t cl, *sd_cl;
2089 struct mbuf **sd_m;
2090 bus_dmamap_t *sd_map;
2091 bus_addr_t bus_addr, *sd_ba;
2092 int err, frag_idx, i, idx, n, pidx;
2093 qidx_t credits;
2094
2095 MPASS(count <= fl->ifl_size - fl->ifl_credits - 1);
2096
2097 sd_m = fl->ifl_sds.ifsd_m;
2098 sd_map = fl->ifl_sds.ifsd_map;
2099 sd_cl = fl->ifl_sds.ifsd_cl;
2100 sd_ba = fl->ifl_sds.ifsd_ba;
2101 pidx = fl->ifl_pidx;
2102 idx = pidx;
2103 frag_idx = fl->ifl_fragidx;
2104 credits = fl->ifl_credits;
2105
2106 i = 0;
2107 n = count;
2108 MPASS(n > 0);
2109 MPASS(credits + n <= fl->ifl_size);
2110
2111 if (pidx < fl->ifl_cidx)
2112 MPASS(pidx + n <= fl->ifl_cidx);
2113 if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
2114 MPASS(fl->ifl_gen == 0);
2115 if (pidx > fl->ifl_cidx)
2116 MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
2117
2118 DBG_COUNTER_INC(fl_refills);
2119 if (n > 8)
2120 DBG_COUNTER_INC(fl_refills_large);
2121 iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
2122 while (n-- > 0) {
2123 /*
2124 * We allocate an uninitialized mbuf + cluster; the mbuf is
2125 * initialized after rx.
2126 *
2127 * If the cluster is still set then we know a minimum-sized
2128 * packet was received
2129 */
2130 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
2131 &frag_idx);
2132 if (frag_idx < 0)
2133 bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
2134 MPASS(frag_idx >= 0);
2135 if ((cl = sd_cl[frag_idx]) == NULL) {
2136 cl = uma_zalloc(fl->ifl_zone, M_NOWAIT);
2137 if (__predict_false(cl == NULL))
2138 break;
2139
2140 cb_arg.error = 0;
2141 MPASS(sd_map != NULL);
2142 err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
2143 cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
2144 BUS_DMA_NOWAIT);
2145 if (__predict_false(err != 0 || cb_arg.error)) {
2146 uma_zfree(fl->ifl_zone, cl);
2147 break;
2148 }
2149
2150 sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
2151 sd_cl[frag_idx] = cl;
2152 #if MEMORY_LOGGING
2153 fl->ifl_cl_enqueued++;
2154 #endif
2155 } else {
2156 bus_addr = sd_ba[frag_idx];
2157 }
2158 bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
2159 BUS_DMASYNC_PREREAD);
2160
2161 if (sd_m[frag_idx] == NULL) {
2162 m = m_gethdr_raw(M_NOWAIT, 0);
2163 if (__predict_false(m == NULL))
2164 break;
2165 sd_m[frag_idx] = m;
2166 }
2167 bit_set(fl->ifl_rx_bitmap, frag_idx);
2168 #if MEMORY_LOGGING
2169 fl->ifl_m_enqueued++;
2170 #endif
2171
2172 DBG_COUNTER_INC(rx_allocs);
2173 fl->ifl_rxd_idxs[i] = frag_idx;
2174 fl->ifl_bus_addrs[i] = bus_addr;
2175 credits++;
2176 i++;
2177 MPASS(credits <= fl->ifl_size);
2178 if (++idx == fl->ifl_size) {
2179 #ifdef INVARIANTS
2180 fl->ifl_gen = 1;
2181 #endif
2182 idx = 0;
2183 }
2184 if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
2185 iru.iru_pidx = pidx;
2186 iru.iru_count = i;
2187 ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2188 fl->ifl_pidx = idx;
2189 fl->ifl_credits = credits;
2190 pidx = idx;
2191 i = 0;
2192 }
2193 }
2194
2195 if (n < count - 1) {
2196 if (i != 0) {
2197 iru.iru_pidx = pidx;
2198 iru.iru_count = i;
2199 ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2200 fl->ifl_pidx = idx;
2201 fl->ifl_credits = credits;
2202 }
2203 DBG_COUNTER_INC(rxd_flush);
2204 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2205 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2206 ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id,
2207 fl->ifl_id, fl->ifl_pidx);
2208 if (__predict_true(bit_test(fl->ifl_rx_bitmap, frag_idx))) {
2209 fl->ifl_fragidx = frag_idx + 1;
2210 if (fl->ifl_fragidx == fl->ifl_size)
2211 fl->ifl_fragidx = 0;
2212 } else {
2213 fl->ifl_fragidx = frag_idx;
2214 }
2215 }
2216
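/*
 * n only reaches -1 here when every one of the @count requested buffers
 * was posted; any cluster or mbuf allocation failure above leaves the
 * loop early with n >= 0.
 */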
2217 return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY);
2218 }
2219
2220 static inline uint8_t
2221 iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl)
2222 {
2223 /*
2224 * We leave an unused descriptor to keep pidx from catching up with
2225 * cidx, since that state confuses most NICs. For instance, Intel
2226 * NICs have (per receive ring) RDH and RDT registers, where RDH
2227 * points to the next receive descriptor to be used by the NIC, and
2228 * RDT to the next receive descriptor to be published by the driver
2229 * to the NIC (RDT - 1 is thus the last valid one). RDH == RDT means
2230 * no descriptors are available to the NIC; if the same condition
2231 * also meant that all descriptors were available, the hardware
2232 * could not tell an empty ring from a full one.
2233 */
2234 int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
2235 #ifdef INVARIANTS
2236 int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
2237 #endif
2238
2239 MPASS(fl->ifl_credits <= fl->ifl_size);
2240 MPASS(reclaimable == delta);
2241
2242 if (reclaimable > 0)
2243 return (iflib_fl_refill(ctx, fl, reclaimable));
2244 return (0);
2245 }
2246
2247 uint8_t
2248 iflib_in_detach(if_ctx_t ctx)
2249 {
2250 bool in_detach;
2251
2252 STATE_LOCK(ctx);
2253 in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH);
2254 STATE_UNLOCK(ctx);
2255 return (in_detach);
2256 }
2257
2258 static void
2259 iflib_fl_bufs_free(iflib_fl_t fl)
2260 {
2261 iflib_dma_info_t idi = fl->ifl_ifdi;
2262 bus_dmamap_t sd_map;
2263 uint32_t i;
2264
2265 for (i = 0; i < fl->ifl_size; i++) {
2266 struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2267 caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
2268
2269 if (*sd_cl != NULL) {
2270 sd_map = fl->ifl_sds.ifsd_map[i];
2271 bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
2272 BUS_DMASYNC_POSTREAD);
2273 bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
2274 uma_zfree(fl->ifl_zone, *sd_cl);
2275 *sd_cl = NULL;
2276 if (*sd_m != NULL) {
2277 m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
2278 m_free_raw(*sd_m);
2279 *sd_m = NULL;
2280 }
2281 } else {
2282 MPASS(*sd_m == NULL);
2283 }
2284 #if MEMORY_LOGGING
2285 fl->ifl_m_dequeued++;
2286 fl->ifl_cl_dequeued++;
2287 #endif
2288 }
2289 #ifdef INVARIANTS
2290 for (i = 0; i < fl->ifl_size; i++) {
2291 MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
2292 MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
2293 }
2294 #endif
2295 /*
2296 * Reset free list values
2297 */
2298 fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
2299 bzero(idi->idi_vaddr, idi->idi_size);
2300 }
2301
2302 /*********************************************************************
2303 *
2304 * Initialize a free list and its buffers.
2305 *
2306 **********************************************************************/
2307 static int
2308 iflib_fl_setup(iflib_fl_t fl)
2309 {
2310 iflib_rxq_t rxq = fl->ifl_rxq;
2311 if_ctx_t ctx = rxq->ifr_ctx;
2312 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2313 int qidx;
2314
2315 bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
2316 /*
2317 * Free current RX buffer structs and their mbufs
2318 */
2319 iflib_fl_bufs_free(fl);
2320 /* Now replenish the mbufs */
2321 MPASS(fl->ifl_credits == 0);
2322 qidx = rxq->ifr_fl_offset + fl->ifl_id;
2323 if (scctx->isc_rxd_buf_size[qidx] != 0)
2324 fl->ifl_buf_size = scctx->isc_rxd_buf_size[qidx];
2325 else
2326 fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz;
2327 /*
2328 * ifl_buf_size may be a driver-supplied value, so pull it up
2329 * to the selected mbuf size.
2330 */
2331 fl->ifl_buf_size = iflib_get_mbuf_size_for(fl->ifl_buf_size);
2332 if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
2333 ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
2334 fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
2335 fl->ifl_zone = m_getzone(fl->ifl_buf_size);
2336
2337 /*
2338 * Avoid pre-allocating zillions of clusters to an idle card,
2339 * potentially speeding up attach. In any case, make sure
2340 * to leave a descriptor unavailable. See the comment in
2341 * iflib_fl_refill_all().
2342 */
2343 MPASS(fl->ifl_size > 0);
2344 (void)iflib_fl_refill(ctx, fl, min(128, fl->ifl_size - 1));
2345 if (min(128, fl->ifl_size - 1) != fl->ifl_credits)
2346 return (ENOBUFS); /* refill failed to fully populate the free list */
2350 MPASS(rxq != NULL);
2351 MPASS(fl->ifl_ifdi != NULL);
2352 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2353 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2354 return (0);
2355 }
2356
2357 /*********************************************************************
2358 *
2359 * Free receive ring data structures
2360 *
2361 **********************************************************************/
2362 static void
2363 iflib_rx_sds_free(iflib_rxq_t rxq)
2364 {
2365 iflib_fl_t fl;
2366 int i, j;
2367
2368 if (rxq->ifr_fl != NULL) {
2369 for (i = 0; i < rxq->ifr_nfl; i++) {
2370 fl = &rxq->ifr_fl[i];
2371 if (fl->ifl_buf_tag != NULL) {
2372 if (fl->ifl_sds.ifsd_map != NULL) {
2373 for (j = 0; j < fl->ifl_size; j++) {
2374 bus_dmamap_sync(
2375 fl->ifl_buf_tag,
2376 fl->ifl_sds.ifsd_map[j],
2377 BUS_DMASYNC_POSTREAD);
2378 bus_dmamap_unload(
2379 fl->ifl_buf_tag,
2380 fl->ifl_sds.ifsd_map[j]);
2381 bus_dmamap_destroy(
2382 fl->ifl_buf_tag,
2383 fl->ifl_sds.ifsd_map[j]);
2384 }
2385 }
2386 bus_dma_tag_destroy(fl->ifl_buf_tag);
2387 fl->ifl_buf_tag = NULL;
2388 }
2389 free(fl->ifl_sds.ifsd_m, M_IFLIB);
2390 free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2391 free(fl->ifl_sds.ifsd_ba, M_IFLIB);
2392 free(fl->ifl_sds.ifsd_map, M_IFLIB);
2393 free(fl->ifl_rx_bitmap, M_IFLIB);
2394 fl->ifl_sds.ifsd_m = NULL;
2395 fl->ifl_sds.ifsd_cl = NULL;
2396 fl->ifl_sds.ifsd_ba = NULL;
2397 fl->ifl_sds.ifsd_map = NULL;
2398 fl->ifl_rx_bitmap = NULL;
2399 }
2400 free(rxq->ifr_fl, M_IFLIB);
2401 rxq->ifr_fl = NULL;
2402 free(rxq->ifr_ifdi, M_IFLIB);
2403 rxq->ifr_ifdi = NULL;
2404 rxq->ifr_cq_cidx = 0;
2405 }
2406 }
2407
2408 /*
2409 * Timer routine
2410 */
2411 static void
2412 iflib_timer(void *arg)
2413 {
2414 iflib_txq_t txq = arg;
2415 if_ctx_t ctx = txq->ift_ctx;
2416 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2417 uint64_t this_tick = ticks;
2418
2419 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
2420 return;
2421
2422 /*
2423 ** Check on the state of the TX queue(s); this
2424 ** can be done without the lock because it's read-only
2425 ** and the HUNG state will be static if set.
2426 */
2427 if (this_tick - txq->ift_last_timer_tick >= iflib_timer_default) {
2428 txq->ift_last_timer_tick = this_tick;
2429 IFDI_TIMER(ctx, txq->ift_id);
2430 if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2431 ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2432 (sctx->isc_pause_frames == 0)))
2433 goto hung;
2434
2435 if (txq->ift_qstatus != IFLIB_QUEUE_IDLE &&
2436 ifmp_ring_is_stalled(txq->ift_br)) {
2437 KASSERT(ctx->ifc_link_state == LINK_STATE_UP,
2438 ("queue can't be marked as hung if interface is down"));
2439 txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2440 }
2441 txq->ift_cleaned_prev = txq->ift_cleaned;
2442 }
2443 /* handle any laggards */
2444 if (txq->ift_db_pending)
2445 GROUPTASK_ENQUEUE(&txq->ift_task);
2446
2447 sctx->isc_pause_frames = 0;
2448 if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2449 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer,
2450 txq, txq->ift_timer.c_cpu);
2451 return;
2452
2453 hung:
2454 device_printf(ctx->ifc_dev,
2455 "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n",
2456 txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
2457 STATE_LOCK(ctx);
2458 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2459 ctx->ifc_flags |= (IFC_DO_WATCHDOG | IFC_DO_RESET);
2460 iflib_admin_intr_deferred(ctx);
2461 STATE_UNLOCK(ctx);
2462 }
2463
2464 static uint16_t
2465 iflib_get_mbuf_size_for(unsigned int size)
2466 {
2467
2468 if (size <= MCLBYTES)
2469 return (MCLBYTES);
2470 else
2471 return (MJUMPAGESIZE);
2472 }
2473
2474 static void
2475 iflib_calc_rx_mbuf_sz(if_ctx_t ctx)
2476 {
2477 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2478
2479 /*
2480 * XXX don't set the max_frame_size to larger
2481 * than the hardware can handle
2482 */
2483 ctx->ifc_rx_mbuf_sz =
2484 iflib_get_mbuf_size_for(sctx->isc_max_frame_size);
2485 }
2486
2487 uint32_t
2488 iflib_get_rx_mbuf_sz(if_ctx_t ctx)
2489 {
2490
2491 return (ctx->ifc_rx_mbuf_sz);
2492 }
2493
2494 static void
2495 iflib_init_locked(if_ctx_t ctx)
2496 {
2497 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2498 if_t ifp = ctx->ifc_ifp;
2499 iflib_fl_t fl;
2500 iflib_txq_t txq;
2501 iflib_rxq_t rxq;
2502 int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
2503
2504 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2505 IFDI_INTR_DISABLE(ctx);
2506
2507 /*
2508 * See iflib_stop(). Useful in case iflib_init_locked() is
2509 * called without first calling iflib_stop().
2510 */
2511 netmap_disable_all_rings(ifp);
2512
2513 tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
2514 tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
2515 /* Set hardware offload abilities */
2516 if_clearhwassist(ifp);
2517 if (if_getcapenable(ifp) & IFCAP_TXCSUM)
2518 if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
2519 if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
2520 if_sethwassistbits(ifp, tx_ip6_csum_flags, 0);
2521 if (if_getcapenable(ifp) & IFCAP_TSO4)
2522 if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
2523 if (if_getcapenable(ifp) & IFCAP_TSO6)
2524 if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
2525
2526 for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
2527 CALLOUT_LOCK(txq);
2528 callout_stop(&txq->ift_timer);
2529 #ifdef DEV_NETMAP
2530 callout_stop(&txq->ift_netmap_timer);
2531 #endif /* DEV_NETMAP */
2532 CALLOUT_UNLOCK(txq);
2533 (void)iflib_netmap_txq_init(ctx, txq);
2534 }
2535
2536 /*
2537 * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so
2538 * that drivers can use the value when setting up the hardware receive
2539 * buffers.
2540 */
2541 iflib_calc_rx_mbuf_sz(ctx);
2542
2543 #ifdef INVARIANTS
2544 i = if_getdrvflags(ifp);
2545 #endif
2546 IFDI_INIT(ctx);
2547 MPASS(if_getdrvflags(ifp) == i);
2548 for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
2549 if (iflib_netmap_rxq_init(ctx, rxq) > 0) {
2550 /* This rxq is in netmap mode. Skip normal init. */
2551 continue;
2552 }
2553 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
2554 if (iflib_fl_setup(fl)) {
2555 device_printf(ctx->ifc_dev,
2556 "setting up free list %d failed - "
2557 "check cluster settings\n", j);
2558 goto done;
2559 }
2560 }
2561 }
2562 done:
2563 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2564 IFDI_INTR_ENABLE(ctx);
2565 txq = ctx->ifc_txqs;
2566 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++)
2567 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq,
2568 txq->ift_timer.c_cpu);
2569
2570 /* Re-enable txsync/rxsync. */
2571 netmap_enable_all_rings(ifp);
2572 }
2573
2574 static int
2575 iflib_media_change(if_t ifp)
2576 {
2577 if_ctx_t ctx = if_getsoftc(ifp);
2578 int err;
2579
2580 CTX_LOCK(ctx);
2581 if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
2582 iflib_if_init_locked(ctx);
2583 CTX_UNLOCK(ctx);
2584 return (err);
2585 }
2586
2587 static void
2588 iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
2589 {
2590 if_ctx_t ctx = if_getsoftc(ifp);
2591
2592 CTX_LOCK(ctx);
2593 IFDI_UPDATE_ADMIN_STATUS(ctx);
2594 IFDI_MEDIA_STATUS(ctx, ifmr);
2595 CTX_UNLOCK(ctx);
2596 }
2597
2598 static void
2599 iflib_stop(if_ctx_t ctx)
2600 {
2601 iflib_txq_t txq = ctx->ifc_txqs;
2602 iflib_rxq_t rxq = ctx->ifc_rxqs;
2603 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2604 if_shared_ctx_t sctx = ctx->ifc_sctx;
2605 iflib_dma_info_t di;
2606 iflib_fl_t fl;
2607 int i, j;
2608
2609 /* Tell the stack that the interface is no longer active */
2610 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2611
2612 IFDI_INTR_DISABLE(ctx);
2613 DELAY(1000);
2614 IFDI_STOP(ctx);
2615 DELAY(1000);
2616
2617 /*
2618 * Stop any pending txsync/rxsync and prevent new ones
2619 * from starting. Processes blocked in poll() will get
2620 * POLLERR.
2621 */
2622 netmap_disable_all_rings(ctx->ifc_ifp);
2623
2624 iflib_debug_reset();
2625 /* Wait for current tx queue users to exit to disarm watchdog timer. */
2626 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
2627 /* make sure all transmitters have completed before proceeding XXX */
2628
2629 CALLOUT_LOCK(txq);
2630 callout_stop(&txq->ift_timer);
2631 #ifdef DEV_NETMAP
2632 callout_stop(&txq->ift_netmap_timer);
2633 #endif /* DEV_NETMAP */
2634 CALLOUT_UNLOCK(txq);
2635
2636 if (!ctx->ifc_sysctl_simple_tx) {
2637 /* clean any enqueued buffers */
2638 iflib_ifmp_purge(txq);
2639 }
2640 /* Free any existing tx buffers. */
2641 for (j = 0; j < txq->ift_size; j++) {
2642 iflib_txsd_free(ctx, txq, j);
2643 }
2644 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2645 txq->ift_in_use = txq->ift_gen = txq->ift_no_desc_avail = 0;
2646 if (sctx->isc_flags & IFLIB_PRESERVE_TX_INDICES)
2647 txq->ift_cidx = txq->ift_pidx;
2648 else
2649 txq->ift_cidx = txq->ift_pidx = 0;
2650
2651 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
2652 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2653 txq->ift_pullups = 0;
2654 ifmp_ring_reset_stats(txq->ift_br);
2655 for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++)
2656 bzero((void *)di->idi_vaddr, di->idi_size);
2657 }
2658 for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
2659 if (rxq->ifr_task.gt_taskqueue != NULL)
2660 gtaskqueue_drain(rxq->ifr_task.gt_taskqueue,
2661 &rxq->ifr_task.gt_task);
2662
2663 rxq->ifr_cq_cidx = 0;
2664 for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++)
2665 bzero((void *)di->idi_vaddr, di->idi_size);
2666 /* also resets the free lists pidx/cidx */
2667 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
2668 iflib_fl_bufs_free(fl);
2669 }
2670 }
2671
2672 static inline caddr_t
2673 calc_next_rxd(iflib_fl_t fl, int cidx)
2674 {
2675 qidx_t size;
2676 int nrxd;
2677 caddr_t start, end, cur, next;
2678
2679 nrxd = fl->ifl_size;
2680 size = fl->ifl_rxd_size;
2681 start = fl->ifl_ifdi->idi_vaddr;
2682
2683 if (__predict_false(size == 0))
2684 return (start);
2685 cur = start + size * cidx;
2686 end = start + size * nrxd;
2687 next = CACHE_PTR_NEXT(cur);
2688 return (next < end ? next : start);
2689 }
2690
2691 static inline void
2692 prefetch_pkts(iflib_fl_t fl, int cidx)
2693 {
2694 int nextptr;
2695 int nrxd = fl->ifl_size;
2696 caddr_t next_rxd;
2697
2698 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd - 1);
2699 prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2700 prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
2701 next_rxd = calc_next_rxd(fl, cidx);
2702 prefetch(next_rxd);
2703 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd - 1)]);
2704 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd - 1)]);
2705 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd - 1)]);
2706 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd - 1)]);
2707 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd - 1)]);
2708 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd - 1)]);
2709 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd - 1)]);
2710 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd - 1)]);
2711 }
2712
2713 static struct mbuf *
2714 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, bool unload, if_rxsd_t sd,
2715 int *pf_rv, if_rxd_info_t ri)
2716 {
2717 bus_dmamap_t map;
2718 iflib_fl_t fl;
2719 caddr_t payload;
2720 struct mbuf *m;
2721 int flid, cidx, len, next;
2722
2723 map = NULL;
2724 flid = irf->irf_flid;
2725 cidx = irf->irf_idx;
2726 fl = &rxq->ifr_fl[flid];
2727 sd->ifsd_fl = fl;
2728 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
2729 fl->ifl_credits--;
2730 #if MEMORY_LOGGING
2731 fl->ifl_m_dequeued++;
2732 #endif
2733 if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2734 prefetch_pkts(fl, cidx);
2735 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size - 1);
2736 prefetch(&fl->ifl_sds.ifsd_map[next]);
2737 map = fl->ifl_sds.ifsd_map[cidx];
2738
2739 bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
2740
2741 if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL &&
2742 irf->irf_len != 0) {
2743 payload = *sd->ifsd_cl;
2744 payload += ri->iri_pad;
2745 len = ri->iri_len - ri->iri_pad;
2746 *pf_rv = pfil_mem_in(rxq->pfil, payload, len, ri->iri_ifp, &m);
2747 switch (*pf_rv) {
2748 case PFIL_DROPPED:
2749 case PFIL_CONSUMED:
2750 /*
2751 * The filter ate it. Everything is recycled.
2752 */
2753 m = NULL;
2754 unload = 0;
2755 break;
2756 case PFIL_REALLOCED:
2757 /*
2758 * The filter copied it. Everything is recycled.
2759 * 'm' points at new mbuf.
2760 */
2761 unload = 0;
2762 break;
2763 case PFIL_PASS:
2764 /*
2765 * Filter said it was OK, so receive like
2766 * normal
2767 */
2768 m = fl->ifl_sds.ifsd_m[cidx];
2769 fl->ifl_sds.ifsd_m[cidx] = NULL;
2770 break;
2771 default:
2772 MPASS(0);
2773 }
2774 } else {
2775 m = fl->ifl_sds.ifsd_m[cidx];
2776 fl->ifl_sds.ifsd_m[cidx] = NULL;
2777 if (pf_rv != NULL)
2778 *pf_rv = PFIL_PASS;
2779 }
2780
2781 if (unload && irf->irf_len != 0)
2782 bus_dmamap_unload(fl->ifl_buf_tag, map);
2783 fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size - 1);
2784 if (__predict_false(fl->ifl_cidx == 0))
2785 fl->ifl_gen = 0;
2786 bit_clear(fl->ifl_rx_bitmap, cidx);
2787 return (m);
2788 }
2789
2790 static struct mbuf *
2791 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd, int *pf_rv)
2792 {
2793 struct mbuf *m, *mh, *mt;
2794 caddr_t cl;
2795 int *pf_rv_ptr, flags, i, padlen;
2796 bool consumed;
2797
2798 i = 0;
2799 mh = NULL;
2800 consumed = false;
2801 *pf_rv = PFIL_PASS;
2802 pf_rv_ptr = pf_rv;
2803 do {
2804 m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd,
2805 pf_rv_ptr, ri);
2806
2807 MPASS(*sd->ifsd_cl != NULL);
2808
2809 /*
2810 * Exclude zero-length frags & frags from
2811 * packets the filter has consumed or dropped
2812 */
2813 if (ri->iri_frags[i].irf_len == 0 || consumed ||
2814 *pf_rv == PFIL_CONSUMED || *pf_rv == PFIL_DROPPED) {
2815 if (mh == NULL) {
2816 /* everything saved here */
2817 consumed = true;
2818 pf_rv_ptr = NULL;
2819 continue;
2820 }
2821 /* XXX we can save the cluster here, but not the mbuf */
2822 m_init(m, M_NOWAIT, MT_DATA, 0);
2823 m_free(m);
2824 continue;
2825 }
2826 if (mh == NULL) {
2827 flags = M_PKTHDR | M_EXT;
2828 mh = mt = m;
2829 padlen = ri->iri_pad;
2830 } else {
2831 flags = M_EXT;
2832 mt->m_next = m;
2833 mt = m;
2834 /* assuming padding is only on the first fragment */
2835 padlen = 0;
2836 }
2837 cl = *sd->ifsd_cl;
2838 *sd->ifsd_cl = NULL;
2839
2840 /* Can these two be made one ? */
2841 m_init(m, M_NOWAIT, MT_DATA, flags);
2842 m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
2843 /*
2844 * These must follow m_init and m_cljset
2845 */
2846 m->m_data += padlen;
2847 ri->iri_len -= padlen;
2848 m->m_len = ri->iri_frags[i].irf_len;
2849 } while (++i < ri->iri_nfrags);
2850
2851 return (mh);
2852 }
2853
2854 /*
2855 * Process one software descriptor
2856 */
2857 static struct mbuf *
2858 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
2859 {
2860 struct if_rxsd sd;
2861 struct mbuf *m;
2862 int pf_rv;
2863
2864 /* should I merge this back in now that the two paths are basically duplicated? */
2865 if (ri->iri_nfrags == 1 &&
2866 ri->iri_frags[0].irf_len != 0 &&
2867 ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
2868 m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd,
2869 &pf_rv, ri);
2870 if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
2871 return (m);
2872 if (pf_rv == PFIL_PASS) {
2873 m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
2874 #ifndef __NO_STRICT_ALIGNMENT
2875 if (!IP_ALIGNED(m) && ri->iri_pad == 0)
2876 m->m_data += 2;
2877 #endif
2878 memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
2879 m->m_len = ri->iri_frags[0].irf_len;
2880 m->m_data += ri->iri_pad;
2881 ri->iri_len -= ri->iri_pad;
2882 }
2883 } else {
2884 m = assemble_segments(rxq, ri, &sd, &pf_rv);
2885 if (m == NULL)
2886 return (NULL);
2887 if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
2888 return (m);
2889 }
2890 m->m_pkthdr.len = ri->iri_len;
2891 m->m_pkthdr.rcvif = ri->iri_ifp;
2892 m->m_flags |= ri->iri_flags;
2893 m->m_pkthdr.ether_vtag = ri->iri_vtag;
2894 m->m_pkthdr.flowid = ri->iri_flowid;
2895 #ifdef NUMA
2896 m->m_pkthdr.numa_domain = if_getnumadomain(ri->iri_ifp);
2897 #endif
2898 M_HASHTYPE_SET(m, ri->iri_rsstype);
2899 m->m_pkthdr.csum_flags = ri->iri_csum_flags;
2900 m->m_pkthdr.csum_data = ri->iri_csum_data;
2901 return (m);
2902 }
2903
2904 static void
2905 _task_fn_rx_watchdog(void *context)
2906 {
2907 iflib_rxq_t rxq = context;
2908
2909 GROUPTASK_ENQUEUE(&rxq->ifr_task);
2910 }
2911
2912 static uint8_t
2913 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
2914 {
2915 if_t ifp;
2916 if_ctx_t ctx = rxq->ifr_ctx;
2917 if_shared_ctx_t sctx = ctx->ifc_sctx;
2918 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2919 int avail, i;
2920 qidx_t *cidxp;
2921 struct if_rxd_info ri;
2922 int err, budget_left, rx_bytes, rx_pkts;
2923 iflib_fl_t fl;
2924 #if defined(INET6) || defined(INET)
2925 int lro_enabled;
2926 #endif
2927 uint8_t retval = 0;
2928
2929 /*
2930 * XXX early demux data packets so that if_input processing only handles
2931 * acks in interrupt context
2932 */
2933 struct mbuf *m, *mh, *mt;
2934
2935 NET_EPOCH_ASSERT();
2936
2937 ifp = ctx->ifc_ifp;
2938 mh = mt = NULL;
2939 MPASS(budget > 0);
2940 rx_pkts = rx_bytes = 0;
2941 if (sctx->isc_flags & IFLIB_HAS_RXCQ)
2942 cidxp = &rxq->ifr_cq_cidx;
2943 else
2944 cidxp = &rxq->ifr_fl[0].ifl_cidx;
2945 if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
2946 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2947 retval |= iflib_fl_refill_all(ctx, fl);
2948 DBG_COUNTER_INC(rx_unavail);
2949 return (retval);
2950 }
2951
2952 #if defined(INET6) || defined(INET)
2953 lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
2954 #endif
2955
2956 /* pfil needs the vnet to be set */
2957 CURVNET_SET_QUIET(if_getvnet(ifp));
2958 for (budget_left = budget; budget_left > 0 && avail > 0;) {
2959 if (__predict_false(!CTX_ACTIVE(ctx))) {
2960 DBG_COUNTER_INC(rx_ctx_inactive);
2961 break;
2962 }
2963 /*
2964 * Reset client set fields to their default values
2965 */
2966 rxd_info_zero(&ri);
2967 ri.iri_qsidx = rxq->ifr_id;
2968 ri.iri_cidx = *cidxp;
2969 ri.iri_ifp = ifp;
2970 ri.iri_frags = rxq->ifr_frags;
2971 err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
2972
2973 if (err)
2974 goto err;
2975 rx_pkts += 1;
2976 rx_bytes += ri.iri_len;
2977 if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
2978 *cidxp = ri.iri_cidx;
2979 /* Update our consumer index */
2980 /* XXX NB: shurd - check if this is still safe */
2981 while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0])
2982 rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
2983 /* was this only a completion queue message? */
2984 if (__predict_false(ri.iri_nfrags == 0))
2985 continue;
2986 }
2987 MPASS(ri.iri_nfrags != 0);
2988 MPASS(ri.iri_len != 0);
2989
2990 /* will advance the cidx on the corresponding free lists */
2991 m = iflib_rxd_pkt_get(rxq, &ri);
2992 avail--;
2993 budget_left--;
2994 if (avail == 0 && budget_left)
2995 avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
2996
2997 if (__predict_false(m == NULL))
2998 continue;
2999
3000 #ifndef __NO_STRICT_ALIGNMENT
3001 if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
3002 continue;
3003 #endif
3004 #if defined(INET6) || defined(INET)
3005 if (lro_enabled) {
3006 tcp_lro_queue_mbuf(&rxq->ifr_lc, m);
3007 continue;
3008 }
3009 #endif
3010
3011 if (mh == NULL)
3012 mh = mt = m;
3013 else {
3014 mt->m_nextpkt = m;
3015 mt = m;
3016 }
3017 }
3018 CURVNET_RESTORE();
3019 /* make sure that we can refill faster than drain */
3020 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
3021 retval |= iflib_fl_refill_all(ctx, fl);
3022
3023 if (mh != NULL) {
3024 if_input(ifp, mh);
3025 DBG_COUNTER_INC(rx_if_input);
3026 }
3027
3028 if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
3029 if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
3030
3031 /*
3032 * Flush any outstanding LRO work
3033 */
3034 #if defined(INET6) || defined(INET)
3035 tcp_lro_flush_all(&rxq->ifr_lc);
3036 #endif
3037 if (avail != 0 || iflib_rxd_avail(ctx, rxq, *cidxp, 1) != 0)
3038 retval |= IFLIB_RXEOF_MORE;
3039 return (retval);
3040 err:
3041 STATE_LOCK(ctx);
3042 ctx->ifc_flags |= IFC_DO_RESET;
3043 iflib_admin_intr_deferred(ctx);
3044 STATE_UNLOCK(ctx);
3045 return (0);
3046 }
3047
3048 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq) - 1)
3049 static inline qidx_t
3050 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
3051 {
3052 qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
3053 qidx_t minthresh = txq->ift_size / 8;
3054 if (in_use > 4 * minthresh)
3055 return (notify_count);
3056 if (in_use > 2 * minthresh)
3057 return (notify_count >> 1);
3058 if (in_use > minthresh)
3059 return (notify_count >> 3);
3060 return (0);
3061 }
3062
3063 static inline qidx_t
3064 txq_max_rs_deferred(iflib_txq_t txq)
3065 {
3066 qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
3067 qidx_t minthresh = txq->ift_size / 8;
3068 if (txq->ift_in_use > 4 * minthresh)
3069 return (notify_count);
3070 if (txq->ift_in_use > 2 * minthresh)
3071 return (notify_count >> 1);
3072 if (txq->ift_in_use > minthresh)
3073 return (notify_count >> 2);
3074 return (2);
3075 }
3076
3077 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
3078 #define M_HAS_VLANTAG(m) ((m)->m_flags & M_VLANTAG)
3079
3080 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
3081 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
3082 #define TXQ_MAX_DB_CONSUMED(size) ((size) >> 4)
3083
3084 /* forward compatibility for cxgb */
3085 #define FIRST_QSET(ctx) 0
3086 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
3087 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
3088 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
3089 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
3090
3091 #define MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
3092 (ctx)->ifc_softc_ctx.isc_tx_nsegments)
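
/*
 * Example (hypothetical values): with isc_ntxqsets = 4 and
 * isc_rss_table_mask = 127, an mbuf whose RSS flowid is 77 maps via
 * QIDX() to TX queue (77 & 127) % 4 = 1, so a given flow always lands on
 * the same queue.
 */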
3093
3094 static inline bool
3095 iflib_txd_db_check(iflib_txq_t txq, int ring)
3096 {
3097 if_ctx_t ctx = txq->ift_ctx;
3098 qidx_t dbval, max;
3099
3100 max = TXQ_MAX_DB_DEFERRED(txq, txq->ift_in_use);
3101
3102 /* force || threshold exceeded || at the edge of the ring */
3103 if (ring || (txq->ift_db_pending >= max) || (TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2)) {
3104
3105 /*
3106 * 'npending' is used if the card's doorbell is in terms of the number of descriptors
3107 * pending flush (BRCM). 'pidx' is used in cases where the card's doorbell uses the
3108 * producer index explicitly (INTC).
3109 */
3110 dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
3111 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3112 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3113 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
3114
3115 /*
3116 * Absent bugs, there are zero packets pending, so reset the pending counts to zero.
3117 */
3118 txq->ift_db_pending = txq->ift_npending = 0;
3119 return (true);
3120 }
3121 return (false);
3122 }
3123
3124 #ifdef PKT_DEBUG
3125 static void
3126 print_pkt(if_pkt_info_t pi)
3127 {
3128 printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
3129 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
3130 printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
3131 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
3132 printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
3133 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
3134 }
3135 #endif
3136
3137 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
3138 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
3139 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
3140 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
3141
3142 /**
3143 * Parses out ethernet header information in the given mbuf.
3144 * Returns in pi: ipi_etype (EtherType) and ipi_ehdrlen (Ethernet header length)
3145 *
3146 * This will account for the VLAN header if present.
3147 *
3148 * XXX: This doesn't handle QinQ, which could prevent TX offloads for those
3149 * types of packets.
3150 */
3151 static int
3152 iflib_parse_ether_header(if_pkt_info_t pi, struct mbuf **mp, uint64_t *pullups)
3153 {
3154 struct ether_vlan_header *eh;
3155 struct mbuf *m;
3156
3157 m = *mp;
3158 if (__predict_false(m->m_len < sizeof(*eh))) {
3159 (*pullups)++;
3160 if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
3161 return (ENOMEM);
3162 }
3163 eh = mtod(m, struct ether_vlan_header *);
3164 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3165 pi->ipi_etype = ntohs(eh->evl_proto);
3166 pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3167 } else {
3168 pi->ipi_etype = ntohs(eh->evl_encap_proto);
3169 pi->ipi_ehdrlen = ETHER_HDR_LEN;
3170 }
3171 *mp = m;
3172
3173 return (0);
3174 }
3175
3176 /**
3177 * Parse up to the L3 header and extract IPv4/IPv6 header information into pi.
3178 * Currently this information includes: IP ToS value, IP header version/presence
3179 *
3180 * This is missing some checks and doesn't edit the packet content as it goes,
3181 * unlike iflib_parse_header(), in order to keep the amount of code here minimal.
3182 */
3183 static int
3184 iflib_parse_header_partial(if_pkt_info_t pi, struct mbuf **mp, uint64_t *pullups)
3185 {
3186 struct mbuf *m;
3187 int err;
3188
3189 *pullups = 0;
3190 m = *mp;
3191 if (!M_WRITABLE(m)) {
3192 if ((m = m_dup(m, M_NOWAIT)) == NULL) {
3193 return (ENOMEM);
3194 } else {
3195 m_freem(*mp);
3196 DBG_COUNTER_INC(tx_frees);
3197 *mp = m;
3198 }
3199 }
3200
3201 /* Fills out pi->ipi_etype */
3202 err = iflib_parse_ether_header(pi, mp, pullups);
3203 if (err)
3204 return (err);
3205 m = *mp;
3206
3207 switch (pi->ipi_etype) {
3208 #ifdef INET
3209 case ETHERTYPE_IP:
3210 {
3211 struct mbuf *n;
3212 struct ip *ip = NULL;
3213 int miniplen;
3214
3215 miniplen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip));
3216 if (__predict_false(m->m_len < miniplen)) {
3217 /*
3218 * Check for common case where the first mbuf only contains
3219 * the Ethernet header
3220 */
3221 if (m->m_len == pi->ipi_ehdrlen) {
3222 n = m->m_next;
3223 MPASS(n);
3224 /* If next mbuf contains at least the minimal IP header, then stop */
3225 if (n->m_len >= sizeof(*ip)) {
3226 ip = (struct ip *)n->m_data;
3227 } else {
3228 (*pullups)++;
3229 if (__predict_false((m = m_pullup(m, miniplen)) == NULL))
3230 return (ENOMEM);
3231 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3232 }
3233 } else {
3234 (*pullups)++;
3235 if (__predict_false((m = m_pullup(m, miniplen)) == NULL))
3236 return (ENOMEM);
3237 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3238 }
3239 } else {
3240 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3241 }
3242
3243 /* Have the IPv4 header w/ no options here */
3244 pi->ipi_ip_hlen = ip->ip_hl << 2;
3245 pi->ipi_ipproto = ip->ip_p;
3246 pi->ipi_ip_tos = ip->ip_tos;
3247 pi->ipi_flags |= IPI_TX_IPV4;
3248
3249 break;
3250 }
3251 #endif
3252 #ifdef INET6
3253 case ETHERTYPE_IPV6:
3254 {
3255 struct ip6_hdr *ip6;
3256
3257 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
3258 (*pullups)++;
3259 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
3260 return (ENOMEM);
3261 }
3262 ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3263
3264 /* Have the IPv6 fixed header here */
3265 pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
3266 pi->ipi_ipproto = ip6->ip6_nxt;
3267 pi->ipi_ip_tos = IPV6_TRAFFIC_CLASS(ip6);
3268 pi->ipi_flags |= IPI_TX_IPV6;
3269
3270 break;
3271 }
3272 #endif
3273 default:
3274 pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3275 pi->ipi_ip_hlen = 0;
3276 break;
3277 }
3278 *mp = m;
3279
3280 return (0);
3282 }
3283
3284 static int
3285 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
3286 {
3287 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
3288 struct mbuf *m;
3289 int err;
3290
3291 m = *mp;
3292 if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
3293 M_WRITABLE(m) == 0) {
3294 if ((m = m_dup(m, M_NOWAIT)) == NULL) {
3295 return (ENOMEM);
3296 } else {
3297 m_freem(*mp);
3298 DBG_COUNTER_INC(tx_frees);
3299 *mp = m;
3300 }
3301 }
3302
3303 /* Fills out pi->ipi_etype */
3304 err = iflib_parse_ether_header(pi, mp, &txq->ift_pullups);
3305 if (__predict_false(err))
3306 return (err);
3307 m = *mp;
3308
3309 switch (pi->ipi_etype) {
3310 #ifdef INET
3311 case ETHERTYPE_IP:
3312 {
3313 struct ip *ip;
3314 struct tcphdr *th;
3315 uint8_t hlen;
3316
3317 hlen = pi->ipi_ehdrlen + sizeof(*ip);
3318 if (__predict_false(m->m_len < hlen)) {
3319 txq->ift_pullups++;
3320 if (__predict_false((m = m_pullup(m, hlen)) == NULL))
3321 return (ENOMEM);
3322 }
3323 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3324 hlen = pi->ipi_ehdrlen + (ip->ip_hl << 2);
3325 if (ip->ip_p == IPPROTO_TCP) {
3326 hlen += sizeof(*th);
3327 th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
3328 } else if (ip->ip_p == IPPROTO_UDP) {
3329 hlen += sizeof(struct udphdr);
3330 }
3331 if (__predict_false(m->m_len < hlen)) {
3332 txq->ift_pullups++;
3333 if ((m = m_pullup(m, hlen)) == NULL)
3334 return (ENOMEM);
3335 }
3336 pi->ipi_ip_hlen = ip->ip_hl << 2;
3337 pi->ipi_ipproto = ip->ip_p;
3338 pi->ipi_ip_tos = ip->ip_tos;
3339 pi->ipi_flags |= IPI_TX_IPV4;
3340
3341 /* TCP checksum offload may require TCP header length */
3342 if (IS_TX_OFFLOAD4(pi)) {
3343 if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
3344 pi->ipi_tcp_hflags = tcp_get_flags(th);
3345 pi->ipi_tcp_hlen = th->th_off << 2;
3346 pi->ipi_tcp_seq = th->th_seq;
3347 }
3348 if (IS_TSO4(pi)) {
3349 if (__predict_false(ip->ip_p != IPPROTO_TCP))
3350 return (ENXIO);
3351 /*
3352 * TSO always requires hardware checksum offload.
3353 */
3354 pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP);
3355 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3356 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3357 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3358 if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
3359 ip->ip_sum = 0;
3360 ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
3361 }
3362 }
3363 }
3364 if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
3365 ip->ip_sum = 0;
3366
3367 break;
3368 }
3369 #endif
3370 #ifdef INET6
3371 case ETHERTYPE_IPV6:
3372 {
3373 struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3374 struct tcphdr *th;
3375 pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
3376
3377 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
3378 txq->ift_pullups++;
3379 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
3380 return (ENOMEM);
3381 }
3382 th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
3383
3384 /* XXX-BZ this will go badly in case of ext hdrs. */
3385 pi->ipi_ipproto = ip6->ip6_nxt;
3386 pi->ipi_ip_tos = IPV6_TRAFFIC_CLASS(ip6);
3387 pi->ipi_flags |= IPI_TX_IPV6;
3388
3389 /* TCP checksum offload may require TCP header length */
3390 if (IS_TX_OFFLOAD6(pi)) {
3391 if (pi->ipi_ipproto == IPPROTO_TCP) {
3392 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3393 txq->ift_pullups++;
3394 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
3395 return (ENOMEM);
3396 }
3397 pi->ipi_tcp_hflags = tcp_get_flags(th);
3398 pi->ipi_tcp_hlen = th->th_off << 2;
3399 pi->ipi_tcp_seq = th->th_seq;
3400 }
3401 if (IS_TSO6(pi)) {
3402 if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
3403 return (ENXIO);
3404 /*
3405 * TSO always requires hardware checksum offload.
3406 */
3407 pi->ipi_csum_flags |= CSUM_IP6_TCP;
3408 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
3409 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3410 }
3411 }
3412 break;
3413 }
3414 #endif
3415 default:
3416 pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3417 pi->ipi_ip_hlen = 0;
3418 break;
3419 }
3420 *mp = m;
3421
3422 return (0);
3423 }
3424
3425 /*
3426 * If dodgy hardware rejects the scatter gather chain we've handed it
3427 * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
3428 * m_defrag'd mbufs
3429 */
3430 static __noinline struct mbuf *
3431 iflib_remove_mbuf(iflib_txq_t txq)
3432 {
3433 int ntxd, pidx;
3434 struct mbuf *m, **ifsd_m;
3435
3436 ifsd_m = txq->ift_sds.ifsd_m;
3437 ntxd = txq->ift_size;
3438 pidx = txq->ift_pidx & (ntxd - 1);
3440 m = ifsd_m[pidx];
3441 ifsd_m[pidx] = NULL;
3442 bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
3443 if (txq->ift_sds.ifsd_tso_map != NULL)
3444 bus_dmamap_unload(txq->ift_tso_buf_tag,
3445 txq->ift_sds.ifsd_tso_map[pidx]);
3446 #if MEMORY_LOGGING
3447 txq->ift_dequeued++;
3448 #endif
3449 return (m);
3450 }
3451
3452 /*
3453 * Pad an mbuf to ensure a minimum ethernet frame size.
3454 * min_frame_size is the frame size (less CRC) to pad the mbuf to
3455 */
3456 static __noinline int
3457 iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3458 {
3459 /*
3460 * 18 bytes is enough to pad an ARP payload to the 46-byte minimum, and
3461 * an ARP message is the smallest common payload I can think of
3462 */
3463 static char pad[18]; /* just zeros */
3464 int n;
3465 struct mbuf *new_head;
3466
3467 if (!M_WRITABLE(*m_head)) {
3468 new_head = m_dup(*m_head, M_NOWAIT);
3469 if (new_head == NULL) {
3470 m_freem(*m_head);
3471 device_printf(dev, "cannot pad short frame, m_dup() failed");
3472 DBG_COUNTER_INC(encap_pad_mbuf_fail);
3473 DBG_COUNTER_INC(tx_frees);
3474 return (ENOMEM);
3475 }
3476 m_freem(*m_head);
3477 *m_head = new_head;
3478 }
3479
3480 for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3481 n > 0; n -= sizeof(pad))
3482 if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3483 break;
3484
3485 if (n > 0) {
3486 m_freem(*m_head);
3487 device_printf(dev, "cannot pad short frame\n");
3488 DBG_COUNTER_INC(encap_pad_mbuf_fail);
3489 DBG_COUNTER_INC(tx_frees);
3490 return (ENOBUFS);
3491 }
3492
3493 return (0);
3494 }
3495
3496 static int
3497 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
3498 {
3499 if_ctx_t ctx;
3500 if_shared_ctx_t sctx;
3501 if_softc_ctx_t scctx;
3502 bus_dma_tag_t buf_tag;
3503 bus_dma_segment_t *segs;
3504 struct mbuf *m_head, **ifsd_m;
3505 bus_dmamap_t map;
3506 struct if_pkt_info pi;
3507 int remap = 0;
3508 int err, nsegs, ndesc, max_segs, pidx;
3509
3510 ctx = txq->ift_ctx;
3511 sctx = ctx->ifc_sctx;
3512 scctx = &ctx->ifc_softc_ctx;
3513 segs = txq->ift_segs;
3514 m_head = *m_headp;
3515 map = NULL;
3516
3517 /*
3518 * If we're doing TSO the next descriptor to clean may be quite far ahead
3519 */
3520 pidx = txq->ift_pidx;
3522 ifsd_m = txq->ift_sds.ifsd_m;
3523
3524 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3525 buf_tag = txq->ift_tso_buf_tag;
3526 max_segs = scctx->isc_tx_tso_segments_max;
3527 map = txq->ift_sds.ifsd_tso_map[pidx];
3528 MPASS(buf_tag != NULL);
3529 MPASS(max_segs > 0);
3530 } else {
3531 buf_tag = txq->ift_buf_tag;
3532 max_segs = scctx->isc_tx_nsegments;
3533 map = txq->ift_sds.ifsd_map[pidx];
3534 }
3535 if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3536 __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3537 err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
3538 if (err) {
3539 DBG_COUNTER_INC(encap_txd_encap_fail);
3540 return (err);
3541 }
3542 }
3543 m_head = *m_headp;
3544
3545 pkt_info_zero(&pi);
3546 pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG | M_BCAST | M_MCAST));
3547 pi.ipi_pidx = pidx;
3548 pi.ipi_qsidx = txq->ift_id;
3549 pi.ipi_len = m_head->m_pkthdr.len;
3550 pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
3551 pi.ipi_vtag = M_HAS_VLANTAG(m_head) ? m_head->m_pkthdr.ether_vtag : 0;
3552
3553 /* deliberate bitwise OR to make one condition */
3554 if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
3555 if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) {
3556 DBG_COUNTER_INC(encap_txd_encap_fail);
3557 return (err);
3558 }
3559 m_head = *m_headp;
3560 }
3561
3562 retry:
3563 err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
3564 BUS_DMA_NOWAIT);
3565 defrag:
3566 if (__predict_false(err)) {
3567 switch (err) {
3568 case EFBIG:
3569 /* try collapse once and defrag once */
3570 if (remap == 0) {
3571 m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3572 /* try defrag if collapsing fails */
3573 if (m_head == NULL)
3574 remap++;
3575 }
3576 if (remap == 1) {
3577 txq->ift_mbuf_defrag++;
3578 m_head = m_defrag(*m_headp, M_NOWAIT);
3579 }
3580 /*
3581 * remap should never be >1 unless bus_dmamap_load_mbuf_sg
3582 * failed to map an mbuf that was run through m_defrag
3583 */
3584 MPASS(remap <= 1);
3585 if (__predict_false(m_head == NULL || remap > 1))
3586 goto defrag_failed;
3587 remap++;
3588 *m_headp = m_head;
3589 goto retry;
3590 break;
3591 case ENOMEM:
3592 txq->ift_no_tx_dma_setup++;
3593 break;
3594 default:
3595 txq->ift_no_tx_dma_setup++;
3596 m_freem(*m_headp);
3597 DBG_COUNTER_INC(tx_frees);
3598 *m_headp = NULL;
3599 break;
3600 }
3601 txq->ift_map_failed++;
3602 DBG_COUNTER_INC(encap_load_mbuf_fail);
3603 DBG_COUNTER_INC(encap_txd_encap_fail);
3604 return (err);
3605 }
3606 ifsd_m[pidx] = m_head;
3607 /*
3608 * XXX assumes a 1 to 1 relationship between segments and
3609 * descriptors - this does not hold true on all drivers, e.g.
3610 * cxgb
3611 */
3612 if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
3613 (void)iflib_completed_tx_reclaim(txq);
3614 if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
3615 txq->ift_no_desc_avail++;
3616 bus_dmamap_unload(buf_tag, map);
3617 DBG_COUNTER_INC(encap_txq_avail_fail);
3618 DBG_COUNTER_INC(encap_txd_encap_fail);
3619 if (ctx->ifc_sysctl_simple_tx) {
3620 *m_headp = m_head = iflib_remove_mbuf(txq);
3621 m_freem(*m_headp);
3622 DBG_COUNTER_INC(tx_frees);
3623 *m_headp = NULL;
3624 }
3625 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
3626 GROUPTASK_ENQUEUE(&txq->ift_task);
3627 return (ENOBUFS);
3628 }
3629 }
3630 /*
3631 * On Intel cards we can greatly reduce the number of TX interrupts
3632 * we see by only setting report status on every Nth descriptor.
3633 * However, this also means that the driver will need to keep track
3634 * of the descriptors that RS was set on to check them for the DD bit.
3635 */
3636 txq->ift_rs_pending += nsegs + 1;
3637 if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
3638 iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
3639 pi.ipi_flags |= IPI_TX_INTR;
3640 txq->ift_rs_pending = 0;
3641 }
3642
3643 pi.ipi_segs = segs;
3644 pi.ipi_nsegs = nsegs;
3645
3646 MPASS(pidx >= 0 && pidx < txq->ift_size);
3647 #ifdef PKT_DEBUG
3648 print_pkt(&pi);
3649 #endif
3650 if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
3651 bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
3652 DBG_COUNTER_INC(tx_encap);
3653 MPASS(pi.ipi_new_pidx < txq->ift_size);
3654
3655 ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
3656 if (pi.ipi_new_pidx < pi.ipi_pidx) {
3657 ndesc += txq->ift_size;
3658 txq->ift_gen = 1;
3659 }
3660 /*
3661 * drivers may need as many as
3662 * two sentinels
3663 */
3664 MPASS(ndesc <= pi.ipi_nsegs + 2);
3665 MPASS(pi.ipi_new_pidx != pidx);
3666 MPASS(ndesc > 0);
3667 txq->ift_in_use += ndesc;
3668 txq->ift_db_pending += ndesc;
3669
3670 /*
3671 * We update the last software descriptor again here because there may
3672 * be a sentinel and/or there may be more mbufs than segments
3673 */
3674 txq->ift_pidx = pi.ipi_new_pidx;
3675 txq->ift_npending += pi.ipi_ndescs;
3676 } else {
3677 *m_headp = m_head = iflib_remove_mbuf(txq);
3678 if (err == EFBIG) {
3679 txq->ift_txd_encap_efbig++;
3680 if (remap < 2) {
3681 remap = 1;
3682 goto defrag;
3683 }
3684 }
3685 goto defrag_failed;
3686 }
3687 /*
3688 * err can't possibly be non-zero here, so we don't need to test it
3689 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
3690 */
3691 return (err);
3692
3693 defrag_failed:
3694 txq->ift_mbuf_defrag_failed++;
3695 txq->ift_map_failed++;
3696 m_freem(*m_headp);
3697 DBG_COUNTER_INC(tx_frees);
3698 *m_headp = NULL;
3699 DBG_COUNTER_INC(encap_txd_encap_fail);
3700 return (ENOMEM);
3701 }
3702
3703 static void
3704 iflib_tx_desc_free(iflib_txq_t txq, int n)
3705 {
3706 uint32_t qsize, cidx, gen;
3707 struct mbuf *m, **ifsd_m;
3708
3709 cidx = txq->ift_cidx;
3710 gen = txq->ift_gen;
3711 qsize = txq->ift_size;
3712 ifsd_m = txq->ift_sds.ifsd_m;
3713
3714 while (n-- > 0) {
3715 if ((m = ifsd_m[cidx]) != NULL) {
3716 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
3717 bus_dmamap_sync(txq->ift_tso_buf_tag,
3718 txq->ift_sds.ifsd_tso_map[cidx],
3719 BUS_DMASYNC_POSTWRITE);
3720 bus_dmamap_unload(txq->ift_tso_buf_tag,
3721 txq->ift_sds.ifsd_tso_map[cidx]);
3722 } else {
3723 bus_dmamap_sync(txq->ift_buf_tag,
3724 txq->ift_sds.ifsd_map[cidx],
3725 BUS_DMASYNC_POSTWRITE);
3726 bus_dmamap_unload(txq->ift_buf_tag,
3727 txq->ift_sds.ifsd_map[cidx]);
3728 }
3729 /* XXX we don't support any drivers that batch packets yet */
3730 MPASS(m->m_nextpkt == NULL);
3731 m_freem(m);
3732 ifsd_m[cidx] = NULL;
3733 #if MEMORY_LOGGING
3734 txq->ift_dequeued++;
3735 #endif
3736 DBG_COUNTER_INC(tx_frees);
3737 }
3738 if (__predict_false(++cidx == qsize)) {
3739 cidx = 0;
3740 gen = 0;
3741 }
3742 }
3743 txq->ift_cidx = cidx;
3744 txq->ift_gen = gen;
3745 }
3746
3747 static __inline int
3748 iflib_completed_tx_reclaim(iflib_txq_t txq)
3749 {
3750 int reclaim, thresh;
3751 uint32_t now;
3752 if_ctx_t ctx = txq->ift_ctx;
3753
3754 thresh = txq->ift_reclaim_thresh;
3755 KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
3756 MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
3757
3758 now = ticks;
3759 if (now <= (txq->ift_last_reclaim + txq->ift_reclaim_ticks) &&
3760 txq->ift_in_use < thresh)
3761 return (0);
3762 txq->ift_last_reclaim = now;
3763 /*
3764 * The check above rate-limits this path so it isn't run on every call.
3765 */
3766 iflib_tx_credits_update(ctx, txq);
3767 reclaim = DESC_RECLAIMABLE(txq);
3768
3769 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
3770 #ifdef INVARIANTS
3771 if (iflib_verbose_debug) {
3772 printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __func__,
3773 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
3774 reclaim, thresh);
3775 }
3776 #endif
3777 return (0);
3778 }
3779 iflib_tx_desc_free(txq, reclaim);
3780 txq->ift_cleaned += reclaim;
3781 txq->ift_in_use -= reclaim;
3782
3783 return (reclaim);
3784 }
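/*
 * Worked example (values assumed): with ift_reclaim_thresh = 64 and
 * ift_reclaim_ticks = 1, a queue holding fewer than 64 in-use
 * descriptors is examined at most once per tick, and a pass only frees
 * descriptors once more than 64 of them have become reclaimable.
 */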
3785
3786 static struct mbuf **
3787 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
3788 {
3789 int next, size;
3790 struct mbuf **items;
3791
3792 size = r->size;
3793 next = (cidx + CACHE_PTR_INCREMENT) & (size - 1);
3794 items = __DEVOLATILE(struct mbuf **, &r->items[0]);
3795
3796 prefetch(items[(cidx + offset) & (size - 1)]);
3797 if (remaining > 1) {
3798 prefetch2cachelines(&items[next]);
3799 prefetch2cachelines(items[(cidx + offset + 1) & (size - 1)]);
3800 prefetch2cachelines(items[(cidx + offset + 2) & (size - 1)]);
3801 prefetch2cachelines(items[(cidx + offset + 3) & (size - 1)]);
3802 }
3803 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size - 1)]));
3804 }
3805
3806 static void
3807 iflib_txq_check_drain(iflib_txq_t txq, int budget)
3808 {
3809
3810 ifmp_ring_check_drainage(txq->ift_br, budget);
3811 }
3812
3813 static uint32_t
3814 iflib_txq_can_drain(struct ifmp_ring *r)
3815 {
3816 iflib_txq_t txq = r->cookie;
3817 if_ctx_t ctx = txq->ift_ctx;
3818
3819 if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2)
3820 return (1);
3821 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3822 BUS_DMASYNC_POSTREAD);
3823 return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id,
3824 false));
3825 }
3826
3827 static uint32_t
3828 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3829 {
3830 iflib_txq_t txq = r->cookie;
3831 if_ctx_t ctx = txq->ift_ctx;
3832 if_t ifp = ctx->ifc_ifp;
3833 struct mbuf *m, **mp;
3834 int avail, bytes_sent, skipped, count, err, i;
3835 int mcast_sent, pkt_sent, reclaimed;
3836 bool do_prefetch, rang, ring;
3837
3838 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
3839 !LINK_ACTIVE(ctx))) {
3840 DBG_COUNTER_INC(txq_drain_notready);
3841 return (0);
3842 }
3843 reclaimed = iflib_completed_tx_reclaim(txq);
3844 rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending);
3845 avail = IDXDIFF(pidx, cidx, r->size);
3846
3847 if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
3848 /*
3849 * The driver is unloading so we need to free all pending packets.
3850 */
3851 DBG_COUNTER_INC(txq_drain_flushing);
3852 for (i = 0; i < avail; i++) {
3853 if (__predict_true(r->items[(cidx + i) & (r->size - 1)] != (void *)txq))
3854 m_freem(r->items[(cidx + i) & (r->size - 1)]);
3855 r->items[(cidx + i) & (r->size - 1)] = NULL;
3856 }
3857 return (avail);
3858 }
3859
3860 if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
3861 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3862 CALLOUT_LOCK(txq);
3863 callout_stop(&txq->ift_timer);
3864 CALLOUT_UNLOCK(txq);
3865 DBG_COUNTER_INC(txq_drain_oactive);
3866 return (0);
3867 }
3868
3869 /*
3870 * If we've reclaimed any packets this queue cannot be hung.
3871 */
3872 if (reclaimed)
3873 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3874 skipped = mcast_sent = bytes_sent = pkt_sent = 0;
3875 count = MIN(avail, TX_BATCH_SIZE);
3876 #ifdef INVARIANTS
3877 if (iflib_verbose_debug)
3878 printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __func__,
3879 avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3880 #endif
3881 do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3882 err = 0;
3883 for (i = 0; i < count && TXQ_AVAIL(txq) >= MAX_TX_DESC(ctx) + 2; i++) {
3884 int rem = do_prefetch ? count - i : 0;
3885
3886 mp = _ring_peek_one(r, cidx, i, rem);
3887 MPASS(mp != NULL && *mp != NULL);
3888
3889 /*
3890 * Completion interrupts will use the address of the txq
3891 * as a sentinel to enqueue _something_ in order to acquire
3892 * the lock on the mp_ring (there's no direct lock call).
3893 * We obviously have to check for these sentinel cases
3894 * and skip them.
3895 */
3896 if (__predict_false(*mp == (struct mbuf *)txq)) {
3897 skipped++;
3898 continue;
3899 }
3900 err = iflib_encap(txq, mp);
3901 if (__predict_false(err)) {
3902 /* no room - bail out */
3903 if (err == ENOBUFS)
3904 break;
3905 skipped++;
3906 /* we can't send this packet - skip it */
3907 continue;
3908 }
3909 pkt_sent++;
3910 m = *mp;
3911 DBG_COUNTER_INC(tx_sent);
3912 bytes_sent += m->m_pkthdr.len;
3913 mcast_sent += !!(m->m_flags & M_MCAST);
3914
3915 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
3916 break;
3917 ETHER_BPF_MTAP(ifp, m);
3918 rang = iflib_txd_db_check(txq, false);
3919 }
3920
3921 /* deliberate use of bitwise or to avoid gratuitous short-circuit */
3922 ring = rang ? false : (iflib_min_tx_latency | err | (!!txq->ift_reclaim_thresh));
3923 iflib_txd_db_check(txq, ring);
3924 if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
3925 if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
3926 if (mcast_sent)
3927 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3928 #ifdef INVARIANTS
3929 if (iflib_verbose_debug)
3930 printf("consumed=%d\n", skipped + pkt_sent);
3931 #endif
3932 return (skipped + pkt_sent);
3933 }
3934
3935 static uint32_t
3936 iflib_txq_drain_always(struct ifmp_ring *r)
3937 {
3938 return (1);
3939 }
3940
3941 static uint32_t
3942 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3943 {
3944 int i, avail;
3945 struct mbuf **mp;
3946 iflib_txq_t txq;
3947
3948 txq = r->cookie;
3949
3950 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3951 CALLOUT_LOCK(txq);
3952 callout_stop(&txq->ift_timer);
3953 CALLOUT_UNLOCK(txq);
3954
3955 avail = IDXDIFF(pidx, cidx, r->size);
3956 for (i = 0; i < avail; i++) {
3957 mp = _ring_peek_one(r, cidx, i, avail - i);
3958 if (__predict_false(*mp == (struct mbuf *)txq))
3959 continue;
3960 m_freem(*mp);
3961 DBG_COUNTER_INC(tx_frees);
3962 }
3963 MPASS(ifmp_ring_is_stalled(r) == 0);
3964 return (avail);
3965 }
3966
3967 static void
3968 iflib_ifmp_purge(iflib_txq_t txq)
3969 {
3970 struct ifmp_ring *r;
3971
3972 r = txq->ift_br;
3973 r->drain = iflib_txq_drain_free;
3974 r->can_drain = iflib_txq_drain_always;
3975
3976 ifmp_ring_check_drainage(r, r->size);
3977
3978 r->drain = iflib_txq_drain;
3979 r->can_drain = iflib_txq_can_drain;
3980 }
3981
3982 static void
3983 _task_fn_tx(void *context)
3984 {
3985 iflib_txq_t txq = context;
3986 if_ctx_t ctx = txq->ift_ctx;
3987 if_t ifp = ctx->ifc_ifp;
3988 int abdicate = ctx->ifc_sysctl_tx_abdicate;
3989
3990 #ifdef IFLIB_DIAGNOSTICS
3991 txq->ift_cpu_exec_count[curcpu]++;
3992 #endif
3993 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
3994 return;
3995 #ifdef DEV_NETMAP
3996 if ((if_getcapenable(ifp) & IFCAP_NETMAP) &&
3997 netmap_tx_irq(ifp, txq->ift_id))
3998 goto skip_ifmp;
3999 #endif
4000 if (ctx->ifc_sysctl_simple_tx) {
4001 mtx_lock(&txq->ift_mtx);
4002 (void)iflib_completed_tx_reclaim(txq);
4003 mtx_unlock(&txq->ift_mtx);
4004 goto skip_ifmp;
4005 }
4006 #ifdef ALTQ
4007 if (if_altq_is_enabled(ifp))
4008 iflib_altq_if_start(ifp);
4009 #endif
4010 if (txq->ift_db_pending)
4011 ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
4012 else if (!abdicate)
4013 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4014 /*
4015 * When abdicating, we always need to check drainage, not just when we don't enqueue
4016 */
4017 if (abdicate)
4018 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4019
4020 skip_ifmp:
4021 if (ctx->ifc_flags & IFC_LEGACY)
4022 IFDI_INTR_ENABLE(ctx);
4023 else
4024 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
4025 }
4026
4027 static void
4028 _task_fn_rx(void *context)
4029 {
4030 iflib_rxq_t rxq = context;
4031 if_ctx_t ctx = rxq->ifr_ctx;
4032 uint8_t more;
4033 uint16_t budget;
4034 #ifdef DEV_NETMAP
4035 u_int work = 0;
4036 int nmirq;
4037 #endif
4038
4039 #ifdef IFLIB_DIAGNOSTICS
4040 rxq->ifr_cpu_exec_count[curcpu]++;
4041 #endif
4042 DBG_COUNTER_INC(task_fn_rxs);
4043 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
4044 return;
4045 #ifdef DEV_NETMAP
4046 nmirq = netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work);
4047 if (nmirq != NM_IRQ_PASS) {
4048 more = (nmirq == NM_IRQ_RESCHED) ? IFLIB_RXEOF_MORE : 0;
4049 goto skip_rxeof;
4050 }
4051 #endif
4052 budget = ctx->ifc_sysctl_rx_budget;
4053 if (budget == 0)
4054 budget = 16; /* XXX */
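	/*
	 * The budget is tunable; assuming the standard iflib sysctl
	 * name, e.g. "dev.foo.0.iflib.rx_budget=512".  A value of 0
	 * selects the default of 16 frames per pass.
	 */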
4055 more = iflib_rxeof(rxq, budget);
4056 #ifdef DEV_NETMAP
4057 skip_rxeof:
4058 #endif
4059 if ((more & IFLIB_RXEOF_MORE) == 0) {
4060 if (ctx->ifc_flags & IFC_LEGACY)
4061 IFDI_INTR_ENABLE(ctx);
4062 else
4063 IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
4064 DBG_COUNTER_INC(rx_intr_enables);
4065 }
4066 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
4067 return;
4068
4069 if (more & IFLIB_RXEOF_MORE)
4070 GROUPTASK_ENQUEUE(&rxq->ifr_task);
4071 else if (more & IFLIB_RXEOF_EMPTY)
4072 callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq);
4073 }
4074
4075 static void
4076 _task_fn_admin(void *context, int pending)
4077 {
4078 if_ctx_t ctx = context;
4079 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
4080 iflib_txq_t txq;
4081 int i;
4082 bool oactive, running, do_reset, do_watchdog, in_detach;
4083
4084 STATE_LOCK(ctx);
4085 running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
4086 oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
4087 do_reset = (ctx->ifc_flags & IFC_DO_RESET);
4088 do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
4089 in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
4090 ctx->ifc_flags &= ~(IFC_DO_RESET | IFC_DO_WATCHDOG);
4091 STATE_UNLOCK(ctx);
4092
4093 if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
4094 return;
4095 if (in_detach)
4096 return;
4097
4098 CTX_LOCK(ctx);
4099 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
4100 CALLOUT_LOCK(txq);
4101 callout_stop(&txq->ift_timer);
4102 CALLOUT_UNLOCK(txq);
4103 }
4104 if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_ADMINCQ)
4105 IFDI_ADMIN_COMPLETION_HANDLE(ctx);
4106 if (do_watchdog) {
4107 ctx->ifc_watchdog_events++;
4108 IFDI_WATCHDOG_RESET(ctx);
4109 }
4110 IFDI_UPDATE_ADMIN_STATUS(ctx);
4111 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
4112 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq,
4113 txq->ift_timer.c_cpu);
4114 }
4115 IFDI_LINK_INTR_ENABLE(ctx);
4116 if (do_reset)
4117 iflib_if_init_locked(ctx);
4118 CTX_UNLOCK(ctx);
4119
4120 if (LINK_ACTIVE(ctx) == 0)
4121 return;
4122 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
4123 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
4124 }
4125
4126 static void
4127 _task_fn_iov(void *context, int pending)
4128 {
4129 if_ctx_t ctx = context;
4130
4131 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
4132 !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
4133 return;
4134
4135 CTX_LOCK(ctx);
4136 IFDI_VFLR_HANDLE(ctx);
4137 CTX_UNLOCK(ctx);
4138 }
4139
4140 static int
4141 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4142 {
4143 int err;
4144 if_int_delay_info_t info;
4145 if_ctx_t ctx;
4146
4147 info = (if_int_delay_info_t)arg1;
4148 ctx = info->iidi_ctx;
4149 info->iidi_req = req;
4150 info->iidi_oidp = oidp;
4151 CTX_LOCK(ctx);
4152 err = IFDI_SYSCTL_INT_DELAY(ctx, info);
4153 CTX_UNLOCK(ctx);
4154 return (err);
4155 }
4156
4157 /*********************************************************************
4158 *
4159 * IFNET FUNCTIONS
4160 *
4161 **********************************************************************/
4162
4163 static void
4164 iflib_if_init_locked(if_ctx_t ctx)
4165 {
4166 iflib_stop(ctx);
4167 iflib_init_locked(ctx);
4168 }
4169
4170 static void
4171 iflib_if_init(void *arg)
4172 {
4173 if_ctx_t ctx = arg;
4174
4175 CTX_LOCK(ctx);
4176 iflib_if_init_locked(ctx);
4177 CTX_UNLOCK(ctx);
4178 }
4179
4180 static int
4181 iflib_if_transmit(if_t ifp, struct mbuf *m)
4182 {
4183 if_ctx_t ctx = if_getsoftc(ifp);
4184 iflib_txq_t txq;
4185 int err, qidx;
4186 int abdicate;
4187
4188 if (__predict_false((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
4189 DBG_COUNTER_INC(tx_frees);
4190 m_freem(m);
4191 return (ENETDOWN);
4192 }
4193
4194 MPASS(m->m_nextpkt == NULL);
4195 /* ALTQ-enabled interfaces always use queue 0. */
4196 qidx = 0;
4197 /* Use driver-supplied queue selection method if it exists */
4198 if (ctx->isc_txq_select_v2) {
4199 struct if_pkt_info pi;
4200 uint64_t early_pullups = 0;
4201 pkt_info_zero(&pi);
4202
4203 err = iflib_parse_header_partial(&pi, &m, &early_pullups);
4204 if (__predict_false(err != 0)) {
4205 /* Assign pullups for bad pkts to default queue */
4206 ctx->ifc_txqs[0].ift_pullups += early_pullups;
4207 DBG_COUNTER_INC(encap_txd_encap_fail);
4208 return (err);
4209 }
4210 /* Let driver make queueing decision */
4211 qidx = ctx->isc_txq_select_v2(ctx->ifc_softc, m, &pi);
4212 ctx->ifc_txqs[qidx].ift_pullups += early_pullups;
4213 }
4214 /* Backwards compatibility w/ simpler queue select */
4215 else if (ctx->isc_txq_select)
4216 qidx = ctx->isc_txq_select(ctx->ifc_softc, m);
4217 /* If not, use iflib's standard method */
4218 else if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !if_altq_is_enabled(ifp))
4219 qidx = QIDX(ctx, m);
4220
4221 /* Set TX queue */
4222 txq = &ctx->ifc_txqs[qidx];
4223
4224 #ifdef DRIVER_BACKPRESSURE
4225 if (txq->ift_closed) {
4226 while (m != NULL) {
4227 next = m->m_nextpkt;
4228 m->m_nextpkt = NULL;
4229 m_freem(m);
4230 DBG_COUNTER_INC(tx_frees);
4231 m = next;
4232 }
4233 return (ENOBUFS);
4234 }
4235 #endif
4236 #ifdef notyet
4237 qidx = count = 0;
4238 mp = marr;
4239 next = m;
4240 do {
4241 count++;
4242 next = next->m_nextpkt;
4243 } while (next != NULL);
4244
4245 if (count > nitems(marr))
4246 if ((mp = malloc(count * sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
4247 /* XXX check nextpkt */
4248 m_freem(m);
4249 /* XXX simplify for now */
4250 DBG_COUNTER_INC(tx_frees);
4251 return (ENOBUFS);
4252 }
4253 for (next = m, i = 0; next != NULL; i++) {
4254 mp[i] = next;
4255 next = next->m_nextpkt;
4256 mp[i]->m_nextpkt = NULL;
4257 }
4258 #endif
4259 DBG_COUNTER_INC(tx_seen);
4260 abdicate = ctx->ifc_sysctl_tx_abdicate;
4261
4262 err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
4263
4264 if (abdicate)
4265 GROUPTASK_ENQUEUE(&txq->ift_task);
4266 if (err) {
4267 if (!abdicate)
4268 GROUPTASK_ENQUEUE(&txq->ift_task);
4269 /* support forthcoming later */
4270 #ifdef DRIVER_BACKPRESSURE
4271 txq->ift_closed = TRUE;
4272 #endif
4273 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4274 m_freem(m);
4275 DBG_COUNTER_INC(tx_frees);
4276 if (err == ENOBUFS)
4277 if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
4278 else
4279 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
4280 }
4281
4282 return (err);
4283 }
4284
4285 #ifdef ALTQ
4286 /*
4287 * The overall approach to integrating iflib with ALTQ is to continue to use
4288 * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
4289 * ring. Technically, when using ALTQ, queueing to an intermediate mp_ring
4290 * is redundant/unnecessary, but doing so minimizes the amount of
4291 * ALTQ-specific code required in iflib. It is assumed that the overhead of
4292 * redundantly queueing to an intermediate mp_ring is swamped by the
4293 * performance limitations inherent in using ALTQ.
4294 *
4295 * When ALTQ support is compiled in, all iflib drivers will use a transmit
4296 * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
4297 * given interface. If ALTQ is enabled for an interface, then all
4298 * transmitted packets for that interface will be submitted to the ALTQ
4299 * subsystem via IFQ_ENQUEUE(). We don't use the legacy if_transmit()
4300 * implementation because it uses IFQ_HANDOFF(), which will duplicatively
4301 * update stats that the iflib machinery handles, and which is sensitive to
4302 * the disused IFF_DRV_OACTIVE flag. Additionally, iflib_altq_if_start()
4303 * will be installed as the start routine for use by ALTQ facilities that
4304 * need to trigger queue drains on a scheduled basis.
4305 *
4306 */
4307 static void
4308 iflib_altq_if_start(if_t ifp)
4309 {
4310 struct ifaltq *ifq = &ifp->if_snd; /* XXX - DRVAPI */
4311 struct mbuf *m;
4312
4313 IFQ_LOCK(ifq);
4314 IFQ_DEQUEUE_NOLOCK(ifq, m);
4315 while (m != NULL) {
4316 iflib_if_transmit(ifp, m);
4317 IFQ_DEQUEUE_NOLOCK(ifq, m);
4318 }
4319 IFQ_UNLOCK(ifq);
4320 }
4321
4322 static int
4323 iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
4324 {
4325 int err;
4326
4327 if (if_altq_is_enabled(ifp)) {
4328 IFQ_ENQUEUE(&ifp->if_snd, m, err); /* XXX - DRVAPI */
4329 if (err == 0)
4330 iflib_altq_if_start(ifp);
4331 } else
4332 err = iflib_if_transmit(ifp, m);
4333
4334 return (err);
4335 }
4336 #endif /* ALTQ */
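/*
 * Minimal sketch of how the routines above are wired up (assumed to
 * mirror what iflib's ifnet setup does when ALTQ is compiled in):
 *
 *	if_settransmitfn(ifp, iflib_altq_if_transmit);
 *	if_setstartfn(ifp, iflib_altq_if_start);
 *
 * so that iflib_altq_if_transmit() can divert packets through the
 * ALTQ queue(s) before they reach iflib_if_transmit().
 */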
4337
4338 static void
4339 iflib_if_qflush(if_t ifp)
4340 {
4341 if_ctx_t ctx = if_getsoftc(ifp);
4342 iflib_txq_t txq = ctx->ifc_txqs;
4343 int i;
4344
4345 STATE_LOCK(ctx);
4346 ctx->ifc_flags |= IFC_QFLUSH;
4347 STATE_UNLOCK(ctx);
4348 for (i = 0; i < NTXQSETS(ctx); i++, txq++)
4349 while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
4350 iflib_txq_check_drain(txq, 0);
4351 STATE_LOCK(ctx);
4352 ctx->ifc_flags &= ~IFC_QFLUSH;
4353 STATE_UNLOCK(ctx);
4354
4355 /*
4356 * When ALTQ is enabled, this will also take care of purging the
4357 * ALTQ queue(s).
4358 */
4359 if_qflush(ifp);
4360 }
4361
4362 #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
4363 IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
4364 IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \
4365 IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM | IFCAP_MEXTPG)
4366
4367 static int
4368 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
4369 {
4370 if_ctx_t ctx = if_getsoftc(ifp);
4371 struct ifreq *ifr = (struct ifreq *)data;
4372 #if defined(INET) || defined(INET6)
4373 struct ifaddr *ifa = (struct ifaddr *)data;
4374 #endif
4375 bool avoid_reset = false;
4376 int err = 0, reinit = 0, bits;
4377
4378 switch (command) {
4379 case SIOCSIFADDR:
4380 #ifdef INET
4381 if (ifa->ifa_addr->sa_family == AF_INET)
4382 avoid_reset = true;
4383 #endif
4384 #ifdef INET6
4385 if (ifa->ifa_addr->sa_family == AF_INET6)
4386 avoid_reset = true;
4387 #endif
4388 /*
4389 * Calling init results in link renegotiation,
4390 * so we avoid doing it when possible.
4391 */
4392 if (avoid_reset) {
4393 if_setflagbits(ifp, IFF_UP, 0);
4394 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
4395 reinit = 1;
4396 #ifdef INET
4397 if (!(if_getflags(ifp) & IFF_NOARP))
4398 arp_ifinit(ifp, ifa);
4399 #endif
4400 } else
4401 err = ether_ioctl(ifp, command, data);
4402 break;
4403 case SIOCSIFMTU:
4404 CTX_LOCK(ctx);
4405 if (ifr->ifr_mtu == if_getmtu(ifp)) {
4406 CTX_UNLOCK(ctx);
4407 break;
4408 }
4409 bits = if_getdrvflags(ifp);
4410 /* stop the driver and free any clusters before proceeding */
4411 iflib_stop(ctx);
4412
4413 if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
4414 STATE_LOCK(ctx);
4415 if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
4416 ctx->ifc_flags |= IFC_MULTISEG;
4417 else
4418 ctx->ifc_flags &= ~IFC_MULTISEG;
4419 STATE_UNLOCK(ctx);
4420 err = if_setmtu(ifp, ifr->ifr_mtu);
4421 }
4422 iflib_init_locked(ctx);
4423 STATE_LOCK(ctx);
4424 if_setdrvflags(ifp, bits);
4425 STATE_UNLOCK(ctx);
4426 CTX_UNLOCK(ctx);
4427 break;
4428 case SIOCSIFFLAGS:
4429 CTX_LOCK(ctx);
4430 if (if_getflags(ifp) & IFF_UP) {
4431 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4432 if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4433 (IFF_PROMISC | IFF_ALLMULTI)) {
4434 CTX_UNLOCK(ctx);
4435 err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
4436 CTX_LOCK(ctx);
4437 }
4438 } else
4439 reinit = 1;
4440 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4441 iflib_stop(ctx);
4442 }
4443 ctx->ifc_if_flags = if_getflags(ifp);
4444 CTX_UNLOCK(ctx);
4445 break;
4446 case SIOCADDMULTI:
4447 case SIOCDELMULTI:
4448 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4449 CTX_LOCK(ctx);
4450 IFDI_INTR_DISABLE(ctx);
4451 IFDI_MULTI_SET(ctx);
4452 IFDI_INTR_ENABLE(ctx);
4453 CTX_UNLOCK(ctx);
4454 }
4455 break;
4456 case SIOCSIFMEDIA:
4457 CTX_LOCK(ctx);
4458 IFDI_MEDIA_SET(ctx);
4459 CTX_UNLOCK(ctx);
4460 /* FALLTHROUGH */
4461 case SIOCGIFMEDIA:
4462 case SIOCGIFXMEDIA:
4463 err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command);
4464 break;
4465 case SIOCGI2C:
4466 {
4467 struct ifi2creq i2c;
4468
4469 err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
4470 if (err != 0)
4471 break;
4472 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4473 err = EINVAL;
4474 break;
4475 }
4476 if (i2c.len > sizeof(i2c.data)) {
4477 err = EINVAL;
4478 break;
4479 }
4480
4481 if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
4482 err = copyout(&i2c, ifr_data_get_ptr(ifr),
4483 sizeof(i2c));
4484 break;
4485 }
4486 case SIOCSIFCAP:
4487 {
4488 int mask, setmask, oldmask;
4489
4490 oldmask = if_getcapenable(ifp);
4491 mask = ifr->ifr_reqcap ^ oldmask;
4492 mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_MEXTPG;
4493 setmask = 0;
4494 #ifdef TCP_OFFLOAD
4495 setmask |= mask & (IFCAP_TOE4 | IFCAP_TOE6);
4496 #endif
4497 setmask |= (mask & IFCAP_FLAGS);
4498 setmask |= (mask & IFCAP_WOL);
4499
4500 /*
4501 * If any RX csum has changed, change all the ones that
4502 * are supported by the driver.
4503 */
4504 if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4505 setmask |= ctx->ifc_softc_ctx.isc_capabilities &
4506 (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
4507 }
4508
4509 /*
4510 * We want to ensure that traffic has stopped before we change any of the flags.
4511 */
4512 if (setmask) {
4513 CTX_LOCK(ctx);
4514 bits = if_getdrvflags(ifp);
4515 if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
4516 iflib_stop(ctx);
4517 STATE_LOCK(ctx);
4518 if_togglecapenable(ifp, setmask);
4519 ctx->ifc_softc_ctx.isc_capenable ^= setmask;
4520 STATE_UNLOCK(ctx);
4521 if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
4522 iflib_init_locked(ctx);
4523 STATE_LOCK(ctx);
4524 if_setdrvflags(ifp, bits);
4525 STATE_UNLOCK(ctx);
4526 CTX_UNLOCK(ctx);
4527 }
4528 if_vlancap(ifp);
4529 break;
4530 }
4531 case SIOCGPRIVATE_0:
4532 case SIOCSDRVSPEC:
4533 case SIOCGDRVSPEC:
4534 CTX_LOCK(ctx);
4535 err = IFDI_PRIV_IOCTL(ctx, command, data);
4536 CTX_UNLOCK(ctx);
4537 break;
4538 default:
4539 err = ether_ioctl(ifp, command, data);
4540 break;
4541 }
4542 if (reinit)
4543 iflib_if_init(ctx);
4544 return (err);
4545 }
4546
4547 static uint64_t
4548 iflib_if_get_counter(if_t ifp, ift_counter cnt)
4549 {
4550 if_ctx_t ctx = if_getsoftc(ifp);
4551
4552 return (IFDI_GET_COUNTER(ctx, cnt));
4553 }
4554
4555 /*********************************************************************
4556 *
4557 * OTHER FUNCTIONS EXPORTED TO THE STACK
4558 *
4559 **********************************************************************/
4560
4561 static void
4562 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
4563 {
4564 if_ctx_t ctx = if_getsoftc(ifp);
4565
4566 if ((void *)ctx != arg)
4567 return;
4568
4569 if ((vtag == 0) || (vtag > 4095))
4570 return;
4571
4572 if (iflib_in_detach(ctx))
4573 return;
4574
4575 CTX_LOCK(ctx);
4576 /* Driver may need all untagged packets to be flushed */
4577 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4578 iflib_stop(ctx);
4579 IFDI_VLAN_REGISTER(ctx, vtag);
4580 /* Re-init to load the changes, if required */
4581 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4582 iflib_init_locked(ctx);
4583 CTX_UNLOCK(ctx);
4584 }
4585
4586 static void
4587 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
4588 {
4589 if_ctx_t ctx = if_getsoftc(ifp);
4590
4591 if ((void *)ctx != arg)
4592 return;
4593
4594 if ((vtag == 0) || (vtag > 4095))
4595 return;
4596
4597 CTX_LOCK(ctx);
4598 /* Driver may need all tagged packets to be flushed */
4599 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4600 iflib_stop(ctx);
4601 IFDI_VLAN_UNREGISTER(ctx, vtag);
4602 /* Re-init to load the changes, if required */
4603 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4604 iflib_init_locked(ctx);
4605 CTX_UNLOCK(ctx);
4606 }
4607
4608 static void
4609 iflib_led_func(void *arg, int onoff)
4610 {
4611 if_ctx_t ctx = arg;
4612
4613 CTX_LOCK(ctx);
4614 IFDI_LED_FUNC(ctx, onoff);
4615 CTX_UNLOCK(ctx);
4616 }
4617
4618 /*********************************************************************
4619 *
4620 * BUS FUNCTION DEFINITIONS
4621 *
4622 **********************************************************************/
4623
4624 int
4625 iflib_device_probe(device_t dev)
4626 {
4627 const pci_vendor_info_t *ent;
4628 if_shared_ctx_t sctx;
4629 uint16_t pci_device_id, pci_rev_id, pci_subdevice_id, pci_subvendor_id;
4630 uint16_t pci_vendor_id;
4631
4632 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4633 return (ENOTSUP);
4634
4635 pci_vendor_id = pci_get_vendor(dev);
4636 pci_device_id = pci_get_device(dev);
4637 pci_subvendor_id = pci_get_subvendor(dev);
4638 pci_subdevice_id = pci_get_subdevice(dev);
4639 pci_rev_id = pci_get_revid(dev);
4640 if (sctx->isc_parse_devinfo != NULL)
4641 sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4642
4643 ent = sctx->isc_vendor_info;
4644 while (ent->pvi_vendor_id != 0) {
4645 if (pci_vendor_id != ent->pvi_vendor_id) {
4646 ent++;
4647 continue;
4648 }
4649 if ((pci_device_id == ent->pvi_device_id) &&
4650 ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4651 (ent->pvi_subvendor_id == 0)) &&
4652 ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4653 (ent->pvi_subdevice_id == 0)) &&
4654 ((pci_rev_id == ent->pvi_rev_id) ||
4655 (ent->pvi_rev_id == 0))) {
4656 device_set_desc_copy(dev, ent->pvi_name);
4657 /* This needs to be changed to zero if the bus probing code
4658 * ever stops re-probing on best match, because the sctx
4659 * may have its values overwritten by register calls
4660 * in subsequent probes.
4661 */
4662 return (BUS_PROBE_DEFAULT);
4663 }
4664 ent++;
4665 }
4666 return (ENXIO);
4667 }
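/*
 * Illustrative sketch (hypothetical "foo" driver): the table walked by
 * the matching loop above is supplied through the shared context, e.g.:
 *
 *	static const pci_vendor_info_t foo_vendor_info_array[] = {
 *		PVID(0x8086, 0x10d3, "Foo Gigabit Network Connection"),
 *		PVID_END
 *	};
 *
 *	static const struct if_shared_ctx foo_sctx = {
 *		.isc_magic = IFLIB_MAGIC,
 *		.isc_vendor_info = foo_vendor_info_array,
 *		...
 *	};
 */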
4668
4669 int
4670 iflib_device_probe_vendor(device_t dev)
4671 {
4672 int probe;
4673
4674 probe = iflib_device_probe(dev);
4675 if (probe == BUS_PROBE_DEFAULT)
4676 return (BUS_PROBE_VENDOR);
4677 else
4678 return (probe);
4679 }
4680
4681 static void
4682 iflib_reset_qvalues(if_ctx_t ctx)
4683 {
4684 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4685 if_shared_ctx_t sctx = ctx->ifc_sctx;
4686 device_t dev = ctx->ifc_dev;
4687 int i;
4688
4689 if (ctx->ifc_sysctl_ntxqs != 0)
4690 scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
4691 if (ctx->ifc_sysctl_nrxqs != 0)
4692 scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
4693
4694 for (i = 0; i < sctx->isc_ntxqs; i++) {
4695 if (ctx->ifc_sysctl_ntxds[i] != 0)
4696 scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
4697 else
4698 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4699 }
4700
4701 for (i = 0; i < sctx->isc_nrxqs; i++) {
4702 if (ctx->ifc_sysctl_nrxds[i] != 0)
4703 scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
4704 else
4705 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4706 }
4707
4708 for (i = 0; i < sctx->isc_nrxqs; i++) {
4709 if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
4710 device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
4711 i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
4712 scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
4713 }
4714 if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
4715 device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
4716 i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
4717 scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
4718 }
4719 if (!powerof2(scctx->isc_nrxd[i])) {
4720 device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n",
4721 i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]);
4722 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4723 }
4724 }
4725
4726 for (i = 0; i < sctx->isc_ntxqs; i++) {
4727 if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
4728 device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
4729 i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
4730 scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
4731 }
4732 if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
4733 device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
4734 i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
4735 scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
4736 }
4737 if (!powerof2(scctx->isc_ntxd[i])) {
4738 device_printf(dev, "ntxd%d: %d is not a power of 2 - using default value of %d\n",
4739 i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]);
4740 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4741 }
4742 }
4743 }
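/*
 * Example (assuming iflib's standard "override" tunables): a 4-queue,
 * 2048-descriptor configuration could be requested at boot with
 *
 *	dev.foo.0.iflib.override_ntxqs=4
 *	dev.foo.0.iflib.override_nrxqs=4
 *	dev.foo.0.iflib.override_ntxds=2048
 *	dev.foo.0.iflib.override_nrxds=2048
 *
 * Values outside the driver's min/max, or not a power of two, are
 * corrected by the checks above.
 */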
4744
4745 static void
4746 iflib_add_pfil(if_ctx_t ctx)
4747 {
4748 struct pfil_head *pfil;
4749 struct pfil_head_args pa;
4750 iflib_rxq_t rxq;
4751 int i;
4752
4753 pa.pa_version = PFIL_VERSION;
4754 pa.pa_flags = PFIL_IN;
4755 pa.pa_type = PFIL_TYPE_ETHERNET;
4756 pa.pa_headname = if_name(ctx->ifc_ifp);
4757 pfil = pfil_head_register(&pa);
4758
4759 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4760 rxq->pfil = pfil;
4761 }
4762 }
4763
4764 static void
4765 iflib_rem_pfil(if_ctx_t ctx)
4766 {
4767 struct pfil_head *pfil;
4768 iflib_rxq_t rxq;
4769 int i;
4770
4771 rxq = ctx->ifc_rxqs;
4772 pfil = rxq->pfil;
4773 for (i = 0; i < NRXQSETS(ctx); i++, rxq++) {
4774 rxq->pfil = NULL;
4775 }
4776 pfil_head_unregister(pfil);
4777 }
4778
4779
4780 /*
4781 * Advance forward by n members of the cpuset ctx->ifc_cpus starting from
4782 * cpuid and wrapping as necessary.
4783 */
4784 static unsigned int
4785 cpuid_advance(if_ctx_t ctx, unsigned int cpuid, unsigned int n)
4786 {
4787 unsigned int first_valid;
4788 unsigned int last_valid;
4789
4790 /* cpuid should always be in the valid set */
4791 MPASS(CPU_ISSET(cpuid, &ctx->ifc_cpus));
4792
4793 /* valid set should never be empty */
4794 MPASS(!CPU_EMPTY(&ctx->ifc_cpus));
4795
4796 first_valid = CPU_FFS(&ctx->ifc_cpus) - 1;
4797 last_valid = CPU_FLS(&ctx->ifc_cpus) - 1;
4798 n = n % CPU_COUNT(&ctx->ifc_cpus);
4799 while (n > 0) {
4800 do {
4801 cpuid++;
4802 if (cpuid > last_valid)
4803 cpuid = first_valid;
4804 } while (!CPU_ISSET(cpuid, &ctx->ifc_cpus));
4805 n--;
4806 }
4807
4808 return (cpuid);
4809 }
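/*
 * Worked example: if ifc_cpus = {0, 2, 4, 6}, then
 * cpuid_advance(ctx, 4, 3) visits 6, wraps to 0, and returns 2.
 */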
4810
4811 #if defined(SMP) && defined(SCHED_ULE)
4812 extern struct cpu_group *cpu_top; /* CPU topology */
4813
4814 static int
4815 find_child_with_core(int cpu, struct cpu_group *grp)
4816 {
4817 int i;
4818
4819 if (grp->cg_children == 0)
4820 return (-1);
4821
4822 MPASS(grp->cg_child);
4823 for (i = 0; i < grp->cg_children; i++) {
4824 if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
4825 return (i);
4826 }
4827
4828 return (-1);
4829 }
4830
4831
4832 /*
4833 * Find an L2 neighbor of the given CPU or return -1 if none found. This
4834 * does not distinguish among multiple L2 neighbors if the given CPU has
4835 * more than one (it will always return the same result in that case).
4836 */
4837 static int
4838 find_l2_neighbor(int cpu)
4839 {
4840 struct cpu_group *grp;
4841 int i;
4842
4843 grp = cpu_top;
4844 if (grp == NULL)
4845 return (-1);
4846
4847 /*
4848 * Find the smallest CPU group that contains the given core.
4849 */
4850 i = 0;
4851 while ((i = find_child_with_core(cpu, grp)) != -1) {
4852 /*
4853 * If the smallest group containing the given CPU has less
4854 * than two members, we conclude the given CPU has no
4855 * L2 neighbor.
4856 */
4857 if (grp->cg_child[i].cg_count <= 1)
4858 return (-1);
4859 grp = &grp->cg_child[i];
4860 }
4861
4862 /* Must share L2. */
4863 if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
4864 return (-1);
4865
4866 /*
4867 * Select the first member of the set that isn't the reference
4868 * CPU, which at this point is guaranteed to exist.
4869 */
4870 for (i = 0; i < CPU_SETSIZE; i++) {
4871 if (CPU_ISSET(i, &grp->cg_mask) && i != cpu)
4872 return (i);
4873 }
4874
4875 /* Should never be reached */
4876 return (-1);
4877 }
4878
4879 #else
4880 static int
4881 find_l2_neighbor(int cpu)
4882 {
4883
4884 return (-1);
4885 }
4886 #endif
4887
4888 /*
4889 * CPU mapping behaviors
4890 * ---------------------
4891 * 'separate txrx' refers to the separate_txrx sysctl
4892 * 'use logical' refers to the use_logical_cores sysctl
4893 * 'INTR CPUS' indicates whether bus_get_cpus(INTR_CPUS) succeeded
4894 *
4895 * separate use INTR
4896 * txrx logical CPUS result
4897 * ---------- --------- ------ ------------------------------------------------
4898 * - - X RX and TX queues mapped to consecutive physical
4899 * cores with RX/TX pairs on same core and excess
4900 * of either following
4901 * - X X RX and TX queues mapped to consecutive cores
4902 * of any type with RX/TX pairs on same core and
4903 * excess of either following
4904 * X - X RX and TX queues mapped to consecutive physical
4905 * cores; all RX then all TX
4906 * X X X RX queues mapped to consecutive physical cores
4907 * first, then TX queues mapped to L2 neighbor of
4908 * the corresponding RX queue if one exists,
4909 * otherwise to consecutive physical cores
4910 * - n/a - RX and TX queues mapped to consecutive cores of
4911 * any type with RX/TX pairs on same core and excess
4912 * of either following
4913 * X n/a - RX and TX queues mapped to consecutive cores of
4914 * any type; all RX then all TX
4915 */
4916 static unsigned int
4917 get_cpuid_for_queue(if_ctx_t ctx, unsigned int base_cpuid, unsigned int qid,
4918 bool is_tx)
4919 {
4920 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4921 unsigned int core_index;
4922
4923 if (ctx->ifc_sysctl_separate_txrx) {
4924 /*
4925 * When using separate CPUs for TX and RX, the assignment
4926 * will always be of a consecutive CPU out of the set of
4927 * context CPUs, except for the specific case where the
4928 * context CPUs are physical cores, the use of logical cores
4929 * has been enabled, the assignment is for TX, the TX qid
4930 * corresponds to an RX qid, and the CPU assigned to the
4931 * corresponding RX queue has an L2 neighbor.
4932 */
4933 if (ctx->ifc_sysctl_use_logical_cores &&
4934 ctx->ifc_cpus_are_physical_cores &&
4935 is_tx && qid < scctx->isc_nrxqsets) {
4936 int l2_neighbor;
4937 unsigned int rx_cpuid;
4938
4939 rx_cpuid = cpuid_advance(ctx, base_cpuid, qid);
4940 l2_neighbor = find_l2_neighbor(rx_cpuid);
4941 if (l2_neighbor != -1) {
4942 return (l2_neighbor);
4943 }
4944 /*
4945 * ... else fall through to the normal
4946 * consecutive-after-RX assignment scheme.
4947 *
4948 * Note that we are assuming that all RX queue CPUs
4949 * have an L2 neighbor, or all do not. If a mixed
4950 * scenario is possible, we will have to keep track
4951 * separately of how many queues prior to this one
4952 * were not able to be assigned to an L2 neighbor.
4953 */
4954 }
4955 if (is_tx)
4956 core_index = scctx->isc_nrxqsets + qid;
4957 else
4958 core_index = qid;
4959 } else {
4960 core_index = qid;
4961 }
4962
4963 return (cpuid_advance(ctx, base_cpuid, core_index));
4964 }
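/*
 * Worked example (2 RX and 2 TX queues, base CPU b): with
 * separate_txrx clear, RX i and TX i both map to cpuid_advance(ctx, b, i);
 * with separate_txrx set (and no L2-neighbor special case), RX0/RX1
 * map to offsets 0/1 and TX0/TX1 to offsets 2/3.
 */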
4965
4966 static uint16_t
4967 get_ctx_core_offset(if_ctx_t ctx)
4968 {
4969 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4970 struct cpu_offset *op;
4971 cpuset_t assigned_cpus;
4972 unsigned int cores_consumed;
4973 unsigned int base_cpuid = ctx->ifc_sysctl_core_offset;
4974 unsigned int first_valid;
4975 unsigned int last_valid;
4976 unsigned int i;
4977
4978 first_valid = CPU_FFS(&ctx->ifc_cpus) - 1;
4979 last_valid = CPU_FLS(&ctx->ifc_cpus) - 1;
4980
4981 if (base_cpuid != CORE_OFFSET_UNSPECIFIED) {
4982 /*
4983 * Align the user-chosen base CPU ID to the next valid CPU
4984 * for this device. If the chosen base CPU ID is smaller
4985 * than the first valid CPU or larger than the last valid
4986 * CPU, we assume the user does not know what the valid
4987 * range is for this device and is thinking in terms of a
4988 * zero-based reference frame, and so we shift the given
4989 * value into the valid range (and wrap accordingly) so the
4990 * intent is translated to the proper frame of reference.
4991 * If the base CPU ID is within the valid first/last, but
4992 * does not correspond to a valid CPU, it is advanced to the
4993 * next valid CPU (wrapping if necessary).
4994 */
4995 if (base_cpuid < first_valid || base_cpuid > last_valid) {
4996 /* shift from zero-based to first_valid-based */
4997 base_cpuid += first_valid;
4998 /* wrap to range [first_valid, last_valid] */
4999 base_cpuid = (base_cpuid - first_valid) %
5000 (last_valid - first_valid + 1);
5001 }
5002 if (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus)) {
5003 /*
5004 * base_cpuid is in [first_valid, last_valid], but
5005 * not a member of the valid set. In this case,
5006 * there will always be a member of the valid set
5007 * with a CPU ID that is greater than base_cpuid,
5008 * and we simply advance to it.
5009 */
5010 while (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus))
5011 base_cpuid++;
5012 }
5013 return (base_cpuid);
5014 }
5015
5016 /*
5017 * Determine how many cores will be consumed by performing the CPU
5018 * assignments and counting how many of the assigned CPUs correspond
5019 * to CPUs in the set of context CPUs. This is done using the CPU
5020 * ID first_valid as the base CPU ID, as the base CPU must be within
5021 * the set of context CPUs.
5022 *
5023 * Note not all assigned CPUs will be in the set of context CPUs
5024 * when separate CPUs are being allocated to TX and RX queues,
5025 * assignment to logical cores has been enabled, the set of context
5026 * CPUs contains only physical CPUs, and TX queues are mapped to L2
5027 * neighbors of CPUs that RX queues have been mapped to - in this
5028 * case we only want to count how many CPUs in the set of context
5029 * CPUs have been consumed, as that determines the next CPU in that
5030 * set to start allocating at for the next device for which
5031 * core_offset is not set.
5032 */
5033 CPU_ZERO(&assigned_cpus);
5034 for (i = 0; i < scctx->isc_ntxqsets; i++)
5035 CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, true),
5036 &assigned_cpus);
5037 for (i = 0; i < scctx->isc_nrxqsets; i++)
5038 CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, false),
5039 &assigned_cpus);
5040 CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus);
5041 cores_consumed = CPU_COUNT(&assigned_cpus);
5042
5043 mtx_lock(&cpu_offset_mtx);
5044 SLIST_FOREACH(op, &cpu_offsets, entries) {
5045 if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
5046 base_cpuid = op->next_cpuid;
5047 op->next_cpuid = cpuid_advance(ctx, op->next_cpuid,
5048 cores_consumed);
5049 MPASS(op->refcount < UINT_MAX);
5050 op->refcount++;
5051 break;
5052 }
5053 }
5054 if (base_cpuid == CORE_OFFSET_UNSPECIFIED) {
5055 base_cpuid = first_valid;
5056 op = malloc(sizeof(struct cpu_offset), M_IFLIB,
5057 M_NOWAIT | M_ZERO);
5058 if (op == NULL) {
5059 device_printf(ctx->ifc_dev,
5060 "allocation for cpu offset failed.\n");
5061 } else {
5062 op->next_cpuid = cpuid_advance(ctx, base_cpuid,
5063 cores_consumed);
5064 op->refcount = 1;
5065 CPU_COPY(&ctx->ifc_cpus, &op->set);
5066 SLIST_INSERT_HEAD(&cpu_offsets, op, entries);
5067 }
5068 }
5069 mtx_unlock(&cpu_offset_mtx);
5070
5071 return (base_cpuid);
5072 }
5073
5074 static void
5075 unref_ctx_core_offset(if_ctx_t ctx)
5076 {
5077 struct cpu_offset *op, *top;
5078
5079 mtx_lock(&cpu_offset_mtx);
5080 SLIST_FOREACH_SAFE(op, &cpu_offsets, entries, top) {
5081 if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
5082 MPASS(op->refcount > 0);
5083 op->refcount--;
5084 if (op->refcount == 0) {
5085 SLIST_REMOVE(&cpu_offsets, op, cpu_offset, entries);
5086 free(op, M_IFLIB);
5087 }
5088 break;
5089 }
5090 }
5091 mtx_unlock(&cpu_offset_mtx);
5092 }
5093
5094 int
5095 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
5096 {
5097 if_ctx_t ctx;
5098 if_t ifp;
5099 if_softc_ctx_t scctx;
5100 kobjop_desc_t kobj_desc;
5101 kobj_method_t *kobj_method;
5102 int err, msix, rid;
5103 int num_txd, num_rxd;
5104 char namebuf[TASKQUEUE_NAMELEN];
5105
5106 ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK | M_ZERO);
5107
5108 if (sc == NULL) {
5109 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK | M_ZERO);
5110 device_set_softc(dev, ctx);
5111 ctx->ifc_flags |= IFC_SC_ALLOCATED;
5112 }
5113
5114 ctx->ifc_sctx = sctx;
5115 ctx->ifc_dev = dev;
5116 ctx->ifc_softc = sc;
5117
5118 iflib_register(ctx);
5119 iflib_add_device_sysctl_pre(ctx);
5120
5121 scctx = &ctx->ifc_softc_ctx;
5122 ifp = ctx->ifc_ifp;
5123 if (ctx->ifc_sysctl_simple_tx) {
5124 #ifndef ALTQ
5125 if_settransmitfn(ifp, iflib_simple_transmit);
5126 device_printf(dev, "using simple if_transmit\n");
5127 #else
5128 device_printf(dev, "ALTQ prevents using simple if_transmit\n");
5129 #endif
5130 }
5131 iflib_reset_qvalues(ctx);
5132 IFNET_WLOCK();
5133 CTX_LOCK(ctx);
5134 if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
5135 device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
5136 goto fail_unlock;
5137 }
5138 _iflib_pre_assert(scctx);
5139 ctx->ifc_txrx = *scctx->isc_txrx;
5140
5141 MPASS(scctx->isc_dma_width <= flsll(BUS_SPACE_MAXADDR));
5142
5143 if (sctx->isc_flags & IFLIB_DRIVER_MEDIA)
5144 ctx->ifc_mediap = scctx->isc_media;
5145
5146 #ifdef INVARIANTS
5147 if (scctx->isc_capabilities & IFCAP_TXCSUM)
5148 MPASS(scctx->isc_tx_csum_flags);
5149 #endif
5150
5151 if_setcapabilities(ifp,
5152 scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_MEXTPG);
5153 if_setcapenable(ifp,
5154 scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_MEXTPG);
5155
5156 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
5157 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
5158 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
5159 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
5160
5161 num_txd = iflib_num_tx_descs(ctx);
5162 num_rxd = iflib_num_rx_descs(ctx);
5163
5164 /* XXX change for per-queue sizes */
5165 device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n",
5166 num_txd, num_rxd);
5167
5168 if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION)
5169 scctx->isc_tx_nsegments = max(1, num_txd /
5170 MAX_SINGLE_PACKET_FRACTION);
5171 if (scctx->isc_tx_tso_segments_max > num_txd /
5172 MAX_SINGLE_PACKET_FRACTION)
5173 scctx->isc_tx_tso_segments_max = max(1,
5174 num_txd / MAX_SINGLE_PACKET_FRACTION);
5175
5176 /* TSO parameters - these come from the MAC's data sheet and correspond directly to DMA tag setup */
5177 if (if_getcapabilities(ifp) & IFCAP_TSO) {
5178 /*
5179 * The stack can't handle a TSO size larger than IP_MAXPACKET,
5180 * but some MACs do.
5181 */
5182 if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
5183 IP_MAXPACKET));
5184 /*
5185 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
5186 * into account. In the worst case, each of these calls will
5187 * add another mbuf and, thus, the requirement for another DMA
5188 * segment. So for best performance, it doesn't make sense to
5189 * advertise a maximum of TSO segments that typically will
5190 * require defragmentation in iflib_encap().
5191 */
5192 if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
5193 if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
5194 }
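	/*
	 * For example, a driver reporting isc_tx_tso_segments_max = 32
	 * ends up advertising an if_hw_tsomaxsegcount of 29, keeping
	 * three segments in reserve for the worst-case m_pullup()
	 * chain growth described above.
	 */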
5195 if (scctx->isc_rss_table_size == 0)
5196 scctx->isc_rss_table_size = 64;
5197 scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
5198
5199 /* Create and start admin taskqueue */
5200 snprintf(namebuf, TASKQUEUE_NAMELEN, "if_%s_tq", device_get_nameunit(dev));
5201 ctx->ifc_tq = taskqueue_create_fast(namebuf, M_NOWAIT,
5202 taskqueue_thread_enqueue, &ctx->ifc_tq);
5203 if (ctx->ifc_tq == NULL) {
5204 device_printf(dev, "Unable to create admin taskqueue\n");
5205 return (ENOMEM);
5206 }
5207
5208 err = taskqueue_start_threads(&ctx->ifc_tq, 1, PI_NET, "%s", namebuf);
5209 if (err) {
5210 device_printf(dev,
5211 "Unable to start admin taskqueue threads error: %d\n",
5212 err);
5213 taskqueue_free(ctx->ifc_tq);
5214 return (err);
5215 }
5216
5217 TASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
5218
5219 /* Set up cpu set. If it fails, use the set of all CPUs. */
5220 if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
5221 device_printf(dev, "Unable to fetch CPU list\n");
5222 CPU_COPY(&all_cpus, &ctx->ifc_cpus);
5223 ctx->ifc_cpus_are_physical_cores = false;
5224 } else
5225 ctx->ifc_cpus_are_physical_cores = true;
5226 MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
5227
5228 /*
5229 * Now set up MSI or MSI-X, should return us the number of supported
5230 * vectors (will be 1 for a legacy interrupt and MSI).
5231 */
5232 if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
5233 msix = scctx->isc_vectors;
5234 } else if (scctx->isc_msix_bar != 0)
5235 /*
5236 * The simple fact that isc_msix_bar is not 0 does not mean
5237 * we have a good value there that is known to work.
5238 */
5239 msix = iflib_msix_init(ctx);
5240 else {
5241 scctx->isc_vectors = 1;
5242 scctx->isc_ntxqsets = 1;
5243 scctx->isc_nrxqsets = 1;
5244 scctx->isc_intr = IFLIB_INTR_LEGACY;
5245 msix = 0;
5246 }
5247 /* Get memory for the station queues */
5248 if ((err = iflib_queues_alloc(ctx))) {
5249 device_printf(dev, "Unable to allocate queue memory\n");
5250 goto fail_intr_free;
5251 }
5252
5253 if ((err = iflib_qset_structures_setup(ctx)))
5254 goto fail_queues;
5255
5256 /*
5257 * Now that we know how many queues there are, get the core offset.
5258 */
5259 ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx);
5260
5261 if (msix > 1) {
5262 /*
5263 * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable
5264 * aren't the default NULL implementation.
5265 */
5266 kobj_desc = &ifdi_rx_queue_intr_enable_desc;
5267 kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
5268 kobj_desc);
5269 if (kobj_method == &kobj_desc->deflt) {
5270 device_printf(dev,
5271 "MSI-X requires ifdi_rx_queue_intr_enable method");
5272 err = EOPNOTSUPP;
5273 goto fail_queues;
5274 }
5275 kobj_desc = &ifdi_tx_queue_intr_enable_desc;
5276 kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
5277 kobj_desc);
5278 if (kobj_method == &kobj_desc->deflt) {
5279 device_printf(dev,
5280 "MSI-X requires ifdi_tx_queue_intr_enable method");
5281 err = EOPNOTSUPP;
5282 goto fail_queues;
5283 }
5284
5285 /*
5286 * Assign the MSI-X vectors.
5287 * Note that the default NULL ifdi_msix_intr_assign method will
5288 * fail here, too.
5289 */
5290 err = IFDI_MSIX_INTR_ASSIGN(ctx, msix);
5291 if (err != 0) {
5292 device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n",
5293 err);
5294 goto fail_queues;
5295 }
5296 } else if (scctx->isc_intr != IFLIB_INTR_MSIX) {
5297 rid = 0;
5298 if (scctx->isc_intr == IFLIB_INTR_MSI) {
5299 MPASS(msix == 1);
5300 rid = 1;
5301 }
5302 if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
5303 device_printf(dev, "iflib_legacy_setup failed %d\n", err);
5304 goto fail_queues;
5305 }
5306 } else {
5307 device_printf(dev,
5308 "Cannot use iflib with only 1 MSI-X interrupt!\n");
5309 err = ENODEV;
5310 goto fail_queues;
5311 }
5312
5313 /*
5314 * Unlocking here prevents a double-locking panic with iflib_media_status when
5315 * the driver loads.
5316 */
5317 CTX_UNLOCK(ctx);
5318 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
5319 CTX_LOCK(ctx);
5320
5321 if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
5322 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
5323 goto fail_detach;
5324 }
5325
5326 /*
5327 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
5328 * This must appear after the call to ether_ifattach() because
5329 * ether_ifattach() sets if_hdrlen to the default value.
5330 */
5331 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
5332 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
5333
5334 if ((err = iflib_netmap_attach(ctx))) {
5335 device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
5336 goto fail_detach;
5337 }
5338 *ctxp = ctx;
5339
5340 DEBUGNET_SET(ctx->ifc_ifp, iflib);
5341
5342 iflib_add_device_sysctl_post(ctx);
5343 iflib_add_pfil(ctx);
5344 ctx->ifc_flags |= IFC_INIT_DONE;
5345 CTX_UNLOCK(ctx);
5346 IFNET_WUNLOCK();
5347
5348 return (0);
5349
5350 fail_detach:
5351 ether_ifdetach(ctx->ifc_ifp);
5352 fail_queues:
5353 taskqueue_free(ctx->ifc_tq);
5354 iflib_tqg_detach(ctx);
5355 iflib_tx_structures_free(ctx);
5356 iflib_rx_structures_free(ctx);
5357 IFDI_DETACH(ctx);
5358 IFDI_QUEUES_FREE(ctx);
5359 fail_intr_free:
5360 iflib_free_intr_mem(ctx);
5361 fail_unlock:
5362 CTX_UNLOCK(ctx);
5363 IFNET_WUNLOCK();
5364 iflib_deregister(ctx);
5365 device_set_softc(ctx->ifc_dev, NULL);
5366 if (ctx->ifc_flags & IFC_SC_ALLOCATED)
5367 free(ctx->ifc_softc, M_IFLIB);
5368 free(ctx, M_IFLIB);
5369 return (err);
5370 }
5371
5372 int
5373 iflib_device_attach(device_t dev)
5374 {
5375 if_ctx_t ctx;
5376 if_shared_ctx_t sctx;
5377
5378 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
5379 return (ENOTSUP);
5380
5381 pci_enable_busmaster(dev);
5382
5383 return (iflib_device_register(dev, NULL, sctx, &ctx));
5384 }
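/*
 * Minimal sketch (hypothetical "foo" driver) of the bus glue that
 * routes probe/attach/detach through the iflib entry points above:
 *
 *	static device_method_t foo_methods[] = {
 *		DEVMETHOD(device_register, foo_register),
 *		DEVMETHOD(device_probe, iflib_device_probe),
 *		DEVMETHOD(device_attach, iflib_device_attach),
 *		DEVMETHOD(device_detach, iflib_device_detach),
 *		DEVMETHOD_END
 *	};
 *
 *	static driver_t foo_driver = {
 *		"foo", foo_methods, sizeof(struct foo_softc)
 *	};
 *	DRIVER_MODULE(foo, pci, foo_driver, 0, 0);
 */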
5385
5386 int
5387 iflib_device_deregister(if_ctx_t ctx)
5388 {
5389 if_t ifp = ctx->ifc_ifp;
5390 device_t dev = ctx->ifc_dev;
5391
5392 /* Make sure VLANS are not using driver */
5393 if (if_vlantrunkinuse(ifp)) {
5394 device_printf(dev, "Vlan in use, detach first\n");
5395 return (EBUSY);
5396 }
5397 #ifdef PCI_IOV
5398 if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) {
5399 device_printf(dev, "SR-IOV in use; detach first.\n");
5400 return (EBUSY);
5401 }
5402 #endif
5403
5404 STATE_LOCK(ctx);
5405 ctx->ifc_flags |= IFC_IN_DETACH;
5406 STATE_UNLOCK(ctx);
5407
5408 /* Unregister VLAN handlers before calling iflib_stop() */
5409 iflib_unregister_vlan_handlers(ctx);
5410
5411 iflib_netmap_detach(ifp);
5412 ether_ifdetach(ifp);
5413
5414 CTX_LOCK(ctx);
5415 iflib_stop(ctx);
5416 CTX_UNLOCK(ctx);
5417
5418 iflib_rem_pfil(ctx);
5419 if (ctx->ifc_led_dev != NULL)
5420 led_destroy(ctx->ifc_led_dev);
5421
5422 iflib_tqg_detach(ctx);
5423 iflib_tx_structures_free(ctx);
5424 iflib_rx_structures_free(ctx);
5425
5426 CTX_LOCK(ctx);
5427 IFDI_DETACH(ctx);
5428 IFDI_QUEUES_FREE(ctx);
5429 CTX_UNLOCK(ctx);
5430
5431 taskqueue_free(ctx->ifc_tq);
5432 ctx->ifc_tq = NULL;
5433
5434 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
5435 iflib_free_intr_mem(ctx);
5436
5437 bus_generic_detach(dev);
5438
5439 iflib_deregister(ctx);
5440
5441 device_set_softc(ctx->ifc_dev, NULL);
5442 if (ctx->ifc_flags & IFC_SC_ALLOCATED)
5443 free(ctx->ifc_softc, M_IFLIB);
5444 unref_ctx_core_offset(ctx);
5445 free(ctx, M_IFLIB);
5446 return (0);
5447 }
5448
5449 static void
5450 iflib_tqg_detach(if_ctx_t ctx)
5451 {
5452 iflib_txq_t txq;
5453 iflib_rxq_t rxq;
5454 int i;
5455 struct taskqgroup *tqg;
5456
5457 /* XXX drain any dependent tasks */
5458 tqg = qgroup_if_io_tqg;
5459 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
5460 callout_drain(&txq->ift_timer);
5461 #ifdef DEV_NETMAP
5462 callout_drain(&txq->ift_netmap_timer);
5463 #endif /* DEV_NETMAP */
5464 if (txq->ift_task.gt_uniq != NULL)
5465 taskqgroup_detach(tqg, &txq->ift_task);
5466 }
5467 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
5468 if (rxq->ifr_task.gt_uniq != NULL)
5469 taskqgroup_detach(tqg, &rxq->ifr_task);
5470 }
5471 }
5472
5473 static void
5474 iflib_free_intr_mem(if_ctx_t ctx)
5475 {
5476
5477 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
5478 iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
5479 }
5480 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
5481 pci_release_msi(ctx->ifc_dev);
5482 }
5483 if (ctx->ifc_msix_mem != NULL) {
5484 bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
5485 rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem);
5486 ctx->ifc_msix_mem = NULL;
5487 }
5488 }
5489
5490 int
5491 iflib_device_detach(device_t dev)
5492 {
5493 if_ctx_t ctx = device_get_softc(dev);
5494
5495 return (iflib_device_deregister(ctx));
5496 }
5497
5498 int
5499 iflib_device_suspend(device_t dev)
5500 {
5501 if_ctx_t ctx = device_get_softc(dev);
5502
5503 CTX_LOCK(ctx);
5504 IFDI_SUSPEND(ctx);
5505 CTX_UNLOCK(ctx);
5506
5507 return (bus_generic_suspend(dev));
5508 }
5509 int
5510 iflib_device_shutdown(device_t dev)
5511 {
5512 if_ctx_t ctx = device_get_softc(dev);
5513
5514 CTX_LOCK(ctx);
5515 IFDI_SHUTDOWN(ctx);
5516 CTX_UNLOCK(ctx);
5517
5518 return (bus_generic_suspend(dev));
5519 }
5520
5521 int
5522 iflib_device_resume(device_t dev)
5523 {
5524 if_ctx_t ctx = device_get_softc(dev);
5525 iflib_txq_t txq = ctx->ifc_txqs;
5526
5527 CTX_LOCK(ctx);
5528 IFDI_RESUME(ctx);
5529 iflib_if_init_locked(ctx);
5530 CTX_UNLOCK(ctx);
5531 for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
5532 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
5533
5534 return (bus_generic_resume(dev));
5535 }
5536
5537 int
5538 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
5539 {
5540 int error;
5541 if_ctx_t ctx = device_get_softc(dev);
5542
5543 CTX_LOCK(ctx);
5544 error = IFDI_IOV_INIT(ctx, num_vfs, params);
5545 CTX_UNLOCK(ctx);
5546
5547 return (error);
5548 }
5549
5550 void
5551 iflib_device_iov_uninit(device_t dev)
5552 {
5553 if_ctx_t ctx = device_get_softc(dev);
5554
5555 CTX_LOCK(ctx);
5556 IFDI_IOV_UNINIT(ctx);
5557 CTX_UNLOCK(ctx);
5558 }
5559
5560 int
5561 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
5562 {
5563 int error;
5564 if_ctx_t ctx = device_get_softc(dev);
5565
5566 CTX_LOCK(ctx);
5567 error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
5568 CTX_UNLOCK(ctx);
5569
5570 return (error);
5571 }
5572
5573 /*********************************************************************
5574 *
5575 * MODULE FUNCTION DEFINITIONS
5576 *
5577 **********************************************************************/
5578
5579 /*
5580 * - Start a fast taskqueue thread for each core
5581 * - Start a taskqueue for control operations
5582 */
5583 static int
5584 iflib_module_init(void)
5585 {
5586 iflib_timer_default = hz / 2;
5587 return (0);
5588 }
5589
5590 static int
5591 iflib_module_event_handler(module_t mod, int what, void *arg)
5592 {
5593 int err;
5594
5595 switch (what) {
5596 case MOD_LOAD:
5597 if ((err = iflib_module_init()) != 0)
5598 return (err);
5599 break;
5600 case MOD_UNLOAD:
5601 return (EBUSY);
5602 default:
5603 return (EOPNOTSUPP);
5604 }
5605
5606 return (0);
5607 }
5608
5609 /*********************************************************************
5610 *
5611 * PUBLIC FUNCTION DEFINITIONS
5612 * ordered as in iflib.h
5613 *
5614 **********************************************************************/
5615
5616 static void
5617 _iflib_assert(if_shared_ctx_t sctx)
5618 {
5619 int i;
5620
5621 MPASS(sctx->isc_tx_maxsize);
5622 MPASS(sctx->isc_tx_maxsegsize);
5623
5624 MPASS(sctx->isc_rx_maxsize);
5625 MPASS(sctx->isc_rx_nsegments);
5626 MPASS(sctx->isc_rx_maxsegsize);
5627
5628 MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8);
5629 for (i = 0; i < sctx->isc_nrxqs; i++) {
5630 MPASS(sctx->isc_nrxd_min[i]);
5631 MPASS(powerof2(sctx->isc_nrxd_min[i]));
5632 MPASS(sctx->isc_nrxd_max[i]);
5633 MPASS(powerof2(sctx->isc_nrxd_max[i]));
5634 MPASS(sctx->isc_nrxd_default[i]);
5635 MPASS(powerof2(sctx->isc_nrxd_default[i]));
5636 }
5637
5638 MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8);
5639 for (i = 0; i < sctx->isc_ntxqs; i++) {
5640 MPASS(sctx->isc_ntxd_min[i]);
5641 MPASS(powerof2(sctx->isc_ntxd_min[i]));
5642 MPASS(sctx->isc_ntxd_max[i]);
5643 MPASS(powerof2(sctx->isc_ntxd_max[i]));
5644 MPASS(sctx->isc_ntxd_default[i]);
5645 MPASS(powerof2(sctx->isc_ntxd_default[i]));
5646 }
5647 }
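
/*
 * Illustrative sketch (hypothetical values): the assertions above are
 * satisfied by a shared context along these lines; note that every
 * descriptor count must be a power of two.
 *
 *	static struct if_shared_ctx foo_sctx = {
 *		.isc_magic = IFLIB_MAGIC,
 *		.isc_ntxqs = 1, .isc_nrxqs = 1,
 *		.isc_tx_maxsize = 65535, .isc_tx_maxsegsize = PAGE_SIZE,
 *		.isc_rx_maxsize = MJUM9BYTES, .isc_rx_nsegments = 1,
 *		.isc_rx_maxsegsize = MJUM9BYTES,
 *		.isc_ntxd_min = {32}, .isc_ntxd_max = {4096},
 *		.isc_ntxd_default = {1024},
 *		.isc_nrxd_min = {32}, .isc_nrxd_max = {4096},
 *		.isc_nrxd_default = {1024},
 *	};
 */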
5648
5649 static void
5650 _iflib_pre_assert(if_softc_ctx_t scctx)
5651 {
5652
5653 MPASS(scctx->isc_txrx->ift_txd_encap);
5654 MPASS(scctx->isc_txrx->ift_txd_flush);
5655 MPASS(scctx->isc_txrx->ift_txd_credits_update);
5656 MPASS(scctx->isc_txrx->ift_rxd_available);
5657 MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
5658 MPASS(scctx->isc_txrx->ift_rxd_refill);
5659 MPASS(scctx->isc_txrx->ift_rxd_flush);
5660 }
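
/*
 * Illustrative sketch: the handlers checked above come from the
 * driver's struct if_txrx, installed as scctx->isc_txrx before
 * IFDI_ATTACH_PRE returns ("foo" names are hypothetical).
 *
 *	static struct if_txrx foo_txrx = {
 *		.ift_txd_encap = foo_isc_txd_encap,
 *		.ift_txd_flush = foo_isc_txd_flush,
 *		.ift_txd_credits_update = foo_isc_txd_credits_update,
 *		.ift_rxd_available = foo_isc_rxd_available,
 *		.ift_rxd_pkt_get = foo_isc_rxd_pkt_get,
 *		.ift_rxd_refill = foo_isc_rxd_refill,
 *		.ift_rxd_flush = foo_isc_rxd_flush,
 *	};
 */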
5661
5662 static void
5663 iflib_register(if_ctx_t ctx)
5664 {
5665 if_shared_ctx_t sctx = ctx->ifc_sctx;
5666 driver_t *driver = sctx->isc_driver;
5667 device_t dev = ctx->ifc_dev;
5668 if_t ifp;
5669
5670 _iflib_assert(sctx);
5671
5672 CTX_LOCK_INIT(ctx);
5673 STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
5674 ifp = ctx->ifc_ifp = if_alloc_dev(IFT_ETHER, dev);
5675
5676 /*
5677 * Initialize our context's device specific methods
5678 */
5679 kobj_init((kobj_t) ctx, (kobj_class_t) driver);
5680 kobj_class_compile((kobj_class_t) driver);
5681
5682 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
5683 if_setsoftc(ifp, ctx);
5684 if_setdev(ifp, dev);
5685 if_setinitfn(ifp, iflib_if_init);
5686 if_setioctlfn(ifp, iflib_if_ioctl);
5687 #ifdef ALTQ
5688 if_setstartfn(ifp, iflib_altq_if_start);
5689 if_settransmitfn(ifp, iflib_altq_if_transmit);
5690 if_setsendqready(ifp);
5691 #else
5692 if_settransmitfn(ifp, iflib_if_transmit);
5693 #endif
5694 if_setqflushfn(ifp, iflib_if_qflush);
5695 if_setgetcounterfn(ifp, iflib_if_get_counter);
5696 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
5697 ctx->ifc_vlan_attach_event =
5698 EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
5699 EVENTHANDLER_PRI_FIRST);
5700 ctx->ifc_vlan_detach_event =
5701 EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
5702 EVENTHANDLER_PRI_FIRST);
5703
5704 if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) {
5705 ctx->ifc_mediap = &ctx->ifc_media;
5706 ifmedia_init(ctx->ifc_mediap, IFM_IMASK,
5707 iflib_media_change, iflib_media_status);
5708 }
5709 }
5710
5711 static void
5712 iflib_unregister_vlan_handlers(if_ctx_t ctx)
5713 {
5714 /* Unregister VLAN events */
5715 if (ctx->ifc_vlan_attach_event != NULL) {
5716 EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
5717 ctx->ifc_vlan_attach_event = NULL;
5718 }
5719 if (ctx->ifc_vlan_detach_event != NULL) {
5720 EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
5721 ctx->ifc_vlan_detach_event = NULL;
5722 }
5724 }
5725
5726 static void
5727 iflib_deregister(if_ctx_t ctx)
5728 {
5729 if_t ifp = ctx->ifc_ifp;
5730
5731 /* Remove all media */
5732 ifmedia_removeall(&ctx->ifc_media);
5733
5734 /* Ensure that VLAN event handlers are unregistered */
5735 iflib_unregister_vlan_handlers(ctx);
5736
5737 /* Release kobject reference */
5738 kobj_delete((kobj_t) ctx, NULL);
5739
5740 /* Free the ifnet structure */
5741 if_free(ifp);
5742
5743 STATE_LOCK_DESTROY(ctx);
5744
5745 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
5746 CTX_LOCK_DESTROY(ctx);
5747 }
5748
5749 static int
5750 iflib_queues_alloc(if_ctx_t ctx)
5751 {
5752 if_shared_ctx_t sctx = ctx->ifc_sctx;
5753 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5754 device_t dev = ctx->ifc_dev;
5755 int nrxqsets = scctx->isc_nrxqsets;
5756 int ntxqsets = scctx->isc_ntxqsets;
5757 iflib_txq_t txq;
5758 iflib_rxq_t rxq;
5759 iflib_fl_t fl = NULL;
5760 int i, j, cpu, err, txconf, rxconf;
5761 iflib_dma_info_t ifdip;
5762 uint32_t *rxqsizes = scctx->isc_rxqsizes;
5763 uint32_t *txqsizes = scctx->isc_txqsizes;
5764 uint8_t nrxqs = sctx->isc_nrxqs;
5765 uint8_t ntxqs = sctx->isc_ntxqs;
5766 int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
5767 int fl_offset = (sctx->isc_flags & IFLIB_HAS_RXCQ ? 1 : 0);
5768 caddr_t *vaddrs;
5769 uint64_t *paddrs;
5770
5771 KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
5772 KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
5773 KASSERT(nrxqs >= fl_offset + nfree_lists,
5774 ("there must be at least a rxq for each free list"));
5775
5776 /* Allocate the TX ring struct memory */
5777 if (!(ctx->ifc_txqs =
5778 (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
5779 ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5780 device_printf(dev, "Unable to allocate TX ring memory\n");
5781 err = ENOMEM;
5782 goto fail;
5783 }
5784
5785 /* Now allocate the RX */
5786 if (!(ctx->ifc_rxqs =
5787 (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
5788 nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5789 device_printf(dev, "Unable to allocate RX ring memory\n");
5790 err = ENOMEM;
5791 goto rx_fail;
5792 }
5793
5794 txq = ctx->ifc_txqs;
5795 rxq = ctx->ifc_rxqs;
5796
5797 /*
5798 * XXX handle allocation failure
5799 */
5800 for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
5801 /* Set up some basics */
5802
5803 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
5804 M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5805 device_printf(dev,
5806 "Unable to allocate TX DMA info memory\n");
5807 err = ENOMEM;
5808 goto err_tx_desc;
5809 }
5810 txq->ift_ifdi = ifdip;
5811 for (j = 0; j < ntxqs; j++, ifdip++) {
5812 if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
5813 device_printf(dev,
5814 "Unable to allocate TX descriptors\n");
5815 err = ENOMEM;
5816 goto err_tx_desc;
5817 }
5818 txq->ift_txd_size[j] = scctx->isc_txd_size[j];
5819 bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
5820 }
5821 txq->ift_ctx = ctx;
5822 txq->ift_id = i;
5823 if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
5824 txq->ift_br_offset = 1;
5825 } else {
5826 txq->ift_br_offset = 0;
5827 }
5828
5829 if (iflib_txsd_alloc(txq)) {
5830 device_printf(dev, "Critical Failure setting up TX buffers\n");
5831 err = ENOMEM;
5832 goto err_tx_desc;
5833 }
5834
5835 /* Initialize the TX lock */
5836 snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout",
5837 device_get_nameunit(dev), txq->ift_id);
5838 mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
5839 callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
5840 txq->ift_timer.c_cpu = cpu;
5841 #ifdef DEV_NETMAP
5842 callout_init_mtx(&txq->ift_netmap_timer, &txq->ift_mtx, 0);
5843 txq->ift_netmap_timer.c_cpu = cpu;
5844 #endif /* DEV_NETMAP */
5845
5846 err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
5847 iflib_txq_can_drain, M_IFLIB, M_WAITOK);
5848 if (err) {
5849 /* XXX free any allocated rings */
5850 device_printf(dev, "Unable to allocate buf_ring\n");
5851 goto err_tx_desc;
5852 }
5853 txq->ift_reclaim_thresh = ctx->ifc_sysctl_tx_reclaim_thresh;
5854 }
5855
5856 for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
5857 /* Set up some basics */
5858 callout_init(&rxq->ifr_watchdog, 1);
5859
5860 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
5861 M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5862 device_printf(dev,
5863 "Unable to allocate RX DMA info memory\n");
5864 err = ENOMEM;
5865 goto err_tx_desc;
5866 }
5867
5868 rxq->ifr_ifdi = ifdip;
5869 /* XXX this needs to be changed if #rx queues != #tx queues */
5870 rxq->ifr_ntxqirq = 1;
5871 rxq->ifr_txqid[0] = i;
5872 for (j = 0; j < nrxqs; j++, ifdip++) {
5873 if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
5874 device_printf(dev,
5875 "Unable to allocate RX descriptors\n");
5876 err = ENOMEM;
5877 goto err_tx_desc;
5878 }
5879 bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
5880 }
5881 rxq->ifr_ctx = ctx;
5882 rxq->ifr_id = i;
5883 rxq->ifr_fl_offset = fl_offset;
5884 rxq->ifr_nfl = nfree_lists;
5885 if (!(fl =
5886 (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
5887 device_printf(dev, "Unable to allocate free list memory\n");
5888 err = ENOMEM;
5889 goto err_tx_desc;
5890 }
5891 rxq->ifr_fl = fl;
5892 for (j = 0; j < nfree_lists; j++) {
5893 fl[j].ifl_rxq = rxq;
5894 fl[j].ifl_id = j;
5895 fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
5896 fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
5897 }
5898 /* Allocate receive buffers for the ring */
5899 if (iflib_rxsd_alloc(rxq)) {
5900 device_printf(dev,
5901 "Critical Failure setting up receive buffers\n");
5902 err = ENOMEM;
5903 goto err_rx_desc;
5904 }
5905
5906 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
5907 fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB,
5908 M_WAITOK);
5909 }
5910
5911 /* TXQs */
5912 vaddrs = malloc(sizeof(caddr_t) * ntxqsets * ntxqs, M_IFLIB, M_WAITOK);
5913 paddrs = malloc(sizeof(uint64_t) * ntxqsets * ntxqs, M_IFLIB, M_WAITOK);
5914 for (i = 0; i < ntxqsets; i++) {
5915 iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
5916
5917 for (j = 0; j < ntxqs; j++, di++) {
5918 vaddrs[i * ntxqs + j] = di->idi_vaddr;
5919 paddrs[i * ntxqs + j] = di->idi_paddr;
5920 }
5921 }
5922 if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
5923 device_printf(ctx->ifc_dev,
5924 "Unable to allocate device TX queue\n");
5925 iflib_tx_structures_free(ctx);
5926 free(vaddrs, M_IFLIB);
5927 free(paddrs, M_IFLIB);
5928 goto err_rx_desc;
5929 }
5930 free(vaddrs, M_IFLIB);
5931 free(paddrs, M_IFLIB);
5932
5933 /* RXQs */
5934 vaddrs = malloc(sizeof(caddr_t) * nrxqsets * nrxqs, M_IFLIB, M_WAITOK);
5935 paddrs = malloc(sizeof(uint64_t) * nrxqsets * nrxqs, M_IFLIB, M_WAITOK);
5936 for (i = 0; i < nrxqsets; i++) {
5937 iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
5938
5939 for (j = 0; j < nrxqs; j++, di++) {
5940 vaddrs[i * nrxqs + j] = di->idi_vaddr;
5941 paddrs[i * nrxqs + j] = di->idi_paddr;
5942 }
5943 }
5944 if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
5945 device_printf(ctx->ifc_dev,
5946 "Unable to allocate device RX queue\n");
5947 iflib_tx_structures_free(ctx);
5948 free(vaddrs, M_IFLIB);
5949 free(paddrs, M_IFLIB);
5950 goto err_rx_desc;
5951 }
5952 free(vaddrs, M_IFLIB);
5953 free(paddrs, M_IFLIB);
5954
5955 return (0);
5956
5957 /* XXX handle allocation failure changes */
5958 err_rx_desc:
5959 err_tx_desc:
5960 rx_fail:
5961 if (ctx->ifc_rxqs != NULL)
5962 free(ctx->ifc_rxqs, M_IFLIB);
5963 ctx->ifc_rxqs = NULL;
5964 if (ctx->ifc_txqs != NULL)
5965 free(ctx->ifc_txqs, M_IFLIB);
5966 ctx->ifc_txqs = NULL;
5967 fail:
5968 return (err);
5969 }
5970
5971 static int
5972 iflib_tx_structures_setup(if_ctx_t ctx)
5973 {
5974 iflib_txq_t txq = ctx->ifc_txqs;
5975 int i;
5976
5977 for (i = 0; i < NTXQSETS(ctx); i++, txq++)
5978 iflib_txq_setup(txq);
5979
5980 return (0);
5981 }
5982
5983 static void
5984 iflib_tx_structures_free(if_ctx_t ctx)
5985 {
5986 iflib_txq_t txq = ctx->ifc_txqs;
5987 if_shared_ctx_t sctx = ctx->ifc_sctx;
5988 int i, j;
5989
5990 for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
5991 for (j = 0; j < sctx->isc_ntxqs; j++)
5992 iflib_dma_free(&txq->ift_ifdi[j]);
5993 iflib_txq_destroy(txq);
5994 }
5995 free(ctx->ifc_txqs, M_IFLIB);
5996 ctx->ifc_txqs = NULL;
5997 }
5998
5999 /*********************************************************************
6000 *
6001 * Initialize all receive rings.
6002 *
6003 **********************************************************************/
6004 static int
6005 iflib_rx_structures_setup(if_ctx_t ctx)
6006 {
6007 iflib_rxq_t rxq = ctx->ifc_rxqs;
6008 int q;
6009 #if defined(INET6) || defined(INET)
6010 int err, i;
6011 #endif
6012
6013 for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
6014 #if defined(INET6) || defined(INET)
6015 err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
6016 TCP_LRO_ENTRIES, min(1024,
6017 ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]));
6018 if (err != 0) {
6019 device_printf(ctx->ifc_dev,
6020 "LRO Initialization failed!\n");
6021 goto fail;
6022 }
6023 #endif
6024 IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
6025 }
6026 return (0);
6027 #if defined(INET6) || defined(INET)
6028 fail:
6029 /*
6030 	 * Free the LRO resources allocated so far; we handle only the
6031 	 * rings that completed, since the failing ring will have cleaned
6032 	 * up after itself. 'q' failed, so it's the terminus.
6033 */
6034 rxq = ctx->ifc_rxqs;
6035 for (i = 0; i < q; ++i, rxq++) {
6036 tcp_lro_free(&rxq->ifr_lc);
6037 }
6038 return (err);
6039 #endif
6040 }
6041
6042 /*********************************************************************
6043 *
6044 * Free all receive rings.
6045 *
6046 **********************************************************************/
6047 static void
6048 iflib_rx_structures_free(if_ctx_t ctx)
6049 {
6050 iflib_rxq_t rxq = ctx->ifc_rxqs;
6051 if_shared_ctx_t sctx = ctx->ifc_sctx;
6052 int i, j;
6053
6054 for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
6055 for (j = 0; j < sctx->isc_nrxqs; j++)
6056 iflib_dma_free(&rxq->ifr_ifdi[j]);
6057 iflib_rx_sds_free(rxq);
6058 #if defined(INET6) || defined(INET)
6059 tcp_lro_free(&rxq->ifr_lc);
6060 #endif
6061 }
6062 free(ctx->ifc_rxqs, M_IFLIB);
6063 ctx->ifc_rxqs = NULL;
6064 }
6065
6066 static int
6067 iflib_qset_structures_setup(if_ctx_t ctx)
6068 {
6069 int err;
6070
6071 /*
6072 * It is expected that the caller takes care of freeing queues if this
6073 * fails.
6074 */
6075 if ((err = iflib_tx_structures_setup(ctx)) != 0) {
6076 device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
6077 return (err);
6078 }
6079
6080 if ((err = iflib_rx_structures_setup(ctx)) != 0)
6081 device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
6082
6083 return (err);
6084 }
6085
6086 int
6087 iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
6088 driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
6089 {
6090
6091 return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
6092 }
6093
6094 /* Just to avoid copy/paste */
6095 static inline int
6096 iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
6097 int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
6098 const char *name)
6099 {
6100 device_t dev;
6101 unsigned int base_cpuid, cpuid;
6102 int err;
6103
6104 dev = ctx->ifc_dev;
6105 base_cpuid = ctx->ifc_sysctl_core_offset;
6106 cpuid = get_cpuid_for_queue(ctx, base_cpuid, qid, type == IFLIB_INTR_TX);
6107 err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev,
6108 irq ? irq->ii_res : NULL, name);
6109 if (err) {
6110 device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
6111 return (err);
6112 }
6113 #ifdef notyet
6114 if (cpuid > ctx->ifc_cpuid_highest)
6115 ctx->ifc_cpuid_highest = cpuid;
6116 #endif
6117 return (0);
6118 }
6119
6120 /*
6121 * Allocate a hardware interrupt for subctx using the parent (ctx)'s hardware
6122 * resources.
6123 *
6124 * Similar to iflib_irq_alloc_generic(), but for interrupt type IFLIB_INTR_RXTX
6125 * only.
6126 *
6127 * XXX: Could be removed if subctx's dev has its intr resource allocation
6128 * methods replaced with custom ones?
6129 */
6130 int
6131 iflib_irq_alloc_generic_subctx(if_ctx_t ctx, if_ctx_t subctx, if_irq_t irq,
6132 int rid, iflib_intr_type_t type,
6133 driver_filter_t *filter, void *filter_arg,
6134 int qid, const char *name)
6135 {
6136 device_t dev, subdev;
6137 struct grouptask *gtask;
6138 struct taskqgroup *tqg;
6139 iflib_filter_info_t info;
6140 gtask_fn_t *fn;
6141 int tqrid, err;
6142 driver_filter_t *intr_fast;
6143 void *q;
6144
6145 MPASS(ctx != NULL);
6146 MPASS(subctx != NULL);
6147
6148 tqrid = rid;
6149 dev = ctx->ifc_dev;
6150 subdev = subctx->ifc_dev;
6151
6152 switch (type) {
6153 case IFLIB_INTR_RXTX:
6154 q = &subctx->ifc_rxqs[qid];
6155 info = &subctx->ifc_rxqs[qid].ifr_filter_info;
6156 gtask = &subctx->ifc_rxqs[qid].ifr_task;
6157 tqg = qgroup_if_io_tqg;
6158 fn = _task_fn_rx;
6159 intr_fast = iflib_fast_intr_rxtx;
6160 NET_GROUPTASK_INIT(gtask, 0, fn, q);
6161 break;
6162 default:
6163 device_printf(dev, "%s: unknown net intr type for subctx %s (%d)\n",
6164 __func__, device_get_nameunit(subdev), type);
6165 return (EINVAL);
6166 }
6167
6168 info->ifi_filter = filter;
6169 info->ifi_filter_arg = filter_arg;
6170 info->ifi_task = gtask;
6171 info->ifi_ctx = q;
6172 
6175 /* Allocate interrupts from hardware using parent context */
6176 err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
6177 if (err != 0) {
6178 device_printf(dev, "_iflib_irq_alloc failed for subctx %s: %d\n",
6179 device_get_nameunit(subdev), err);
6180 return (err);
6181 }
6182
6183 if (tqrid != -1) {
6184 err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q,
6185 name);
6186 if (err)
6187 return (err);
6188 } else {
6189 taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
6190 }
6191
6192 return (0);
6193 }
6194
6195 int
6196 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
6197 iflib_intr_type_t type, driver_filter_t *filter,
6198 void *filter_arg, int qid, const char *name)
6199 {
6200 device_t dev;
6201 struct grouptask *gtask;
6202 struct taskqgroup *tqg;
6203 iflib_filter_info_t info;
6204 gtask_fn_t *fn;
6205 int tqrid, err;
6206 driver_filter_t *intr_fast;
6207 void *q;
6208
6209 info = &ctx->ifc_filter_info;
6210 tqrid = rid;
6211
6212 switch (type) {
6213 /* XXX merge tx/rx for netmap? */
6214 case IFLIB_INTR_TX:
6215 q = &ctx->ifc_txqs[qid];
6216 info = &ctx->ifc_txqs[qid].ift_filter_info;
6217 gtask = &ctx->ifc_txqs[qid].ift_task;
6218 tqg = qgroup_if_io_tqg;
6219 fn = _task_fn_tx;
6220 intr_fast = iflib_fast_intr;
6221 GROUPTASK_INIT(gtask, 0, fn, q);
6222 ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
6223 break;
6224 case IFLIB_INTR_RX:
6225 q = &ctx->ifc_rxqs[qid];
6226 info = &ctx->ifc_rxqs[qid].ifr_filter_info;
6227 gtask = &ctx->ifc_rxqs[qid].ifr_task;
6228 tqg = qgroup_if_io_tqg;
6229 fn = _task_fn_rx;
6230 intr_fast = iflib_fast_intr;
6231 NET_GROUPTASK_INIT(gtask, 0, fn, q);
6232 break;
6233 case IFLIB_INTR_RXTX:
6234 q = &ctx->ifc_rxqs[qid];
6235 info = &ctx->ifc_rxqs[qid].ifr_filter_info;
6236 gtask = &ctx->ifc_rxqs[qid].ifr_task;
6237 tqg = qgroup_if_io_tqg;
6238 fn = _task_fn_rx;
6239 intr_fast = iflib_fast_intr_rxtx;
6240 NET_GROUPTASK_INIT(gtask, 0, fn, q);
6241 break;
6242 case IFLIB_INTR_ADMIN:
6243 q = ctx;
6244 tqrid = -1;
6245 info = &ctx->ifc_filter_info;
6246 gtask = NULL;
6247 intr_fast = iflib_fast_intr_ctx;
6248 break;
6249 default:
6250 device_printf(ctx->ifc_dev, "%s: unknown net intr type\n",
6251 __func__);
6252 return (EINVAL);
6253 }
6254
6255 info->ifi_filter = filter;
6256 info->ifi_filter_arg = filter_arg;
6257 info->ifi_task = gtask;
6258 info->ifi_ctx = q;
6259
6260 dev = ctx->ifc_dev;
6261 err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
6262 if (err != 0) {
6263 device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
6264 return (err);
6265 }
6266 if (type == IFLIB_INTR_ADMIN)
6267 return (0);
6268
6269 if (tqrid != -1) {
6270 err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q,
6271 name);
6272 if (err)
6273 return (err);
6274 } else {
6275 taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
6276 }
6277
6278 return (0);
6279 }
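
/*
 * Illustrative sketch: a driver's IFDI_MSIX_INTR_ASSIGN method usually
 * calls iflib_irq_alloc_generic() once per RX queue and once for the
 * admin interrupt, then registers TX queues as soft interrupts via
 * iflib_softirq_alloc_generic() below ("foo" names are hypothetical,
 * error handling elided).
 *
 *	for (i = 0, vector = 1; i < scctx->isc_nrxqsets; i++, vector++) {
 *		snprintf(buf, sizeof(buf), "rxq%d", i);
 *		err = iflib_irq_alloc_generic(ctx, &sc->queues[i].irq,
 *		    vector, IFLIB_INTR_RXTX, foo_msix_que, &sc->queues[i],
 *		    i, buf);
 *	}
 *	err = iflib_irq_alloc_generic(ctx, &sc->irq, vector,
 *	    IFLIB_INTR_ADMIN, foo_msix_link, sc, 0, "aq");
 */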
6280
6281 void
6282 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
6283 void *arg, int qid, const char *name)
6284 {
6285 device_t dev;
6286 struct grouptask *gtask;
6287 struct taskqgroup *tqg;
6288 gtask_fn_t *fn;
6289 void *q;
6290 int err;
6291
6292 switch (type) {
6293 case IFLIB_INTR_TX:
6294 q = &ctx->ifc_txqs[qid];
6295 gtask = &ctx->ifc_txqs[qid].ift_task;
6296 tqg = qgroup_if_io_tqg;
6297 fn = _task_fn_tx;
6298 GROUPTASK_INIT(gtask, 0, fn, q);
6299 break;
6300 case IFLIB_INTR_RX:
6301 q = &ctx->ifc_rxqs[qid];
6302 gtask = &ctx->ifc_rxqs[qid].ifr_task;
6303 tqg = qgroup_if_io_tqg;
6304 fn = _task_fn_rx;
6305 NET_GROUPTASK_INIT(gtask, 0, fn, q);
6306 break;
6307 case IFLIB_INTR_IOV:
6308 TASK_INIT(&ctx->ifc_vflr_task, 0, _task_fn_iov, ctx);
6309 return;
6310 default:
6311 panic("unknown net intr type");
6312 }
6313 err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q, name);
6314 if (err) {
6315 dev = ctx->ifc_dev;
6316 taskqgroup_attach(tqg, gtask, q, dev, irq ? irq->ii_res : NULL,
6317 name);
6318 }
6319 }
6320
6321 void
6322 iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
6323 {
6324
6325 if (irq->ii_tag)
6326 bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
6327
6328 if (irq->ii_res)
6329 bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ,
6330 rman_get_rid(irq->ii_res), irq->ii_res);
6331 }
6332
6333 static int
6334 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
6335 {
6336 iflib_txq_t txq = ctx->ifc_txqs;
6337 iflib_rxq_t rxq = ctx->ifc_rxqs;
6338 if_irq_t irq = &ctx->ifc_legacy_irq;
6339 iflib_filter_info_t info;
6340 device_t dev;
6341 struct grouptask *gtask;
6342 struct resource *res;
6343 int err, tqrid;
6344 bool rx_only;
6345
6346 info = &rxq->ifr_filter_info;
6347 gtask = &rxq->ifr_task;
6348 tqrid = *rid;
6349 rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0;
6350
6351 ctx->ifc_flags |= IFC_LEGACY;
6352 info->ifi_filter = filter;
6353 info->ifi_filter_arg = filter_arg;
6354 info->ifi_task = gtask;
6355 info->ifi_ctx = rxq;
6356
6357 dev = ctx->ifc_dev;
6358 /* We allocate a single interrupt resource */
6359 err = _iflib_irq_alloc(ctx, irq, tqrid, rx_only ? iflib_fast_intr :
6360 iflib_fast_intr_rxtx, NULL, info, name);
6361 if (err != 0)
6362 return (err);
6363 NET_GROUPTASK_INIT(gtask, 0, _task_fn_rx, rxq);
6364 res = irq->ii_res;
6365 taskqgroup_attach(qgroup_if_io_tqg, gtask, rxq, dev, res, name);
6366
6367 GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
6368 taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
6369 "tx");
6370 return (0);
6371 }
6372
6373 void
6374 iflib_led_create(if_ctx_t ctx)
6375 {
6376
6377 ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
6378 device_get_nameunit(ctx->ifc_dev));
6379 }
6380
6381 void
6382 iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
6383 {
6384
6385 GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
6386 }
6387
6388 void
6389 iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
6390 {
6391
6392 GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
6393 }
6394
6395 void
6396 iflib_admin_intr_deferred(if_ctx_t ctx)
6397 {
6398
6399 taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_admin_task);
6400 }
6401
6402 void
6403 iflib_iov_intr_deferred(if_ctx_t ctx)
6404 {
6405
6406 taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_vflr_task);
6407 }
6408
6409 void
6410 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, const char *name)
6411 {
6412
6413 taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
6414 name);
6415 }
6416
6417 void
6418 iflib_config_task_init(if_ctx_t ctx, struct task *config_task, task_fn_t *fn)
6419 {
6420 TASK_INIT(config_task, 0, fn, ctx);
6421 }
6422
6423 void
6424 iflib_config_task_enqueue(if_ctx_t ctx, struct task *config_task)
6425 {
6426 taskqueue_enqueue(ctx->ifc_tq, config_task);
6427 }
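
/*
 * Illustrative ("foo" names are hypothetical): a driver defers
 * sleepable work to the iflib config taskqueue with this pair:
 *
 *	iflib_config_task_init(ctx, &sc->mod_task, foo_handle_module_event);
 *	...
 *	iflib_config_task_enqueue(ctx, &sc->mod_task);
 */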
6428
6429 void
6430 iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
6431 {
6432 if_t ifp = ctx->ifc_ifp;
6433 iflib_txq_t txq = ctx->ifc_txqs;
6434
6435 if_setbaudrate(ifp, baudrate);
6436 if (baudrate >= IF_Gbps(10)) {
6437 STATE_LOCK(ctx);
6438 ctx->ifc_flags |= IFC_PREFETCH;
6439 STATE_UNLOCK(ctx);
6440 }
6441 /* If link down, disable watchdog */
6442 if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
6443 for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
6444 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
6445 }
6446 ctx->ifc_link_state = link_state;
6447 if_link_state_change(ifp, link_state);
6448 }
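
/*
 * Illustrative: drivers report link transitions from their
 * IFDI_UPDATE_ADMIN_STATUS method, e.g.
 *
 *	iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
 *
 * which, per the check above, also enables RX prefetching for links
 * running at 10 Gb/s or faster.
 */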
6449
6450 static int
6451 iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
6452 {
6453 int credits;
6454 #ifdef INVARIANTS
6455 int credits_pre = txq->ift_cidx_processed;
6456 #endif
6457
6458 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
6459 BUS_DMASYNC_POSTREAD);
6460 if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
6461 return (0);
6462
6463 txq->ift_processed += credits;
6464 txq->ift_cidx_processed += credits;
6465
6466 MPASS(credits_pre + credits == txq->ift_cidx_processed);
6467 if (txq->ift_cidx_processed >= txq->ift_size)
6468 txq->ift_cidx_processed -= txq->ift_size;
6469 return (credits);
6470 }
6471
6472 static int
6473 iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
6474 {
6475 iflib_fl_t fl;
6476 u_int i;
6477
6478 for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++)
6479 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
6480 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6481 return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
6482 budget));
6483 }
6484
6485 void
6486 iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
6487 const char *description, if_int_delay_info_t info,
6488 int offset, int value)
6489 {
6490 info->iidi_ctx = ctx;
6491 info->iidi_offset = offset;
6492 info->iidi_value = value;
6493 SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
6494 SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
6495 OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
6496 info, 0, iflib_sysctl_int_delay, "I", description);
6497 }
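
/*
 * Illustrative sketch ("FOO"/"foo" names are hypothetical): e1000-style
 * drivers use this helper to expose register-backed interrupt delay
 * knobs, e.g.
 *
 *	iflib_add_int_delay_sysctl(ctx, "rx_int_delay",
 *	    "receive interrupt delay in usecs", &sc->rx_int_delay,
 *	    FOO_RDTR_REG, FOO_RDTR_DEFAULT);
 */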
6498
6499 struct sx *
6500 iflib_ctx_lock_get(if_ctx_t ctx)
6501 {
6502
6503 return (&ctx->ifc_ctx_sx);
6504 }
6505
6506 static int
6507 iflib_msix_init(if_ctx_t ctx)
6508 {
6509 device_t dev = ctx->ifc_dev;
6510 if_shared_ctx_t sctx = ctx->ifc_sctx;
6511 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6512 int admincnt, bar, err, iflib_num_rx_queues, iflib_num_tx_queues;
6513 int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors;
6514
6515 iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
6516 iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
6517
6518 if (bootverbose)
6519 device_printf(dev, "msix_init qsets capped at %d\n",
6520 imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
6521
6522 	/* Override by tunable */
6523 if (scctx->isc_disable_msix)
6524 goto msi;
6525
6526 /* First try MSI-X */
6527 if ((msgs = pci_msix_count(dev)) == 0) {
6528 if (bootverbose)
6529 device_printf(dev, "MSI-X not supported or disabled\n");
6530 goto msi;
6531 }
6532
6533 bar = ctx->ifc_softc_ctx.isc_msix_bar;
6534 /*
6535 * bar == -1 => "trust me I know what I'm doing"
6536 * Some drivers are for hardware that is so shoddily
6537 * documented that no one knows which bars are which
6538 * so the developer has to map all bars. This hack
6539 * allows shoddy garbage to use MSI-X in this framework.
6540 */
6541 if (bar != -1) {
6542 ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
6543 SYS_RES_MEMORY, &bar, RF_ACTIVE);
6544 if (ctx->ifc_msix_mem == NULL) {
6545 device_printf(dev, "Unable to map MSI-X table\n");
6546 goto msi;
6547 }
6548 }
6549
6550 admincnt = sctx->isc_admin_intrcnt;
6551 #if IFLIB_DEBUG
6552 /* use only 1 qset in debug mode */
6553 queuemsgs = min(msgs - admincnt, 1);
6554 #else
6555 queuemsgs = msgs - admincnt;
6556 #endif
6557 #ifdef RSS
6558 queues = imin(queuemsgs, rss_getnumbuckets());
6559 #else
6560 queues = queuemsgs;
6561 #endif
6562 queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
6563 if (bootverbose)
6564 device_printf(dev,
6565 "intr CPUs: %d queue msgs: %d admincnt: %d\n",
6566 CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
6567 #ifdef RSS
6568 /* If we're doing RSS, clamp at the number of RSS buckets */
6569 if (queues > rss_getnumbuckets())
6570 queues = rss_getnumbuckets();
6571 #endif
6572 if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
6573 rx_queues = iflib_num_rx_queues;
6574 else
6575 rx_queues = queues;
6576
6577 if (rx_queues > scctx->isc_nrxqsets)
6578 rx_queues = scctx->isc_nrxqsets;
6579
6580 /*
6581 * We want this to be all logical CPUs by default
6582 */
6583 if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
6584 tx_queues = iflib_num_tx_queues;
6585 else
6586 tx_queues = mp_ncpus;
6587
6588 if (tx_queues > scctx->isc_ntxqsets)
6589 tx_queues = scctx->isc_ntxqsets;
6590
6591 if (ctx->ifc_sysctl_qs_eq_override == 0) {
6592 #ifdef INVARIANTS
6593 if (tx_queues != rx_queues)
6594 device_printf(dev,
6595 "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
6596 min(rx_queues, tx_queues), min(rx_queues, tx_queues));
6597 #endif
6598 tx_queues = min(rx_queues, tx_queues);
6599 rx_queues = min(rx_queues, tx_queues);
6600 }
6601
6602 vectors = rx_queues + admincnt;
6603 if (msgs < vectors) {
6604 device_printf(dev,
6605 "insufficient number of MSI-X vectors "
6606 "(supported %d, need %d)\n", msgs, vectors);
6607 goto msi;
6608 }
6609
6610 device_printf(dev, "Using %d RX queues %d TX queues\n", rx_queues,
6611 tx_queues);
6612 msgs = vectors;
6613 if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
6614 if (vectors != msgs) {
6615 device_printf(dev,
6616 "Unable to allocate sufficient MSI-X vectors "
6617 "(got %d, need %d)\n", vectors, msgs);
6618 pci_release_msi(dev);
6619 if (bar != -1) {
6620 bus_release_resource(dev, SYS_RES_MEMORY, bar,
6621 ctx->ifc_msix_mem);
6622 ctx->ifc_msix_mem = NULL;
6623 }
6624 goto msi;
6625 }
6626 device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
6627 vectors);
6628 scctx->isc_vectors = vectors;
6629 scctx->isc_nrxqsets = rx_queues;
6630 scctx->isc_ntxqsets = tx_queues;
6631 scctx->isc_intr = IFLIB_INTR_MSIX;
6632
6633 return (vectors);
6634 } else {
6635 device_printf(dev,
6636 "failed to allocate %d MSI-X vectors, err: %d\n", vectors,
6637 err);
6638 if (bar != -1) {
6639 bus_release_resource(dev, SYS_RES_MEMORY, bar,
6640 ctx->ifc_msix_mem);
6641 ctx->ifc_msix_mem = NULL;
6642 }
6643 }
6644
6645 msi:
6646 vectors = pci_msi_count(dev);
6647 scctx->isc_nrxqsets = 1;
6648 scctx->isc_ntxqsets = 1;
6649 scctx->isc_vectors = vectors;
6650 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
6651 device_printf(dev, "Using an MSI interrupt\n");
6652 scctx->isc_intr = IFLIB_INTR_MSI;
6653 } else {
6654 scctx->isc_vectors = 1;
6655 device_printf(dev, "Using a Legacy interrupt\n");
6656 scctx->isc_intr = IFLIB_INTR_LEGACY;
6657 }
6658
6659 return (vectors);
6660 }
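
/*
 * Illustrative: the interrupt mode and queue counts chosen above can be
 * steered from loader.conf through the per-device iflib sysctls added
 * in iflib_add_device_sysctl_pre(), e.g. (placeholder device "foo0"):
 *
 *	dev.foo.0.iflib.disable_msix=1
 *	dev.foo.0.iflib.override_nrxqs=4
 *	dev.foo.0.iflib.override_ntxqs=4
 */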
6661
6662 static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
6663
6664 static int
6665 mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
6666 {
6667 int rc;
6668 uint16_t *state = ((uint16_t *)oidp->oid_arg1);
6669 struct sbuf *sb;
6670 const char *ring_state = "UNKNOWN";
6671
6672 /* XXX needed ? */
6673 rc = sysctl_wire_old_buffer(req, 0);
6674 MPASS(rc == 0);
6675 if (rc != 0)
6676 return (rc);
6677 sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
6678 MPASS(sb != NULL);
6679 if (sb == NULL)
6680 return (ENOMEM);
6681 if (state[3] <= 3)
6682 ring_state = ring_states[state[3]];
6683
6684 	sbuf_printf(sb, "pidx_head: %04hu pidx_tail: %04hu cidx: %04hu state: %s",
6685 state[0], state[1], state[2], ring_state);
6686 rc = sbuf_finish(sb);
6687 sbuf_delete(sb);
6688 return (rc);
6689 }
6690
6691 enum iflib_ndesc_handler {
6692 IFLIB_NTXD_HANDLER,
6693 IFLIB_NRXD_HANDLER,
6694 };
6695
6696 static int
6697 mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
6698 {
6699 if_ctx_t ctx = (void *)arg1;
6700 enum iflib_ndesc_handler type = arg2;
6701 char buf[256] = {0};
6702 qidx_t *ndesc;
6703 char *p, *next;
6704 int nqs, rc, i;
6705
6706 nqs = 8;
6707 switch (type) {
6708 case IFLIB_NTXD_HANDLER:
6709 ndesc = ctx->ifc_sysctl_ntxds;
6710 if (ctx->ifc_sctx)
6711 nqs = ctx->ifc_sctx->isc_ntxqs;
6712 break;
6713 case IFLIB_NRXD_HANDLER:
6714 ndesc = ctx->ifc_sysctl_nrxds;
6715 if (ctx->ifc_sctx)
6716 nqs = ctx->ifc_sctx->isc_nrxqs;
6717 break;
6718 default:
6719 printf("%s: unhandled type\n", __func__);
6720 return (EINVAL);
6721 }
6722 if (nqs == 0)
6723 nqs = 8;
6724
6725 for (i = 0; i < 8; i++) {
6726 if (i >= nqs)
6727 break;
6728 if (i)
6729 strcat(buf, ",");
6730 sprintf(strchr(buf, 0), "%d", ndesc[i]);
6731 }
6732
6733 rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
6734 if (rc || req->newptr == NULL)
6735 return (rc);
6736
6737 for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
6738 i++, p = strsep(&next, " ,")) {
6739 ndesc[i] = strtoul(p, NULL, 10);
6740 }
6741
6742 return (rc);
6743 }
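
/*
 * Illustrative: the handler above accepts a comma- or space-separated
 * list with one entry per queue in a qset, e.g. in loader.conf
 * (placeholder device "foo0"):
 *
 *	dev.foo.0.iflib.override_ntxds="4096"
 *	dev.foo.0.iflib.override_nrxds="2048,1024"
 *
 * where the two RX values would size, for example, a completion queue
 * and a free list separately on hardware with isc_nrxqs > 1.
 */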
6744
6745 static int
6746 iflib_handle_tx_reclaim_thresh(SYSCTL_HANDLER_ARGS)
6747 {
6748 if_ctx_t ctx = (void *)arg1;
6749 iflib_txq_t txq;
6750 int i, err;
6751 int thresh;
6752
6753 thresh = ctx->ifc_sysctl_tx_reclaim_thresh;
6754 err = sysctl_handle_int(oidp, &thresh, arg2, req);
6755 	if (err != 0)
6756 		return (err);
6758
6759 if (thresh == ctx->ifc_sysctl_tx_reclaim_thresh)
6760 		return (0);
6761
6762 if (thresh > ctx->ifc_softc_ctx.isc_ntxd[0] / 2) {
6763 device_printf(ctx->ifc_dev, "TX Reclaim thresh must be <= %d\n",
6764 ctx->ifc_softc_ctx.isc_ntxd[0] / 2);
6765 return (EINVAL);
6766 }
6767
6768 ctx->ifc_sysctl_tx_reclaim_thresh = thresh;
6769 if (ctx->ifc_txqs == NULL)
6770 return (err);
6771
6772 txq = &ctx->ifc_txqs[0];
6773 for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
6774 txq->ift_reclaim_thresh = thresh;
6775 }
6776 return (err);
6777 }
6778
6779 static int
6780 iflib_handle_tx_reclaim_ticks(SYSCTL_HANDLER_ARGS)
6781 {
6782 if_ctx_t ctx = (void *)arg1;
6783 iflib_txq_t txq;
6784 int i, err;
6785 int ticks;
6786
6787 ticks = ctx->ifc_sysctl_tx_reclaim_ticks;
6788 err = sysctl_handle_int(oidp, &ticks, arg2, req);
6789 	if (err != 0)
6790 		return (err);
6792
6793 if (ticks == ctx->ifc_sysctl_tx_reclaim_ticks)
6794 		return (0);
6795
6796 if (ticks > hz) {
6797 device_printf(ctx->ifc_dev,
6798 "TX Reclaim ticks must be <= hz (%d)\n", hz);
6799 return (EINVAL);
6800 }
6801
6802 ctx->ifc_sysctl_tx_reclaim_ticks = ticks;
6803 if (ctx->ifc_txqs == NULL)
6804 return (err);
6805
6806 txq = &ctx->ifc_txqs[0];
6807 for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
6808 txq->ift_reclaim_ticks = ticks;
6809 }
6810 return (err);
6811 }
6812
6813 #define NAME_BUFLEN 32
6814 static void
6815 iflib_add_device_sysctl_pre(if_ctx_t ctx)
6816 {
6817 device_t dev = iflib_get_dev(ctx);
6818 struct sysctl_oid_list *child, *oid_list;
6819 struct sysctl_ctx_list *ctx_list;
6820 struct sysctl_oid *node;
6821
6822 ctx_list = device_get_sysctl_ctx(dev);
6823 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
6824 ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child,
6825 OID_AUTO, "iflib", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
6826 "IFLIB fields");
6827 oid_list = SYSCTL_CHILDREN(node);
6828
6829 SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
6830 CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, "driver version");
6831
6832 SYSCTL_ADD_BOOL(ctx_list, oid_list, OID_AUTO, "simple_tx",
6833 CTLFLAG_RDTUN, &ctx->ifc_sysctl_simple_tx, 0,
6834 "use simple tx ring");
6835 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
6836 CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
6837 "# of txqs to use, 0 => use default #");
6838 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
6839 CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
6840 "# of rxqs to use, 0 => use default #");
6841 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
6842 CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
6843 "permit #txq != #rxq");
6844 SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
6845 CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6846 "disable MSI-X (default 0)");
6847 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6848 CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, "set the RX budget");
6849 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
6850 CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
6851 "cause TX to abdicate instead of running to completion");
6852 ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED;
6853 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset",
6854 CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0,
6855 "offset to start using cores at");
6856 SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx",
6857 CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0,
6858 "use separate cores for TX and RX");
6859 SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "use_logical_cores",
6860 CTLFLAG_RDTUN, &ctx->ifc_sysctl_use_logical_cores, 0,
6861 "try to make use of logical cores for TX and RX");
6862 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "use_extra_msix_vectors",
6863 CTLFLAG_RDTUN, &ctx->ifc_sysctl_extra_msix_vectors, 0,
6864 "attempt to reserve the given number of extra MSI-X vectors during driver load for the creation of additional interfaces later");
6865 SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "allocated_msix_vectors",
6866 CTLFLAG_RDTUN, &ctx->ifc_softc_ctx.isc_vectors, 0,
6867 "total # of MSI-X vectors allocated by driver");
6868
6869 /* XXX change for per-queue sizes */
6870 SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
6871 CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
6872 IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A",
6873 "list of # of TX descriptors to use, 0 = use default #");
6874 SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
6875 CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
6876 IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A",
6877 "list of # of RX descriptors to use, 0 = use default #");
6878 }
6879
6880 static void
6881 iflib_add_device_sysctl_post(if_ctx_t ctx)
6882 {
6883 if_shared_ctx_t sctx = ctx->ifc_sctx;
6884 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6885 device_t dev = iflib_get_dev(ctx);
6886 struct sysctl_oid_list *child;
6887 struct sysctl_ctx_list *ctx_list;
6888 iflib_fl_t fl;
6889 iflib_txq_t txq;
6890 iflib_rxq_t rxq;
6891 int i, j;
6892 char namebuf[NAME_BUFLEN];
6893 char *qfmt;
6894 struct sysctl_oid *queue_node, *fl_node, *node;
6895 struct sysctl_oid_list *queue_list, *fl_list;
6896 ctx_list = device_get_sysctl_ctx(dev);
6897
6898 node = ctx->ifc_sysctl_node;
6899 child = SYSCTL_CHILDREN(node);
6900
6901 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "tx_reclaim_thresh",
6902 CTLTYPE_INT | CTLFLAG_RWTUN, ctx,
6903 0, iflib_handle_tx_reclaim_thresh, "I",
6904 "Number of TX descs outstanding before reclaim is called");
6905
6906 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "tx_reclaim_ticks",
6907 CTLTYPE_INT | CTLFLAG_RWTUN, ctx,
6908 0, iflib_handle_tx_reclaim_ticks, "I",
6909 "Number of ticks before a TX reclaim is forced");
6910
6911 if (scctx->isc_ntxqsets > 100)
6912 qfmt = "txq%03d";
6913 else if (scctx->isc_ntxqsets > 10)
6914 qfmt = "txq%02d";
6915 else
6916 qfmt = "txq%d";
6917 for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
6918 snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6919 queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6920 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
6921 queue_list = SYSCTL_CHILDREN(queue_node);
6922 SYSCTL_ADD_INT(ctx_list, queue_list, OID_AUTO, "cpu",
6923 CTLFLAG_RD, &txq->ift_task.gt_cpu, 0,
6924 "cpu this queue is bound to");
6925 #if MEMORY_LOGGING
6926 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
6927 CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed");
6928 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
6929 CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued");
6930 #endif
6931 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
6932 CTLFLAG_RD, &txq->ift_mbuf_defrag,
6933 "# of times m_defrag was called");
6934 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
6935 CTLFLAG_RD, &txq->ift_pullups,
6936 "# of times m_pullup was called");
6937 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6938 "mbuf_defrag_failed", CTLFLAG_RD,
6939 &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
6940 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6941 "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail,
6942 "# of times no descriptors were available");
6943 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6944 "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed,
6945 "# of times DMA map failed");
6946 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6947 "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig,
6948 "# of times txd_encap returned EFBIG");
6949 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6950 "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup,
6951 "# of times map failed for other than EFBIG");
6952 SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
6953 CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index");
6954 SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
6955 CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index");
6956 SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO,
6957 "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed,
6958 1, "Consumer Index seen by credit update");
6959 SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
6960 CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use");
6961 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6962 "txq_processed", CTLFLAG_RD, &txq->ift_processed,
6963 	    "descriptors processed for clean");
6964 SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
6965 CTLFLAG_RD, &txq->ift_cleaned, "total cleaned");
6966 SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
6967 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
6968 __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
6969 mp_ring_state_handler, "A", "soft ring state");
6970 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6971 "r_enqueues", CTLFLAG_RD, &txq->ift_br->enqueues,
6972 "# of enqueues to the mp_ring for this queue");
6973 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6974 "r_drops", CTLFLAG_RD, &txq->ift_br->drops,
6975 "# of drops in the mp_ring for this queue");
6976 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6977 "r_starts", CTLFLAG_RD, &txq->ift_br->starts,
6978 "# of normal consumer starts in mp_ring for this queue");
6979 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6980 "r_stalls", CTLFLAG_RD, &txq->ift_br->stalls,
6981 "# of consumer stalls in the mp_ring for this queue");
6982 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6983 "r_restarts", CTLFLAG_RD, &txq->ift_br->restarts,
6984 "# of consumer restarts in the mp_ring for this queue");
6985 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6986 "r_abdications", CTLFLAG_RD, &txq->ift_br->abdications,
6987 "# of consumer abdications in the mp_ring for this queue");
6988 }
6989
6990 if (scctx->isc_nrxqsets > 100)
6991 qfmt = "rxq%03d";
6992 else if (scctx->isc_nrxqsets > 10)
6993 qfmt = "rxq%02d";
6994 else
6995 qfmt = "rxq%d";
6996 for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
6997 snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6998 queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6999 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
7000 queue_list = SYSCTL_CHILDREN(queue_node);
7001 SYSCTL_ADD_INT(ctx_list, queue_list, OID_AUTO, "cpu",
7002 CTLFLAG_RD, &rxq->ifr_task.gt_cpu, 0,
7003 "cpu this queue is bound to");
7004 if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
7005 SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO,
7006 "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1,
7007 "Consumer Index");
7008 }
7009
7010 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
7011 snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
7012 fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list,
7013 OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE,
7014 NULL, "freelist Name");
7015 fl_list = SYSCTL_CHILDREN(fl_node);
7016 SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
7017 CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index");
7018 SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
7019 CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index");
7020 SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
7021 CTLFLAG_RD, &fl->ifl_credits, 1,
7022 "credits available");
7023 SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "buf_size",
7024 CTLFLAG_RD, &fl->ifl_buf_size, 1, "buffer size");
7025 #if MEMORY_LOGGING
7026 SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
7027 "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued,
7028 "mbufs allocated");
7029 SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
7030 "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued,
7031 "mbufs freed");
7032 SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
7033 "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued,
7034 "clusters allocated");
7035 SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
7036 "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued,
7037 "clusters freed");
7038 #endif
7039 }
7040 }
7042 }
7043
7044 void
7045 iflib_request_reset(if_ctx_t ctx)
7046 {
7047
7048 STATE_LOCK(ctx);
7049 ctx->ifc_flags |= IFC_DO_RESET;
7050 STATE_UNLOCK(ctx);
7051 }
7052
7053 #ifndef __NO_STRICT_ALIGNMENT
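/*
 * On strict-alignment architectures, a frame DMAed to the head of a
 * cluster leaves the IP header misaligned because the Ethernet header
 * is 14 bytes long. Realign the payload by moving the Ethernet header,
 * either within the leading mbuf (short frames) or into a freshly
 * allocated mbuf prepended to the chain (longer frames).
 */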
7054 static struct mbuf *
7055 iflib_fixup_rx(struct mbuf *m)
7056 {
7057 struct mbuf *n;
7058
7059 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
7060 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
7061 m->m_data += ETHER_HDR_LEN;
7062 n = m;
7063 } else {
7064 MGETHDR(n, M_NOWAIT, MT_DATA);
7065 if (n == NULL) {
7066 m_freem(m);
7067 return (NULL);
7068 }
7069 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
7070 m->m_data += ETHER_HDR_LEN;
7071 m->m_len -= ETHER_HDR_LEN;
7072 n->m_len = ETHER_HDR_LEN;
7073 M_MOVE_PKTHDR(n, m);
7074 n->m_next = m;
7075 }
7076 return (n);
7077 }
7078 #endif
7079
7080 #ifdef DEBUGNET
7081 static void
7082 iflib_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
7083 {
7084 if_ctx_t ctx;
7085
7086 ctx = if_getsoftc(ifp);
7087 CTX_LOCK(ctx);
7088 *nrxr = NRXQSETS(ctx);
7089 *ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
7090 *clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
7091 CTX_UNLOCK(ctx);
7092 }
7093
7094 static void
7095 iflib_debugnet_event(if_t ifp, enum debugnet_ev event)
7096 {
7097 if_ctx_t ctx;
7098 if_softc_ctx_t scctx;
7099 iflib_fl_t fl;
7100 iflib_rxq_t rxq;
7101 int i, j;
7102
7103 ctx = if_getsoftc(ifp);
7104 scctx = &ctx->ifc_softc_ctx;
7105
7106 switch (event) {
7107 case DEBUGNET_START:
7108 for (i = 0; i < scctx->isc_nrxqsets; i++) {
7109 rxq = &ctx->ifc_rxqs[i];
7110 for (j = 0; j < rxq->ifr_nfl; j++) {
7111 				fl = &rxq->ifr_fl[j];
7112 fl->ifl_zone = m_getzone(fl->ifl_buf_size);
7113 }
7114 }
7115 iflib_no_tx_batch = 1;
7116 break;
7117 default:
7118 break;
7119 }
7120 }
7121
7122 static int
7123 iflib_debugnet_transmit(if_t ifp, struct mbuf *m)
7124 {
7125 if_ctx_t ctx;
7126 iflib_txq_t txq;
7127 int error;
7128
7129 ctx = if_getsoftc(ifp);
7130 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
7131 IFF_DRV_RUNNING)
7132 return (EBUSY);
7133
7134 txq = &ctx->ifc_txqs[0];
7135 error = iflib_encap(txq, &m);
7136 if (error == 0)
7137 (void)iflib_txd_db_check(txq, true);
7138 return (error);
7139 }
7140
7141 static int
7142 iflib_debugnet_poll(if_t ifp, int count)
7143 {
7144 struct epoch_tracker et;
7145 if_ctx_t ctx;
7146 if_softc_ctx_t scctx;
7147 iflib_txq_t txq;
7148 int i;
7149
7150 ctx = if_getsoftc(ifp);
7151 scctx = &ctx->ifc_softc_ctx;
7152
7153 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
7154 IFF_DRV_RUNNING)
7155 return (EBUSY);
7156
7157 txq = &ctx->ifc_txqs[0];
7158 (void)iflib_completed_tx_reclaim(txq);
7159
7160 NET_EPOCH_ENTER(et);
7161 for (i = 0; i < scctx->isc_nrxqsets; i++)
7162 (void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
7163 NET_EPOCH_EXIT(et);
7164 return (0);
7165 }
7166 #endif /* DEBUGNET */
7167
7168 #ifndef ALTQ
7169 static inline iflib_txq_t
7170 iflib_simple_select_queue(if_ctx_t ctx, struct mbuf *m)
7171 {
7172 int qidx;
7173
7174 if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
7175 qidx = QIDX(ctx, m);
7176 else
7177 qidx = NTXQSETS(ctx) + FIRST_QSET(ctx) - 1;
7178 return (&ctx->ifc_txqs[qidx]);
7179 }
7180
7181 static int
7182 iflib_simple_transmit(if_t ifp, struct mbuf *m)
7183 {
7184 if_ctx_t ctx;
7185 iflib_txq_t txq;
7186 int error;
7187 int bytes_sent = 0, pkt_sent = 0, mcast_sent = 0;
7188 
7190 ctx = if_getsoftc(ifp);
7191 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
7192 IFF_DRV_RUNNING)
7193 return (EBUSY);
7194 txq = iflib_simple_select_queue(ctx, m);
7195 mtx_lock(&txq->ift_mtx);
7196 error = iflib_encap(txq, &m);
7197 if (error == 0) {
7198 pkt_sent++;
7199 bytes_sent += m->m_pkthdr.len;
7200 mcast_sent += !!(m->m_flags & M_MCAST);
7201 (void)iflib_txd_db_check(txq, true);
7202 } else {
7203 if (error == ENOBUFS)
7204 if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
7205 else
7206 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
7207 }
7208 (void)iflib_completed_tx_reclaim(txq);
7209 mtx_unlock(&txq->ift_mtx);
7210 if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
7211 if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
7212 if (mcast_sent)
7213 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
7214
7215 return (error);
7216 }
7217 #endif
7218