1 /**************************************************************************
2 SPDX-License-Identifier: BSD-2-Clause
3
4 Copyright (c) 2007-2009, Chelsio Inc.
5 All rights reserved.
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9
10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer.
12
13 2. Neither the name of the Chelsio Corporation nor the names of its
14 contributors may be used to endorse or promote products derived from
15 this software without specific prior written permission.
16
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 POSSIBILITY OF SUCH DAMAGE.
28
29 ***************************************************************************/
30
31 #include <sys/cdefs.h>
32 #include "opt_inet.h"
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/module.h>
39 #include <sys/pciio.h>
40 #include <sys/conf.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
43 #include <sys/ktr.h>
44 #include <sys/rman.h>
45 #include <sys/ioccom.h>
46 #include <sys/mbuf.h>
47 #include <sys/linker.h>
48 #include <sys/firmware.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/smp.h>
52 #include <sys/sysctl.h>
53 #include <sys/syslog.h>
54 #include <sys/queue.h>
55 #include <sys/taskqueue.h>
56 #include <sys/proc.h>
57
58 #include <net/bpf.h>
59 #include <net/debugnet.h>
60 #include <net/ethernet.h>
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_arp.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
68
69 #include <netinet/in_systm.h>
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
76
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcivar.h>
79 #include <dev/pci/pci_private.h>
80
81 #include <cxgb_include.h>
82
83 #ifdef PRIV_SUPPORTED
84 #include <sys/priv.h>
85 #endif
86
87 static int cxgb_setup_interrupts(adapter_t *);
88 static void cxgb_teardown_interrupts(adapter_t *);
89 static void cxgb_init(void *);
90 static int cxgb_init_locked(struct port_info *);
91 static int cxgb_uninit_locked(struct port_info *);
92 static int cxgb_uninit_synchronized(struct port_info *);
93 static int cxgb_ioctl(if_t, unsigned long, caddr_t);
94 static int cxgb_media_change(if_t);
95 static int cxgb_ifm_type(int);
96 static void cxgb_build_medialist(struct port_info *);
97 static void cxgb_media_status(if_t, struct ifmediareq *);
98 static uint64_t cxgb_get_counter(if_t, ift_counter);
99 static int setup_sge_qsets(adapter_t *);
100 static void cxgb_async_intr(void *);
101 static void cxgb_tick_handler(void *, int);
102 static void cxgb_tick(void *);
103 static void link_check_callout(void *);
104 static void check_link_status(void *, int);
105 static void setup_rss(adapter_t *sc);
106 static int alloc_filters(struct adapter *);
107 static int setup_hw_filters(struct adapter *);
108 static int set_filter(struct adapter *, int, const struct filter_info *);
109 static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
110 unsigned int, u64, u64);
111 static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
112 unsigned int, u64, u64);
113 #ifdef TCP_OFFLOAD
114 static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *);
115 #endif
116
117 /* Attachment glue for the PCI controller end of the device. Each port of
118 * the device is attached separately, as defined later.
119 */
120 static int cxgb_controller_probe(device_t);
121 static int cxgb_controller_attach(device_t);
122 static int cxgb_controller_detach(device_t);
123 static void cxgb_free(struct adapter *);
124 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
125 unsigned int end);
126 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
127 static int cxgb_get_regs_len(void);
128 static void touch_bars(device_t dev);
129 static void cxgb_update_mac_settings(struct port_info *p);
130 #ifdef TCP_OFFLOAD
131 static int toe_capability(struct port_info *, int);
132 #endif
133
134 /* Table for probing the cards. The desc field isn't actually used */
135 struct cxgb_ident {
136 uint16_t vendor;
137 uint16_t device;
138 int index;
139 char *desc;
140 } cxgb_identifiers[] = {
141 {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
142 {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
143 {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
144 {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
145 {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
146 {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
147 {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
148 {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
149 {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
150 {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
151 {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
152 {PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
153 {PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
154 {PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
155 {0, 0, 0, NULL}
156 };
157
158 static device_method_t cxgb_controller_methods[] = {
159 DEVMETHOD(device_probe, cxgb_controller_probe),
160 DEVMETHOD(device_attach, cxgb_controller_attach),
161 DEVMETHOD(device_detach, cxgb_controller_detach),
162
163 DEVMETHOD_END
164 };
165
166 static driver_t cxgb_controller_driver = {
167 "cxgbc",
168 cxgb_controller_methods,
169 sizeof(struct adapter)
170 };
171
172 static int cxgbc_mod_event(module_t, int, void *);
173
174 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgbc_mod_event, NULL);
175 MODULE_PNP_INFO("U16:vendor;U16:device", pci, cxgbc, cxgb_identifiers,
176 nitems(cxgb_identifiers) - 1);
177 MODULE_VERSION(cxgbc, 1);
178 MODULE_DEPEND(cxgbc, firmware, 1, 1, 1);
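/*
 * Usage note: the controller (cxgbc) and port (cxgb) drivers are typically
 * built together into if_cxgb.ko.  Per cxgb(4), the whole stack can be
 * loaded at boot by adding the following to loader.conf(5):
 *
 *	if_cxgb_load="YES"
 */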
179
180 /*
181 * Attachment glue for the ports. Attachment is done directly to the
182 * controller device.
183 */
184 static int cxgb_port_probe(device_t);
185 static int cxgb_port_attach(device_t);
186 static int cxgb_port_detach(device_t);
187
188 static device_method_t cxgb_port_methods[] = {
189 DEVMETHOD(device_probe, cxgb_port_probe),
190 DEVMETHOD(device_attach, cxgb_port_attach),
191 DEVMETHOD(device_detach, cxgb_port_detach),
192 { 0, 0 }
193 };
194
195 static driver_t cxgb_port_driver = {
196 "cxgb",
197 cxgb_port_methods,
198 0
199 };
200
201 static d_ioctl_t cxgb_extension_ioctl;
202 static d_open_t cxgb_extension_open;
203 static d_close_t cxgb_extension_close;
204
205 static struct cdevsw cxgb_cdevsw = {
206 .d_version = D_VERSION,
207 .d_flags = 0,
208 .d_open = cxgb_extension_open,
209 .d_close = cxgb_extension_close,
210 .d_ioctl = cxgb_extension_ioctl,
211 .d_name = "cxgb",
212 };
213
214 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, 0, 0);
215 MODULE_VERSION(cxgb, 1);
216
217 DEBUGNET_DEFINE(cxgb);
218
219 static struct mtx t3_list_lock;
220 static SLIST_HEAD(, adapter) t3_list;
221 #ifdef TCP_OFFLOAD
222 static struct mtx t3_uld_list_lock;
223 static SLIST_HEAD(, uld_info) t3_uld_list;
224 #endif
225
226 /*
227 * The driver uses the best interrupt scheme available on a platform in the
228 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
229 * of these schemes the driver may consider as follows:
230 *
231 * msi = 2: choose from among all three options
232  *  msi = 1: only consider MSI and pin interrupts
233 * msi = 0: force pin interrupts
234 */
235 static int msi_allowed = 2;
236
237 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
238 "CXGB driver parameters");
239 SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
240 "MSI-X, MSI, INTx selector");
241
242 /*
243 * The driver uses an auto-queue algorithm by default.
244 * To disable it and force a single queue-set per port, use multiq = 0
245 */
246 static int multiq = 1;
247 SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
248 "use min(ncpus/ports, 8) queue-sets per port");
249
250 /*
251 * By default the driver will not update the firmware unless
252  * it was compiled against a newer version.
253 *
254 */
255 static int force_fw_update = 0;
256 SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
257 "update firmware even if up to date");
258
259 int cxgb_use_16k_clusters = -1;
260 SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
261     &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue");
262
263 static int nfilters = -1;
264 SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
265 &nfilters, 0, "max number of entries in the filter table");
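/*
 * Illustrative loader.conf(5) settings: all of the knobs above are
 * boot-time tunables (CTLFLAG_RDTUN), so they take effect at module load
 * rather than at runtime, e.g.:
 *
 *	hw.cxgb.msi_allowed="1"		# MSI or INTx only, never MSI-X
 *	hw.cxgb.multiq="0"		# one queue-set per port
 *	hw.cxgb.force_fw_update="1"	# reflash firmware even if current
 *	hw.cxgb.nfilters="0"		# reserve no hardware filter entries
 */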
266
267 enum {
268 MAX_TXQ_ENTRIES = 16384,
269 MAX_CTRL_TXQ_ENTRIES = 1024,
270 MAX_RSPQ_ENTRIES = 16384,
271 MAX_RX_BUFFERS = 16384,
272 MAX_RX_JUMBO_BUFFERS = 16384,
273 MIN_TXQ_ENTRIES = 4,
274 MIN_CTRL_TXQ_ENTRIES = 4,
275 MIN_RSPQ_ENTRIES = 32,
276 MIN_FL_ENTRIES = 32,
277 MIN_FL_JUMBO_ENTRIES = 32
278 };
279
280 struct filter_info {
281 u32 sip;
282 u32 sip_mask;
283 u32 dip;
284 u16 sport;
285 u16 dport;
286 u32 vlan:12;
287 u32 vlan_prio:3;
288 u32 mac_hit:1;
289 u32 mac_idx:4;
290 u32 mac_vld:1;
291 u32 pkt_type:2;
292 u32 report_filter_id:1;
293 u32 pass:1;
294 u32 rss:1;
295 u32 qset:3;
296 u32 locked:1;
297 u32 valid:1;
298 };
299
300 enum { FILTER_NO_VLAN_PRI = 7 };
301
302 #define EEPROM_MAGIC 0x38E2F10C
303
304 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
305
306
307 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
308
309
310 static __inline char
311 t3rev2char(struct adapter *adapter)
312 {
313 char rev = 'z';
314
315 switch(adapter->params.rev) {
316 case T3_REV_A:
317 rev = 'a';
318 break;
319 case T3_REV_B:
320 case T3_REV_B2:
321 rev = 'b';
322 break;
323 case T3_REV_C:
324 rev = 'c';
325 break;
326 }
327 return rev;
328 }
329
330 static struct cxgb_ident *
331 cxgb_get_ident(device_t dev)
332 {
333 struct cxgb_ident *id;
334
335 for (id = cxgb_identifiers; id->desc != NULL; id++) {
336 if ((id->vendor == pci_get_vendor(dev)) &&
337 (id->device == pci_get_device(dev))) {
338 return (id);
339 }
340 }
341 return (NULL);
342 }
343
344 static const struct adapter_info *
345 cxgb_get_adapter_info(device_t dev)
346 {
347 struct cxgb_ident *id;
348 const struct adapter_info *ai;
349
350 id = cxgb_get_ident(dev);
351 if (id == NULL)
352 return (NULL);
353
354 ai = t3_get_adapter_info(id->index);
355
356 return (ai);
357 }
358
359 static int
360 cxgb_controller_probe(device_t dev)
361 {
362 const struct adapter_info *ai;
363 const char *ports;
364 int nports;
365
366 ai = cxgb_get_adapter_info(dev);
367 if (ai == NULL)
368 return (ENXIO);
369
370 nports = ai->nports0 + ai->nports1;
371 if (nports == 1)
372 ports = "port";
373 else
374 ports = "ports";
375
376 device_set_descf(dev, "%s, %d %s", ai->desc, nports, ports);
377 return (BUS_PROBE_DEFAULT);
378 }
379
380 #define FW_FNAME "cxgb_t3fw"
381 #define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
382 #define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
383
384 static int
385 upgrade_fw(adapter_t *sc)
386 {
387 const struct firmware *fw;
388 int status;
389 u32 vers;
390
391 if ((fw = firmware_get(FW_FNAME)) == NULL) {
392 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
393 return (ENOENT);
394 } else
395 device_printf(sc->dev, "installing firmware on card\n");
396 status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
397
398 if (status != 0) {
399 device_printf(sc->dev, "failed to install firmware: %d\n",
400 status);
401 } else {
402 t3_get_fw_version(sc, &vers);
403 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
404 G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
405 G_FW_VERSION_MICRO(vers));
406 }
407
408 firmware_put(fw, FIRMWARE_UNLOAD);
409
410 return (status);
411 }
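/*
 * Note: firmware_get(9) above looks for an image registered under the name
 * "cxgb_t3fw".  On a stock system that image is normally supplied by the
 * cxgb_t3fw firmware module, which firmware(9) will attempt to load on
 * demand if the image has not been registered yet.
 */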
412
413 /*
414 * The cxgb_controller_attach function is responsible for the initial
415 * bringup of the device. Its responsibilities include:
416 *
417 * 1. Determine if the device supports MSI or MSI-X.
418 * 2. Allocate bus resources so that we can access the Base Address Register
419 * 3. Create and initialize mutexes for the controller and its control
420 * logic such as SGE and MDIO.
421 * 4. Call hardware specific setup routine for the adapter as a whole.
422 * 5. Allocate the BAR for doing MSI-X.
423 * 6. Setup the line interrupt iff MSI-X is not supported.
424 * 7. Create the driver's taskq.
425 * 8. Start one task queue service thread.
426 * 9. Check if the firmware and SRAM are up-to-date. They will be
427 * auto-updated later (before FULL_INIT_DONE), if required.
428 * 10. Create a child device for each MAC (port)
429 * 11. Initialize T3 private state.
430 * 12. Trigger the LED
431 * 13. Setup offload iff supported.
432 * 14. Reset/restart the tick callout.
433 * 15. Attach sysctls
434 *
435 * NOTE: Any modification or deviation from this list MUST be reflected in
436 * the above comment. Failure to do so will result in problems on various
437 * error conditions including link flapping.
438 */
439 static int
440 cxgb_controller_attach(device_t dev)
441 {
442 device_t child;
443 const struct adapter_info *ai;
444 struct adapter *sc;
445 int i, error = 0;
446 uint32_t vers;
447 int port_qsets = 1;
448 int msi_needed, reg;
449
450 sc = device_get_softc(dev);
451 sc->dev = dev;
452 sc->msi_count = 0;
453 ai = cxgb_get_adapter_info(dev);
454
455 snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
456 device_get_unit(dev));
457 ADAPTER_LOCK_INIT(sc, sc->lockbuf);
458
459 snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
460 device_get_unit(dev));
461 snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
462 device_get_unit(dev));
463 snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
464 device_get_unit(dev));
465
466 MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
467 MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
468 MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
469
470 mtx_lock(&t3_list_lock);
471 SLIST_INSERT_HEAD(&t3_list, sc, link);
472 mtx_unlock(&t3_list_lock);
473
474     /* find the PCIe link width and set max read request to 4KB */
475     if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
476 uint16_t lnk;
477
478 lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2);
479 sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
480 if (sc->link_width < 8 &&
481 (ai->caps & SUPPORTED_10000baseT_Full)) {
482 device_printf(sc->dev,
483 "PCIe x%d Link, expect reduced performance\n",
484 sc->link_width);
485 }
486
487 pci_set_max_read_req(dev, 4096);
488 }
489
490 touch_bars(dev);
491 pci_enable_busmaster(dev);
492 /*
493 * Allocate the registers and make them available to the driver.
494 * The registers that we care about for NIC mode are in BAR 0
495 */
496 sc->regs_rid = PCIR_BAR(0);
497 if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
498 &sc->regs_rid, RF_ACTIVE)) == NULL) {
499 device_printf(dev, "Cannot allocate BAR region 0\n");
500 error = ENXIO;
501 goto out;
502 }
503
504 sc->bt = rman_get_bustag(sc->regs_res);
505 sc->bh = rman_get_bushandle(sc->regs_res);
506 sc->mmio_len = rman_get_size(sc->regs_res);
507
508 for (i = 0; i < MAX_NPORTS; i++)
509 sc->port[i].adapter = sc;
510
511 if (t3_prep_adapter(sc, ai, 1) < 0) {
512 printf("prep adapter failed\n");
513 error = ENODEV;
514 goto out;
515 }
516
517 sc->udbs_rid = PCIR_BAR(2);
518 sc->udbs_res = NULL;
519 if (is_offload(sc) &&
520 ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
521 &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
522 device_printf(dev, "Cannot allocate BAR region 1\n");
523 error = ENXIO;
524 goto out;
525 }
526
527 /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate
528 * enough messages for the queue sets. If that fails, try falling
529 * back to MSI. If that fails, then try falling back to the legacy
530 * interrupt pin model.
531 */
532 sc->msix_regs_rid = 0x20;
533 if ((msi_allowed >= 2) &&
534 (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
535 &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
536
537 if (multiq)
538 port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
539 msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
540
541 if (pci_msix_count(dev) == 0 ||
542 (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
543 sc->msi_count != msi_needed) {
544 device_printf(dev, "alloc msix failed - "
545 "msi_count=%d, msi_needed=%d, err=%d; "
546 "will try MSI\n", sc->msi_count,
547 msi_needed, error);
548 sc->msi_count = 0;
549 port_qsets = 1;
550 pci_release_msi(dev);
551 bus_release_resource(dev, SYS_RES_MEMORY,
552 sc->msix_regs_rid, sc->msix_regs_res);
553 sc->msix_regs_res = NULL;
554 } else {
555 sc->flags |= USING_MSIX;
556 sc->cxgb_intr = cxgb_async_intr;
557 device_printf(dev,
558 "using MSI-X interrupts (%u vectors)\n",
559 sc->msi_count);
560 }
561 }
562
563 if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
564 sc->msi_count = 1;
565 if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
566 device_printf(dev, "alloc msi failed - "
567 "err=%d; will try INTx\n", error);
568 sc->msi_count = 0;
569 port_qsets = 1;
570 pci_release_msi(dev);
571 } else {
572 sc->flags |= USING_MSI;
573 sc->cxgb_intr = t3_intr_msi;
574 device_printf(dev, "using MSI interrupts\n");
575 }
576 }
577 if (sc->msi_count == 0) {
578 device_printf(dev, "using line interrupts\n");
579 sc->cxgb_intr = t3b_intr;
580 }
581
582 /* Create a private taskqueue thread for handling driver events */
583 sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
584 taskqueue_thread_enqueue, &sc->tq);
585 if (sc->tq == NULL) {
586 device_printf(dev, "failed to allocate controller task queue\n");
587 goto out;
588 }
589
590 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
591 device_get_nameunit(dev));
592 TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
593
594
595 /* Create a periodic callout for checking adapter status */
596 callout_init(&sc->cxgb_tick_ch, 1);
597
598 if (t3_check_fw_version(sc) < 0 || force_fw_update) {
599 /*
600 * Warn user that a firmware update will be attempted in init.
601 */
602 device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
603 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
604 sc->flags &= ~FW_UPTODATE;
605 } else {
606 sc->flags |= FW_UPTODATE;
607 }
608
609 if (t3_check_tpsram_version(sc) < 0) {
610 /*
611 * Warn user that a firmware update will be attempted in init.
612 */
613 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
614 t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
615 sc->flags &= ~TPS_UPTODATE;
616 } else {
617 sc->flags |= TPS_UPTODATE;
618 }
619
620 /*
621 * Create a child device for each MAC. The ethernet attachment
622 * will be done in these children.
623 */
624 for (i = 0; i < (sc)->params.nports; i++) {
625 struct port_info *pi;
626
627 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
628 device_printf(dev, "failed to add child port\n");
629 error = EINVAL;
630 goto out;
631 }
632 pi = &sc->port[i];
633 pi->adapter = sc;
634 pi->nqsets = port_qsets;
635 pi->first_qset = i*port_qsets;
636 pi->port_id = i;
637 pi->tx_chan = i >= ai->nports0;
638 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
639 sc->rxpkt_map[pi->txpkt_intf] = i;
640 sc->port[i].tx_chan = i >= ai->nports0;
641 sc->portdev[i] = child;
642 device_set_softc(child, pi);
643 }
644 bus_attach_children(dev);
645
646 /* initialize sge private state */
647 t3_sge_init_adapter(sc);
648
649 t3_led_ready(sc);
650
651 error = t3_get_fw_version(sc, &vers);
652 if (error)
653 goto out;
654
655 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
656 G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
657 G_FW_VERSION_MICRO(vers));
658
659 device_set_descf(dev, "%s %sNIC\t E/C: %s S/N: %s",
660 ai->desc, is_offload(sc) ? "R" : "",
661 sc->params.vpd.ec, sc->params.vpd.sn);
662
663 snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
664 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
665 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
666
667 device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
668 callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
669 t3_add_attach_sysctls(sc);
670
671 #ifdef TCP_OFFLOAD
672 for (i = 0; i < NUM_CPL_HANDLERS; i++)
673 sc->cpl_handler[i] = cpl_not_handled;
674 #endif
675
676 t3_intr_clear(sc);
677 error = cxgb_setup_interrupts(sc);
678 out:
679 if (error)
680 cxgb_free(sc);
681
682 return (error);
683 }
684
685 /*
686  * The cxgb_controller_detach routine is called when the device is
687 * unloaded from the system.
688 */
689
690 static int
691 cxgb_controller_detach(device_t dev)
692 {
693 struct adapter *sc;
694
695 sc = device_get_softc(dev);
696
697 cxgb_free(sc);
698
699 return (0);
700 }
701
702 /*
703  * cxgb_free() is called by the cxgb_controller_detach() routine
704 * to tear down the structures that were built up in
705 * cxgb_controller_attach(), and should be the final piece of work
706 * done when fully unloading the driver.
707 *
708 *
709 * 1. Shutting down the threads started by the cxgb_controller_attach()
710 * routine.
711 * 2. Stopping the lower level device and all callouts (cxgb_down_locked()).
712 * 3. Detaching all of the port devices created during the
713 * cxgb_controller_attach() routine.
714 * 4. Removing the device children created via cxgb_controller_attach().
715 * 5. Releasing PCI resources associated with the device.
716 * 6. Turning off the offload support, iff it was turned on.
717 * 7. Destroying the mutexes created in cxgb_controller_attach().
718 *
719 */
720 static void
721 cxgb_free(struct adapter *sc)
722 {
723 int i, nqsets = 0;
724
725 ADAPTER_LOCK(sc);
726 sc->flags |= CXGB_SHUTDOWN;
727 ADAPTER_UNLOCK(sc);
728
729 /*
730 * Make sure all child devices are gone.
731 */
732 bus_generic_detach(sc->dev);
733 for (i = 0; i < (sc)->params.nports; i++) {
734 if (sc->portdev[i] &&
735 device_delete_child(sc->dev, sc->portdev[i]) != 0)
736 device_printf(sc->dev, "failed to delete child port\n");
737 nqsets += sc->port[i].nqsets;
738 }
739
740 /*
741 * At this point, it is as if cxgb_port_detach has run on all ports, and
742 * cxgb_down has run on the adapter. All interrupts have been silenced,
743 * all open devices have been closed.
744 */
745 KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
746 __func__, sc->open_device_map));
747 for (i = 0; i < sc->params.nports; i++) {
748 KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
749 __func__, i));
750 }
751
752 /*
753 * Finish off the adapter's callouts.
754 */
755 callout_drain(&sc->cxgb_tick_ch);
756 callout_drain(&sc->sge_timer_ch);
757
758 /*
759 * Release resources grabbed under FULL_INIT_DONE by cxgb_up. The
760 * sysctls are cleaned up by the kernel linker.
761 */
762 if (sc->flags & FULL_INIT_DONE) {
763 t3_free_sge_resources(sc, nqsets);
764 sc->flags &= ~FULL_INIT_DONE;
765 }
766
767 /*
768 * Release all interrupt resources.
769 */
770 cxgb_teardown_interrupts(sc);
771 if (sc->flags & (USING_MSI | USING_MSIX)) {
772 device_printf(sc->dev, "releasing msi message(s)\n");
773 pci_release_msi(sc->dev);
774 } else {
775 device_printf(sc->dev, "no msi message to release\n");
776 }
777
778 if (sc->msix_regs_res != NULL) {
779 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
780 sc->msix_regs_res);
781 }
782
783 /*
784 * Free the adapter's taskqueue.
785 */
786 if (sc->tq != NULL) {
787 taskqueue_free(sc->tq);
788 sc->tq = NULL;
789 }
790
791 free(sc->filters, M_DEVBUF);
792 t3_sge_free(sc);
793
794 if (sc->udbs_res != NULL)
795 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
796 sc->udbs_res);
797
798 if (sc->regs_res != NULL)
799 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
800 sc->regs_res);
801
802 MTX_DESTROY(&sc->mdio_lock);
803 MTX_DESTROY(&sc->sge.reg_lock);
804 MTX_DESTROY(&sc->elmer_lock);
805 mtx_lock(&t3_list_lock);
806 SLIST_REMOVE(&t3_list, sc, adapter, link);
807 mtx_unlock(&t3_list_lock);
808 ADAPTER_LOCK_DEINIT(sc);
809 }
810
811 /**
812 * setup_sge_qsets - configure SGE Tx/Rx/response queues
813 * @sc: the controller softc
814 *
815 * Determines how many sets of SGE queues to use and initializes them.
816 * We support multiple queue sets per port if we have MSI-X, otherwise
817 * just one queue set per port.
818 */
819 static int
820 setup_sge_qsets(adapter_t *sc)
821 {
822 int i, j, err, irq_idx = 0, qset_idx = 0;
823 u_int ntxq = SGE_TXQ_PER_SET;
824
825 if ((err = t3_sge_alloc(sc)) != 0) {
826 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
827 return (err);
828 }
829
830 if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
831 irq_idx = -1;
832
833 for (i = 0; i < (sc)->params.nports; i++) {
834 struct port_info *pi = &sc->port[i];
835
836 for (j = 0; j < pi->nqsets; j++, qset_idx++) {
837 err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
838 (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
839 &sc->params.sge.qset[qset_idx], ntxq, pi);
840 if (err) {
841 t3_free_sge_resources(sc, qset_idx);
842 device_printf(sc->dev,
843 "t3_sge_alloc_qset failed with %d\n", err);
844 return (err);
845 }
846 }
847 }
848
849 sc->nqsets = qset_idx;
850
851 return (0);
852 }
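/*
 * Worked example (hypothetical configuration): a dual-port adapter with
 * MSI-X and multiq enabled on a host with at least four CPUs gets
 * port_qsets = min(SGE_QSETS / 2, mp_ncpus) = 4 per port (with the usual
 * SGE_QSETS of 8), so the loop above creates 8 queue sets in total and the
 * attach code requests 2 * 4 + 1 = 9 MSI-X vectors: one per queue set plus
 * one for the slow/error interrupt.
 */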
853
854 static void
855 cxgb_teardown_interrupts(adapter_t *sc)
856 {
857 int i;
858
859 for (i = 0; i < SGE_QSETS; i++) {
860 if (sc->msix_intr_tag[i] == NULL) {
861
862 /* Should have been setup fully or not at all */
863 KASSERT(sc->msix_irq_res[i] == NULL &&
864 sc->msix_irq_rid[i] == 0,
865 ("%s: half-done interrupt (%d).", __func__, i));
866
867 continue;
868 }
869
870 bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
871 sc->msix_intr_tag[i]);
872 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
873 sc->msix_irq_res[i]);
874
875 sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
876 sc->msix_irq_rid[i] = 0;
877 }
878
879 if (sc->intr_tag) {
880 KASSERT(sc->irq_res != NULL,
881 ("%s: half-done interrupt.", __func__));
882
883 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
884 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
885 sc->irq_res);
886
887 sc->irq_res = sc->intr_tag = NULL;
888 sc->irq_rid = 0;
889 }
890 }
891
892 static int
893 cxgb_setup_interrupts(adapter_t *sc)
894 {
895 struct resource *res;
896 void *tag;
897 int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
898
899 sc->irq_rid = intr_flag ? 1 : 0;
900 sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
901 RF_SHAREABLE | RF_ACTIVE);
902 if (sc->irq_res == NULL) {
903 device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
904 intr_flag, sc->irq_rid);
905 err = EINVAL;
906 sc->irq_rid = 0;
907 } else {
908 err = bus_setup_intr(sc->dev, sc->irq_res,
909 INTR_MPSAFE | INTR_TYPE_NET, NULL,
910 sc->cxgb_intr, sc, &sc->intr_tag);
911
912 if (err) {
913 device_printf(sc->dev,
914 "Cannot set up interrupt (%x, %u, %d)\n",
915 intr_flag, sc->irq_rid, err);
916 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
917 sc->irq_res);
918 sc->irq_res = sc->intr_tag = NULL;
919 sc->irq_rid = 0;
920 }
921 }
922
923 /* That's all for INTx or MSI */
924 if (!(intr_flag & USING_MSIX) || err)
925 return (err);
926
927 bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
928 for (i = 0; i < sc->msi_count - 1; i++) {
929 rid = i + 2;
930 res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
931 RF_SHAREABLE | RF_ACTIVE);
932 if (res == NULL) {
933 device_printf(sc->dev, "Cannot allocate interrupt "
934 "for message %d\n", rid);
935 err = EINVAL;
936 break;
937 }
938
939 err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
940 NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
941 if (err) {
942 device_printf(sc->dev, "Cannot set up interrupt "
943 "for message %d (%d)\n", rid, err);
944 bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
945 break;
946 }
947
948 sc->msix_irq_rid[i] = rid;
949 sc->msix_irq_res[i] = res;
950 sc->msix_intr_tag[i] = tag;
951 bus_describe_intr(sc->dev, res, tag, "qs%d", i);
952 }
953
954 if (err)
955 cxgb_teardown_interrupts(sc);
956
957 return (err);
958 }
959
960
961 static int
962 cxgb_port_probe(device_t dev)
963 {
964 struct port_info *p;
965 const char *desc;
966
967 p = device_get_softc(dev);
968 desc = p->phy.desc;
969 device_set_descf(dev, "Port %d %s", p->port_id, desc);
970 return (0);
971 }
972
973
974 static int
975 cxgb_makedev(struct port_info *pi)
976 {
977
978 pi->port_cdev = make_dev(&cxgb_cdevsw, if_getdunit(pi->ifp),
979 UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
980
981 if (pi->port_cdev == NULL)
982 return (ENOMEM);
983
984 pi->port_cdev->si_drv1 = (void *)pi;
985
986 return (0);
987 }
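/*
 * The node created above appears as /dev/<ifname>, e.g. /dev/cxgb0 for the
 * first port.  Management utilities such as Chelsio's cxgbtool are expected
 * to open it in order to issue the private CHELSIO_* ioctls handled by
 * cxgb_extension_ioctl().
 */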
988
989 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
990 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
991 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
992 #define CXGB_CAP_ENABLE CXGB_CAP
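/*
 * Illustrative runtime toggles: each capability above can be flipped with
 * ifconfig(8), which ends up in the SIOCSIFCAP case of cxgb_ioctl() below,
 * e.g.:
 *
 *	ifconfig cxgb0 -txcsum	# also drops TSO4, as enforced in the ioctl
 *	ifconfig cxgb0 lro	# re-enables LRO on all of the port's qsets
 */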
993
994 static int
995 cxgb_port_attach(device_t dev)
996 {
997 struct port_info *p;
998 if_t ifp;
999 int err;
1000 struct adapter *sc;
1001
1002 p = device_get_softc(dev);
1003 sc = p->adapter;
1004 snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
1005 device_get_unit(device_get_parent(dev)), p->port_id);
1006 PORT_LOCK_INIT(p, p->lockbuf);
1007
1008 callout_init(&p->link_check_ch, 1);
1009 TASK_INIT(&p->link_check_task, 0, check_link_status, p);
1010
1011 /* Allocate an ifnet object and set it up */
1012 ifp = p->ifp = if_alloc(IFT_ETHER);
1013 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1014 if_setinitfn(ifp, cxgb_init);
1015 if_setsoftc(ifp, p);
1016 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1017 if_setioctlfn(ifp, cxgb_ioctl);
1018 if_settransmitfn(ifp, cxgb_transmit);
1019 if_setqflushfn(ifp, cxgb_qflush);
1020 if_setgetcounterfn(ifp, cxgb_get_counter);
1021
1022 if_setcapabilities(ifp, CXGB_CAP);
1023 #ifdef TCP_OFFLOAD
1024 if (is_offload(sc))
1025 if_setcapabilitiesbit(ifp, IFCAP_TOE4, 0);
1026 #endif
1027 if_setcapenable(ifp, CXGB_CAP_ENABLE);
1028 if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1029 CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1030 if_sethwtsomax(ifp, IP_MAXPACKET);
1031 if_sethwtsomaxsegcount(ifp, 36);
1032 if_sethwtsomaxsegsize(ifp, 65536);
1033
1034 /*
1035 * Disable TSO on 4-port - it isn't supported by the firmware.
1036 */
1037 if (sc->params.nports > 2) {
1038 if_setcapabilitiesbit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO);
1039 if_setcapenablebit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO);
1040 if_sethwassistbits(ifp, 0, CSUM_TSO);
1041 }
1042
1043 /* Create a list of media supported by this port */
1044 ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
1045 cxgb_media_status);
1046 cxgb_build_medialist(p);
1047
1048 ether_ifattach(ifp, p->hw_addr);
1049
1050 /* Attach driver debugnet methods. */
1051 DEBUGNET_SET(ifp, cxgb);
1052
1053 #ifdef DEFAULT_JUMBO
1054 if (sc->params.nports <= 2)
1055 if_setmtu(ifp, ETHERMTU_JUMBO);
1056 #endif
1057 if ((err = cxgb_makedev(p)) != 0) {
1058 printf("makedev failed %d\n", err);
1059 return (err);
1060 }
1061
1062 t3_sge_init_port(p);
1063
1064 return (err);
1065 }
1066
1067 /*
1068 * cxgb_port_detach() is called via the device_detach methods when
1069 * cxgb_free() calls the bus_generic_detach. It is responsible for
1070 * removing the device from the view of the kernel, i.e. from all
1071 * interfaces lists etc. This routine is only called when the driver is
1072 * being unloaded, not when the link goes down.
1073 */
1074 static int
1075 cxgb_port_detach(device_t dev)
1076 {
1077 struct port_info *p;
1078 struct adapter *sc;
1079 int i;
1080
1081 p = device_get_softc(dev);
1082 sc = p->adapter;
1083
1084 /* Tell cxgb_ioctl and if_init that the port is going away */
1085 ADAPTER_LOCK(sc);
1086 SET_DOOMED(p);
1087 wakeup(&sc->flags);
1088 while (IS_BUSY(sc))
1089 mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
1090 SET_BUSY(sc);
1091 ADAPTER_UNLOCK(sc);
1092
1093 if (p->port_cdev != NULL)
1094 destroy_dev(p->port_cdev);
1095
1096 cxgb_uninit_synchronized(p);
1097 ether_ifdetach(p->ifp);
1098
1099 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1100 struct sge_qset *qs = &sc->sge.qs[i];
1101 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1102
1103 callout_drain(&txq->txq_watchdog);
1104 callout_drain(&txq->txq_timer);
1105 }
1106
1107 PORT_LOCK_DEINIT(p);
1108 if_free(p->ifp);
1109 p->ifp = NULL;
1110
1111 ADAPTER_LOCK(sc);
1112 CLR_BUSY(sc);
1113 wakeup_one(&sc->flags);
1114 ADAPTER_UNLOCK(sc);
1115 return (0);
1116 }
1117
1118 void
1119 t3_fatal_err(struct adapter *sc)
1120 {
1121 u_int fw_status[4];
1122
1123 if (sc->flags & FULL_INIT_DONE) {
1124 t3_sge_stop(sc);
1125 t3_write_reg(sc, A_XGM_TX_CTRL, 0);
1126 t3_write_reg(sc, A_XGM_RX_CTRL, 0);
1127 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
1128 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
1129 t3_intr_disable(sc);
1130 }
1131 device_printf(sc->dev,"encountered fatal error, operation suspended\n");
1132 if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
1133 device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1134 fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
1135 }
1136
1137 int
1138 t3_os_find_pci_capability(adapter_t *sc, int cap)
1139 {
1140 device_t dev;
1141 struct pci_devinfo *dinfo;
1142 pcicfgregs *cfg;
1143 uint32_t status;
1144 uint8_t ptr;
1145
1146 dev = sc->dev;
1147 dinfo = device_get_ivars(dev);
1148 cfg = &dinfo->cfg;
1149
1150 status = pci_read_config(dev, PCIR_STATUS, 2);
1151 if (!(status & PCIM_STATUS_CAPPRESENT))
1152 return (0);
1153
1154 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1155 case 0:
1156 case 1:
1157 ptr = PCIR_CAP_PTR;
1158 break;
1159 case 2:
1160 ptr = PCIR_CAP_PTR_2;
1161 break;
1162 default:
1163 return (0);
1164 break;
1165 }
1166 ptr = pci_read_config(dev, ptr, 1);
1167
1168 while (ptr != 0) {
1169 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1170 return (ptr);
1171 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1172 }
1173
1174 return (0);
1175 }
1176
1177 int
1178 t3_os_pci_save_state(struct adapter *sc)
1179 {
1180 device_t dev;
1181 struct pci_devinfo *dinfo;
1182
1183 dev = sc->dev;
1184 dinfo = device_get_ivars(dev);
1185
1186 pci_cfg_save(dev, dinfo, 0);
1187 return (0);
1188 }
1189
1190 int
1191 t3_os_pci_restore_state(struct adapter *sc)
1192 {
1193 device_t dev;
1194 struct pci_devinfo *dinfo;
1195
1196 dev = sc->dev;
1197 dinfo = device_get_ivars(dev);
1198
1199 pci_cfg_restore(dev, dinfo);
1200 return (0);
1201 }
1202
1203 /**
1204 * t3_os_link_changed - handle link status changes
1205 * @sc: the adapter associated with the link change
1206 * @port_id: the port index whose link status has changed
1207 * @link_status: the new status of the link
1208 * @speed: the new speed setting
1209 * @duplex: the new duplex setting
1210 * @fc: the new flow-control setting
1211 *
1212 * This is the OS-dependent handler for link status changes. The OS
1213 * neutral handler takes care of most of the processing for these events,
1214 * then calls this handler for any OS-specific processing.
1215 */
1216 void
1217 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1218 int duplex, int fc, int mac_was_reset)
1219 {
1220 struct port_info *pi = &adapter->port[port_id];
1221 if_t ifp = pi->ifp;
1222
1223 /* no race with detach, so ifp should always be good */
1224 KASSERT(ifp, ("%s: if detached.", __func__));
1225
1226 /* Reapply mac settings if they were lost due to a reset */
1227 if (mac_was_reset) {
1228 PORT_LOCK(pi);
1229 cxgb_update_mac_settings(pi);
1230 PORT_UNLOCK(pi);
1231 }
1232
1233 if (link_status) {
1234 if_setbaudrate(ifp, IF_Mbps(speed));
1235 if_link_state_change(ifp, LINK_STATE_UP);
1236 } else
1237 if_link_state_change(ifp, LINK_STATE_DOWN);
1238 }
1239
1240 /**
1241 * t3_os_phymod_changed - handle PHY module changes
1242 * @phy: the PHY reporting the module change
1243 * @mod_type: new module type
1244 *
1245 * This is the OS-dependent handler for PHY module changes. It is
1246 * invoked when a PHY module is removed or inserted for any OS-specific
1247 * processing.
1248 */
1249 void t3_os_phymod_changed(struct adapter *adap, int port_id)
1250 {
1251 static const char *mod_str[] = {
1252 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
1253 };
1254 struct port_info *pi = &adap->port[port_id];
1255 int mod = pi->phy.modtype;
1256
1257 if (mod != pi->media.ifm_cur->ifm_data)
1258 cxgb_build_medialist(pi);
1259
1260 if (mod == phy_modtype_none)
1261 if_printf(pi->ifp, "PHY module unplugged\n");
1262 else {
1263 KASSERT(mod < ARRAY_SIZE(mod_str),
1264 ("invalid PHY module type %d", mod));
1265 if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
1266 }
1267 }
1268
1269 void
1270 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1271 {
1272
1273 /*
1274  * The ifnet might not be allocated yet when this gets called, as this
1275  * is called early on in attach by t3_prep_adapter, so just save the
1276  * address off in the port structure.
1277 */
1278 if (cxgb_debug)
1279 printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1280 bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1281 }
1282
1283 /*
1284 * Programs the XGMAC based on the settings in the ifnet. These settings
1285 * include MTU, MAC address, mcast addresses, etc.
1286 */
1287 static void
1288 cxgb_update_mac_settings(struct port_info *p)
1289 {
1290 if_t ifp = p->ifp;
1291 struct t3_rx_mode rm;
1292 struct cmac *mac = &p->mac;
1293 int mtu, hwtagging;
1294
1295 PORT_LOCK_ASSERT_OWNED(p);
1296
1297 bcopy(if_getlladdr(ifp), p->hw_addr, ETHER_ADDR_LEN);
1298
1299 mtu = if_getmtu(ifp);
1300 if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
1301 mtu += ETHER_VLAN_ENCAP_LEN;
1302
1303 hwtagging = (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0;
1304
1305 t3_mac_set_mtu(mac, mtu);
1306 t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
1307 t3_mac_set_address(mac, 0, p->hw_addr);
1308 t3_init_rx_mode(&rm, p);
1309 t3_mac_set_rx_mode(mac, &rm);
1310 }
1311
1312
1313 static int
1314 await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1315 unsigned long n)
1316 {
1317 int attempts = 5;
1318
1319 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1320 if (!--attempts)
1321 return (ETIMEDOUT);
1322 t3_os_sleep(10);
1323 }
1324 return 0;
1325 }
1326
1327 static int
1328 init_tp_parity(struct adapter *adap)
1329 {
1330 int i;
1331 struct mbuf *m;
1332 struct cpl_set_tcb_field *greq;
1333 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
1334
1335 t3_tp_set_offload_mode(adap, 1);
1336
1337 for (i = 0; i < 16; i++) {
1338 struct cpl_smt_write_req *req;
1339
1340 m = m_gethdr(M_WAITOK, MT_DATA);
1341 req = mtod(m, struct cpl_smt_write_req *);
1342 m->m_len = m->m_pkthdr.len = sizeof(*req);
1343 memset(req, 0, sizeof(*req));
1344 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1345 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
1346 req->iff = i;
1347 t3_mgmt_tx(adap, m);
1348 }
1349
1350 for (i = 0; i < 2048; i++) {
1351 struct cpl_l2t_write_req *req;
1352
1353 m = m_gethdr(M_WAITOK, MT_DATA);
1354 req = mtod(m, struct cpl_l2t_write_req *);
1355 m->m_len = m->m_pkthdr.len = sizeof(*req);
1356 memset(req, 0, sizeof(*req));
1357 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1358 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
1359 req->params = htonl(V_L2T_W_IDX(i));
1360 t3_mgmt_tx(adap, m);
1361 }
1362
1363 for (i = 0; i < 2048; i++) {
1364 struct cpl_rte_write_req *req;
1365
1366 m = m_gethdr(M_WAITOK, MT_DATA);
1367 req = mtod(m, struct cpl_rte_write_req *);
1368 m->m_len = m->m_pkthdr.len = sizeof(*req);
1369 memset(req, 0, sizeof(*req));
1370 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1371 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
1372 req->l2t_idx = htonl(V_L2T_W_IDX(i));
1373 t3_mgmt_tx(adap, m);
1374 }
1375
1376 m = m_gethdr(M_WAITOK, MT_DATA);
1377 greq = mtod(m, struct cpl_set_tcb_field *);
1378 m->m_len = m->m_pkthdr.len = sizeof(*greq);
1379 memset(greq, 0, sizeof(*greq));
1380 greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1381 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
1382 greq->mask = htobe64(1);
1383 t3_mgmt_tx(adap, m);
1384
1385 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
1386 t3_tp_set_offload_mode(adap, 0);
1387 return (i);
1388 }
1389
1390 /**
1391 * setup_rss - configure Receive Side Steering (per-queue connection demux)
1392 * @adap: the adapter
1393 *
1394 * Sets up RSS to distribute packets to multiple receive queues. We
1395 * configure the RSS CPU lookup table to distribute to the number of HW
1396 * receive queues, and the response queue lookup table to narrow that
1397 * down to the response queues actually configured for each port.
1398 * We always configure the RSS mapping for two ports since the mapping
1399 * table has plenty of entries.
1400 */
1401 static void
1402 setup_rss(adapter_t *adap)
1403 {
1404 int i;
1405 u_int nq[2];
1406 uint8_t cpus[SGE_QSETS + 1];
1407 uint16_t rspq_map[RSS_TABLE_SIZE];
1408
1409 for (i = 0; i < SGE_QSETS; ++i)
1410 cpus[i] = i;
1411 cpus[SGE_QSETS] = 0xff;
1412
1413 nq[0] = nq[1] = 0;
1414 for_each_port(adap, i) {
1415 const struct port_info *pi = adap2pinfo(adap, i);
1416
1417 nq[pi->tx_chan] += pi->nqsets;
1418 }
1419 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1420 rspq_map[i] = nq[0] ? i % nq[0] : 0;
1421 rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
1422 }
1423
1424 /* Calculate the reverse RSS map table */
1425 for (i = 0; i < SGE_QSETS; ++i)
1426 adap->rrss_map[i] = 0xff;
1427 for (i = 0; i < RSS_TABLE_SIZE; ++i)
1428 if (adap->rrss_map[rspq_map[i]] == 0xff)
1429 adap->rrss_map[rspq_map[i]] = i;
1430
1431 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1432 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1433 F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
1434 cpus, rspq_map);
1435
1436 }
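/*
 * Worked example (hypothetical 2-port configuration, both ports on TX
 * channel 0 with 4 queue sets each): nq[0] = 8 and nq[1] = 0, so the first
 * half of rspq_map cycles through 0, 1, ..., 7 and the second half falls
 * back to response queue 0.  rrss_map then records, for each queue set,
 * the first RSS table index that points at it.
 */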
1437 static void
1438 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1439 int hi, int port)
1440 {
1441 struct mbuf *m;
1442 struct mngt_pktsched_wr *req;
1443
1444 m = m_gethdr(M_NOWAIT, MT_DATA);
1445 if (m) {
1446 req = mtod(m, struct mngt_pktsched_wr *);
1447 req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1448 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1449 req->sched = sched;
1450 req->idx = qidx;
1451 req->min = lo;
1452 req->max = hi;
1453 req->binding = port;
1454 m->m_len = m->m_pkthdr.len = sizeof(*req);
1455 t3_mgmt_tx(adap, m);
1456 }
1457 }
1458
1459 static void
1460 bind_qsets(adapter_t *sc)
1461 {
1462 int i, j;
1463
1464 for (i = 0; i < (sc)->params.nports; ++i) {
1465 const struct port_info *pi = adap2pinfo(sc, i);
1466
1467 for (j = 0; j < pi->nqsets; ++j) {
1468 send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1469 -1, pi->tx_chan);
1470
1471 }
1472 }
1473 }
1474
1475 static void
1476 update_tpeeprom(struct adapter *adap)
1477 {
1478 const struct firmware *tpeeprom;
1479
1480 uint32_t version;
1481 unsigned int major, minor;
1482 int ret, len;
1483 char rev, name[32];
1484
1485 t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1486
1487 major = G_TP_VERSION_MAJOR(version);
1488 minor = G_TP_VERSION_MINOR(version);
1489 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
1490 return;
1491
1492 rev = t3rev2char(adap);
1493 snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
1494
1495 tpeeprom = firmware_get(name);
1496 if (tpeeprom == NULL) {
1497 device_printf(adap->dev,
1498 "could not load TP EEPROM: unable to load %s\n",
1499 name);
1500 return;
1501 }
1502
1503 len = tpeeprom->datasize - 4;
1504
1505 ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1506 if (ret)
1507 goto release_tpeeprom;
1508
1509 if (len != TP_SRAM_LEN) {
1510 device_printf(adap->dev,
1511 "%s length is wrong len=%d expected=%d\n", name,
1512 len, TP_SRAM_LEN);
1513 return;
1514 }
1515
1516 ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1517 TP_SRAM_OFFSET);
1518
1519 if (!ret) {
1520 device_printf(adap->dev,
1521 "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1522 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1523 } else
1524 device_printf(adap->dev,
1525 "Protocol SRAM image update in EEPROM failed\n");
1526
1527 release_tpeeprom:
1528 firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1529
1530 return;
1531 }
1532
1533 static int
1534 update_tpsram(struct adapter *adap)
1535 {
1536 const struct firmware *tpsram;
1537 int ret;
1538 char rev, name[32];
1539
1540 rev = t3rev2char(adap);
1541 snprintf(name, sizeof(name), TPSRAM_NAME, rev);
1542
1543 update_tpeeprom(adap);
1544
1545 tpsram = firmware_get(name);
1546 if (tpsram == NULL){
1547 device_printf(adap->dev, "could not load TP SRAM\n");
1548 return (EINVAL);
1549 } else
1550 device_printf(adap->dev, "updating TP SRAM\n");
1551
1552 ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1553 if (ret)
1554 goto release_tpsram;
1555
1556 ret = t3_set_proto_sram(adap, tpsram->data);
1557 if (ret)
1558 device_printf(adap->dev, "loading protocol SRAM failed\n");
1559
1560 release_tpsram:
1561 firmware_put(tpsram, FIRMWARE_UNLOAD);
1562
1563 return ret;
1564 }
1565
1566 /**
1567 * cxgb_up - enable the adapter
1568 * @adap: adapter being enabled
1569 *
1570 * Called when the first port is enabled, this function performs the
1571 * actions necessary to make an adapter operational, such as completing
1572 * the initialization of HW modules, and enabling interrupts.
1573 */
1574 static int
1575 cxgb_up(struct adapter *sc)
1576 {
1577 int err = 0;
1578 unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;
1579
1580 KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
1581 __func__, sc->open_device_map));
1582
1583 if ((sc->flags & FULL_INIT_DONE) == 0) {
1584
1585 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1586
1587 if ((sc->flags & FW_UPTODATE) == 0)
1588 if ((err = upgrade_fw(sc)))
1589 goto out;
1590
1591 if ((sc->flags & TPS_UPTODATE) == 0)
1592 if ((err = update_tpsram(sc)))
1593 goto out;
1594
1595 if (is_offload(sc) && nfilters != 0) {
1596 sc->params.mc5.nservers = 0;
1597
1598 if (nfilters < 0)
1599 sc->params.mc5.nfilters = mxf;
1600 else
1601 sc->params.mc5.nfilters = min(nfilters, mxf);
1602 }
1603
1604 err = t3_init_hw(sc, 0);
1605 if (err)
1606 goto out;
1607
1608 t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1609 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1610
1611 err = setup_sge_qsets(sc);
1612 if (err)
1613 goto out;
1614
1615 alloc_filters(sc);
1616 setup_rss(sc);
1617
1618 t3_add_configured_sysctls(sc);
1619 sc->flags |= FULL_INIT_DONE;
1620 }
1621
1622 t3_intr_clear(sc);
1623 t3_sge_start(sc);
1624 t3_intr_enable(sc);
1625
1626 if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
1627 is_offload(sc) && init_tp_parity(sc) == 0)
1628 sc->flags |= TP_PARITY_INIT;
1629
1630 if (sc->flags & TP_PARITY_INIT) {
1631 t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
1632 t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
1633 }
1634
1635 if (!(sc->flags & QUEUES_BOUND)) {
1636 bind_qsets(sc);
1637 setup_hw_filters(sc);
1638 sc->flags |= QUEUES_BOUND;
1639 }
1640
1641 t3_sge_reset_adapter(sc);
1642 out:
1643 return (err);
1644 }
1645
1646 /*
1647 * Called when the last open device is closed. Does NOT undo all of cxgb_up's
1648 * work. Specifically, the resources grabbed under FULL_INIT_DONE are released
1649 * during controller_detach, not here.
1650 */
1651 static void
1652 cxgb_down(struct adapter *sc)
1653 {
1654 t3_sge_stop(sc);
1655 t3_intr_disable(sc);
1656 }
1657
1658 /*
1659 * if_init for cxgb ports.
1660 */
1661 static void
1662 cxgb_init(void *arg)
1663 {
1664 struct port_info *p = arg;
1665 struct adapter *sc = p->adapter;
1666
1667 ADAPTER_LOCK(sc);
1668 cxgb_init_locked(p); /* releases adapter lock */
1669 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1670 }
1671
1672 static int
1673 cxgb_init_locked(struct port_info *p)
1674 {
1675 struct adapter *sc = p->adapter;
1676 if_t ifp = p->ifp;
1677 struct cmac *mac = &p->mac;
1678 int i, rc = 0, may_sleep = 0, gave_up_lock = 0;
1679
1680 ADAPTER_LOCK_ASSERT_OWNED(sc);
1681
1682 while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1683 gave_up_lock = 1;
1684 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
1685 rc = EINTR;
1686 goto done;
1687 }
1688 }
1689 if (IS_DOOMED(p)) {
1690 rc = ENXIO;
1691 goto done;
1692 }
1693 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1694
1695 /*
1696 * The code that runs during one-time adapter initialization can sleep
1697 * so it's important not to hold any locks across it.
1698 */
1699 may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;
1700
1701 if (may_sleep) {
1702 SET_BUSY(sc);
1703 gave_up_lock = 1;
1704 ADAPTER_UNLOCK(sc);
1705 }
1706
1707 if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0))
1708 goto done;
1709
1710 PORT_LOCK(p);
1711 if (isset(&sc->open_device_map, p->port_id) &&
1712 (if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
1713 PORT_UNLOCK(p);
1714 goto done;
1715 }
1716 t3_port_intr_enable(sc, p->port_id);
1717 if (!mac->multiport)
1718 t3_mac_init(mac);
1719 cxgb_update_mac_settings(p);
1720 t3_link_start(&p->phy, mac, &p->link_config);
1721 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1722 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
1723 PORT_UNLOCK(p);
1724
1725 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1726 struct sge_qset *qs = &sc->sge.qs[i];
1727 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1728
1729 callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
1730 txq->txq_watchdog.c_cpu);
1731 }
1732
1733 /* all ok */
1734 setbit(&sc->open_device_map, p->port_id);
1735 callout_reset(&p->link_check_ch,
1736 p->phy.caps & SUPPORTED_LINK_IRQ ? hz * 3 : hz / 4,
1737 link_check_callout, p);
1738
1739 done:
1740 if (may_sleep) {
1741 ADAPTER_LOCK(sc);
1742 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1743 CLR_BUSY(sc);
1744 }
1745 if (gave_up_lock)
1746 wakeup_one(&sc->flags);
1747 ADAPTER_UNLOCK(sc);
1748 return (rc);
1749 }
1750
1751 static int
1752 cxgb_uninit_locked(struct port_info *p)
1753 {
1754 struct adapter *sc = p->adapter;
1755 int rc;
1756
1757 ADAPTER_LOCK_ASSERT_OWNED(sc);
1758
1759 while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1760 if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
1761 rc = EINTR;
1762 goto done;
1763 }
1764 }
1765 if (IS_DOOMED(p)) {
1766 rc = ENXIO;
1767 goto done;
1768 }
1769 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1770 SET_BUSY(sc);
1771 ADAPTER_UNLOCK(sc);
1772
1773 rc = cxgb_uninit_synchronized(p);
1774
1775 ADAPTER_LOCK(sc);
1776 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1777 CLR_BUSY(sc);
1778 wakeup_one(&sc->flags);
1779 done:
1780 ADAPTER_UNLOCK(sc);
1781 return (rc);
1782 }
1783
1784 /*
1785 * Called on "ifconfig down", and from port_detach
1786 */
1787 static int
1788 cxgb_uninit_synchronized(struct port_info *pi)
1789 {
1790 struct adapter *sc = pi->adapter;
1791 if_t ifp = pi->ifp;
1792
1793 /*
1794 * taskqueue_drain may cause a deadlock if the adapter lock is held.
1795 */
1796 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1797
1798 /*
1799 * Clear this port's bit from the open device map, and then drain all
1800 * the tasks that can access/manipulate this port's port_info or ifp.
1801  * We disable this port's interrupts here, so the slow/ext
1802 * interrupt tasks won't be enqueued. The tick task will continue to
1803 * be enqueued every second but the runs after this drain will not see
1804 * this port in the open device map.
1805 *
1806  * A well-behaved task must take open_device_map into account and ignore
1807 * ports that are not open.
1808 */
1809 clrbit(&sc->open_device_map, pi->port_id);
1810 t3_port_intr_disable(sc, pi->port_id);
1811 taskqueue_drain(sc->tq, &sc->slow_intr_task);
1812 taskqueue_drain(sc->tq, &sc->tick_task);
1813
1814 callout_drain(&pi->link_check_ch);
1815 taskqueue_drain(sc->tq, &pi->link_check_task);
1816
1817 PORT_LOCK(pi);
1818 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1819
1820 /* disable pause frames */
1821 t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
1822
1823 /* Reset RX FIFO HWM */
1824 t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset,
1825 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
1826
1827 DELAY(100 * 1000);
1828
1829 /* Wait for TXFIFO empty */
1830 t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
1831 F_TXFIFO_EMPTY, 1, 20, 5);
1832
1833 DELAY(100 * 1000);
1834 t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
1835
1836 pi->phy.ops->power_down(&pi->phy, 1);
1837
1838 PORT_UNLOCK(pi);
1839
1840 pi->link_config.link_ok = 0;
1841 t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
1842
1843 if (sc->open_device_map == 0)
1844 cxgb_down(pi->adapter);
1845
1846 return (0);
1847 }
1848
1849 /*
1850 * Mark lro enabled or disabled in all qsets for this port
1851 */
1852 static int
1853 cxgb_set_lro(struct port_info *p, int enabled)
1854 {
1855 int i;
1856 struct adapter *adp = p->adapter;
1857 struct sge_qset *q;
1858
1859 for (i = 0; i < p->nqsets; i++) {
1860 q = &adp->sge.qs[p->first_qset + i];
1861 q->lro.enabled = (enabled != 0);
1862 }
1863 return (0);
1864 }
1865
1866 static int
1867 cxgb_ioctl(if_t ifp, unsigned long command, caddr_t data)
1868 {
1869 struct port_info *p = if_getsoftc(ifp);
1870 struct adapter *sc = p->adapter;
1871 struct ifreq *ifr = (struct ifreq *)data;
1872 int flags, error = 0, mtu;
1873 uint32_t mask;
1874
1875 switch (command) {
1876 case SIOCSIFMTU:
1877 ADAPTER_LOCK(sc);
1878 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1879 if (error) {
1880 fail:
1881 ADAPTER_UNLOCK(sc);
1882 return (error);
1883 }
1884
1885 mtu = ifr->ifr_mtu;
1886 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
1887 error = EINVAL;
1888 } else {
1889 if_setmtu(ifp, mtu);
1890 PORT_LOCK(p);
1891 cxgb_update_mac_settings(p);
1892 PORT_UNLOCK(p);
1893 }
1894 ADAPTER_UNLOCK(sc);
1895 break;
1896 case SIOCSIFFLAGS:
1897 ADAPTER_LOCK(sc);
1898 if (IS_DOOMED(p)) {
1899 error = ENXIO;
1900 goto fail;
1901 }
1902 if (if_getflags(ifp) & IFF_UP) {
1903 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1904 flags = p->if_flags;
1905 if (((if_getflags(ifp) ^ flags) & IFF_PROMISC) ||
1906 ((if_getflags(ifp) ^ flags) & IFF_ALLMULTI)) {
1907 if (IS_BUSY(sc)) {
1908 error = EBUSY;
1909 goto fail;
1910 }
1911 PORT_LOCK(p);
1912 cxgb_update_mac_settings(p);
1913 PORT_UNLOCK(p);
1914 }
1915 ADAPTER_UNLOCK(sc);
1916 } else
1917 error = cxgb_init_locked(p);
1918 p->if_flags = if_getflags(ifp);
1919 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1920 error = cxgb_uninit_locked(p);
1921 else
1922 ADAPTER_UNLOCK(sc);
1923
1924 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1925 break;
1926 case SIOCADDMULTI:
1927 case SIOCDELMULTI:
1928 ADAPTER_LOCK(sc);
1929 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1930 if (error)
1931 goto fail;
1932
1933 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1934 PORT_LOCK(p);
1935 cxgb_update_mac_settings(p);
1936 PORT_UNLOCK(p);
1937 }
1938 ADAPTER_UNLOCK(sc);
1939
1940 break;
1941 case SIOCSIFCAP:
1942 ADAPTER_LOCK(sc);
1943 error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1944 if (error)
1945 goto fail;
1946
1947 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1948 if (mask & IFCAP_TXCSUM) {
1949 if_togglecapenable(ifp, IFCAP_TXCSUM);
1950 if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP);
1951
1952 if (IFCAP_TSO4 & if_getcapenable(ifp) &&
1953 !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
1954 mask &= ~IFCAP_TSO4;
1955 if_setcapenablebit(ifp, 0, IFCAP_TSO4);
1956 if_printf(ifp,
1957 "tso4 disabled due to -txcsum.\n");
1958 }
1959 }
1960 if (mask & IFCAP_TXCSUM_IPV6) {
1961 if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
1962 if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1963
1964 if (IFCAP_TSO6 & if_getcapenable(ifp) &&
1965 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
1966 mask &= ~IFCAP_TSO6;
1967 if_setcapenablebit(ifp, 0, IFCAP_TSO6);
1968 if_printf(ifp,
1969 "tso6 disabled due to -txcsum6.\n");
1970 }
1971 }
1972 if (mask & IFCAP_RXCSUM)
1973 if_togglecapenable(ifp, IFCAP_RXCSUM);
1974 if (mask & IFCAP_RXCSUM_IPV6)
1975 if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
1976
1977 /*
1978 * Note that we leave CSUM_TSO alone (it is always set). The
1979 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1980 * sending a TSO request our way, so it's sufficient to toggle
1981 * IFCAP_TSOx only.
1982 */
1983 if (mask & IFCAP_TSO4) {
1984 if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
1985 !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
1986 if_printf(ifp, "enable txcsum first.\n");
1987 error = EAGAIN;
1988 goto fail;
1989 }
1990 if_togglecapenable(ifp, IFCAP_TSO4);
1991 }
1992 if (mask & IFCAP_TSO6) {
1993 if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
1994 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
1995 if_printf(ifp, "enable txcsum6 first.\n");
1996 error = EAGAIN;
1997 goto fail;
1998 }
1999 if_togglecapenable(ifp, IFCAP_TSO6);
2000 }
2001 if (mask & IFCAP_LRO) {
2002 if_togglecapenable(ifp, IFCAP_LRO);
2003
2004 /* Safe to do this even if cxgb_up not called yet */
2005 cxgb_set_lro(p, if_getcapenable(ifp) & IFCAP_LRO);
2006 }
2007 #ifdef TCP_OFFLOAD
2008 if (mask & IFCAP_TOE4) {
2009 int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE4;
2010
2011 error = toe_capability(p, enable);
2012 if (error == 0)
2013 if_togglecapenable(ifp, mask);
2014 }
2015 #endif
2016 if (mask & IFCAP_VLAN_HWTAGGING) {
2017 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2018 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2019 PORT_LOCK(p);
2020 cxgb_update_mac_settings(p);
2021 PORT_UNLOCK(p);
2022 }
2023 }
2024 if (mask & IFCAP_VLAN_MTU) {
2025 if_togglecapenable(ifp, IFCAP_VLAN_MTU);
2026 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2027 PORT_LOCK(p);
2028 cxgb_update_mac_settings(p);
2029 PORT_UNLOCK(p);
2030 }
2031 }
2032 if (mask & IFCAP_VLAN_HWTSO)
2033 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2034 if (mask & IFCAP_VLAN_HWCSUM)
2035 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
2036
2037 #ifdef VLAN_CAPABILITIES
2038 VLAN_CAPABILITIES(ifp);
2039 #endif
2040 ADAPTER_UNLOCK(sc);
2041 break;
2042 case SIOCSIFMEDIA:
2043 case SIOCGIFMEDIA:
2044 error = ifmedia_ioctl(ifp, ifr, &p->media, command);
2045 break;
2046 default:
2047 error = ether_ioctl(ifp, command, data);
2048 }
2049
2050 return (error);
2051 }
2052
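/*
 * Manual media changes are not supported; SIOCSIFMEDIA requests are rejected.
 */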
2053 static int
cxgb_media_change(if_t ifp)
2055 {
2056 return (EOPNOTSUPP);
2057 }
2058
2059 /*
2060 * Translates phy->modtype to the correct Ethernet media subtype.
2061 */
2062 static int
cxgb_ifm_type(int mod)
2064 {
2065 switch (mod) {
2066 case phy_modtype_sr:
2067 return (IFM_10G_SR);
2068 case phy_modtype_lr:
2069 return (IFM_10G_LR);
2070 case phy_modtype_lrm:
2071 return (IFM_10G_LRM);
2072 case phy_modtype_twinax:
2073 return (IFM_10G_TWINAX);
2074 case phy_modtype_twinax_long:
2075 return (IFM_10G_TWINAX_LONG);
2076 case phy_modtype_none:
2077 return (IFM_NONE);
2078 case phy_modtype_unknown:
2079 return (IFM_UNKNOWN);
2080 }
2081
2082 KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
2083 return (IFM_UNKNOWN);
2084 }
2085
2086 /*
2087 * Rebuilds the ifmedia list for this port, and sets the current media.
2088 */
2089 static void
cxgb_build_medialist(struct port_info *p)
2091 {
2092 struct cphy *phy = &p->phy;
2093 struct ifmedia *media = &p->media;
2094 int mod = phy->modtype;
2095 int m = IFM_ETHER | IFM_FDX;
2096
2097 PORT_LOCK(p);
2098
2099 ifmedia_removeall(media);
2100 if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
2101 /* Copper (RJ45) */
2102
2103 if (phy->caps & SUPPORTED_10000baseT_Full)
2104 ifmedia_add(media, m | IFM_10G_T, mod, NULL);
2105
2106 if (phy->caps & SUPPORTED_1000baseT_Full)
2107 ifmedia_add(media, m | IFM_1000_T, mod, NULL);
2108
2109 if (phy->caps & SUPPORTED_100baseT_Full)
2110 ifmedia_add(media, m | IFM_100_TX, mod, NULL);
2111
2112 if (phy->caps & SUPPORTED_10baseT_Full)
2113 ifmedia_add(media, m | IFM_10_T, mod, NULL);
2114
2115 ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
2116 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2117
2118 } else if (phy->caps & SUPPORTED_TP) {
2119 /* Copper (CX4) */
2120
2121 KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
2122 ("%s: unexpected cap 0x%x", __func__, phy->caps));
2123
2124 ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
2125 ifmedia_set(media, m | IFM_10G_CX4);
2126
2127 } else if (phy->caps & SUPPORTED_FIBRE &&
2128 phy->caps & SUPPORTED_10000baseT_Full) {
2129 /* 10G optical (but includes SFP+ twinax) */
2130
2131 m |= cxgb_ifm_type(mod);
2132 if (IFM_SUBTYPE(m) == IFM_NONE)
2133 m &= ~IFM_FDX;
2134
2135 ifmedia_add(media, m, mod, NULL);
2136 ifmedia_set(media, m);
2137
2138 } else if (phy->caps & SUPPORTED_FIBRE &&
2139 phy->caps & SUPPORTED_1000baseT_Full) {
2140 /* 1G optical */
2141
2142 /* XXX: Lie and claim to be SX, could actually be any 1G-X */
2143 ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
2144 ifmedia_set(media, m | IFM_1000_SX);
2145
2146 } else {
2147 KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
2148 phy->caps));
2149 }
2150
2151 PORT_UNLOCK(p);
2152 }
2153
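/*
 * Report link state and the active media to ifconfig.  The media list is
 * rebuilt first if the PHY's module type has changed since it was last built.
 */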
2154 static void
cxgb_media_status(if_t ifp, struct ifmediareq *ifmr)
2156 {
2157 struct port_info *p = if_getsoftc(ifp);
2158 struct ifmedia_entry *cur = p->media.ifm_cur;
2159 int speed = p->link_config.speed;
2160
2161 if (cur->ifm_data != p->phy.modtype) {
2162 cxgb_build_medialist(p);
2163 cur = p->media.ifm_cur;
2164 }
2165
2166 ifmr->ifm_status = IFM_AVALID;
2167 if (!p->link_config.link_ok)
2168 return;
2169
2170 ifmr->ifm_status |= IFM_ACTIVE;
2171
2172 /*
2173 * active and current will differ iff current media is autoselect. That
2174 * can happen only for copper RJ45.
2175 */
2176 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
2177 return;
2178 KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
2179 ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
2180
2181 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2182 if (speed == SPEED_10000)
2183 ifmr->ifm_active |= IFM_10G_T;
2184 else if (speed == SPEED_1000)
2185 ifmr->ifm_active |= IFM_1000_T;
2186 else if (speed == SPEED_100)
2187 ifmr->ifm_active |= IFM_100_TX;
2188 else if (speed == SPEED_10)
2189 ifmr->ifm_active |= IFM_10_T;
2190 else
2191 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
2192 speed));
2193 }
2194
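/*
 * if_get_counter handler.  Refreshes the MAC statistics (rate limited in
 * cxgb_refresh_stats) and maps them to the requested ifnet counter.
 */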
2195 static uint64_t
cxgb_get_counter(if_t ifp, ift_counter c)
2197 {
2198 struct port_info *pi = if_getsoftc(ifp);
2199 struct adapter *sc = pi->adapter;
2200 struct cmac *mac = &pi->mac;
2201 struct mac_stats *mstats = &mac->stats;
2202
2203 cxgb_refresh_stats(pi);
2204
2205 switch (c) {
2206 case IFCOUNTER_IPACKETS:
2207 return (mstats->rx_frames);
2208
2209 case IFCOUNTER_IERRORS:
2210 return (mstats->rx_jabber + mstats->rx_data_errs +
2211 mstats->rx_sequence_errs + mstats->rx_runt +
2212 mstats->rx_too_long + mstats->rx_mac_internal_errs +
2213 mstats->rx_short + mstats->rx_fcs_errs);
2214
2215 case IFCOUNTER_OPACKETS:
2216 return (mstats->tx_frames);
2217
2218 case IFCOUNTER_OERRORS:
2219 return (mstats->tx_excess_collisions + mstats->tx_underrun +
2220 mstats->tx_len_errs + mstats->tx_mac_internal_errs +
2221 mstats->tx_excess_deferral + mstats->tx_fcs_errs);
2222
2223 case IFCOUNTER_COLLISIONS:
2224 return (mstats->tx_total_collisions);
2225
2226 case IFCOUNTER_IBYTES:
2227 return (mstats->rx_octets);
2228
2229 case IFCOUNTER_OBYTES:
2230 return (mstats->tx_octets);
2231
2232 case IFCOUNTER_IMCASTS:
2233 return (mstats->rx_mcast_frames);
2234
2235 case IFCOUNTER_OMCASTS:
2236 return (mstats->tx_mcast_frames);
2237
2238 case IFCOUNTER_IQDROPS:
2239 return (mstats->rx_cong_drops);
2240
2241 case IFCOUNTER_OQDROPS: {
2242 int i;
2243 uint64_t drops;
2244
2245 drops = 0;
2246 if (sc->flags & FULL_INIT_DONE) {
2247 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
2248 drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
2249 }
2250
2251 return (drops);
2252
2253 }
2254
2255 default:
2256 return (if_get_counter_default(ifp, c));
2257 }
2258 }
2259
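/*
 * Interrupt handler for slow-path (async) events.  Masks further PL
 * interrupts and hands the work off to the slow interrupt task.
 */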
2260 static void
cxgb_async_intr(void *data)
2262 {
2263 adapter_t *sc = data;
2264
2265 t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
2266 (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
2267 taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2268 }
2269
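/*
 * Callout that schedules a deferred link state check for an open port.
 */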
2270 static void
link_check_callout(void *arg)
2272 {
2273 struct port_info *pi = arg;
2274 struct adapter *sc = pi->adapter;
2275
2276 if (!isset(&sc->open_device_map, pi->port_id))
2277 return;
2278
2279 taskqueue_enqueue(sc->tq, &pi->link_check_task);
2280 }
2281
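/*
 * Task that queries the PHY for link state changes.  The check is
 * rescheduled via the callout while the link is down, faulted, or the PHY
 * cannot interrupt on link changes.
 */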
2282 static void
check_link_status(void *arg, int pending)
2284 {
2285 struct port_info *pi = arg;
2286 struct adapter *sc = pi->adapter;
2287
2288 if (!isset(&sc->open_device_map, pi->port_id))
2289 return;
2290
2291 t3_link_changed(sc, pi->port_id);
2292
2293 if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
2294 pi->link_config.link_ok == 0)
2295 callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
2296 }
2297
2298 void
t3_os_link_intr(struct port_info *pi)
2300 {
2301 /*
2302 * Schedule a link check in the near future. If the link is flapping
2303 * rapidly we'll keep resetting the callout and delaying the check until
2304 * things stabilize a bit.
2305 */
2306 callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
2307 }
2308
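/*
 * Periodic MAC watchdog for T3B2 adapters.  Runs the per-port watchdog task
 * and restarts the MAC and link if the hardware reports a hang.
 */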
2309 static void
check_t3b2_mac(struct adapter *sc)
2311 {
2312 int i;
2313
2314 if (sc->flags & CXGB_SHUTDOWN)
2315 return;
2316
2317 for_each_port(sc, i) {
2318 struct port_info *p = &sc->port[i];
2319 int status;
2320 #ifdef INVARIANTS
2321 if_t ifp = p->ifp;
2322 #endif
2323
2324 if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
2325 !p->link_config.link_ok)
2326 continue;
2327
2328 KASSERT(if_getdrvflags(ifp) & IFF_DRV_RUNNING,
2329 ("%s: state mismatch (drv_flags %x, device_map %x)",
2330 __func__, if_getdrvflags(ifp), sc->open_device_map));
2331
2332 PORT_LOCK(p);
2333 status = t3b2_mac_watchdog_task(&p->mac);
2334 if (status == 1)
2335 p->mac.stats.num_toggled++;
2336 else if (status == 2) {
2337 struct cmac *mac = &p->mac;
2338
2339 cxgb_update_mac_settings(p);
2340 t3_link_start(&p->phy, mac, &p->link_config);
2341 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2342 t3_port_intr_enable(sc, p->port_id);
2343 p->mac.stats.num_resets++;
2344 }
2345 PORT_UNLOCK(p);
2346 }
2347 }
2348
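/*
 * Per-second adapter callout: queue the tick task and rearm.
 */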
2349 static void
cxgb_tick(void *arg)
2351 {
2352 adapter_t *sc = (adapter_t *)arg;
2353
2354 if (sc->flags & CXGB_SHUTDOWN)
2355 return;
2356
2357 taskqueue_enqueue(sc->tq, &sc->tick_task);
2358 callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
2359 }
2360
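/*
 * Pull fresh statistics from the MAC, at most once every 250ms per port.
 */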
2361 void
cxgb_refresh_stats(struct port_info *pi)
2363 {
2364 struct timeval tv;
2365 const struct timeval interval = {0, 250000}; /* 250ms */
2366
2367 getmicrotime(&tv);
2368 timevalsub(&tv, &interval);
2369 if (timevalcmp(&tv, &pi->last_refreshed, <))
2370 return;
2371
2372 PORT_LOCK(pi);
2373 t3_mac_update_stats(&pi->mac);
2374 PORT_UNLOCK(pi);
2375 getmicrotime(&pi->last_refreshed);
2376 }
2377
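/*
 * Tick task.  Runs the T3B2 MAC watchdog when applicable, records SGE
 * response-queue starvation and free-list empty events, refreshes the
 * per-port MAC statistics, and counts RX FIFO overflows.
 */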
2378 static void
cxgb_tick_handler(void *arg, int count)
2380 {
2381 adapter_t *sc = (adapter_t *)arg;
2382 const struct adapter_params *p = &sc->params;
2383 int i;
2384 uint32_t cause, reset;
2385
2386 if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
2387 return;
2388
2389 if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
2390 check_t3b2_mac(sc);
2391
2392 cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
2393 if (cause) {
2394 struct sge_qset *qs = &sc->sge.qs[0];
2395 uint32_t mask, v;
2396
2397 v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;
2398
2399 mask = 1;
2400 for (i = 0; i < SGE_QSETS; i++) {
2401 if (v & mask)
2402 qs[i].rspq.starved++;
2403 mask <<= 1;
2404 }
2405
2406 mask <<= SGE_QSETS; /* skip RSPQXDISABLED */
2407
2408 for (i = 0; i < SGE_QSETS * 2; i++) {
2409 if (v & mask) {
2410 qs[i / 2].fl[i % 2].empty++;
2411 }
2412 mask <<= 1;
2413 }
2414
2415 /* clear */
2416 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
2417 t3_write_reg(sc, A_SG_INT_CAUSE, cause);
2418 }
2419
2420 for (i = 0; i < sc->params.nports; i++) {
2421 struct port_info *pi = &sc->port[i];
2422 struct cmac *mac = &pi->mac;
2423
2424 if (!isset(&sc->open_device_map, pi->port_id))
2425 continue;
2426
2427 cxgb_refresh_stats(pi);
2428
2429 if (mac->multiport)
2430 continue;
2431
2432 /* Count rx fifo overflows, once per second */
2433 cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
2434 reset = 0;
2435 if (cause & F_RXFIFO_OVERFLOW) {
2436 mac->stats.rx_fifo_ovfl++;
2437 reset |= F_RXFIFO_OVERFLOW;
2438 }
2439 t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
2440 }
2441 }
2442
2443 static void
touch_bars(device_t dev)
2445 {
2446 /*
2447 * Don't enable yet
2448 */
2449 #if !defined(__LP64__) && 0
2450 u32 v;
2451
2452 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
2453 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
2454 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
2455 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
2456 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
2457 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
2458 #endif
2459 }
2460
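/*
 * Write len bytes at offset into the serial EEPROM.  Unaligned writes are
 * handled with a read-modify-write of the bordering 32-bit words.
 */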
2461 static int
set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2463 {
2464 uint8_t *buf;
2465 int err = 0;
2466 u32 aligned_offset, aligned_len, *p;
2467 struct adapter *adapter = pi->adapter;
2468
2469
2470 aligned_offset = offset & ~3;
2471 aligned_len = (len + (offset & 3) + 3) & ~3;
2472
2473 if (aligned_offset != offset || aligned_len != len) {
2474 buf = malloc(aligned_len, M_DEVBUF, M_WAITOK | M_ZERO);
2475 err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2476 if (!err && aligned_len > 4)
2477 err = t3_seeprom_read(adapter,
2478 aligned_offset + aligned_len - 4,
2479 (u32 *)&buf[aligned_len - 4]);
2480 if (err)
2481 goto out;
2482 memcpy(buf + (offset & 3), data, len);
2483 } else
2484 buf = (uint8_t *)(uintptr_t)data;
2485
2486 err = t3_seeprom_wp(adapter, 0);
2487 if (err)
2488 goto out;
2489
2490 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2491 err = t3_seeprom_write(adapter, aligned_offset, *p);
2492 aligned_offset += 4;
2493 }
2494
2495 if (!err)
2496 err = t3_seeprom_wp(adapter, 1);
2497 out:
2498 if (buf != data)
2499 free(buf, M_DEVBUF);
2500 return err;
2501 }
2502
2503
2504 static int
in_range(int val, int lo, int hi)
2506 {
2507 return val < 0 || (val <= hi && val >= lo);
2508 }
2509
2510 static int
cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
2512 {
2513 return (0);
2514 }
2515
2516 static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2518 {
2519 return (0);
2520 }
2521
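/*
 * Handler for the CHELSIO_* ioctls on the adapter's control character
 * device (register, EEPROM, MDIO, SGE, scheduler, and filter access).
 * All of these require driver privilege.
 */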
2522 static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2524 int fflag, struct thread *td)
2525 {
2526 int mmd, error = 0;
2527 struct port_info *pi = dev->si_drv1;
2528 adapter_t *sc = pi->adapter;
2529
2530 #ifdef PRIV_SUPPORTED
2531 if (priv_check(td, PRIV_DRIVER)) {
2532 if (cxgb_debug)
2533 printf("user does not have access to privileged ioctls\n");
2534 return (EPERM);
2535 }
2536 #else
2537 if (suser(td)) {
2538 if (cxgb_debug)
2539 printf("user does not have access to privileged ioctls\n");
2540 return (EPERM);
2541 }
2542 #endif
2543
2544 switch (cmd) {
2545 case CHELSIO_GET_MIIREG: {
2546 uint32_t val;
2547 struct cphy *phy = &pi->phy;
2548 struct ch_mii_data *mid = (struct ch_mii_data *)data;
2549
2550 if (!phy->mdio_read)
2551 return (EOPNOTSUPP);
2552 if (is_10G(sc)) {
2553 mmd = mid->phy_id >> 8;
2554 if (!mmd)
2555 mmd = MDIO_DEV_PCS;
2556 else if (mmd > MDIO_DEV_VEND2)
2557 return (EINVAL);
2558
2559 error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2560 mid->reg_num, &val);
2561 } else
2562 error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2563 mid->reg_num & 0x1f, &val);
2564 if (error == 0)
2565 mid->val_out = val;
2566 break;
2567 }
2568 case CHELSIO_SET_MIIREG: {
2569 struct cphy *phy = &pi->phy;
2570 struct ch_mii_data *mid = (struct ch_mii_data *)data;
2571
2572 if (!phy->mdio_write)
2573 return (EOPNOTSUPP);
2574 if (is_10G(sc)) {
2575 mmd = mid->phy_id >> 8;
2576 if (!mmd)
2577 mmd = MDIO_DEV_PCS;
2578 else if (mmd > MDIO_DEV_VEND2)
2579 return (EINVAL);
2580
2581 error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2582 mmd, mid->reg_num, mid->val_in);
2583 } else
2584 error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2585 mid->reg_num & 0x1f,
2586 mid->val_in);
2587 break;
2588 }
2589 case CHELSIO_SETREG: {
2590 struct ch_reg *edata = (struct ch_reg *)data;
2591 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2592 return (EFAULT);
2593 t3_write_reg(sc, edata->addr, edata->val);
2594 break;
2595 }
2596 case CHELSIO_GETREG: {
2597 struct ch_reg *edata = (struct ch_reg *)data;
2598 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2599 return (EFAULT);
2600 edata->val = t3_read_reg(sc, edata->addr);
2601 break;
2602 }
2603 case CHELSIO_GET_SGE_CONTEXT: {
2604 struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
2605 mtx_lock_spin(&sc->sge.reg_lock);
2606 switch (ecntxt->cntxt_type) {
2607 case CNTXT_TYPE_EGRESS:
2608 error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2609 ecntxt->data);
2610 break;
2611 case CNTXT_TYPE_FL:
2612 error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
2613 ecntxt->data);
2614 break;
2615 case CNTXT_TYPE_RSP:
2616 error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2617 ecntxt->data);
2618 break;
2619 case CNTXT_TYPE_CQ:
2620 error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
2621 ecntxt->data);
2622 break;
2623 default:
2624 error = EINVAL;
2625 break;
2626 }
2627 mtx_unlock_spin(&sc->sge.reg_lock);
2628 break;
2629 }
2630 case CHELSIO_GET_SGE_DESC: {
2631 struct ch_desc *edesc = (struct ch_desc *)data;
2632 int ret;
2633 if (edesc->queue_num >= SGE_QSETS * 6)
2634 return (EINVAL);
2635 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2636 edesc->queue_num % 6, edesc->idx, edesc->data);
2637 if (ret < 0)
2638 return (EINVAL);
2639 edesc->size = ret;
2640 break;
2641 }
2642 case CHELSIO_GET_QSET_PARAMS: {
2643 struct qset_params *q;
2644 struct ch_qset_params *t = (struct ch_qset_params *)data;
2645 int q1 = pi->first_qset;
2646 int nqsets = pi->nqsets;
2647 int i;
2648
2649 if (t->qset_idx >= nqsets)
2650 return EINVAL;
2651
2652 i = q1 + t->qset_idx;
2653 q = &sc->params.sge.qset[i];
2654 t->rspq_size = q->rspq_size;
2655 t->txq_size[0] = q->txq_size[0];
2656 t->txq_size[1] = q->txq_size[1];
2657 t->txq_size[2] = q->txq_size[2];
2658 t->fl_size[0] = q->fl_size;
2659 t->fl_size[1] = q->jumbo_size;
2660 t->polling = q->polling;
2661 t->lro = q->lro;
2662 t->intr_lat = q->coalesce_usecs;
2663 t->cong_thres = q->cong_thres;
2664 t->qnum = i;
2665
2666 if ((sc->flags & FULL_INIT_DONE) == 0)
2667 t->vector = 0;
2668 else if (sc->flags & USING_MSIX)
2669 t->vector = rman_get_start(sc->msix_irq_res[i]);
2670 else
2671 t->vector = rman_get_start(sc->irq_res);
2672
2673 break;
2674 }
2675 case CHELSIO_GET_QSET_NUM: {
2676 struct ch_reg *edata = (struct ch_reg *)data;
2677 edata->val = pi->nqsets;
2678 break;
2679 }
2680 case CHELSIO_LOAD_FW: {
2681 uint8_t *fw_data;
2682 uint32_t vers;
2683 struct ch_mem_range *t = (struct ch_mem_range *)data;
2684
2685 /*
2686 * You're allowed to load a firmware only before FULL_INIT_DONE
2687 *
2688 * FW_UPTODATE is also set so the rest of the initialization
2689 * will not overwrite what was loaded here. This gives you the
2690 * flexibility to load any firmware (and maybe shoot yourself in
2691 * the foot).
2692 */
2693
2694 ADAPTER_LOCK(sc);
2695 if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
2696 ADAPTER_UNLOCK(sc);
2697 return (EBUSY);
2698 }
2699
2700 fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2701 if (!fw_data)
2702 error = ENOMEM;
2703 else
2704 error = copyin(t->buf, fw_data, t->len);
2705
2706 if (!error)
2707 error = -t3_load_fw(sc, fw_data, t->len);
2708
2709 if (t3_get_fw_version(sc, &vers) == 0) {
2710 snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
2711 "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
2712 G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
2713 }
2714
2715 if (!error)
2716 sc->flags |= FW_UPTODATE;
2717
2718 free(fw_data, M_DEVBUF);
2719 ADAPTER_UNLOCK(sc);
2720 break;
2721 }
2722 case CHELSIO_LOAD_BOOT: {
2723 uint8_t *boot_data;
2724 struct ch_mem_range *t = (struct ch_mem_range *)data;
2725
2726 boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2727 if (!boot_data)
2728 return ENOMEM;
2729
2730 error = copyin(t->buf, boot_data, t->len);
2731 if (!error)
2732 error = -t3_load_boot(sc, boot_data, t->len);
2733
2734 free(boot_data, M_DEVBUF);
2735 break;
2736 }
2737 case CHELSIO_GET_PM: {
2738 struct ch_pm *m = (struct ch_pm *)data;
2739 struct tp_params *p = &sc->params.tp;
2740
2741 if (!is_offload(sc))
2742 return (EOPNOTSUPP);
2743
2744 m->tx_pg_sz = p->tx_pg_size;
2745 m->tx_num_pg = p->tx_num_pgs;
2746 m->rx_pg_sz = p->rx_pg_size;
2747 m->rx_num_pg = p->rx_num_pgs;
2748 m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2749
2750 break;
2751 }
2752 case CHELSIO_SET_PM: {
2753 struct ch_pm *m = (struct ch_pm *)data;
2754 struct tp_params *p = &sc->params.tp;
2755
2756 if (!is_offload(sc))
2757 return (EOPNOTSUPP);
2758 if (sc->flags & FULL_INIT_DONE)
2759 return (EBUSY);
2760
2761 if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
2762 !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
2763 return (EINVAL); /* not power of 2 */
2764 if (!(m->rx_pg_sz & 0x14000))
2765 return (EINVAL); /* not 16KB or 64KB */
2766 if (!(m->tx_pg_sz & 0x1554000))
2767 return (EINVAL);
2768 if (m->tx_num_pg == -1)
2769 m->tx_num_pg = p->tx_num_pgs;
2770 if (m->rx_num_pg == -1)
2771 m->rx_num_pg = p->rx_num_pgs;
2772 if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
2773 return (EINVAL);
2774 if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
2775 m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
2776 return (EINVAL);
2777
2778 p->rx_pg_size = m->rx_pg_sz;
2779 p->tx_pg_size = m->tx_pg_sz;
2780 p->rx_num_pgs = m->rx_num_pg;
2781 p->tx_num_pgs = m->tx_num_pg;
2782 break;
2783 }
2784 case CHELSIO_SETMTUTAB: {
2785 struct ch_mtus *m = (struct ch_mtus *)data;
2786 int i;
2787
2788 if (!is_offload(sc))
2789 return (EOPNOTSUPP);
2790 if (offload_running(sc))
2791 return (EBUSY);
2792 if (m->nmtus != NMTUS)
2793 return (EINVAL);
2794 if (m->mtus[0] < 81) /* accommodate SACK */
2795 return (EINVAL);
2796
2797 /*
2798 * MTUs must be in ascending order
2799 */
2800 for (i = 1; i < NMTUS; ++i)
2801 if (m->mtus[i] < m->mtus[i - 1])
2802 return (EINVAL);
2803
2804 memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
2805 break;
2806 }
2807 case CHELSIO_GETMTUTAB: {
2808 struct ch_mtus *m = (struct ch_mtus *)data;
2809
2810 if (!is_offload(sc))
2811 return (EOPNOTSUPP);
2812
2813 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2814 m->nmtus = NMTUS;
2815 break;
2816 }
2817 case CHELSIO_GET_MEM: {
2818 struct ch_mem_range *t = (struct ch_mem_range *)data;
2819 struct mc7 *mem;
2820 uint8_t *useraddr;
2821 u64 buf[32];
2822
2823 /*
2824 * Use these to avoid modifying len/addr in the return
2825 * struct
2826 */
2827 uint32_t len = t->len, addr = t->addr;
2828
2829 if (!is_offload(sc))
2830 return (EOPNOTSUPP);
2831 if (!(sc->flags & FULL_INIT_DONE))
2832 return (EIO); /* need the memory controllers */
2833 if ((addr & 0x7) || (len & 0x7))
2834 return (EINVAL);
2835 if (t->mem_id == MEM_CM)
2836 mem = &sc->cm;
2837 else if (t->mem_id == MEM_PMRX)
2838 mem = &sc->pmrx;
2839 else if (t->mem_id == MEM_PMTX)
2840 mem = &sc->pmtx;
2841 else
2842 return (EINVAL);
2843
2844 /*
2845 * Version scheme:
2846 * bits 0..9: chip version
2847 * bits 10..15: chip revision
2848 */
2849 t->version = 3 | (sc->params.rev << 10);
2850
2851 /*
2852 * Read 256 bytes at a time as len can be large and we don't
2853 * want to use huge intermediate buffers.
2854 */
2855 useraddr = (uint8_t *)t->buf;
2856 while (len) {
2857 unsigned int chunk = min(len, sizeof(buf));
2858
2859 error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
2860 if (error)
2861 return (-error);
2862 if (copyout(buf, useraddr, chunk))
2863 return (EFAULT);
2864 useraddr += chunk;
2865 addr += chunk;
2866 len -= chunk;
2867 }
2868 break;
2869 }
2870 case CHELSIO_READ_TCAM_WORD: {
2871 struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2872
2873 if (!is_offload(sc))
2874 return (EOPNOTSUPP);
2875 if (!(sc->flags & FULL_INIT_DONE))
2876 return (EIO); /* need MC5 */
2877 return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2878 break;
2879 }
2880 case CHELSIO_SET_TRACE_FILTER: {
2881 struct ch_trace *t = (struct ch_trace *)data;
2882 const struct trace_params *tp;
2883
2884 tp = (const struct trace_params *)&t->sip;
2885 if (t->config_tx)
2886 t3_config_trace_filter(sc, tp, 0, t->invert_match,
2887 t->trace_tx);
2888 if (t->config_rx)
2889 t3_config_trace_filter(sc, tp, 1, t->invert_match,
2890 t->trace_rx);
2891 break;
2892 }
2893 case CHELSIO_SET_PKTSCHED: {
2894 struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2895 if (sc->open_device_map == 0)
2896 return (EAGAIN);
2897 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2898 p->binding);
2899 break;
2900 }
2901 case CHELSIO_IFCONF_GETREGS: {
2902 struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
2903 int reglen = cxgb_get_regs_len();
2904 uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
2905 if (buf == NULL) {
2906 return (ENOMEM);
2907 }
2908 if (regs->len > reglen)
2909 regs->len = reglen;
2910 else if (regs->len < reglen)
2911 error = ENOBUFS;
2912
2913 if (!error) {
2914 cxgb_get_regs(sc, regs, buf);
2915 error = copyout(buf, regs->data, reglen);
2916 }
2917 free(buf, M_DEVBUF);
2918
2919 break;
2920 }
2921 case CHELSIO_SET_HW_SCHED: {
2922 struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2923 unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2924
2925 if ((sc->flags & FULL_INIT_DONE) == 0)
2926 return (EAGAIN); /* need TP to be initialized */
2927 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2928 !in_range(t->channel, 0, 1) ||
2929 !in_range(t->kbps, 0, 10000000) ||
2930 !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2931 !in_range(t->flow_ipg, 0,
2932 dack_ticks_to_usec(sc, 0x7ff)))
2933 return (EINVAL);
2934
2935 if (t->kbps >= 0) {
2936 error = t3_config_sched(sc, t->kbps, t->sched);
2937 if (error < 0)
2938 return (-error);
2939 }
2940 if (t->class_ipg >= 0)
2941 t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2942 if (t->flow_ipg >= 0) {
2943 t->flow_ipg *= 1000; /* us -> ns */
2944 t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2945 }
2946 if (t->mode >= 0) {
2947 int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2948
2949 t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2950 bit, t->mode ? bit : 0);
2951 }
2952 if (t->channel >= 0)
2953 t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2954 1 << t->sched, t->channel << t->sched);
2955 break;
2956 }
2957 case CHELSIO_GET_EEPROM: {
2958 int i;
2959 struct ch_eeprom *e = (struct ch_eeprom *)data;
2960 uint8_t *buf;
2961
2962 if (e->offset & 3 || e->offset >= EEPROMSIZE ||
2963 e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) {
2964 return (EINVAL);
2965 }
2966
2967 buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
2968 if (buf == NULL) {
2969 return (ENOMEM);
2970 }
2971 e->magic = EEPROM_MAGIC;
2972 for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
2973 error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
2974
2975 if (!error)
2976 error = copyout(buf + e->offset, e->data, e->len);
2977
2978 free(buf, M_DEVBUF);
2979 break;
2980 }
2981 case CHELSIO_CLEAR_STATS: {
2982 if (!(sc->flags & FULL_INIT_DONE))
2983 return EAGAIN;
2984
2985 PORT_LOCK(pi);
2986 t3_mac_update_stats(&pi->mac);
2987 memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
2988 PORT_UNLOCK(pi);
2989 break;
2990 }
2991 case CHELSIO_GET_UP_LA: {
2992 struct ch_up_la *la = (struct ch_up_la *)data;
2993 uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
2994 if (buf == NULL) {
2995 return (ENOMEM);
2996 }
2997 if (la->bufsize < LA_BUFSIZE)
2998 error = ENOBUFS;
2999
3000 if (!error)
3001 error = -t3_get_up_la(sc, &la->stopped, &la->idx,
3002 &la->bufsize, buf);
3003 if (!error)
3004 error = copyout(buf, la->data, la->bufsize);
3005
3006 free(buf, M_DEVBUF);
3007 break;
3008 }
3009 case CHELSIO_GET_UP_IOQS: {
3010 struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
3011 uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
3012 uint32_t *v;
3013
3014 if (buf == NULL) {
3015 return (ENOMEM);
3016 }
3017 if (ioqs->bufsize < IOQS_BUFSIZE)
3018 error = ENOBUFS;
3019
3020 if (!error)
3021 error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
3022
3023 if (!error) {
3024 v = (uint32_t *)buf;
3025
3026 ioqs->ioq_rx_enable = *v++;
3027 ioqs->ioq_tx_enable = *v++;
3028 ioqs->ioq_rx_status = *v++;
3029 ioqs->ioq_tx_status = *v++;
3030
3031 error = copyout(v, ioqs->data, ioqs->bufsize);
3032 }
3033
3034 free(buf, M_DEVBUF);
3035 break;
3036 }
3037 case CHELSIO_SET_FILTER: {
3038 struct ch_filter *f = (struct ch_filter *)data;
3039 struct filter_info *p;
3040 unsigned int nfilters = sc->params.mc5.nfilters;
3041
3042 if (!is_offload(sc))
3043 return (EOPNOTSUPP); /* No TCAM */
3044 if (!(sc->flags & FULL_INIT_DONE))
3045 return (EAGAIN); /* mc5 not setup yet */
3046 if (nfilters == 0)
3047 return (EBUSY); /* TOE will use TCAM */
3048
3049 /* sanity checks */
3050 if (f->filter_id >= nfilters ||
3051 (f->val.dip && f->mask.dip != 0xffffffff) ||
3052 (f->val.sport && f->mask.sport != 0xffff) ||
3053 (f->val.dport && f->mask.dport != 0xffff) ||
3054 (f->val.vlan && f->mask.vlan != 0xfff) ||
3055 (f->val.vlan_prio &&
3056 f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
3057 (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
3058 f->qset >= SGE_QSETS ||
3059 sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
3060 return (EINVAL);
3061
3062 /* Was allocated with M_WAITOK */
3063 KASSERT(sc->filters, ("filter table NULL\n"));
3064
3065 p = &sc->filters[f->filter_id];
3066 if (p->locked)
3067 return (EPERM);
3068
3069 bzero(p, sizeof(*p));
3070 p->sip = f->val.sip;
3071 p->sip_mask = f->mask.sip;
3072 p->dip = f->val.dip;
3073 p->sport = f->val.sport;
3074 p->dport = f->val.dport;
3075 p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
3076 p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
3077 FILTER_NO_VLAN_PRI;
3078 p->mac_hit = f->mac_hit;
3079 p->mac_vld = f->mac_addr_idx != 0xffff;
3080 p->mac_idx = f->mac_addr_idx;
3081 p->pkt_type = f->proto;
3082 p->report_filter_id = f->want_filter_id;
3083 p->pass = f->pass;
3084 p->rss = f->rss;
3085 p->qset = f->qset;
3086
3087 error = set_filter(sc, f->filter_id, p);
3088 if (error == 0)
3089 p->valid = 1;
3090 break;
3091 }
3092 case CHELSIO_DEL_FILTER: {
3093 struct ch_filter *f = (struct ch_filter *)data;
3094 struct filter_info *p;
3095 unsigned int nfilters = sc->params.mc5.nfilters;
3096
3097 if (!is_offload(sc))
3098 return (EOPNOTSUPP);
3099 if (!(sc->flags & FULL_INIT_DONE))
3100 return (EAGAIN);
3101 if (nfilters == 0 || sc->filters == NULL)
3102 return (EINVAL);
3103 if (f->filter_id >= nfilters)
3104 return (EINVAL);
3105
3106 p = &sc->filters[f->filter_id];
3107 if (p->locked)
3108 return (EPERM);
3109 if (!p->valid)
3110 return (EFAULT); /* Read "Bad address" as "Bad index" */
3111
3112 bzero(p, sizeof(*p));
3113 p->sip = p->sip_mask = 0xffffffff;
3114 p->vlan = 0xfff;
3115 p->vlan_prio = FILTER_NO_VLAN_PRI;
3116 p->pkt_type = 1;
3117 error = set_filter(sc, f->filter_id, p);
3118 break;
3119 }
3120 case CHELSIO_GET_FILTER: {
3121 struct ch_filter *f = (struct ch_filter *)data;
3122 struct filter_info *p;
3123 unsigned int i, nfilters = sc->params.mc5.nfilters;
3124
3125 if (!is_offload(sc))
3126 return (EOPNOTSUPP);
3127 if (!(sc->flags & FULL_INIT_DONE))
3128 return (EAGAIN);
3129 if (nfilters == 0 || sc->filters == NULL)
3130 return (EINVAL);
3131
3132 i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
3133 for (; i < nfilters; i++) {
3134 p = &sc->filters[i];
3135 if (!p->valid)
3136 continue;
3137
3138 bzero(f, sizeof(*f));
3139
3140 f->filter_id = i;
3141 f->val.sip = p->sip;
3142 f->mask.sip = p->sip_mask;
3143 f->val.dip = p->dip;
3144 f->mask.dip = p->dip ? 0xffffffff : 0;
3145 f->val.sport = p->sport;
3146 f->mask.sport = p->sport ? 0xffff : 0;
3147 f->val.dport = p->dport;
3148 f->mask.dport = p->dport ? 0xffff : 0;
3149 f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
3150 f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
3151 f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3152 0 : p->vlan_prio;
3153 f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3154 0 : FILTER_NO_VLAN_PRI;
3155 f->mac_hit = p->mac_hit;
3156 f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
3157 f->proto = p->pkt_type;
3158 f->want_filter_id = p->report_filter_id;
3159 f->pass = p->pass;
3160 f->rss = p->rss;
3161 f->qset = p->qset;
3162
3163 break;
3164 }
3165
3166 if (i == nfilters)
3167 f->filter_id = 0xffffffff;
3168 break;
3169 }
3170 default:
3171 return (EOPNOTSUPP);
3172 break;
3173 }
3174
3175 return (error);
3176 }
3177
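/*
 * Copy the adapter registers in [start, end] into buf at matching offsets.
 */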
3178 static __inline void
reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
3180 unsigned int end)
3181 {
3182 uint32_t *p = (uint32_t *)(buf + start);
3183
3184 for ( ; start <= end; start += sizeof(uint32_t))
3185 *p++ = t3_read_reg(ap, start);
3186 }
3187
3188 #define T3_REGMAP_SIZE (3 * 1024)
3189 static int
cxgb_get_regs_len(void)
3191 {
3192 return T3_REGMAP_SIZE;
3193 }
3194
3195 static void
cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
3197 {
3198
3199 /*
3200 * Version scheme:
3201 * bits 0..9: chip version
3202 * bits 10..15: chip revision
3203 * bit 31: set for PCIe cards
3204 */
3205 regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
3206
3207 /*
3208 * We skip the MAC statistics registers because they are clear-on-read.
3209 * Also reading multi-register stats would need to synchronize with the
3210 * periodic mac stats accumulation. Hard to justify the complexity.
3211 */
3212 memset(buf, 0, cxgb_get_regs_len());
3213 reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
3214 reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
3215 reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
3216 reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
3217 reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
3218 reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
3219 XGM_REG(A_XGM_SERDES_STAT3, 1));
3220 reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
3221 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
3222 }
3223
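/*
 * Allocate the software filter table.  The last entry is reserved as a
 * locked default filter that passes traffic with RSS.
 */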
3224 static int
alloc_filters(struct adapter *sc)
3226 {
3227 struct filter_info *p;
3228 unsigned int nfilters = sc->params.mc5.nfilters;
3229
3230 if (nfilters == 0)
3231 return (0);
3232
3233 p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
3234 sc->filters = p;
3235
3236 p = &sc->filters[nfilters - 1];
3237 p->vlan = 0xfff;
3238 p->vlan_prio = FILTER_NO_VLAN_PRI;
3239 p->pass = p->rss = p->valid = p->locked = 1;
3240
3241 return (0);
3242 }
3243
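/*
 * Enable filtering and program any locked (reserved) filters into hardware.
 */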
3244 static int
setup_hw_filters(struct adapter *sc)
3246 {
3247 int i, rc;
3248 unsigned int nfilters = sc->params.mc5.nfilters;
3249
3250 if (!sc->filters)
3251 return (0);
3252
3253 t3_enable_filters(sc);
3254
3255 for (i = rc = 0; i < nfilters && !rc; i++) {
3256 if (sc->filters[i].locked)
3257 rc = set_filter(sc, i, &sc->filters[i]);
3258 }
3259
3260 return (rc);
3261 }
3262
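/*
 * Program one hardware filter: a CPL_PASS_OPEN_REQ plus TCB field updates
 * sent over the management channel.
 */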
3263 static int
set_filter(struct adapter *sc, int id, const struct filter_info *f)
3265 {
3266 int len;
3267 struct mbuf *m;
3268 struct ulp_txpkt *txpkt;
3269 struct work_request_hdr *wr;
3270 struct cpl_pass_open_req *oreq;
3271 struct cpl_set_tcb_field *sreq;
3272
3273 len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
3274 KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));
3275
3276 id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
3277 sc->params.mc5.nfilters;
3278
3279 m = m_gethdr(M_WAITOK, MT_DATA);
3280 m->m_len = m->m_pkthdr.len = len;
3281 bzero(mtod(m, char *), len);
3282
3283 wr = mtod(m, struct work_request_hdr *);
3284 wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
3285
3286 oreq = (struct cpl_pass_open_req *)(wr + 1);
3287 txpkt = (struct ulp_txpkt *)oreq;
3288 txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3289 txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
3290 OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
3291 oreq->local_port = htons(f->dport);
3292 oreq->peer_port = htons(f->sport);
3293 oreq->local_ip = htonl(f->dip);
3294 oreq->peer_ip = htonl(f->sip);
3295 oreq->peer_netmask = htonl(f->sip_mask);
3296 oreq->opt0h = 0;
3297 oreq->opt0l = htonl(F_NO_OFFLOAD);
3298 oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
3299 V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
3300 V_VLAN_PRI(f->vlan_prio >> 1) |
3301 V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
3302 V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
3303 V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
3304
3305 sreq = (struct cpl_set_tcb_field *)(oreq + 1);
3306 set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
3307 (f->report_filter_id << 15) | (1 << 23) |
3308 ((u64)f->pass << 35) | ((u64)!f->rss << 36));
3309 set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
3310 t3_mgmt_tx(sc, m);
3311
3312 if (f->pass && !f->rss) {
3313 len = sizeof(*sreq);
3314 m = m_gethdr(M_WAITOK, MT_DATA);
3315 m->m_len = m->m_pkthdr.len = len;
3316 bzero(mtod(m, char *), len);
3317 sreq = mtod(m, struct cpl_set_tcb_field *);
3318 sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
3319 mk_set_tcb_field(sreq, id, 25, 0x3f80000,
3320 (u64)sc->rrss_map[f->qset] << 19);
3321 t3_mgmt_tx(sc, m);
3322 }
3323 return 0;
3324 }
3325
3326 static inline void
mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
3328 unsigned int word, u64 mask, u64 val)
3329 {
3330 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
3331 req->reply = V_NO_REPLY(1);
3332 req->cpu_idx = 0;
3333 req->word = htons(word);
3334 req->mask = htobe64(mask);
3335 req->val = htobe64(val);
3336 }
3337
3338 static inline void
set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
3340 unsigned int word, u64 mask, u64 val)
3341 {
3342 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
3343
3344 txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3345 txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
3346 mk_set_tcb_field(req, tid, word, mask, val);
3347 }
3348
3349 void
t3_iterate(void (*func)(struct adapter *, void *), void *arg)
3351 {
3352 struct adapter *sc;
3353
3354 mtx_lock(&t3_list_lock);
3355 SLIST_FOREACH(sc, &t3_list, link) {
3356 /*
3357 * func should not make any assumptions about what state sc is
3358 * in - the only guarantee is that sc->sc_lock is a valid lock.
3359 */
3360 func(sc, arg);
3361 }
3362 mtx_unlock(&t3_list_lock);
3363 }
3364
3365 #ifdef TCP_OFFLOAD
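/*
 * Enable or disable TOE on a port.  The TOM (and, if available, iWARP)
 * upper layer drivers are activated on first use.
 */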
3366 static int
toe_capability(struct port_info *pi, int enable)
3368 {
3369 int rc;
3370 struct adapter *sc = pi->adapter;
3371
3372 ADAPTER_LOCK_ASSERT_OWNED(sc);
3373
3374 if (!is_offload(sc))
3375 return (ENODEV);
3376
3377 if (enable) {
3378 if (!(sc->flags & FULL_INIT_DONE)) {
3379 log(LOG_WARNING,
3380 "You must enable a cxgb interface first\n");
3381 return (EAGAIN);
3382 }
3383
3384 if (isset(&sc->offload_map, pi->port_id))
3385 return (0);
3386
3387 if (!(sc->flags & TOM_INIT_DONE)) {
3388 rc = t3_activate_uld(sc, ULD_TOM);
3389 if (rc == EAGAIN) {
3390 log(LOG_WARNING,
3391 "You must kldload t3_tom.ko before trying "
3392 "to enable TOE on a cxgb interface.\n");
3393 }
3394 if (rc != 0)
3395 return (rc);
3396 KASSERT(sc->tom_softc != NULL,
3397 ("%s: TOM activated but softc NULL", __func__));
3398 KASSERT(sc->flags & TOM_INIT_DONE,
3399 ("%s: TOM activated but flag not set", __func__));
3400 }
3401
3402 setbit(&sc->offload_map, pi->port_id);
3403
3404 /*
3405 * XXX: Temporary code to allow iWARP to be enabled when TOE is
3406 * enabled on any port. Need to figure out how to enable,
3407 * disable, load, and unload iWARP cleanly.
3408 */
3409 if (!isset(&sc->offload_map, MAX_NPORTS) &&
3410 t3_activate_uld(sc, ULD_IWARP) == 0)
3411 setbit(&sc->offload_map, MAX_NPORTS);
3412 } else {
3413 if (!isset(&sc->offload_map, pi->port_id))
3414 return (0);
3415
3416 KASSERT(sc->flags & TOM_INIT_DONE,
3417 ("%s: TOM never initialized?", __func__));
3418 clrbit(&sc->offload_map, pi->port_id);
3419 }
3420
3421 return (0);
3422 }
3423
3424 /*
3425 * Add an upper layer driver to the global list.
3426 */
3427 int
t3_register_uld(struct uld_info *ui)
3429 {
3430 int rc = 0;
3431 struct uld_info *u;
3432
3433 mtx_lock(&t3_uld_list_lock);
3434 SLIST_FOREACH(u, &t3_uld_list, link) {
3435 if (u->uld_id == ui->uld_id) {
3436 rc = EEXIST;
3437 goto done;
3438 }
3439 }
3440
3441 SLIST_INSERT_HEAD(&t3_uld_list, ui, link);
3442 ui->refcount = 0;
3443 done:
3444 mtx_unlock(&t3_uld_list_lock);
3445 return (rc);
3446 }
3447
3448 int
t3_unregister_uld(struct uld_info *ui)
3450 {
3451 int rc = EINVAL;
3452 struct uld_info *u;
3453
3454 mtx_lock(&t3_uld_list_lock);
3455
3456 SLIST_FOREACH(u, &t3_uld_list, link) {
3457 if (u == ui) {
3458 if (ui->refcount > 0) {
3459 rc = EBUSY;
3460 goto done;
3461 }
3462
3463 SLIST_REMOVE(&t3_uld_list, ui, uld_info, link);
3464 rc = 0;
3465 goto done;
3466 }
3467 }
3468 done:
3469 mtx_unlock(&t3_uld_list_lock);
3470 return (rc);
3471 }
3472
3473 int
t3_activate_uld(struct adapter *sc, int id)
3475 {
3476 int rc = EAGAIN;
3477 struct uld_info *ui;
3478
3479 mtx_lock(&t3_uld_list_lock);
3480
3481 SLIST_FOREACH(ui, &t3_uld_list, link) {
3482 if (ui->uld_id == id) {
3483 rc = ui->activate(sc);
3484 if (rc == 0)
3485 ui->refcount++;
3486 goto done;
3487 }
3488 }
3489 done:
3490 mtx_unlock(&t3_uld_list_lock);
3491
3492 return (rc);
3493 }
3494
3495 int
t3_deactivate_uld(struct adapter *sc, int id)
3497 {
3498 int rc = EINVAL;
3499 struct uld_info *ui;
3500
3501 mtx_lock(&t3_uld_list_lock);
3502
3503 SLIST_FOREACH(ui, &t3_uld_list, link) {
3504 if (ui->uld_id == id) {
3505 rc = ui->deactivate(sc);
3506 if (rc == 0)
3507 ui->refcount--;
3508 goto done;
3509 }
3510 }
3511 done:
3512 mtx_unlock(&t3_uld_list_lock);
3513
3514 return (rc);
3515 }
3516
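/*
 * Default handler for CPL messages with no registered handler: drop them.
 */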
3517 static int
cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused,
3519 struct mbuf *m)
3520 {
3521 m_freem(m);
3522 return (EDOOFUS);
3523 }
3524
3525 int
t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
3527 {
3528 uintptr_t *loc, new;
3529
3530 if (opcode >= NUM_CPL_HANDLERS)
3531 return (EINVAL);
3532
3533 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
3534 loc = (uintptr_t *) &sc->cpl_handler[opcode];
3535 atomic_store_rel_ptr(loc, new);
3536
3537 return (0);
3538 }
3539 #endif
3540
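/*
 * Module event handler: initialize the global adapter and ULD lists on load,
 * and tear them down on unload once they are empty.
 */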
3541 static int
cxgbc_mod_event(module_t mod, int cmd, void *arg)
3543 {
3544 int rc = 0;
3545
3546 switch (cmd) {
3547 case MOD_LOAD:
3548 mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF);
3549 SLIST_INIT(&t3_list);
3550 #ifdef TCP_OFFLOAD
3551 mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF);
3552 SLIST_INIT(&t3_uld_list);
3553 #endif
3554 break;
3555
3556 case MOD_UNLOAD:
3557 #ifdef TCP_OFFLOAD
3558 mtx_lock(&t3_uld_list_lock);
3559 if (!SLIST_EMPTY(&t3_uld_list)) {
3560 rc = EBUSY;
3561 mtx_unlock(&t3_uld_list_lock);
3562 break;
3563 }
3564 mtx_unlock(&t3_uld_list_lock);
3565 mtx_destroy(&t3_uld_list_lock);
3566 #endif
3567 mtx_lock(&t3_list_lock);
3568 if (!SLIST_EMPTY(&t3_list)) {
3569 rc = EBUSY;
3570 mtx_unlock(&t3_list_lock);
3571 break;
3572 }
3573 mtx_unlock(&t3_list_lock);
3574 mtx_destroy(&t3_list_lock);
3575 break;
3576 }
3577
3578 return (rc);
3579 }
3580
3581 #ifdef DEBUGNET
3582 static void
cxgb_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
3584 {
3585 struct port_info *pi;
3586 adapter_t *adap;
3587
3588 pi = if_getsoftc(ifp);
3589 adap = pi->adapter;
3590 ADAPTER_LOCK(adap);
3591 *nrxr = adap->nqsets;
3592 *ncl = adap->sge.qs[0].fl[1].size;
3593 *clsize = adap->sge.qs[0].fl[1].buf_size;
3594 ADAPTER_UNLOCK(adap);
3595 }
3596
3597 static void
cxgb_debugnet_event(if_t ifp, enum debugnet_ev event)
3599 {
3600 struct port_info *pi;
3601 struct sge_qset *qs;
3602 int i;
3603
3604 pi = if_getsoftc(ifp);
3605 if (event == DEBUGNET_START)
3606 for (i = 0; i < pi->adapter->nqsets; i++) {
3607 qs = &pi->adapter->sge.qs[i];
3608
3609 /* Need to reinit after debugnet_mbuf_start(). */
3610 qs->fl[0].zone = zone_pack;
3611 qs->fl[1].zone = zone_clust;
3612 qs->lro.enabled = 0;
3613 }
3614 }
3615
3616 static int
cxgb_debugnet_transmit(if_t ifp, struct mbuf *m)
3618 {
3619 struct port_info *pi;
3620 struct sge_qset *qs;
3621
3622 pi = if_getsoftc(ifp);
3623 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
3624 IFF_DRV_RUNNING)
3625 return (ENOENT);
3626
3627 qs = &pi->adapter->sge.qs[pi->first_qset];
3628 return (cxgb_debugnet_encap(qs, &m));
3629 }
3630
3631 static int
cxgb_debugnet_poll(if_t ifp, int count)
3633 {
3634 struct port_info *pi;
3635 adapter_t *adap;
3636 int i;
3637
3638 pi = if_getsoftc(ifp);
3639 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
3640 return (ENOENT);
3641
3642 adap = pi->adapter;
3643 for (i = 0; i < adap->nqsets; i++)
3644 (void)cxgb_debugnet_poll_rx(adap, &adap->sge.qs[i]);
3645 (void)cxgb_debugnet_poll_tx(&adap->sge.qs[pi->first_qset]);
3646 return (0);
3647 }
3648 #endif /* DEBUGNET */
3649