/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4 support code.
 *
 * Copyright (C) 2010-2013 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/atomic.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/queue.h>

#include "version.h"
#include "common/common.h"
#include "common/t4_regs.h"
#include "firmware/t4_fw.h"
#include "firmware/t4_cfg.h"
#include "t4_l2t.h"

static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp);
static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp);
static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp,
    int *rp);
struct cb_ops t4_cb_ops = {
	.cb_open = t4_cb_open,
	.cb_close = t4_cb_close,
	.cb_strategy = nodev,
	.cb_print = nodev,
	.cb_dump = nodev,
	.cb_read = nodev,
	.cb_write = nodev,
	.cb_ioctl = t4_cb_ioctl,
	.cb_devmap = nodev,
	.cb_mmap = nodev,
	.cb_segmap = nodev,
	.cb_chpoll = nochpoll,
	.cb_prop_op = ddi_prop_op,
	.cb_flag = D_MP,
	.cb_rev = CB_REV,
	.cb_aread = nodev,
	.cb_awrite = nodev
};

static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
    void *arg, void *result);
static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **cdipp);
static int t4_bus_unconfig(dev_info_t *dip, uint_t flags,
    ddi_bus_config_op_t op, void *arg);
struct bus_ops t4_bus_ops = {
	.busops_rev = BUSO_REV,
	.bus_ctl = t4_bus_ctl,
	.bus_prop_op = ddi_bus_prop_op,
	.bus_config = t4_bus_config,
	.bus_unconfig = t4_bus_unconfig,
};

static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **rp);
static int t4_devo_probe(dev_info_t *dip);
static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int t4_devo_quiesce(dev_info_t *dip);
struct dev_ops t4_dev_ops = {
	.devo_rev = DEVO_REV,
	.devo_getinfo = t4_devo_getinfo,
	.devo_identify = nulldev,
	.devo_probe = t4_devo_probe,
	.devo_attach = t4_devo_attach,
	.devo_detach = t4_devo_detach,
	.devo_reset = nodev,
	.devo_cb_ops = &t4_cb_ops,
	.devo_bus_ops = &t4_bus_ops,
	.devo_quiesce = &t4_devo_quiesce,
};

static struct modldrv modldrv = {
	.drv_modops = &mod_driverops,
	.drv_linkinfo = "Chelsio T4 nexus " DRV_VERSION,
	.drv_dev_ops = &t4_dev_ops
};

static struct modlinkage modlinkage = {
	.ml_rev = MODREV_1,
	.ml_linkage = {&modldrv, NULL},
};

void *t4_list;

struct intrs_and_queues {
	int intr_type;		/* DDI_INTR_TYPE_* */
	int nirq;		/* Number of vectors */
	int intr_fwd;		/* Interrupts forwarded */
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
#ifndef TCP_OFFLOAD_DISABLE
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss,
    mblk_t *m);
int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h);
static unsigned int getpf(struct adapter *sc);
static int prep_firmware(struct adapter *sc);
static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma);
static int partition_resources(struct adapter *sc);
static int get_params__pre_init(struct adapter *sc);
static int get_params__post_init(struct adapter *sc);
static void setup_memwin(struct adapter *sc);
static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
    uint_t count);
static int prop_lookup_int(struct adapter *sc, char *name, int defval);
static int init_driver_props(struct adapter *sc, struct driver_properties *p);
static int remove_extra_props(struct adapter *sc, int n10g, int n1g);
static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq);
static int add_child_node(struct adapter *sc, int idx);
static int remove_child_node(struct adapter *sc, int idx);
static kstat_t *setup_kstats(struct adapter *sc);
#ifndef TCP_OFFLOAD_DISABLE
static int toe_capability(struct port_info *pi, int enable);
static int activate_uld(struct adapter *sc, int id, struct uld_softc *usc);
static int deactivate_uld(struct uld_softc *usc);
#endif
static kmutex_t t4_adapter_list_lock;
static SLIST_HEAD(, adapter) t4_adapter_list;
#ifndef TCP_OFFLOAD_DISABLE
static kmutex_t t4_uld_list_lock;
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif

int
_init(void)
{
	int rc;

	rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0);
	if (rc != 0)
		return (rc);

	/* Set up the global lists before the module can be attached. */
	mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL);
	SLIST_INIT(&t4_adapter_list);

#ifndef TCP_OFFLOAD_DISABLE
	mutex_init(&t4_uld_list_lock, NULL, MUTEX_DRIVER, NULL);
	SLIST_INIT(&t4_uld_list);
#endif

	rc = mod_install(&modlinkage);
	if (rc != 0) {
#ifndef TCP_OFFLOAD_DISABLE
		mutex_destroy(&t4_uld_list_lock);
#endif
		mutex_destroy(&t4_adapter_list_lock);
		ddi_soft_state_fini(&t4_list);
	}

	return (rc);
}

int
_fini(void)
{
	int rc;

	rc = mod_remove(&modlinkage);
	if (rc != 0)
		return (rc);

#ifndef TCP_OFFLOAD_DISABLE
	mutex_destroy(&t4_uld_list_lock);
#endif
	mutex_destroy(&t4_adapter_list_lock);
	ddi_soft_state_fini(&t4_list);
	return (0);
}

int
_info(struct modinfo *mi)
{
	return (mod_info(&modlinkage, mi));
}

/* ARGSUSED */
static int
t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
{
	struct adapter *sc;
	minor_t minor;

	minor = getminor((dev_t)arg);	/* same as instance# in our case */

	if (cmd == DDI_INFO_DEVT2DEVINFO) {
		sc = ddi_get_soft_state(t4_list, minor);
		if (sc == NULL)
			return (DDI_FAILURE);

		ASSERT(sc->dev == (dev_t)arg);
		*rp = (void *)sc->dip;
	} else if (cmd == DDI_INFO_DEVT2INSTANCE)
		*rp = (void *)(unsigned long)minor;
	else
		ASSERT(0);

	return (DDI_SUCCESS);
}

static int
t4_devo_probe(dev_info_t *dip)
{
	int rc, id, *reg;
	uint_t n, pf;

	id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", 0xffff);
	if (id == 0xffff)
		return (DDI_PROBE_DONTCARE);

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", &reg, &n);
	if (rc != DDI_SUCCESS)
		return (DDI_PROBE_DONTCARE);

	pf = PCI_REG_FUNC_G(reg[0]);
	ddi_prop_free(reg);

	/* Prevent driver attachment on any PF except 0 on the FPGA */
	if (id == 0xa000 && pf != 0)
		return (DDI_PROBE_FAILURE);

	return (DDI_PROBE_DONTCARE);
}

static int
t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct adapter *sc = NULL;
	struct sge *s;
	int i, instance, rc = DDI_SUCCESS, n10g, n1g, rqidx, tqidx, q;
	int irq = 0;
#ifndef TCP_OFFLOAD_DISABLE
	int ofld_rqidx, ofld_tqidx;
#endif
	char name[16];
	struct driver_properties *prp;
	struct intrs_and_queues iaq;
	ddi_device_acc_attr_t da = {
		.devacc_attr_version = DDI_DEVICE_ATTR_V0,
		.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
		.devacc_attr_dataorder = DDI_UNORDERED_OK_ACC
	};

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Allocate space for soft state.
	 */
	instance = ddi_get_instance(dip);
	rc = ddi_soft_state_zalloc(t4_list, instance);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to allocate soft state: %d", rc);
		return (DDI_FAILURE);
	}

	sc = ddi_get_soft_state(t4_list, instance);
	sc->dip = dip;
	sc->dev = makedevice(ddi_driver_major(dip), instance);
	mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sc->cv, NULL, CV_DRIVER, NULL);
	mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&t4_adapter_list_lock);
	SLIST_INSERT_HEAD(&t4_adapter_list, sc, link);
	mutex_exit(&t4_adapter_list_lock);

	sc->pf = getpf(sc);
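	/* getpf() returns 0xff if it could not determine the PF number. */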
	if (sc->pf > 8) {
		rc = EINVAL;
		cxgb_printf(dip, CE_WARN,
		    "failed to determine PCI PF# of device");
		goto done;
	}
	sc->mbox = sc->pf;

	/*
	 * Enable access to the PCI config space.
	 */
	rc = pci_config_setup(dip, &sc->pci_regh);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to enable PCI config space access: %d", rc);
		goto done;
	}

	/* TODO: Set max read request to 4K */

	/*
	 * Enable MMIO access.
	 */
	rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to map device registers: %d", rc);
		goto done;
	}

	(void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map));

	/*
	 * Initialize cpl handler.
	 */
	for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) {
		sc->cpl_handler[i] = cpl_not_handled;
	}

	/*
	 * Prepare the adapter for operation.
	 */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc);
		goto done;
	}

	/*
	 * Do this really early. Note that minor number = instance.
	 */
	(void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance);
	rc = ddi_create_minor_node(dip, name, S_IFCHR, instance,
	    DDI_NT_NEXUS, 0);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to create device node: %d", rc);
		rc = DDI_SUCCESS;	/* carry on */
	}

	/* Initialize the driver properties */
	prp = &sc->props;
	(void) init_driver_props(sc, prp);

	/* Do this early. Memory window is required for loading config file. */
	setup_memwin(sc);

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	t4_sge_init(sc);

	if (sc->flags & MASTER_PF) {
		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			cxgb_printf(sc->dip, CE_WARN,
			    "early init failed: %d.\n", rc);
			goto done;
		}
	}

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * TODO: This is the place to call t4_set_filter_mode()
	 */

	/* tweak some settings */
	t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
	    V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
	    V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

	/*
	 * Work-around for bug 2619
	 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the
	 * VLAN tag extraction is disabled.
	 */
	t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN);

	/* Store filter mode */
	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
	    A_TP_VLAN_PRI_MAP);

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc. We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = kmem_zalloc(sizeof (*pi), KM_SLEEP);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			cxgb_printf(dip, CE_WARN,
			    "unable to initialize port %d: %d", i, rc);
			kmem_free(pi, sizeof (*pi));
			sc->port[i] = NULL;	/* indicates init failed */
			continue;
		}

		mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL);
		pi->mtu = ETHERMTU;

		if (is_10G_port(pi) != 0) {
			n10g++;
			pi->tmr_idx = prp->tmr_idx_10g;
			pi->pktc_idx = prp->pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = prp->tmr_idx_1g;
			pi->pktc_idx = prp->pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		t4_mc_init(pi);

		setbit(&sc->registered_device_map, i);
	}
	(void) remove_extra_props(sc, n10g, n1g);

	if (sc->registered_device_map == 0) {
		cxgb_printf(dip, CE_WARN, "no usable ports");
		rc = DDI_FAILURE;
		goto done;
	}

	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done;	/* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the fl in an rxq is an eq */
#ifndef TCP_OFFLOAD_DISABLE
	/* control queues, 1 per port + 1 mgmtq */
	s->neq += sc->params.nports + 1;
#endif
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
	if (iaq.intr_fwd != 0)
		sc->flags |= INTR_FWD;
#ifndef TCP_OFFLOAD_DISABLE
	if (is_offload(sc) != 0) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = kmem_zalloc(s->nofldrxq *
		    sizeof (struct sge_ofld_rxq), KM_SLEEP);
		s->ofld_txq = kmem_zalloc(s->nofldtxq *
		    sizeof (struct sge_wrq), KM_SLEEP);
		s->ctrlq = kmem_zalloc(sc->params.nports *
		    sizeof (struct sge_wrq), KM_SLEEP);

	}
#endif
	s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP);
	s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP);
	s->iqmap = kmem_zalloc(s->niq * sizeof (struct sge_iq *), KM_SLEEP);
	s->eqmap = kmem_zalloc(s->neq * sizeof (struct sge_eq *), KM_SLEEP);

	sc->intr_handle = kmem_zalloc(sc->intr_count *
	    sizeof (ddi_intr_handle_t), KM_SLEEP);

	/*
	 * Second pass over the ports. This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifndef TCP_OFFLOAD_DISABLE
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->first_rxq = rqidx;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g;

		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->first_txq = tqidx;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifndef TCP_OFFLOAD_DISABLE
		if (is_offload(sc) != 0) {
			/* LINTED: E_ASSIGN_NARROW_CONV */
			pi->first_ofld_rxq = ofld_rqidx;
			pi->nofldrxq = max(1, pi->nrxq / 4);

			/* LINTED: E_ASSIGN_NARROW_CONV */
			pi->first_ofld_txq = ofld_tqidx;
			pi->nofldtxq = max(1, pi->ntxq / 2);

			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif

		/*
		 * Enable hw checksumming and LSO for all ports by default.
		 * They can be disabled using ndd (hw_csum and hw_lso).
		 */
		pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO);
	}

#ifndef TCP_OFFLOAD_DISABLE
	sc->l2t = t4_init_l2t(sc);
#endif

	/*
	 * Setup Interrupts.
	 */

	i = 0;
	rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0,
	    sc->intr_count, &i, DDI_INTR_ALLOC_STRICT);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to allocate %d interrupt(s) of type %d: %d, %d",
		    sc->intr_count, sc->intr_type, rc, i);
		goto done;
	}
	ASSERT(sc->intr_count == i);	/* allocation was STRICT */
	(void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap);
	(void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri);
	if (sc->intr_count == 1) {
		ASSERT(sc->flags & INTR_FWD);
		(void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc,
		    &s->fwq);
	} else {
		/* Multiple interrupts. The first one is always error intr */
		(void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc,
		    NULL);
		irq++;

		/* The second one is always the firmware event queue */
		(void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc,
		    &s->fwq);
		irq++;
		/*
		 * Note that if INTR_FWD is set then either the NIC rx
		 * queues or the TOE rx queues (but not both) will be taking
		 * direct interrupts.
		 *
		 * There is no need to check for is_offload(sc) as nofldrxq
		 * will be 0 if offload is disabled.
		 */
		for_each_port(sc, i) {
			struct port_info *pi = sc->port[i];
			struct sge_rxq *rxq;
			struct sge_ofld_rxq *ofld_rxq;

#ifndef TCP_OFFLOAD_DISABLE
			/*
			 * Skip over the NIC queues if they aren't taking
			 * direct interrupts.
			 */
			if ((sc->flags & INTR_FWD) &&
			    pi->nofldrxq > pi->nrxq)
				goto ofld_queues;
#endif
			rxq = &s->rxq[pi->first_rxq];
			for (q = 0; q < pi->nrxq; q++, rxq++) {
				(void) ddi_intr_add_handler(
				    sc->intr_handle[irq], t4_intr, sc,
				    &rxq->iq);
				irq++;
			}

#ifndef TCP_OFFLOAD_DISABLE
			/*
			 * Skip over the offload queues if they aren't taking
			 * direct interrupts.
			 */
			if ((sc->flags & INTR_FWD))
				continue;
ofld_queues:
			ofld_rxq = &s->ofld_rxq[pi->first_ofld_rxq];
			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
				(void) ddi_intr_add_handler(
				    sc->intr_handle[irq], t4_intr, sc,
				    &ofld_rxq->iq);
				irq++;
			}
#endif
		}

	}
	sc->flags |= INTR_ALLOCATED;

	ASSERT(rc == DDI_SUCCESS);
	ddi_report_dev(dip);

	if (n10g && n1g) {
		cxgb_printf(dip, CE_NOTE,
		    "%dx10G %dx1G (%d rxq, %d txq total) %d %s.",
		    n10g, n1g, rqidx, tqidx, sc->intr_count,
		    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
		    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
		    "fixed interrupt");
	} else {
		cxgb_printf(dip, CE_NOTE,
		    "%dx%sG (%d rxq, %d txq per port) %d %s.",
		    n10g ? n10g : n1g,
		    n10g ? "10" : "1",
		    n10g ? iaq.nrxq10g : iaq.nrxq1g,
		    n10g ? iaq.ntxq10g : iaq.ntxq1g,
		    sc->intr_count,
		    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
		    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
		    "fixed interrupt");
	}

	sc->ksp = setup_kstats(sc);

done:
	if (rc != DDI_SUCCESS) {
		(void) t4_devo_detach(dip, DDI_DETACH);

		/* rc may have errno style errors or DDI errors */
		rc = DDI_FAILURE;
	}

	return (rc);
}

static int
t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance, i;
	struct adapter *sc;
	struct port_info *pi;
	struct sge *s;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (DDI_SUCCESS);

	if (sc->flags & FULL_INIT_DONE) {
		t4_intr_disable(sc);
		for_each_port(sc, i) {
			pi = sc->port[i];
			if (pi && pi->flags & PORT_INIT_DONE)
				(void) port_full_uninit(pi);
		}
		(void) adapter_full_uninit(sc);
	}

	/* Safe to call no matter what */
	ddi_prop_remove_all(dip);
	ddi_remove_minor_node(dip, NULL);

	if (sc->ksp != NULL)
		kstat_delete(sc->ksp);

	s = &sc->sge;
	if (s->rxq != NULL)
		kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq));
#ifndef TCP_OFFLOAD_DISABLE
	if (s->ofld_txq != NULL)
		kmem_free(s->ofld_txq, s->nofldtxq * sizeof (struct sge_wrq));
	if (s->ofld_rxq != NULL)
		kmem_free(s->ofld_rxq,
		    s->nofldrxq * sizeof (struct sge_ofld_rxq));
	if (s->ctrlq != NULL)
		kmem_free(s->ctrlq,
		    sc->params.nports * sizeof (struct sge_wrq));
#endif
	if (s->txq != NULL)
		kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq));
	if (s->iqmap != NULL)
		kmem_free(s->iqmap, s->niq * sizeof (struct sge_iq *));
	if (s->eqmap != NULL)
		kmem_free(s->eqmap, s->neq * sizeof (struct sge_eq *));

	if (s->rxbuf_cache != NULL)
		rxbuf_cache_destroy(s->rxbuf_cache);

	if (sc->flags & INTR_ALLOCATED) {
		for (i = 0; i < sc->intr_count; i++) {
			(void) ddi_intr_remove_handler(sc->intr_handle[i]);
			(void) ddi_intr_free(sc->intr_handle[i]);
		}
		sc->flags &= ~INTR_ALLOCATED;
	}

	if (sc->intr_handle != NULL) {
		kmem_free(sc->intr_handle,
		    sc->intr_count * sizeof (*sc->intr_handle));
	}

	for_each_port(sc, i) {
		pi = sc->port[i];
		if (pi != NULL) {
			mutex_destroy(&pi->lock);
			kmem_free(pi, sizeof (*pi));
			clrbit(&sc->registered_device_map, i);
		}
	}

	if (sc->flags & FW_OK)
		(void) t4_fw_bye(sc, sc->mbox);

	if (sc->regh != NULL)
		ddi_regs_map_free(&sc->regh);

	if (sc->pci_regh != NULL)
		pci_config_teardown(&sc->pci_regh);

	mutex_enter(&t4_adapter_list_lock);
	SLIST_REMOVE(&t4_adapter_list, sc, adapter, link);
	mutex_exit(&t4_adapter_list_lock);

	mutex_destroy(&sc->lock);
	cv_destroy(&sc->cv);
	mutex_destroy(&sc->sfl_lock);

#ifdef DEBUG
	bzero(sc, sizeof (*sc));
#endif
	ddi_soft_state_free(t4_list, instance);

	return (DDI_SUCCESS);
}

static int
t4_devo_quiesce(dev_info_t *dip)
{
	int instance;
	struct adapter *sc;

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (DDI_SUCCESS);

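	/*
	 * Quiesce for fast reboot: disable the SGE, mask interrupts, and
	 * reset the chip so it stops accessing host memory.
	 */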
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST);

	return (DDI_SUCCESS);
}

static int
t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
    void *result)
{
	char s[4];
	struct port_info *pi;
	dev_info_t *child = (dev_info_t *)arg;

	switch (op) {
	case DDI_CTLOPS_REPORTDEV:
		pi = ddi_get_parent_data(rdip);
		pi->instance = ddi_get_instance(dip);
		pi->child_inst = ddi_get_instance(rdip);
		cmn_err(CE_CONT, "?%s%d is port %s on %s%d\n",
		    ddi_node_name(rdip), ddi_get_instance(rdip),
		    ddi_get_name_addr(rdip), ddi_driver_name(dip),
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		pi = ddi_get_parent_data(child);
		if (pi == NULL)
			return (DDI_NOT_WELL_FORMED);
		(void) snprintf(s, sizeof (s), "%d", pi->port_id);
		ddi_set_name_addr(child, s);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_UNINITCHILD:
		ddi_set_name_addr(child, NULL);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_ATTACH:
	case DDI_CTLOPS_DETACH:
		return (DDI_SUCCESS);

	default:
		return (ddi_ctlops(dip, rdip, op, arg, result));
	}
}

static int
t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg,
    dev_info_t **cdipp)
{
	int instance, i;
	struct adapter *sc;

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);

	if (op == BUS_CONFIG_ONE) {
		char *c;

		/*
		 * arg is something like "cxgb@0" where 0 is the port_id
		 * hanging off this nexus.
		 */

		c = arg;
		while (*(c + 1))
			c++;

		/* There should be exactly 1 digit after '@' */
		if (*(c - 1) != '@')
			return (NDI_FAILURE);

		i = *c - '0';

		if (add_child_node(sc, i) != 0)
			return (NDI_FAILURE);

		flags |= NDI_ONLINE_ATTACH;

	} else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) {
		/* Allocate and bind all child device nodes */
		for_each_port(sc, i)
			(void) add_child_node(sc, i);
		flags |= NDI_ONLINE_ATTACH;
	}

	return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0));
}

static int
t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
	int instance, i, rc;
	struct adapter *sc;

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);

	if (op == BUS_UNCONFIG_ONE || op == BUS_UNCONFIG_ALL ||
	    op == BUS_UNCONFIG_DRIVER)
		flags |= NDI_UNCONFIG;

	rc = ndi_busop_bus_unconfig(dip, flags, op, arg);
	if (rc != 0)
		return (rc);

	if (op == BUS_UNCONFIG_ONE) {
		char *c;

		c = arg;
		while (*(c + 1))
			c++;

		if (*(c - 1) != '@')
			return (NDI_SUCCESS);

		i = *c - '0';

		rc = remove_child_node(sc, i);

	} else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) {

		for_each_port(sc, i)
			(void) remove_child_node(sc, i);
	}

	return (rc);
}

/* ARGSUSED */
static int
t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	struct adapter *sc;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	sc = ddi_get_soft_state(t4_list, getminor(*devp));
	if (sc == NULL)
		return (ENXIO);

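	/*
	 * atomic_cas_uint() returns the previous value of sc->open:
	 * 0 (success) if we were the first to open the device, or EBUSY
	 * if it was already open.
	 */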
	return (atomic_cas_uint(&sc->open, 0, EBUSY));
}

/* ARGSUSED */
static int
t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	struct adapter *sc;

	sc = ddi_get_soft_state(t4_list, getminor(dev));
	if (sc == NULL)
		return (EINVAL);

	(void) atomic_swap_uint(&sc->open, 0);
	return (0);
}

/* ARGSUSED */
static int
t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp)
{
	int instance;
	struct adapter *sc;
	void *data = (void *)d;

	if (crgetuid(credp) != 0)
		return (EPERM);

	instance = getminor(dev);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (EINVAL);

	return (t4_ioctl(sc, cmd, data, mode));
}

static unsigned int
getpf(struct adapter *sc)
{
	int rc, *data;
	uint_t n, pf;

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
	    DDI_PROP_DONTPASS, "reg", &data, &n);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to lookup \"reg\" property: %d", rc);
		return (0xff);
	}

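	/*
	 * The first "reg" entry describes the device's config space
	 * address; PCI_REG_FUNC_G() extracts the function number from it.
	 */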
	pf = PCI_REG_FUNC_G(data[0]);
	ddi_prop_free(data);

	return (pf);
}

/*
 * Install a compatible firmware (if required), establish contact with it,
 * become the master, and reset the device.
 */
static int
prep_firmware(struct adapter *sc)
{
	int rc;
	enum dev_state state;

	/* Check firmware version and install a different one if necessary */
	rc = t4_check_fw_version(sc);
	if (rc != 0) {
		uint32_t v;

		/* This is what the driver has */
		v = V_FW_HDR_FW_VER_MAJOR(T4FW_VERSION_MAJOR) |
		    V_FW_HDR_FW_VER_MINOR(T4FW_VERSION_MINOR) |
		    V_FW_HDR_FW_VER_MICRO(T4FW_VERSION_MICRO) |
		    V_FW_HDR_FW_VER_BUILD(T4FW_VERSION_BUILD);

		/*
		 * Always upgrade, even for minor/micro/build mismatches.
		 * Downgrade only for a major version mismatch.
		 */
		if (rc < 0 || v > sc->params.fw_vers) {
			cxgb_printf(sc->dip, CE_NOTE,
			    "installing firmware %d.%d.%d.%d on card.",
			    T4FW_VERSION_MAJOR, T4FW_VERSION_MINOR,
			    T4FW_VERSION_MICRO, T4FW_VERSION_BUILD);
			rc = -t4_load_fw(sc, t4fw_data, t4fw_size);
			if (rc != 0) {
				cxgb_printf(sc->dip, CE_WARN,
				    "failed to install firmware: %d", rc);
				return (rc);
			} else {
				/* refresh */
				(void) t4_check_fw_version(sc);
			}
		}
	}

	/* Contact firmware, request master */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
	if (rc < 0) {
		rc = -rc;
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to connect to the firmware: %d.", rc);
		return (rc);
	}

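	/*
	 * On success t4_fw_hello() returns the mailbox of the PF that was
	 * elected master; if it is ours, this PF is the master.
	 */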
	if (rc == sc->mbox)
		sc->flags |= MASTER_PF;

	/* Reset device */
	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "firmware reset failed: %d.", rc);
		if (rc != ETIMEDOUT && rc != EIO)
			(void) t4_fw_bye(sc, sc->mbox);
		return (rc);
	}

	/* Partition adapter resources as specified in the config file. */
	if (sc->flags & MASTER_PF) {
		/* Handle default vs special T4 config file */

		rc = partition_resources(sc);
		if (rc != 0)
			goto err;	/* error message displayed already */
	}

	sc->flags |= FW_OK;
	return (0);
err:
	return (rc);
}

#define	FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define	FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

/*
 * Upload configuration file to card's memory.
 */
static int
upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma)
{
	int rc, i;
	uint32_t param, val, mtype, maddr, bar, off, win, remaining;
	const uint32_t *b;

	/* Figure out where the firmware wants us to upload it. */
	param = FW_PARAM_DEV(CF);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc != 0) {
		/* Firmwares without config file support will fail this way */
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query config file location: %d.\n", rc);
		return (rc);
	}
	*mt = mtype = G_FW_PARAMS_PARAM_Y(val);
	*ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;

	if (maddr & 3) {
		cxgb_printf(sc->dip, CE_WARN,
		    "cannot upload config file (type %u, addr %x).\n",
		    mtype, maddr);
		return (EFAULT);
	}

	/* Translate mtype/maddr to an address suitable for the PCIe window */
	val = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	val &= F_EDRAM0_ENABLE | F_EDRAM1_ENABLE | F_EXT_MEM_ENABLE;
	switch (mtype) {
	case FW_MEMTYPE_CF_EDC0:
		if (!(val & F_EDRAM0_ENABLE))
			goto err;
		bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr += G_EDRAM0_BASE(bar) << 20;
		break;

	case FW_MEMTYPE_CF_EDC1:
		if (!(val & F_EDRAM1_ENABLE))
			goto err;
		bar = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr += G_EDRAM1_BASE(bar) << 20;
		break;

	case FW_MEMTYPE_CF_EXTMEM:
		if (!(val & F_EXT_MEM_ENABLE))
			goto err;
		bar = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr += G_EXT_MEM_BASE(bar) << 20;
		break;

	default:
err:
		cxgb_printf(sc->dip, CE_WARN,
		    "cannot upload config file (type %u, enabled %u).\n",
		    mtype, val);
		return (EFAULT);
	}

	/*
	 * Position the PCIe window (we use memwin2) to the 16B aligned area
	 * just at/before the upload location.
	 */
	win = maddr & ~0xf;
	off = maddr - win;	/* offset from the start of the window. */
	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
	(void) t4_read_reg(sc,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));

	remaining = t4cfg_size;
	if (remaining > FLASH_CFG_MAX_SIZE ||
	    remaining > MEMWIN2_APERTURE - off) {
		cxgb_printf(sc->dip, CE_WARN, "cannot upload config file all at"
		    " once (size %u, max %u, room %u).\n",
		    remaining, FLASH_CFG_MAX_SIZE, MEMWIN2_APERTURE - off);
		return (EFBIG);
	}

	/*
	 * TODO: sheer laziness. We deliberately added 4 bytes of useless
	 * stuffing/comments at the end of the config file so it's ok to simply
	 * throw away the last remaining bytes when the config file is not an
	 * exact multiple of 4.
	 */
	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	b = (const uint32_t *)t4cfg_data;
	for (i = 0; remaining >= 4; i += 4, remaining -= 4)
		t4_write_reg(sc, MEMWIN2_BASE + off + i, *b++);

	return (rc);
}

/*
 * Partition chip resources for use between various PFs, VFs, etc. This is done
 * by uploading the firmware configuration file to the adapter and instructing
 * the firmware to process it.
 */
static int
partition_resources(struct adapter *sc)
{
	int rc;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, maddr, finicsum, cfcsum;

	rc = upload_config_file(sc, &mtype, &maddr);
	if (rc != 0) {
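		/* Upload failed; fall back to the config file in flash. */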
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(sc);
	}

	bzero(&caps, sizeof (caps));
	caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to pre-process config file: %d.\n", rc);
		return (rc);
	}

	finicsum = ntohl(caps.finicsum);
	cfcsum = ntohl(caps.cfcsum);
	if (finicsum != cfcsum) {
		cxgb_printf(sc->dip, CE_WARN,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

	/* TODO: Need to configure this correctly */
	caps.toecaps = htons(FW_CAPS_CONFIG_TOE);
	caps.iscsicaps = 0;
	caps.rdmacaps = 0;
	caps.fcoecaps = 0;
	/* TODO: Disable VNIC cap for now */
	caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);

	caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to process config file: %d.\n", rc);
		return (rc);
	}

	return (0);
}

/*
 * Retrieve parameters that are needed (or nice to have) prior to calling
 * t4_sge_init and t4_fw_initialize.
 */
static int
get_params__pre_init(struct adapter *sc)
{
	int rc;
	uint32_t param[2], val[2];
	struct fw_devlog_cmd cmd;
	struct devlog_params *dlog = &sc->params.devlog;

	param[0] = FW_PARAM_DEV(PORTVEC);
	param[1] = FW_PARAM_DEV(CCLK);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	sc->params.portvec = val[0];
	sc->params.nports = 0;
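	/* Count the bits set in the port vector (Kernighan's popcount). */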
	while (val[0]) {
		sc->params.nports++;
		val[0] &= val[0] - 1;
	}

	sc->params.vpd.cclk = val[1];

	/* Read device log parameters. */
	bzero(&cmd, sizeof (cmd));
	cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htonl(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = ntohl(cmd.memsize_devlog);
	}

	return (rc);
}

/*
 * Retrieve various parameters that are of interest to the driver. The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	/* LINTED: E_ASSIGN_NARROW_CONV */
	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	sc->tids.ftid_base = val[2];
	sc->tids.nftids = val[3] - val[2] + 1;

	/* get capabilities */
	bzero(&caps, sizeof (caps));
	caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

	if (caps.toecaps != 0) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			cxgb_printf(sc->dip, CE_WARN,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}

	/* These are finalized by FW initialization, load their values now */
	val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
	sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
	sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);

	return (rc);
}

/* TODO: verify */
static void
setup_memwin(struct adapter *sc)
{
	pci_regspec_t *data;
	int rc;
	uint_t n;
	uintptr_t bar0;

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
	    DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to lookup \"assigned-addresses\" property: %d", rc);
		return;
	}
	n /= sizeof (*data);

	bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low;
	ddi_prop_free(data);

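	/*
	 * Program the three PCIe memory windows relative to BAR0.  The
	 * V_WINDOW field encodes log2(aperture size) - 10.
	 */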
	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
	    (bar0 + MEMWIN0_BASE) | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
	    (bar0 + MEMWIN1_BASE) | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
	    (bar0 + MEMWIN2_BASE) | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}

/*
 * Reads the named property and fills up the "data" array (which has at least
 * "count" elements). We first try and lookup the property for our dev_t and
 * then retry with DDI_DEV_T_ANY if it's not found.
 *
 * Returns non-zero if the property was found and "data" has been updated.
 */
static int
prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count)
{
	dev_info_t *dip = sc->dip;
	dev_t dev = sc->dev;
	int rc, *d;
	uint_t i, n;

	rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS,
	    name, &d, &n);
	if (rc == DDI_PROP_SUCCESS)
		goto found;

	if (rc != DDI_PROP_NOT_FOUND) {
		cxgb_printf(dip, CE_WARN,
		    "failed to lookup property %s for minor %d: %d.",
		    name, getminor(dev), rc);
		return (0);
	}

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    name, &d, &n);
	if (rc == DDI_PROP_SUCCESS)
		goto found;

	if (rc != DDI_PROP_NOT_FOUND) {
		cxgb_printf(dip, CE_WARN,
		    "failed to lookup property %s: %d.", name, rc);
		return (0);
	}

	return (0);

found:
	if (n > count) {
		cxgb_printf(dip, CE_NOTE,
		    "property %s has too many elements (%d), ignoring extras",
		    name, n);
	}

	for (i = 0; i < n && i < count; i++)
		data[i] = d[i];
	ddi_prop_free(d);

	return (1);
}

static int
prop_lookup_int(struct adapter *sc, char *name, int defval)
{
	int rc;

	rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1);
	if (rc != -1)
		return (rc);

	return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS,
	    name, defval));
}

static int
init_driver_props(struct adapter *sc, struct driver_properties *p)
{
	dev_t dev = sc->dev;
	dev_info_t *dip = sc->dip;
	int i, *data;
	uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200};
	uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32};	/* 63 max */

	/*
	 * Holdoff timer
	 */
	data = &p->timer_val[0];
	for (i = 0; i < SGE_NTIMERS; i++)
		data[i] = tmr[i];
	(void) prop_lookup_int_array(sc, "holdoff-timer-values", data,
	    SGE_NTIMERS);
	for (i = 0; i < SGE_NTIMERS; i++) {
		int limit = 200U;
		if (data[i] > limit) {
			cxgb_printf(dip, CE_WARN,
			    "holdoff timer %d is too high (%d), lowered to %d.",
			    i, data[i], limit);
			data[i] = limit;
		}
	}
	(void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values",
	    data, SGE_NTIMERS);

	/*
	 * Holdoff packet counter
	 */
	data = &p->counter_val[0];
	for (i = 0; i < SGE_NCOUNTERS; i++)
		data[i] = cnt[i];
	(void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data,
	    SGE_NCOUNTERS);
	for (i = 0; i < SGE_NCOUNTERS; i++) {
		int limit = M_THRESHOLD_0;
		if (data[i] > limit) {
			cxgb_printf(dip, CE_WARN,
			    "holdoff pkt-counter %d is too high (%d), "
			    "lowered to %d.", i, data[i], limit);
			data[i] = limit;
		}
	}
	(void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values",
	    data, SGE_NCOUNTERS);

	/*
	 * Maximum # of tx and rx queues to use for each 10G and 1G port.
	 */
	p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8);
	(void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
	    p->max_ntxq_10g);

	p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8);
	(void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
	    p->max_nrxq_10g);

	p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2);
	(void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
	    p->max_ntxq_1g);

	p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2);
	(void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
	    p->max_nrxq_1g);

#ifndef TCP_OFFLOAD_DISABLE
	p->max_nofldtxq_10g = prop_lookup_int(sc, "max-nofldtxq-10G-port", 8);
	(void) ddi_prop_update_int(dev, dip, "max-nofldtxq-10G-port",
	    p->max_nofldtxq_10g);

	p->max_nofldrxq_10g = prop_lookup_int(sc, "max-nofldrxq-10G-port", 2);
	(void) ddi_prop_update_int(dev, dip, "max-nofldrxq-10G-port",
	    p->max_nofldrxq_10g);

	p->max_nofldtxq_1g = prop_lookup_int(sc, "max-nofldtxq-1G-port", 2);
	(void) ddi_prop_update_int(dev, dip, "max-nofldtxq-1G-port",
	    p->max_nofldtxq_1g);

	p->max_nofldrxq_1g = prop_lookup_int(sc, "max-nofldrxq-1G-port", 1);
	(void) ddi_prop_update_int(dev, dip, "max-nofldrxq-1G-port",
	    p->max_nofldrxq_1g);
#endif

	/*
	 * Holdoff parameters for 10G and 1G ports.
	 */
	p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0);
	(void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G",
	    p->tmr_idx_10g);

	p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2);
	(void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G",
	    p->pktc_idx_10g);

	p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0);
	(void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G",
	    p->tmr_idx_1g);

	p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2);
	(void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G",
	    p->pktc_idx_1g);

	/*
	 * Size (number of entries) of each tx and rx queue.
	 */
	i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE);
	p->qsize_txq = max(i, 128);
	if (p->qsize_txq != i) {
		cxgb_printf(dip, CE_WARN,
		    "using %d instead of %d as the tx queue size",
		    p->qsize_txq, i);
	}
	(void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq);

	i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE);
	p->qsize_rxq = max(i, 128);
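	/* Round the rx queue size down to a multiple of 8. */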
	while (p->qsize_rxq & 7)
		p->qsize_rxq--;
	if (p->qsize_rxq != i) {
		cxgb_printf(dip, CE_WARN,
		    "using %d instead of %d as the rx queue size",
		    p->qsize_rxq, i);
	}
	(void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq);

	/*
	 * Interrupt types allowed.
	 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively. See sys/ddi_intr.h
	 */
	p->intr_types = prop_lookup_int(sc, "interrupt-types",
	    DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
	(void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types);

	/*
	 * Forwarded interrupt queues. Create this property to force the driver
	 * to use forwarded interrupt queues.
	 */
	if (ddi_prop_exists(dev, dip, DDI_PROP_DONTPASS,
	    "interrupt-forwarding") != 0 ||
	    ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "interrupt-forwarding") != 0) {
		UNIMPLEMENTED();
		(void) ddi_prop_create(dev, dip, DDI_PROP_CANSLEEP,
		    "interrupt-forwarding", NULL, 0);
	}

	return (0);
}

static int
remove_extra_props(struct adapter *sc, int n10g, int n1g)
{
	if (n10g == 0) {
		(void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port");
		(void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port");
		(void) ddi_prop_remove(sc->dev, sc->dip,
		    "holdoff-timer-idx-10G");
		(void) ddi_prop_remove(sc->dev, sc->dip,
		    "holdoff-pktc-idx-10G");
	}

	if (n1g == 0) {
		(void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port");
		(void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port");
		(void) ddi_prop_remove(sc->dev, sc->dip,
		    "holdoff-timer-idx-1G");
		(void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G");
	}

	return (0);
}

static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	struct driver_properties *p = &sc->props;
	int rc, itype, itypes, navail, nc, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;

	bzero(iaq, sizeof (*iaq));
	nc = ncpus;	/* our snapshot of the number of CPUs */
	iaq->ntxq10g = min(nc, p->max_ntxq_10g);
	iaq->ntxq1g = min(nc, p->max_ntxq_1g);
	iaq->nrxq10g = nrxq10g = min(nc, p->max_nrxq_10g);
	iaq->nrxq1g = nrxq1g = min(nc, p->max_nrxq_1g);
#ifndef TCP_OFFLOAD_DISABLE
	iaq->nofldtxq10g = min(nc, p->max_nofldtxq_10g);
	iaq->nofldtxq1g = min(nc, p->max_nofldtxq_1g);
	iaq->nofldrxq10g = nofldrxq10g = min(nc, p->max_nofldrxq_10g);
	iaq->nofldrxq1g = nofldrxq1g = min(nc, p->max_nofldrxq_1g);
#endif

	rc = ddi_intr_get_supported_types(sc->dip, &itypes);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to determine supported interrupt types: %d", rc);
		return (rc);
	}

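	/* Try interrupt types in order of preference: MSI-X, MSI, fixed. */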
	for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) {
		ASSERT(itype == DDI_INTR_TYPE_MSIX ||
		    itype == DDI_INTR_TYPE_MSI ||
		    itype == DDI_INTR_TYPE_FIXED);

		if ((itype & itypes & p->intr_types) == 0)
			continue;	/* not supported or not allowed */

		navail = 0;
		rc = ddi_intr_get_navail(sc->dip, itype, &navail);
		if (rc != DDI_SUCCESS || navail == 0) {
			cxgb_printf(sc->dip, CE_WARN,
			    "failed to get # of interrupts for type %d: %d",
			    itype, rc);
			continue;	/* carry on */
		}

		iaq->intr_type = itype;

#if MAC_VERSION == 1
		iaq->ntxq10g = 1;
		iaq->ntxq1g = 1;
#endif

		if (navail == 0)
			continue;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one each for each rxq (NIC as well
		 * as offload).
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);

		if (iaq->nirq <= navail &&
		    (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
			iaq->intr_fwd = 0;
			goto allocate;
		}

		/*
		 * Second best option: an interrupt vector for errors, one for
		 * the firmware event queue, and one each for either NIC or
		 * offload rxq's.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
			iaq->intr_fwd = 1;
			goto allocate;
		}

		/*
		 * Next best option: an interrupt vector for errors, one for
		 * the firmware event queue, and at least one per port. At this
		 * point we know we'll have to downsize nrxq or nofldrxq to fit
		 * what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifndef TCP_OFFLOAD_DISABLE
				iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifndef TCP_OFFLOAD_DISABLE
				iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
			}

			if (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq)) {
				iaq->intr_fwd = 1;
				goto allocate;
			}
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
#ifndef TCP_OFFLOAD_DISABLE
		iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif
		iaq->intr_fwd = 1;

allocate:
		return (0);
	}

	cxgb_printf(sc->dip, CE_WARN,
	    "failed to find a usable interrupt type. supported=%d, allowed=%d",
	    itypes, p->intr_types);
	return (DDI_FAILURE);
}

static int
add_child_node(struct adapter *sc, int idx)
{
	int rc;
	struct port_info *pi;

	if (idx < 0 || idx >= sc->params.nports)
		return (EINVAL);

	pi = sc->port[idx];
	if (pi == NULL)
		return (ENODEV);	/* t4_port_init failed earlier */

	PORT_LOCK(pi);
	if (pi->dip != NULL) {
		rc = 0;	/* EEXIST really, but then bus_config fails */
		goto done;
	}

	rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip);
	if (rc != DDI_SUCCESS || pi->dip == NULL) {
		rc = ENOMEM;
		goto done;
	}

	(void) ddi_set_parent_data(pi->dip, pi);
	(void) ndi_devi_bind_driver(pi->dip, 0);
	rc = 0;
done:
	PORT_UNLOCK(pi);
	return (rc);
}

static int
remove_child_node(struct adapter *sc, int idx)
{
	int rc;
	struct port_info *pi;

	if (idx < 0 || idx >= sc->params.nports)
		return (EINVAL);

	pi = sc->port[idx];
	if (pi == NULL)
		return (ENODEV);

	PORT_LOCK(pi);
	if (pi->dip == NULL) {
		rc = ENODEV;
		goto done;
	}

	rc = ndi_devi_free(pi->dip);
	if (rc == 0)
		pi->dip = NULL;
done:
	PORT_UNLOCK(pi);
	return (rc);
}

1851 #define KS_UINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
1852 #define KS_CINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
1853 #define KS_U_SET(x, y) kstatp->x.value.ul = (y)
1854 #define KS_C_SET(x, ...) \
1855 (void) snprintf(kstatp->x.value.c, 16, __VA_ARGS__)
1856
1857 /*
1858 * t4nex:X:config
1859 */
1860 struct t4_kstats {
1861 kstat_named_t chip_ver;
1862 kstat_named_t fw_vers;
1863 kstat_named_t tp_vers;
1864 kstat_named_t driver_version;
1865 kstat_named_t serial_number;
1866 kstat_named_t ec_level;
1867 kstat_named_t id;
1868 kstat_named_t bus_type;
1869 kstat_named_t bus_width;
1870 kstat_named_t bus_speed;
1871 kstat_named_t core_clock;
1872 kstat_named_t port_cnt;
1873 kstat_named_t port_type;
1874 kstat_named_t pci_vendor_id;
1875 kstat_named_t pci_device_id;
1876 };
1877 static kstat_t *
setup_kstats(struct adapter * sc)1878 setup_kstats(struct adapter *sc)
1879 {
1880 kstat_t *ksp;
1881 struct t4_kstats *kstatp;
1882 int ndata;
1883 struct pci_params *p = &sc->params.pci;
1884 struct vpd_params *v = &sc->params.vpd;
1885 uint16_t pci_vendor, pci_device;
1886
1887 ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t);
1888
1889 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config",
1890 "nexus", KSTAT_TYPE_NAMED, ndata, 0);
1891 if (ksp == NULL) {
1892 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
1893 return (NULL);
1894 }
1895
1896 kstatp = (struct t4_kstats *)ksp->ks_data;
1897
1898 KS_UINIT(chip_ver);
1899 KS_CINIT(fw_vers);
1900 KS_CINIT(tp_vers);
1901 KS_CINIT(driver_version);
1902 KS_CINIT(serial_number);
1903 KS_CINIT(ec_level);
1904 KS_CINIT(id);
1905 KS_CINIT(bus_type);
1906 KS_CINIT(bus_width);
1907 KS_CINIT(bus_speed);
1908 KS_UINIT(core_clock);
1909 KS_UINIT(port_cnt);
1910 KS_CINIT(port_type);
1911 KS_CINIT(pci_vendor_id);
1912 KS_CINIT(pci_device_id);
1913
1914 KS_U_SET(chip_ver, sc->params.rev);
1915 KS_C_SET(fw_vers, "%d.%d.%d.%d",
1916 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1917 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1918 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1919 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
1920 KS_C_SET(tp_vers, "%d.%d.%d.%d",
1921 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
1922 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
1923 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
1924 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
1925 KS_C_SET(driver_version, DRV_VERSION);
1926 KS_C_SET(serial_number, "%s", v->sn);
1927 KS_C_SET(ec_level, "%s", v->ec);
1928 KS_C_SET(id, "%s", v->id);
1929 KS_C_SET(bus_type, "pci-express");
1930 KS_C_SET(bus_width, "x%d lanes", p->width);
1931 KS_C_SET(bus_speed, "%d", p->speed);
1932 KS_U_SET(core_clock, v->cclk);
1933 KS_U_SET(port_cnt, sc->params.nports);
1934
1935 t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor);
1936 KS_C_SET(pci_vendor_id, "0x%x", pci_vendor);
1937
1938 t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device);
1939 KS_C_SET(pci_device_id, "0x%x", pci_device);
1940
1941 #define PSTR(pi) (pi ? (is_10G_port(pi) ? "10G" : "1G") : "-")
1942 KS_C_SET(port_type, "%s/%s/%s/%s", PSTR(sc->port[0]), PSTR(sc->port[1]),
1943 PSTR(sc->port[2]), PSTR(sc->port[3]));
1944 #undef PSTR
1945
1946 /* Do NOT set ksp->ks_update. These kstats do not change. */
1947
1948 /* Install the kstat */
1949 ksp->ks_private = (void *)sc;
1950 kstat_install(ksp);
1951
1952 return (ksp);
1953 }
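/*
 * The kstat installed above is readable from userland with kstat(1M),
 * e.g. (instance 0 shown for illustration):
 *
 *	$ kstat -m t4nex -i 0 -n config
 */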
1954
1955 int
1956 adapter_full_init(struct adapter *sc)
1957 {
1958 int i, rc = 0;
1959
1960 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1961
1962 rc = t4_setup_adapter_queues(sc);
1963 if (rc != 0)
1964 goto done;
1965
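/*
 * sc->intr_cap is expected to hold the result of ddi_intr_get_cap(9F)
 * from interrupt setup: vectors that advertise DDI_INTR_FLAG_BLOCK are
 * enabled in a single call, the rest one at a time.
 */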
1966 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) {
1967 (void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count);
1968 } else {
1969 for (i = 0; i < sc->intr_count; i++)
1970 (void) ddi_intr_enable(sc->intr_handle[i]);
1971 }
1972 t4_intr_enable(sc);
1973 sc->flags |= FULL_INIT_DONE;
1974
1975 #ifndef TCP_OFFLOAD_DISABLE
1976 /* TODO: wrong place to enable TOE capability */
1977 if (is_offload(sc) != 0) {
1978 for_each_port(sc, i) {
1979 struct port_info *pi = sc->port[i];
1980 rc = toe_capability(pi, 1);
1981 if (rc != 0) {
1982 cxgb_printf(pi->dip, CE_WARN,
1983 "Failed to activate toe capability: %d",
1984 rc);
1985 rc = 0; /* not a fatal error */
1986 }
1987 }
1988 }
1989 #endif
1990
1991 done:
1992 if (rc != 0)
1993 (void) adapter_full_uninit(sc);
1994
1995 return (rc);
1996 }
1997
1998 int
1999 adapter_full_uninit(struct adapter *sc)
2000 {
2001 int i, rc = 0;
2002
2003 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2004
2005 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) {
2006 (void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count);
2007 } else {
2008 for (i = 0; i < sc->intr_count; i++)
2009 (void) ddi_intr_disable(sc->intr_handle[i]);
2010 }
2011
2012 rc = t4_teardown_adapter_queues(sc);
2013 if (rc != 0)
2014 return (rc);
2015
2016 sc->flags &= ~FULL_INIT_DONE;
2017
2018 return (0);
2019 }
2020
2021 int
2022 port_full_init(struct port_info *pi)
2023 {
2024 struct adapter *sc = pi->adapter;
2025 uint16_t *rss;
2026 struct sge_rxq *rxq;
2027 int rc, i;
2028
2029 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2030 ASSERT((pi->flags & PORT_INIT_DONE) == 0);
2031
2032 /*
2033 * Allocate tx/rx/fl queues for this port.
2034 */
2035 rc = t4_setup_port_queues(pi);
2036 if (rc != 0)
2037 goto done; /* error message displayed already */
2038
2039 /*
2040 * Setup RSS for this port.
2041 */
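/*
 * Illustrative example: with nrxq = 2 and rss_size = 64, the 64-entry
 * indirection table is programmed by cycling through the two queue
 * abs_ids, spreading flows across both rx queues.
 */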
2042 rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP);
2043 for_each_rxq(pi, i, rxq) {
2044 rss[i] = rxq->iq.abs_id;
2045 }
2046 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2047 pi->rss_size, rss, pi->nrxq);
2048 kmem_free(rss, pi->nrxq * sizeof (*rss));
2049 if (rc != 0) {
2050 cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc);
2051 goto done;
2052 }
2053
2054 pi->flags |= PORT_INIT_DONE;
2055 done:
2056 if (rc != 0)
2057 (void) port_full_uninit(pi);
2058
2059 return (rc);
2060 }
2061
2062 /*
2063 * Idempotent.
2064 */
2065 int
2066 port_full_uninit(struct port_info *pi)
2067 {
2068
2069 ASSERT(pi->flags & PORT_INIT_DONE);
2070
2071 (void) t4_teardown_port_queues(pi);
2072 pi->flags &= ~PORT_INIT_DONE;
2073
2074 return (0);
2075 }
2076
2077 void
2078 enable_port_queues(struct port_info *pi)
2079 {
2080 struct adapter *sc = pi->adapter;
2081 int i;
2082 struct sge_iq *iq;
2083 struct sge_rxq *rxq;
2084 #ifndef TCP_OFFLOAD_DISABLE
2085 struct sge_ofld_rxq *ofld_rxq;
2086 #endif
2087
2088 ASSERT(pi->flags & PORT_INIT_DONE);
2089
2090 /*
2091 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED
2092 * back in disable_port_queues will be processed now, after an unbounded
2093 * delay. This can't be good.
2094 */
2095
2096 #ifndef TCP_OFFLOAD_DISABLE
2097 for_each_ofld_rxq(pi, i, ofld_rxq) {
2098 iq = &ofld_rxq->iq;
2099 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2100 IQS_DISABLED)
2101 panic("%s: iq %p wasn't disabled", __func__,
2102 (void *)iq);
2103 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2104 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2105 }
2106 #endif
2107
2108 for_each_rxq(pi, i, rxq) {
2109 iq = &rxq->iq;
2110 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2111 IQS_DISABLED)
2112 panic("%s: iq %p wasn't disabled", __func__,
2113 (void *) iq);
2114 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2115 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2116 }
2117 }
2118
2119 void
2120 disable_port_queues(struct port_info *pi)
2121 {
2122 int i;
2123 struct adapter *sc = pi->adapter;
2124 struct sge_rxq *rxq;
2125 #ifndef TCP_OFFLOAD_DISABLE
2126 struct sge_ofld_rxq *ofld_rxq;
2127 #endif
2128
2129 ASSERT(pi->flags & PORT_INIT_DONE);
2130
2131 /*
2132 * TODO: need proper implementation for all tx queues (ctrl, eth, ofld).
2133 */
2134
2135 #ifndef TCP_OFFLOAD_DISABLE
2136 for_each_ofld_rxq(pi, i, ofld_rxq) {
2137 while (atomic_cas_uint(&ofld_rxq->iq.state, IQS_IDLE,
2138 IQS_DISABLED) != IQS_IDLE)
2139 msleep(1);
2140 }
2141 #endif
2142
2143 for_each_rxq(pi, i, rxq) {
2144 while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE,
2145 IQS_DISABLED) != IQS_IDLE)
2146 msleep(1);
2147 }
2148
2149 mutex_enter(&sc->sfl_lock);
2150 #ifndef TCP_OFFLOAD_DISABLE
2151 for_each_ofld_rxq(pi, i, ofld_rxq)
2152 ofld_rxq->fl.flags |= FL_DOOMED;
2153 #endif
2154 for_each_rxq(pi, i, rxq)
2155 rxq->fl.flags |= FL_DOOMED;
2156 mutex_exit(&sc->sfl_lock);
2157 /* TODO: need to wait for all fl's to be removed from sc->sfl */
2158 }
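/*
 * Sketch of the iq->state protocol assumed above: the rx interrupt
 * path moves a queue IQS_IDLE -> IQS_BUSY while servicing it and back
 * to IQS_IDLE when done. The cas loops in disable_port_queues can
 * therefore only succeed once a queue is quiescent, and
 * enable_port_queues is entitled to insist on seeing IQS_DISABLED.
 */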
2159
2160 void
2161 t4_fatal_err(struct adapter *sc)
2162 {
2163 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2164 t4_intr_disable(sc);
2165 cxgb_printf(sc->dip, CE_WARN,
2166 "encountered fatal error, adapter stopped.");
2167 }
2168
2169 int
2170 t4_os_find_pci_capability(struct adapter *sc, int cap)
2171 {
2172 uint16_t stat;
2173 uint8_t cap_ptr, cap_id;
2174
2175 t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat);
2176 if ((stat & PCI_STAT_CAP) == 0)
2177 return (0); /* does not implement capabilities */
2178
2179 t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr);
2180 while (cap_ptr) {
2181 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id);
2182 if (cap_id == cap)
2183 return (cap_ptr); /* found */
2184 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr);
2185 }
2186
2187 return (0); /* not found */
2188 }
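/*
 * Typical use (illustrative): locate the PCI Express capability in
 * config space; a return value of 0 means the capability is absent.
 *
 *	int off = t4_os_find_pci_capability(sc, PCI_CAP_ID_PCI_E);
 */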
2189
2190 void
2191 t4_os_portmod_changed(const struct adapter *sc, int idx)
2192 {
2193 static const char *mod_str[] = {
2194 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
2195 };
2196 const struct port_info *pi = sc->port[idx];
2197
2198 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
2199 cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged.");
2200 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
2201 cxgb_printf(pi->dip, CE_NOTE,
2202 "unknown transceiver inserted.");
2203 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
2204 cxgb_printf(pi->dip, CE_NOTE,
2205 "unsupported transceiver inserted.");
2206 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
2207 cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.",
2208 mod_str[pi->mod_type]);
2209 else
2210 cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.",
2211 pi->mod_type);
2212 }
2213
2214 /* ARGSUSED */
2215 static int
2216 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
2217 {
2218 if (m != NULL)
2219 freemsg(m);
2220 return (0);
2221 }
2222
2223 int
2224 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2225 {
2226 void *new;
2227
2228 if (opcode >= ARRAY_SIZE(sc->cpl_handler))
2229 return (EINVAL);
2230
2231 /* Swap the whole pointer; a uint_t swap would truncate on LP64. */
2232 new = (void *)(h ? h : cpl_not_handled);
2233 (void) atomic_swap_ptr(&sc->cpl_handler[opcode], new);
2234
2235 return (0);
2236 }
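/*
 * A ULD would typically install its handlers at attach time, e.g.
 * (illustrative; the opcode and handler names are hypothetical):
 *
 *	(void) t4_register_cpl_handler(sc, CPL_ACT_ESTABLISH,
 *	    do_act_establish);
 *
 * Passing h == NULL reinstalls cpl_not_handled for that opcode.
 */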
2237
2238 #ifndef TCP_OFFLOAD_DISABLE
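/*
 * Enable or disable TOE on a port. The TOM ULD is activated when the
 * first port turns offload on and deactivated when the last one turns
 * it off; sc->offload_map tracks which ports have it enabled.
 */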
2239 static int
2240 toe_capability(struct port_info *pi, int enable)
2241 {
2242 int rc;
2243 struct adapter *sc = pi->adapter;
2244
2245 if (!is_offload(sc))
2246 return (ENODEV);
2247
2248 if (enable != 0) {
2249 if (isset(&sc->offload_map, pi->port_id) != 0)
2250 return (0);
2251
2252 if (sc->offload_map == 0) {
2253 rc = activate_uld(sc, ULD_TOM, &sc->tom);
2254 if (rc != 0)
2255 return (rc);
2256 }
2257
2258 setbit(&sc->offload_map, pi->port_id);
2259 } else {
2260 if (!isset(&sc->offload_map, pi->port_id))
2261 return (0);
2262
2263 clrbit(&sc->offload_map, pi->port_id);
2264
2265 if (sc->offload_map == 0) {
2266 rc = deactivate_uld(&sc->tom);
2267 if (rc != 0) {
2268 setbit(&sc->offload_map, pi->port_id);
2269 return (rc);
2270 }
2271 }
2272 }
2273
2274 return (0);
2275 }
2276
2277 /*
2278 * Add an upper layer driver to the global list.
2279 */
2280 int
2281 t4_register_uld(struct uld_info *ui)
2282 {
2283 int rc = 0;
2284 struct uld_info *u;
2285
2286 mutex_enter(&t4_uld_list_lock);
2287 SLIST_FOREACH(u, &t4_uld_list, link) {
2288 if (u->uld_id == ui->uld_id) {
2289 rc = EEXIST;
2290 goto done;
2291 }
2292 }
2293
2294 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
2295 ui->refcount = 0;
2296 done:
2297 mutex_exit(&t4_uld_list_lock);
2298 return (rc);
2299 }
2300
2301 int
2302 t4_unregister_uld(struct uld_info *ui)
2303 {
2304 int rc = EINVAL;
2305 struct uld_info *u;
2306
2307 mutex_enter(&t4_uld_list_lock);
2308
2309 SLIST_FOREACH(u, &t4_uld_list, link) {
2310 if (u == ui) {
2311 if (ui->refcount > 0) {
2312 rc = EBUSY;
2313 goto done;
2314 }
2315
2316 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
2317 rc = 0;
2318 goto done;
2319 }
2320 }
2321 done:
2322 mutex_exit(&t4_uld_list_lock);
2323 return (rc);
2324 }
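/*
 * A minimal registration sketch (illustrative; the callback names are
 * hypothetical, the uld_info fields shown are the ones this file uses):
 *
 *	static struct uld_info tom_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.attach = tom_attach,
 *		.detach = tom_detach,
 *	};
 *
 * The ULD calls t4_register_uld(&tom_uld_info) from its _init(9E) and
 * t4_unregister_uld(&tom_uld_info) from its _fini(9E).
 */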
2325
2326 static int
2327 activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
2328 {
2329 int rc = EAGAIN;
2330 struct uld_info *ui;
2331
2332 mutex_enter(&t4_uld_list_lock);
2333
2334 SLIST_FOREACH(ui, &t4_uld_list, link) {
2335 if (ui->uld_id == id) {
2336 rc = ui->attach(sc, &usc->softc);
2337 if (rc == 0) {
2338 ASSERT(usc->softc != NULL);
2339 ui->refcount++;
2340 usc->uld = ui;
2341 }
2342 goto done;
2343 }
2344 }
2345 done:
2346 mutex_exit(&t4_uld_list_lock);
2347
2348 return (rc);
2349 }
2350
2351 static int
2352 deactivate_uld(struct uld_softc *usc)
2353 {
2354 int rc;
2355
2356 mutex_enter(&t4_uld_list_lock);
2357
2358 if (usc->uld == NULL || usc->softc == NULL) {
2359 rc = EINVAL;
2360 goto done;
2361 }
2362
2363 rc = usc->uld->detach(usc->softc);
2364 if (rc == 0) {
2365 ASSERT(usc->uld->refcount > 0);
2366 usc->uld->refcount--;
2367 usc->uld = NULL;
2368 usc->softc = NULL;
2369 }
2370 done:
2371 mutex_exit(&t4_uld_list_lock);
2372
2373 return (rc);
2374 }
2375
2376 void
2377 t4_iterate(void (*func)(int, void *), void *arg)
2378 {
2379 struct adapter *sc;
2380
2381 mutex_enter(&t4_adapter_list_lock);
2382 SLIST_FOREACH(sc, &t4_adapter_list, link) {
2383 /*
2384 * func should not make any assumptions about what state sc is
2385 * in - the only guarantee is that sc->sc_lock is a valid lock.
2386 */
2387 func(ddi_get_instance(sc->dip), arg);
2388 }
2389 mutex_exit(&t4_adapter_list_lock);
2390 }
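/*
 * Example (illustrative): count the adapters currently on the list.
 *
 *	static void
 *	count_one(int instance, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_one, &n);
 */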
2391
2392 #endif
2393