xref: /illumos-gate/usr/src/uts/common/io/cxgbe/t4nex/t4_nexus.c (revision 20a7641f9918de8574b8b3b47dbe35c4bfc78df1)
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4 support code.
 *
 * Copyright (C) 2010-2013 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

/*
 * Copyright 2023 Oxide Computer Company
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/atomic.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/queue.h>
#include <sys/containerof.h>
#include <sys/sensors.h>
#include <sys/firmload.h>

#include "version.h"
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "t4_l2t.h"

static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp);
static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp);
static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp,
    int *rp);
struct cb_ops t4_cb_ops = {
	.cb_open =		t4_cb_open,
	.cb_close =		t4_cb_close,
	.cb_strategy =		nodev,
	.cb_print =		nodev,
	.cb_dump =		nodev,
	.cb_read =		nodev,
	.cb_write =		nodev,
	.cb_ioctl =		t4_cb_ioctl,
	.cb_devmap =		nodev,
	.cb_mmap =		nodev,
	.cb_segmap =		nodev,
	.cb_chpoll =		nochpoll,
	.cb_prop_op =		ddi_prop_op,
	.cb_flag =		D_MP,
	.cb_rev =		CB_REV,
	.cb_aread =		nodev,
	.cb_awrite =		nodev
};

static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
    void *arg, void *result);
static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **cdipp);
static int t4_bus_unconfig(dev_info_t *dip, uint_t flags,
    ddi_bus_config_op_t op, void *arg);
struct bus_ops t4_bus_ops = {
	.busops_rev =		BUSO_REV,
	.bus_ctl =		t4_bus_ctl,
	.bus_prop_op =		ddi_bus_prop_op,
	.bus_config =		t4_bus_config,
	.bus_unconfig =		t4_bus_unconfig,
};

static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **rp);
static int t4_devo_probe(dev_info_t *dip);
static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int t4_devo_quiesce(dev_info_t *dip);
struct dev_ops t4_dev_ops = {
	.devo_rev =		DEVO_REV,
	.devo_getinfo =		t4_devo_getinfo,
	.devo_identify =	nulldev,
	.devo_probe =		t4_devo_probe,
	.devo_attach =		t4_devo_attach,
	.devo_detach =		t4_devo_detach,
	.devo_reset =		nodev,
	.devo_cb_ops =		&t4_cb_ops,
	.devo_bus_ops =		&t4_bus_ops,
	.devo_quiesce =		&t4_devo_quiesce,
};

static struct modldrv modldrv = {
	.drv_modops =		&mod_driverops,
	.drv_linkinfo =		"Chelsio T4 nexus " DRV_VERSION,
	.drv_dev_ops =		&t4_dev_ops
};

static struct modlinkage modlinkage = {
	.ml_rev =		MODREV_1,
	.ml_linkage =		{&modldrv, NULL},
};

void *t4_list;

struct intrs_and_queues {
	int intr_type;		/* DDI_INTR_TYPE_* */
	int nirq;		/* Number of vectors */
	int intr_fwd;		/* Interrupts forwarded */
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD_ENABLE
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss,
    mblk_t *m);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h);
static unsigned int getpf(struct adapter *sc);
static int prep_firmware(struct adapter *sc);
static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma);
static int partition_resources(struct adapter *sc);
static int adap__pre_init_tweaks(struct adapter *sc);
static int get_params__pre_init(struct adapter *sc);
static int get_params__post_init(struct adapter *sc);
static int set_params__post_init(struct adapter *);
static void setup_memwin(struct adapter *sc);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
uint32_t position_memwin(struct adapter *, int, uint32_t);
static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
    uint_t count);
static int init_driver_props(struct adapter *sc, struct driver_properties *p);
static int remove_extra_props(struct adapter *sc, int n10g, int n1g);
static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq);
static int add_child_node(struct adapter *sc, int idx);
static int remove_child_node(struct adapter *sc, int idx);
static kstat_t *setup_kstats(struct adapter *sc);
static kstat_t *setup_wc_kstats(struct adapter *);
static int update_wc_kstats(kstat_t *, int);
#ifdef TCP_OFFLOAD_ENABLE
static int toe_capability(struct port_info *pi, int enable);
static int activate_uld(struct adapter *sc, int id, struct uld_softc *usc);
static int deactivate_uld(struct uld_softc *usc);
#endif
static kmutex_t t4_adapter_list_lock;
static SLIST_HEAD(, adapter) t4_adapter_list;
#ifdef TCP_OFFLOAD_ENABLE
static kmutex_t t4_uld_list_lock;
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif

static int t4_temperature_read(void *, sensor_ioctl_scalar_t *);
static int t4_voltage_read(void *, sensor_ioctl_scalar_t *);
static const ksensor_ops_t t4_temp_ops = {
	.kso_kind = ksensor_kind_temperature,
	.kso_scalar = t4_temperature_read
};

static const ksensor_ops_t t4_volt_ops = {
	.kso_kind = ksensor_kind_voltage,
	.kso_scalar = t4_voltage_read
};

static int t4_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
static int t4_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
    ddi_ufm_image_t *);
static int t4_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
    ddi_ufm_slot_t *);
static ddi_ufm_ops_t t4_ufm_ops = {
	.ddi_ufm_op_fill_image = t4_ufm_fill_image,
	.ddi_ufm_op_fill_slot = t4_ufm_fill_slot,
	.ddi_ufm_op_getcaps = t4_ufm_getcaps
};

int
_init(void)
{
	int rc;

	rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0);
	if (rc != 0)
		return (rc);

	/*
	 * Initialize the adapter (and ULD) list state before the module is
	 * installed, so that an attach cannot race with list setup.
	 */
	mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL);
	SLIST_INIT(&t4_adapter_list);

#ifdef TCP_OFFLOAD_ENABLE
	mutex_init(&t4_uld_list_lock, NULL, MUTEX_DRIVER, NULL);
	SLIST_INIT(&t4_uld_list);
#endif

	rc = mod_install(&modlinkage);
	if (rc != 0) {
#ifdef TCP_OFFLOAD_ENABLE
		mutex_destroy(&t4_uld_list_lock);
#endif
		mutex_destroy(&t4_adapter_list_lock);
		ddi_soft_state_fini(&t4_list);
	}

	return (rc);
}

int
_fini(void)
{
	int rc;

	rc = mod_remove(&modlinkage);
	if (rc != 0)
		return (rc);

#ifdef TCP_OFFLOAD_ENABLE
	mutex_destroy(&t4_uld_list_lock);
#endif
	mutex_destroy(&t4_adapter_list_lock);
	ddi_soft_state_fini(&t4_list);
	return (0);
}

int
_info(struct modinfo *mi)
{
	return (mod_info(&modlinkage, mi));
}

/* ARGSUSED */
static int
t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
{
	struct adapter *sc;
	minor_t minor;

	minor = getminor((dev_t)arg);	/* same as instance# in our case */

	if (cmd == DDI_INFO_DEVT2DEVINFO) {
		sc = ddi_get_soft_state(t4_list, minor);
		if (sc == NULL)
			return (DDI_FAILURE);

		ASSERT(sc->dev == (dev_t)arg);
		*rp = (void *)sc->dip;
	} else if (cmd == DDI_INFO_DEVT2INSTANCE) {
		*rp = (void *)(uintptr_t)minor;
	} else {
		ASSERT(0);
	}

	return (DDI_SUCCESS);
}

static int
t4_devo_probe(dev_info_t *dip)
{
	int rc, id, *reg;
	uint_t n, pf;

	id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", 0xffff);
	if (id == 0xffff)
		return (DDI_PROBE_DONTCARE);

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", &reg, &n);
	if (rc != DDI_SUCCESS)
		return (DDI_PROBE_DONTCARE);

	pf = PCI_REG_FUNC_G(reg[0]);
	ddi_prop_free(reg);

	/* Prevent driver attachment on any PF except 0 on the FPGA */
	if (id == 0xa000 && pf != 0)
		return (DDI_PROBE_FAILURE);

	return (DDI_PROBE_DONTCARE);
}

static int
t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct adapter *sc = NULL;
	struct sge *s;
	int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q;
	int irq = 0, nxg = 0, n1g = 0;
#ifdef TCP_OFFLOAD_ENABLE
	int ofld_rqidx, ofld_tqidx;
#endif
	char name[16];
	struct driver_properties *prp;
	struct intrs_and_queues iaq;
	ddi_device_acc_attr_t da = {
		.devacc_attr_version = DDI_DEVICE_ATTR_V0,
		.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
		.devacc_attr_dataorder = DDI_STRICTORDER_ACC
	};
	ddi_device_acc_attr_t da1 = {
		.devacc_attr_version = DDI_DEVICE_ATTR_V0,
		.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
		.devacc_attr_dataorder = DDI_STRICTORDER_ACC
	};

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Allocate space for soft state.
	 */
	instance = ddi_get_instance(dip);
	rc = ddi_soft_state_zalloc(t4_list, instance);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to allocate soft state: %d", rc);
		return (DDI_FAILURE);
	}

	sc = ddi_get_soft_state(t4_list, instance);
	sc->dip = dip;
	sc->dev = makedevice(ddi_driver_major(dip), instance);
	mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sc->cv, NULL, CV_DRIVER, NULL);
	mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);
	TAILQ_INIT(&sc->sfl);
	mutex_init(&sc->mbox_lock, NULL, MUTEX_DRIVER, NULL);
	STAILQ_INIT(&sc->mbox_list);

	mutex_enter(&t4_adapter_list_lock);
	SLIST_INSERT_HEAD(&t4_adapter_list, sc, link);
	mutex_exit(&t4_adapter_list_lock);

	sc->pf = getpf(sc);
	if (sc->pf > 8) {
		rc = EINVAL;
		cxgb_printf(dip, CE_WARN,
		    "failed to determine PCI PF# of device");
		goto done;
	}
	sc->mbox = sc->pf;

	/* Initialize the driver properties */
	prp = &sc->props;
	(void) init_driver_props(sc, prp);

	/*
	 * Enable access to the PCI config space.
	 */
	rc = pci_config_setup(dip, &sc->pci_regh);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to enable PCI config space access: %d", rc);
		goto done;
	}

	/* TODO: Set max read request to 4K */

	/*
	 * Enable MMIO access.
	 */
	rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to map device registers: %d", rc);
		goto done;
	}

	(void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map));

	/*
	 * Initialize cpl handler.
	 */
	for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) {
		sc->cpl_handler[i] = cpl_not_handled;
	}

	for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) {
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	}

	for (i = 0; i < NCHAN; i++) {
		(void) snprintf(name, sizeof (name), "%s-%d", "reclaim", i);
		sc->tq[i] = ddi_taskq_create(sc->dip, name, 1,
		    TASKQ_DEFAULTPRI, 0);

		if (sc->tq[i] == NULL) {
			cxgb_printf(dip, CE_WARN,
			    "failed to create task queues");
			rc = DDI_FAILURE;
			goto done;
		}
	}

	/*
	 * Prepare the adapter for operation.
	 */
	rc = -t4_prep_adapter(sc, false);
	if (rc != 0) {
		cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc);
		goto done;
	}

	/*
	 * Enable BAR1 access.
	 */
	sc->doorbells |= DOORBELL_KDB;
	rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to map BAR1 device registers: %d", rc);
		goto done;
	} else {
		if (is_t5(sc->params.chip)) {
			sc->doorbells |= DOORBELL_UDB;
			if (prp->wc) {
				/*
				 * Enable write combining on BAR2.  This is
				 * the userspace doorbell BAR and is split
				 * into 128B (UDBS_SEG_SIZE) doorbell regions,
				 * each associated with an egress queue.  The
				 * first 64B has the doorbell and the second
				 * 64B can be used to submit a tx work request
				 * with an implicit doorbell.
				 */
				sc->doorbells &= ~DOORBELL_UDB;
				sc->doorbells |= (DOORBELL_WCWR |
				    DOORBELL_UDBWC);
				t4_write_reg(sc, A_SGE_STAT_CFG,
				    V_STATSOURCE_T5(7) | V_STATMODE(0));
			}
		}
	}

	/*
	 * Do this really early.  Note that minor number = instance.
	 */
	(void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance);
	rc = ddi_create_minor_node(dip, name, S_IFCHR, instance,
	    DDI_NT_NEXUS, 0);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to create device node: %d", rc);
		rc = DDI_SUCCESS; /* carry on */
	}

	/* Do this early. Memory window is required for loading config file. */
	setup_memwin(sc);

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = adap__pre_init_tweaks(sc);
	if (rc != 0)
		goto done;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	t4_sge_init(sc);

	if (sc->flags & MASTER_PF) {
		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			cxgb_printf(sc->dip, CE_WARN,
			    "early init failed: %d.\n", rc);
			goto done;
		}
	}

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * TODO: This is the place to call t4_set_filter_mode()
	 */

	/* tweak some settings */
	t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
	    V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
	    V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

	/*
	 * Work-around for bug 2619
	 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the
	 * VLAN tag extraction is disabled.
	 */
	t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN);

	/* Store filter mode */
	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
	    A_TP_VLAN_PRI_MAP);

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = kmem_zalloc(sizeof (*pi), KM_SLEEP);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->port_id = i;
	}

	/* Allocate the vi and initialize parameters like mac addr */
	rc = -t4_port_init(sc, sc->mbox, sc->pf, 0);
	if (rc != 0) {
		cxgb_printf(dip, CE_WARN,
		    "unable to initialize port: %d", rc);
		goto done;
	}

	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL);
		pi->mtu = ETHERMTU;

		if (is_10XG_port(pi)) {
			nxg++;
			pi->tmr_idx = prp->tmr_idx_10g;
			pi->pktc_idx = prp->pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = prp->tmr_idx_1g;
			pi->pktc_idx = prp->pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		t4_mc_init(pi);

		setbit(&sc->registered_device_map, i);
	}

	(void) remove_extra_props(sc, nxg, n1g);

	if (sc->registered_device_map == 0) {
		cxgb_printf(dip, CE_WARN, "no usable ports");
		rc = DDI_FAILURE;
		goto done;
	}

	rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) {
		sc->props.multi_rings = 0;
		cxgb_printf(dip, CE_WARN,
		    "Multiple rings disabled as interrupt type is not MSI-X");
	}

	if (sc->props.multi_rings && iaq.intr_fwd) {
		sc->props.multi_rings = 0;
		cxgb_printf(dip, CE_WARN,
		    "Multiple rings disabled as interrupts are forwarded");
	}

	if (!sc->props.multi_rings) {
		iaq.ntxq10g = 1;
		iaq.ntxq1g = 1;
	}
	s = &sc->sge;
	s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the fl in an rxq is an eq */
#ifdef TCP_OFFLOAD_ENABLE
	/* control queues, 1 per port + 1 mgmtq */
	s->neq += sc->params.nports + 1;
#endif
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
	if (iaq.intr_fwd != 0)
		sc->flags |= INTR_FWD;
#ifdef TCP_OFFLOAD_ENABLE
	if (is_offload(sc) != 0) {
		s->nofldrxq = nxg * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = nxg * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = kmem_zalloc(s->nofldrxq *
		    sizeof (struct sge_ofld_rxq), KM_SLEEP);
		s->ofld_txq = kmem_zalloc(s->nofldtxq *
		    sizeof (struct sge_wrq), KM_SLEEP);
		s->ctrlq = kmem_zalloc(sc->params.nports *
		    sizeof (struct sge_wrq), KM_SLEEP);
	}
#endif
	s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP);
	s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP);
	s->iqmap = kmem_zalloc(s->iqmap_sz * sizeof (struct sge_iq *),
	    KM_SLEEP);
	s->eqmap = kmem_zalloc(s->eqmap_sz * sizeof (struct sge_eq *),
	    KM_SLEEP);

	sc->intr_handle = kmem_zalloc(sc->intr_count *
	    sizeof (ddi_intr_handle_t), KM_SLEEP);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD_ENABLE
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		t4_mc_cb_init(pi);
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->first_rxq = rqidx;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->nrxq = (is_10XG_port(pi)) ? iaq.nrxq10g : iaq.nrxq1g;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->first_txq = tqidx;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->ntxq = (is_10XG_port(pi)) ? iaq.ntxq10g : iaq.ntxq1g;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD_ENABLE
		if (is_offload(sc) != 0) {
			/* LINTED: E_ASSIGN_NARROW_CONV */
			pi->first_ofld_rxq = ofld_rqidx;
			pi->nofldrxq = max(1, pi->nrxq / 4);

			/* LINTED: E_ASSIGN_NARROW_CONV */
			pi->first_ofld_txq = ofld_tqidx;
			pi->nofldtxq = max(1, pi->ntxq / 2);

			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif

		/*
		 * Enable hw checksumming and LSO for all ports by default.
		 * They can be disabled using ndd (hw_csum and hw_lso).
		 */
		pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO);
	}

#ifdef TCP_OFFLOAD_ENABLE
	sc->l2t = t4_init_l2t(sc);
#endif

	/*
	 * Setup Interrupts.
	 */

	i = 0;
	rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0,
	    sc->intr_count, &i, DDI_INTR_ALLOC_STRICT);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to allocate %d interrupt(s) of type %d: %d, %d",
		    sc->intr_count, sc->intr_type, rc, i);
		goto done;
	}
	ASSERT(sc->intr_count == i); /* allocation was STRICT */
	(void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap);
	(void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri);
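	/*
	 * With a single vector every queue must share it, so all interrupts
	 * are taken via the forwarded-interrupt path.  With multiple vectors
	 * the layout is: vector 0 for errors, vector 1 for the firmware
	 * event queue, and the remaining vectors for the rx queues.
	 */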
	if (sc->intr_count == 1) {
		ASSERT(sc->flags & INTR_FWD);
		(void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc,
		    &s->fwq);
	} else {
		/* Multiple interrupts.  The first one is always error intr */
		(void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc,
		    NULL);
		irq++;

		/* The second one is always the firmware event queue */
		(void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc,
		    &s->fwq);
		irq++;
		/*
		 * Note that if INTR_FWD is set then either the NIC rx
		 * queues or (exclusive or) the TOE rx queues will be taking
		 * direct interrupts.
		 *
		 * There is no need to check for is_offload(sc) as nofldrxq
		 * will be 0 if offload is disabled.
		 */
		for_each_port(sc, i) {
			struct port_info *pi = sc->port[i];
			struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD_ENABLE
			struct sge_ofld_rxq *ofld_rxq;

			/*
			 * Skip over the NIC queues if they aren't taking direct
			 * interrupts.
			 */
			if ((sc->flags & INTR_FWD) &&
			    pi->nofldrxq > pi->nrxq)
				goto ofld_queues;
#endif
			rxq = &s->rxq[pi->first_rxq];
			for (q = 0; q < pi->nrxq; q++, rxq++) {
				(void) ddi_intr_add_handler(
				    sc->intr_handle[irq], t4_intr, sc,
				    &rxq->iq);
				irq++;
			}

#ifdef TCP_OFFLOAD_ENABLE
			/*
			 * Skip over the offload queues if they aren't taking
			 * direct interrupts.
			 */
			if ((sc->flags & INTR_FWD))
				continue;
ofld_queues:
			ofld_rxq = &s->ofld_rxq[pi->first_ofld_rxq];
			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
				(void) ddi_intr_add_handler(
				    sc->intr_handle[irq], t4_intr, sc,
				    &ofld_rxq->iq);
				irq++;
			}
#endif
		}
	}
	sc->flags |= INTR_ALLOCATED;

	if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_TEMPERATURE,
	    &t4_temp_ops, sc, "temp", &sc->temp_sensor)) != 0) {
		cxgb_printf(dip, CE_WARN, "failed to create temperature "
		    "sensor: %d", rc);
		rc = DDI_FAILURE;
		goto done;
	}

	if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_VOLTAGE,
	    &t4_volt_ops, sc, "vdd", &sc->volt_sensor)) != 0) {
		cxgb_printf(dip, CE_WARN, "failed to create voltage "
		    "sensor: %d", rc);
		rc = DDI_FAILURE;
		goto done;
	}

	if ((rc = ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &t4_ufm_ops,
	    &sc->ufm_hdl, sc)) != 0) {
		cxgb_printf(dip, CE_WARN, "failed to enable UFM ops: %d", rc);
		rc = DDI_FAILURE;
		goto done;
	}
	ddi_ufm_update(sc->ufm_hdl);
	ddi_report_dev(dip);

	/*
	 * Hardware/Firmware/etc. Version/Revision IDs.
	 */
	t4_dump_version_info(sc);

	cxgb_printf(dip, CE_NOTE, "(%d rxq, %d txq total) %d %s.",
	    rqidx, tqidx, sc->intr_count,
	    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
	    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
	    "fixed interrupt");

	sc->ksp = setup_kstats(sc);
	sc->ksp_stat = setup_wc_kstats(sc);
	sc->params.drv_memwin = MEMWIN_NIC;

done:
	if (rc != DDI_SUCCESS) {
		(void) t4_devo_detach(dip, DDI_DETACH);

		/* rc may have errno style errors or DDI errors */
		rc = DDI_FAILURE;
	}

	return (rc);
}

static int
t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance, i;
	struct adapter *sc;
	struct port_info *pi;
	struct sge *s;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (DDI_SUCCESS);

	if (sc->flags & FULL_INIT_DONE) {
		t4_intr_disable(sc);
		for_each_port(sc, i) {
			pi = sc->port[i];
			if (pi != NULL && (pi->flags & PORT_INIT_DONE))
				(void) port_full_uninit(pi);
		}
		(void) adapter_full_uninit(sc);
	}

	/* Safe to call no matter what */
	if (sc->ufm_hdl != NULL) {
		ddi_ufm_fini(sc->ufm_hdl);
		sc->ufm_hdl = NULL;
	}
	(void) ksensor_remove(dip, KSENSOR_ALL_IDS);
	ddi_prop_remove_all(dip);
	ddi_remove_minor_node(dip, NULL);

	for (i = 0; i < NCHAN; i++) {
		if (sc->tq[i] != NULL) {
			ddi_taskq_wait(sc->tq[i]);
			ddi_taskq_destroy(sc->tq[i]);
		}
	}

	if (sc->ksp != NULL)
		kstat_delete(sc->ksp);
	if (sc->ksp_stat != NULL)
		kstat_delete(sc->ksp_stat);

	s = &sc->sge;
	if (s->rxq != NULL)
		kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq));
#ifdef TCP_OFFLOAD_ENABLE
	if (s->ofld_txq != NULL)
		kmem_free(s->ofld_txq, s->nofldtxq * sizeof (struct sge_wrq));
	if (s->ofld_rxq != NULL)
		kmem_free(s->ofld_rxq,
		    s->nofldrxq * sizeof (struct sge_ofld_rxq));
	if (s->ctrlq != NULL)
		kmem_free(s->ctrlq,
		    sc->params.nports * sizeof (struct sge_wrq));
#endif
	if (s->txq != NULL)
		kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq));
	if (s->iqmap != NULL)
		kmem_free(s->iqmap, s->iqmap_sz * sizeof (struct sge_iq *));
	if (s->eqmap != NULL)
		kmem_free(s->eqmap, s->eqmap_sz * sizeof (struct sge_eq *));

	if (s->rxbuf_cache != NULL)
		rxbuf_cache_destroy(s->rxbuf_cache);

	if (sc->flags & INTR_ALLOCATED) {
		for (i = 0; i < sc->intr_count; i++) {
			(void) ddi_intr_remove_handler(sc->intr_handle[i]);
			(void) ddi_intr_free(sc->intr_handle[i]);
		}
		sc->flags &= ~INTR_ALLOCATED;
	}

	if (sc->intr_handle != NULL) {
		kmem_free(sc->intr_handle,
		    sc->intr_count * sizeof (*sc->intr_handle));
	}

	for_each_port(sc, i) {
		pi = sc->port[i];
		if (pi != NULL) {
			mutex_destroy(&pi->lock);
			kmem_free(pi, sizeof (*pi));
			clrbit(&sc->registered_device_map, i);
		}
	}

	if (sc->flags & FW_OK)
		(void) t4_fw_bye(sc, sc->mbox);

	if (sc->reg1h != NULL)
		ddi_regs_map_free(&sc->reg1h);

	if (sc->regh != NULL)
		ddi_regs_map_free(&sc->regh);

	if (sc->pci_regh != NULL)
		pci_config_teardown(&sc->pci_regh);

	mutex_enter(&t4_adapter_list_lock);
	SLIST_REMOVE(&t4_adapter_list, sc, adapter, link);
	mutex_exit(&t4_adapter_list_lock);

	mutex_destroy(&sc->mbox_lock);
	mutex_destroy(&sc->lock);
	cv_destroy(&sc->cv);
	mutex_destroy(&sc->sfl_lock);

#ifdef DEBUG
	bzero(sc, sizeof (*sc));
#endif
	ddi_soft_state_free(t4_list, instance);

	return (DDI_SUCCESS);
}

static int
t4_devo_quiesce(dev_info_t *dip)
{
	int instance;
	struct adapter *sc;

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (DDI_SUCCESS);

	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST);

	return (DDI_SUCCESS);
}

static int
t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
    void *result)
{
	char s[4];
	struct port_info *pi;
	dev_info_t *child = (dev_info_t *)arg;

	switch (op) {
	case DDI_CTLOPS_REPORTDEV:
		pi = ddi_get_parent_data(rdip);
		pi->instance = ddi_get_instance(dip);
		pi->child_inst = ddi_get_instance(rdip);
		cmn_err(CE_CONT, "?%s%d is port %s on %s%d\n",
		    ddi_node_name(rdip), ddi_get_instance(rdip),
		    ddi_get_name_addr(rdip), ddi_driver_name(dip),
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		pi = ddi_get_parent_data(child);
		if (pi == NULL)
			return (DDI_NOT_WELL_FORMED);
		(void) snprintf(s, sizeof (s), "%d", pi->port_id);
		ddi_set_name_addr(child, s);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_UNINITCHILD:
		ddi_set_name_addr(child, NULL);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_ATTACH:
	case DDI_CTLOPS_DETACH:
		return (DDI_SUCCESS);

	default:
		return (ddi_ctlops(dip, rdip, op, arg, result));
	}
}

static int
t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg,
    dev_info_t **cdipp)
{
	int instance, i;
	struct adapter *sc;

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);

	if (op == BUS_CONFIG_ONE) {
		char *c;

		/*
		 * arg is something like "cxgb@0" where 0 is the port_id
		 * hanging off this nexus.
		 */

		c = arg;
		while (*(c + 1))
			c++;

		/* There should be exactly 1 digit after '@' */
		if (*(c - 1) != '@')
			return (NDI_FAILURE);

		i = *c - '0';

		if (add_child_node(sc, i) != 0)
			return (NDI_FAILURE);

		flags |= NDI_ONLINE_ATTACH;
	} else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) {
		/* Allocate and bind all child device nodes */
		for_each_port(sc, i)
		    (void) add_child_node(sc, i);
		flags |= NDI_ONLINE_ATTACH;
	}

	return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0));
}

static int
t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
	int instance, i, rc;
	struct adapter *sc;

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);

	if (op == BUS_UNCONFIG_ONE || op == BUS_UNCONFIG_ALL ||
	    op == BUS_UNCONFIG_DRIVER)
		flags |= NDI_UNCONFIG;

	rc = ndi_busop_bus_unconfig(dip, flags, op, arg);
	if (rc != 0)
		return (rc);

	if (op == BUS_UNCONFIG_ONE) {
		char *c;

		c = arg;
		while (*(c + 1))
			c++;

		if (*(c - 1) != '@')
			return (NDI_SUCCESS);

		i = *c - '0';

		rc = remove_child_node(sc, i);
	} else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) {
		for_each_port(sc, i)
		    (void) remove_child_node(sc, i);
	}

	return (rc);
}

/* ARGSUSED */
static int
t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	struct adapter *sc;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	sc = ddi_get_soft_state(t4_list, getminor(*devp));
	if (sc == NULL)
		return (ENXIO);

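	/*
	 * Atomically mark the device open: if sc->open was 0 the CAS stores
	 * EBUSY and returns the old value (0, i.e. success); if the device
	 * was already open the CAS fails and returns the stored EBUSY, which
	 * becomes this function's error code.
	 */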
	return (atomic_cas_uint(&sc->open, 0, EBUSY));
}

/* ARGSUSED */
static int
t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	struct adapter *sc;

	sc = ddi_get_soft_state(t4_list, getminor(dev));
	if (sc == NULL)
		return (EINVAL);

	(void) atomic_swap_uint(&sc->open, 0);
	return (0);
}

/* ARGSUSED */
static int
t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp)
{
	int instance;
	struct adapter *sc;
	void *data = (void *)d;

	if (crgetuid(credp) != 0)
		return (EPERM);

	instance = getminor(dev);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (EINVAL);

	return (t4_ioctl(sc, cmd, data, mode));
}

static unsigned int
getpf(struct adapter *sc)
{
	int rc, *data;
	uint_t n, pf;

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
	    DDI_PROP_DONTPASS, "reg", &data, &n);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to lookup \"reg\" property: %d", rc);
		return (0xff);
	}

	pf = PCI_REG_FUNC_G(data[0]);
	ddi_prop_free(data);

	return (pf);
}

/*
 * Install a compatible firmware (if required), establish contact with it,
 * become the master, and reset the device.
 */
static int
prep_firmware(struct adapter *sc)
{
	int rc;
	size_t fw_size;
	int reset = 1;
	enum dev_state state;
	unsigned char *fw_data;
	struct fw_hdr *card_fw, *hdr;
	const char *fw_file = NULL;
	firmware_handle_t fw_hdl;
	struct fw_info fi, *fw_info = &fi;

	struct driver_properties *p = &sc->props;

	/* Contact firmware, request master */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
	if (rc < 0) {
		rc = -rc;
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to connect to the firmware: %d.", rc);
		return (rc);
	}

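	/*
	 * On success t4_fw_hello returns the mailbox of the PF that is
	 * master; if that is our own mailbox, this PF is the master.
	 */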
	if (rc == sc->mbox)
		sc->flags |= MASTER_PF;

	/* We may need FW version info for later reporting */
	t4_get_version_info(sc);

	switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
	case CHELSIO_T4:
		fw_file = "t4fw.bin";
		break;
	case CHELSIO_T5:
		fw_file = "t5fw.bin";
		break;
	case CHELSIO_T6:
		fw_file = "t6fw.bin";
		break;
	default:
		cxgb_printf(sc->dip, CE_WARN, "Adapter type not supported\n");
		return (EINVAL);
	}

	if (firmware_open(T4_PORT_NAME, fw_file, &fw_hdl) != 0) {
		cxgb_printf(sc->dip, CE_WARN, "Could not open %s\n", fw_file);
		return (EINVAL);
	}

	fw_size = firmware_get_size(fw_hdl);

	if (fw_size < sizeof (struct fw_hdr)) {
		cxgb_printf(sc->dip, CE_WARN, "%s is too small (%ld bytes)\n",
		    fw_file, fw_size);
		firmware_close(fw_hdl);
		return (EINVAL);
	}

	if (fw_size > FLASH_FW_MAX_SIZE) {
		cxgb_printf(sc->dip, CE_WARN,
		    "%s is too large (%ld bytes, max allowed is %ld)\n",
		    fw_file, fw_size, FLASH_FW_MAX_SIZE);
		firmware_close(fw_hdl);
		return (EFBIG);
	}

	fw_data = kmem_zalloc(fw_size, KM_SLEEP);
	if (firmware_read(fw_hdl, 0, fw_data, fw_size) != 0) {
		cxgb_printf(sc->dip, CE_WARN, "Failed to read from %s\n",
		    fw_file);
		firmware_close(fw_hdl);
		kmem_free(fw_data, fw_size);
		return (EINVAL);
	}
	firmware_close(fw_hdl);

	bzero(fw_info, sizeof (*fw_info));
	fw_info->chip = CHELSIO_CHIP_VERSION(sc->params.chip);

	hdr = (struct fw_hdr *)fw_data;
	fw_info->fw_hdr.fw_ver = hdr->fw_ver;
	fw_info->fw_hdr.chip = hdr->chip;
	fw_info->fw_hdr.intfver_nic = hdr->intfver_nic;
	fw_info->fw_hdr.intfver_vnic = hdr->intfver_vnic;
	fw_info->fw_hdr.intfver_ofld = hdr->intfver_ofld;
	fw_info->fw_hdr.intfver_ri = hdr->intfver_ri;
	fw_info->fw_hdr.intfver_iscsipdu = hdr->intfver_iscsipdu;
	fw_info->fw_hdr.intfver_iscsi = hdr->intfver_iscsi;
	fw_info->fw_hdr.intfver_fcoepdu = hdr->intfver_fcoepdu;
	fw_info->fw_hdr.intfver_fcoe = hdr->intfver_fcoe;

	/* allocate memory to read the header of the firmware on the card */
	card_fw = kmem_zalloc(sizeof (*card_fw), KM_SLEEP);

	rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw,
	    p->t4_fw_install, state, &reset);

	kmem_free(card_fw, sizeof (*card_fw));
	kmem_free(fw_data, fw_size);

	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to install firmware: %d", rc);
		return (rc);
	} else {
		/* refresh */
		(void) t4_check_fw_version(sc);
	}

	/* Reset device */
	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "firmware reset failed: %d.", rc);
		if (rc != ETIMEDOUT && rc != EIO)
			(void) t4_fw_bye(sc, sc->mbox);
		return (rc);
	}

	/* Partition adapter resources as specified in the config file. */
	if (sc->flags & MASTER_PF) {
		/* Handle default vs special T4 config file */
		rc = partition_resources(sc);
		if (rc != 0)
			goto err;	/* error message displayed already */
	}

	sc->flags |= FW_OK;
	return (0);
err:
	return (rc);
}

static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE, MEMWIN2_APERTURE }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

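/*
 * Shorthand for building firmware parameter identifiers; for example,
 * FW_PARAM_DEV(PORTVEC) names the device-wide port vector parameter and
 * FW_PARAM_PFVF(L2T_START) names a per-PF/VF parameter.
 */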
#define	FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define	FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (mtype) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

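	/* The requested range must lie entirely within the enabled region. */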
	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}

void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc->params.chip)) {
		mw = &t4_memwin[win];
	} else {
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}

/*
 * Upload configuration file to card's memory.
 */
static int
upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma)
{
	int rc = 0;
	size_t cflen, cfbaselen;
	u_int i, n;
	uint32_t param, val, addr, mtype, maddr;
	uint32_t off, mw_base, mw_aperture;
	uint32_t *cfdata, *cfbase;
	firmware_handle_t fw_hdl;
	const char *cfg_file = NULL;

	/* Figure out where the firmware wants us to upload it. */
	param = FW_PARAM_DEV(CF);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc != 0) {
		/* Firmwares without config file support will fail this way */
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query config file location: %d.\n", rc);
		return (rc);
	}
	*mt = mtype = G_FW_PARAMS_PARAM_Y(val);
	*ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;

	switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
	case CHELSIO_T4:
		cfg_file = "t4fw_cfg.txt";
		break;
	case CHELSIO_T5:
		cfg_file = "t5fw_cfg.txt";
		break;
	case CHELSIO_T6:
		cfg_file = "t6fw_cfg.txt";
		break;
	default:
		cxgb_printf(sc->dip, CE_WARN, "Invalid Adapter detected\n");
		return (EINVAL);
	}

	if (firmware_open(T4_PORT_NAME, cfg_file, &fw_hdl) != 0) {
		cxgb_printf(sc->dip, CE_WARN, "Could not open %s\n", cfg_file);
		return (EINVAL);
	}

	cflen = firmware_get_size(fw_hdl);
	/*
	 * Truncate the length to a multiple of uint32_ts. The configuration
	 * text files have trailing comments (and hopefully always will) so
	 * nothing important is lost.
	 */
	cflen &= ~3;

	if (cflen > FLASH_CFG_MAX_SIZE) {
		cxgb_printf(sc->dip, CE_WARN,
		    "config file too long (%ld, max allowed is %d).  ",
		    cflen, FLASH_CFG_MAX_SIZE);
		firmware_close(fw_hdl);
		return (EFBIG);
	}

	rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "%s: addr (%d/0x%x) or len %ld is not valid: %d.  "
		    "Will try to use the config on the card, if any.\n",
		    __func__, mtype, maddr, cflen, rc);
		firmware_close(fw_hdl);
		return (EFAULT);
	}

	cfbaselen = cflen;
	cfbase = cfdata = kmem_zalloc(cflen, KM_SLEEP);
	if (firmware_read(fw_hdl, 0, cfdata, cflen) != 0) {
		cxgb_printf(sc->dip, CE_WARN, "Failed to read from %s\n",
		    cfg_file);
		firmware_close(fw_hdl);
		kmem_free(cfbase, cfbaselen);
		return (EINVAL);
	}
	firmware_close(fw_hdl);

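	/*
	 * Copy the config file into card memory through memory window 2,
	 * 4 bytes at a time, repositioning the window whenever a write
	 * would run past the end of the current aperture.
	 */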
	memwin_info(sc, 2, &mw_base, &mw_aperture);
	while (cflen) {
		off = position_memwin(sc, 2, addr);
		n = min(cflen, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			t4_write_reg(sc, mw_base + off + i, *cfdata++);
		cflen -= n;
		addr += n;
	}

	kmem_free(cfbase, cfbaselen);

	return (rc);
}

/*
 * Partition chip resources for use between various PFs, VFs, etc.  This is
 * done by uploading the firmware configuration file to the adapter and
 * instructing the firmware to process it.
 */
static int
partition_resources(struct adapter *sc)
{
	int rc;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, maddr, finicsum, cfcsum;

	rc = upload_config_file(sc, &mtype, &maddr);
	if (rc != 0) {
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(sc);
	}

	bzero(&caps, sizeof (caps));
	caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to pre-process config file: %d.\n", rc);
		return (rc);
	}

	finicsum = ntohl(caps.finicsum);
	cfcsum = ntohl(caps.cfcsum);
	if (finicsum != cfcsum) {
		cxgb_printf(sc->dip, CE_WARN,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

	/* TODO: Need to configure this correctly */
	caps.toecaps = htons(FW_CAPS_CONFIG_TOE);
	caps.iscsicaps = 0;
	caps.rdmacaps = 0;
	caps.fcoecaps = 0;
	/* TODO: Disable VNIC cap for now */
	caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);

	caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to process config file: %d.\n", rc);
		return (rc);
	}

	return (0);
}

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int
adap__pre_init_tweaks(struct adapter *sc)
{
	int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */

	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	(void) t4_fixup_host_params_compat(sc, PAGE_SIZE, CACHE_LINE,
	    T5_LAST_REV);

	t4_set_reg_field(sc, A_SGE_CONTROL,
	    V_PKTSHIFT(M_PKTSHIFT), V_PKTSHIFT(rx_dma_offset));

	return (0);
}

/*
 * Retrieve parameters that are needed (or nice to have) prior to calling
 * t4_sge_init and t4_fw_initialize.
 */
static int
get_params__pre_init(struct adapter *sc)
{
	int rc;
	uint32_t param[2], val[2];
	struct fw_devlog_cmd cmd;
	struct devlog_params *dlog = &sc->params.devlog;

	/*
	 * Grab the raw VPD parameters.
	 */
	rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query VPD parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	param[0] = FW_PARAM_DEV(PORTVEC);
	param[1] = FW_PARAM_DEV(CCLK);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	sc->params.portvec = val[0];
	sc->params.nports = 0;
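	/* Count the bits set in the port vector; each bit is one port. */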
	while (val[0]) {
		sc->params.nports++;
		val[0] &= val[0] - 1;
	}

	sc->params.vpd.cclk = val[1];

	/* Read device log parameters. */
	bzero(&cmd, sizeof (cmd));
	cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htonl(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = ntohl(cmd.memsize_devlog);
	}

	return (rc);
}

/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	param[4] = FW_PARAM_PFVF(L2T_START);
	param[5] = FW_PARAM_PFVF(L2T_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	/* LINTED: E_ASSIGN_NARROW_CONV */
	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	sc->tids.ftid_base = val[2];
	sc->tids.nftids = val[3] - val[2] + 1;
	sc->vres.l2t.start = val[4];
	sc->vres.l2t.size = val[5] - val[4] + 1;

	param[0] = FW_PARAM_PFVF(IQFLINT_END);
	param[1] = FW_PARAM_PFVF(EQ_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query eq/iq map size parameters (post_init): "
		    "%d.\n", rc);
		return (rc);
	}

	sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
	sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;

	/* get capabilities */
	bzero(&caps, sizeof (caps));
	caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

	if (caps.toecaps != 0) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			cxgb_printf(sc->dip, CE_WARN,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}

	rc = -t4_get_pfres(sc);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query PF resource params: %d.\n", rc);
		return (rc);
	}

	/* These are finalized by FW initialization, load their values now */
	val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
	sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
	sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);

	return (rc);
}

static int
set_params__post_init(struct adapter *sc)
{
	uint32_t param, val;

	/* ask for encapsulated CPLs */
	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val = 1;
	(void) t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);

	return (0);
}

/* TODO: verify */
static void
setup_memwin(struct adapter *sc)
{
	pci_regspec_t *data;
	int rc;
	uint_t n;
	uintptr_t bar0;
	uintptr_t mem_win0_base, mem_win1_base, mem_win2_base;
	uintptr_t mem_win2_aperture;

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
	    DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to lookup \"assigned-addresses\" property: %d", rc);
		return;
	}
	n /= sizeof (*data);

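	/* The first "assigned-addresses" entry describes BAR0. */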
	bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low;
	ddi_prop_free(data);

	if (is_t4(sc->params.chip)) {
		mem_win0_base = bar0 + MEMWIN0_BASE;
		mem_win1_base = bar0 + MEMWIN1_BASE;
		mem_win2_base = bar0 + MEMWIN2_BASE;
		mem_win2_aperture = MEMWIN2_APERTURE;
	} else {
		/* For T5, only relative offset inside the PCIe BAR is passed */
		mem_win0_base = MEMWIN0_BASE;
		mem_win1_base = MEMWIN1_BASE;
		mem_win2_base = MEMWIN2_BASE_T5;
		mem_win2_aperture = MEMWIN2_APERTURE_T5;
	}

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
	    mem_win0_base | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
	    mem_win1_base | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
	    mem_win2_base | V_BIR(0) |
	    V_WINDOW(ilog2(mem_win2_aperture) - 10));

	/* flush */
	(void) t4_read_reg(sc,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}

/*
 * Positions the memory window such that it can be used to access the specified
 * address in the chip's address space.  The return value is the offset of addr
 * from the start of the window.
 */
uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	if (addr & 3) {
		cxgb_printf(sc->dip, CE_WARN,
		    "addr (0x%x) is not at a 4B boundary.\n", addr);
		return (EFAULT);
	}

	if (is_t4(sc->params.chip)) {
		pf = 0;
		start = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	t4_write_reg(sc, reg, start | pf);
	(void) t4_read_reg(sc, reg);

	return (addr - start);
}

/*
 * Reads the named property and fills up the "data" array (which has at least
 * "count" elements).  We first try and lookup the property for our dev_t and
 * then retry with DDI_DEV_T_ANY if it's not found.
 *
 * Returns non-zero if the property was found and "data" has been updated.
 */
static int
prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count)
{
	dev_info_t *dip = sc->dip;
	dev_t dev = sc->dev;
	int rc, *d;
	uint_t i, n;

	rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS,
	    name, &d, &n);
	if (rc == DDI_PROP_SUCCESS)
		goto found;

	if (rc != DDI_PROP_NOT_FOUND) {
		cxgb_printf(dip, CE_WARN,
		    "failed to lookup property %s for minor %d: %d.",
		    name, getminor(dev), rc);
		return (0);
	}

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    name, &d, &n);
	if (rc == DDI_PROP_SUCCESS)
		goto found;

	if (rc != DDI_PROP_NOT_FOUND) {
		cxgb_printf(dip, CE_WARN,
		    "failed to lookup property %s: %d.", name, rc);
		return (0);
	}

	return (0);

found:
	if (n > count) {
		cxgb_printf(dip, CE_NOTE,
		    "property %s has too many elements (%d), ignoring extras",
		    name, n);
	}

	for (i = 0; i < n && i < count; i++)
		data[i] = d[i];
	ddi_prop_free(d);

	return (1);
}
1883 
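/*
 * Looks up the named integer property, first for our dev_t and then for
 * DDI_DEV_T_ANY, and returns "defval" if it isn't found either way.
 */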
1884 static int
1885 prop_lookup_int(struct adapter *sc, char *name, int defval)
1886 {
1887 	int rc;
1888 
1889 	rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1);
1890 	if (rc != -1)
1891 		return (rc);
1892 
1893 	return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS,
1894 	    name, defval));
1895 }
1896 
1897 static int
1898 init_driver_props(struct adapter *sc, struct driver_properties *p)
1899 {
1900 	dev_t dev = sc->dev;
1901 	dev_info_t *dip = sc->dip;
1902 	int i, *data;
1903 	uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200};
1904 	uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
1905 
1906 	/*
1907 	 * Holdoff timer
1908 	 */
1909 	data = &p->timer_val[0];
1910 	for (i = 0; i < SGE_NTIMERS; i++)
1911 		data[i] = tmr[i];
1912 	(void) prop_lookup_int_array(sc, "holdoff-timer-values", data,
1913 	    SGE_NTIMERS);
1914 	for (i = 0; i < SGE_NTIMERS; i++) {
1915 		int limit = 200;
1916 		if (data[i] > limit) {
1917 			cxgb_printf(dip, CE_WARN,
1918 			    "holdoff timer %d is too high (%d), lowered to %d.",
1919 			    i, data[i], limit);
1920 			data[i] = limit;
1921 		}
1922 	}
1923 	(void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values",
1924 	    data, SGE_NTIMERS);
1925 
1926 	/*
1927 	 * Holdoff packet counter
1928 	 */
1929 	data = &p->counter_val[0];
1930 	for (i = 0; i < SGE_NCOUNTERS; i++)
1931 		data[i] = cnt[i];
1932 	(void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data,
1933 	    SGE_NCOUNTERS);
1934 	for (i = 0; i < SGE_NCOUNTERS; i++) {
1935 		int limit = M_THRESHOLD_0;
1936 		if (data[i] > limit) {
1937 			cxgb_printf(dip, CE_WARN,
1938 			    "holdoff pkt-counter %d is too high (%d), "
1939 			    "lowered to %d.", i, data[i], limit);
1940 			data[i] = limit;
1941 		}
1942 	}
1943 	(void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values",
1944 	    data, SGE_NCOUNTERS);
1945 
1946 	/*
1947 	 * Maximum # of tx and rx queues to use per port.  The "10G"
1948 	 * properties cover all ports 10G and faster; "1G" covers 1G ports.
1949 	 */
1950 	p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8);
1951 	(void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
1952 	    p->max_ntxq_10g);
1953 
1954 	p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8);
1955 	(void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
1956 	    p->max_nrxq_10g);
1957 
1958 	p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2);
1959 	(void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
1960 	    p->max_ntxq_1g);
1961 
1962 	p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2);
1963 	(void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
1964 	    p->max_nrxq_1g);
1965 
1966 #ifdef TCP_OFFLOAD_ENABLE
1967 	p->max_nofldtxq_10g = prop_lookup_int(sc, "max-nofldtxq-10G-port", 8);
1968 	(void) ddi_prop_update_int(dev, dip, "max-nofldtxq-10G-port",
1969 	    p->max_nofldtxq_10g);
1970 
1971 	p->max_nofldrxq_10g = prop_lookup_int(sc, "max-nofldrxq-10G-port", 2);
1972 	(void) ddi_prop_update_int(dev, dip, "max-nofldrxq-10G-port",
1973 	    p->max_nofldrxq_10g);
1974 
1975 	p->max_nofldtxq_1g = prop_lookup_int(sc, "max-nofldtxq-1G-port", 2);
1976 	(void) ddi_prop_update_int(dev, dip, "max-nofldtxq-1G-port",
1977 	    p->max_nofldtxq_1g);
1978 
1979 	p->max_nofldrxq_1g = prop_lookup_int(sc, "max-nofldrxq-1G-port", 1);
1980 	(void) ddi_prop_update_int(dev, dip, "max-nofldrxq-1G-port",
1981 	    p->max_nofldrxq_1g);
1982 #endif
1983 
1984 	/*
1985 	 * Holdoff parameters for 10G and 1G ports.
1986 	 */
1987 	p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0);
1988 	(void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G",
1989 	    p->tmr_idx_10g);
1990 
1991 	p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2);
1992 	(void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G",
1993 	    p->pktc_idx_10g);
1994 
1995 	p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0);
1996 	(void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G",
1997 	    p->tmr_idx_1g);
1998 
1999 	p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2);
2000 	(void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G",
2001 	    p->pktc_idx_1g);
2002 
2003 	/*
2004 	 * Size (number of entries) of each tx and rx queue.
2005 	 */
2006 	i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE);
2007 	p->qsize_txq = max(i, 128);
2008 	if (p->qsize_txq != i) {
2009 		cxgb_printf(dip, CE_WARN,
2010 		    "using %d instead of %d as the tx queue size",
2011 		    p->qsize_txq, i);
2012 	}
2013 	(void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq);
2014 
2015 	i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE);
2016 	p->qsize_rxq = max(i, 128);
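	/* Keep the rx queue size a multiple of 8 by rounding down. */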
2017 	while (p->qsize_rxq & 7)
2018 		p->qsize_rxq--;
2019 	if (p->qsize_rxq != i) {
2020 		cxgb_printf(dip, CE_WARN,
2021 		    "using %d instead of %d as the rx queue size",
2022 		    p->qsize_rxq, i);
2023 	}
2024 	(void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq);
2025 
2026 	/*
2027 	 * Interrupt types allowed.
2028 	 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively.  See sys/ddi_intr.h
2029 	 */
2030 	p->intr_types = prop_lookup_int(sc, "interrupt-types",
2031 	    DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
2032 	(void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types);
2033 
2034 	/*
2035 	 * Forwarded interrupt queues.  Create this property to force the driver
2036 	 * to use forwarded interrupt queues.
2037 	 */
2038 	if (ddi_prop_exists(dev, dip, DDI_PROP_DONTPASS,
2039 	    "interrupt-forwarding") != 0 ||
2040 	    ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2041 	    "interrupt-forwarding") != 0) {
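		/*
		 * Forced interrupt forwarding is not implemented yet;
		 * UNIMPLEMENTED() makes that unmistakable before the
		 * property is created.
		 */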
2042 		UNIMPLEMENTED();
2043 		(void) ddi_prop_create(dev, dip, DDI_PROP_CANSLEEP,
2044 		    "interrupt-forwarding", NULL, 0);
2045 	}
2046 
2047 	/*
2048 	 * Write combining
2049 	 * 0 to disable, 1 to enable
2050 	 */
2051 	p->wc = prop_lookup_int(sc, "write-combine", 1);
2052 	cxgb_printf(dip, CE_WARN, "write-combine: using %d", p->wc);
2053 	if (p->wc != 0 && p->wc != 1) {
2054 		cxgb_printf(dip, CE_WARN,
2055 		    "write-combine: using 1 instead of %d", p->wc);
2056 		p->wc = 1;
2057 	}
2058 	(void) ddi_prop_update_int(dev, dip, "write-combine", p->wc);
2059 
2060 	p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1);
2061 	if (p->t4_fw_install != 0 && p->t4_fw_install != 2)
2062 		p->t4_fw_install = 1;
2063 	(void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install);
2064 
2065 	/* Multiple Rings */
2066 	p->multi_rings = prop_lookup_int(sc, "multi-rings", 1);
2067 	if (p->multi_rings != 0 && p->multi_rings != 1) {
2068 		cxgb_printf(dip, CE_NOTE,
2069 		    "multi-rings: using value 1 instead of %d", p->multi_rings);
2070 		p->multi_rings = 1;
2071 	}
2072 
2073 	(void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings);
2074 
2075 	return (0);
2076 }
2077 
2078 static int
2079 remove_extra_props(struct adapter *sc, int n10g, int n1g)
2080 {
2081 	if (n10g == 0) {
2082 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port");
2083 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port");
2084 		(void) ddi_prop_remove(sc->dev, sc->dip,
2085 		    "holdoff-timer-idx-10G");
2086 		(void) ddi_prop_remove(sc->dev, sc->dip,
2087 		    "holdoff-pktc-idx-10G");
2088 	}
2089 
2090 	if (n1g == 0) {
2091 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port");
2092 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port");
2093 		(void) ddi_prop_remove(sc->dev, sc->dip,
2094 		    "holdoff-timer-idx-1G");
2095 		(void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G");
2096 	}
2097 
2098 	return (0);
2099 }
2100 
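/*
 * Selects the interrupt type (MSI-X, MSI, or fixed) and the number of
 * tx/rx (and offload) queues per port, constrained by what the system
 * supports, what the driver properties allow, and what the firmware has
 * provisioned for this PF.
 */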
2101 static int
2102 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
2103     struct intrs_and_queues *iaq)
2104 {
2105 	struct driver_properties *p = &sc->props;
2106 	int rc, itype, itypes, navail, nc, n;
2107 	int pfres_rxq, pfres_txq, pfresq;
2108 
2109 	bzero(iaq, sizeof (*iaq));
2110 	nc = ncpus;	/* our snapshot of the number of CPUs */
2111 	iaq->ntxq10g = min(nc, p->max_ntxq_10g);
2112 	iaq->ntxq1g = min(nc, p->max_ntxq_1g);
2113 	iaq->nrxq10g = min(nc, p->max_nrxq_10g);
2114 	iaq->nrxq1g = min(nc, p->max_nrxq_1g);
2115 #ifdef TCP_OFFLOAD_ENABLE
2116 	iaq->nofldtxq10g = min(nc, p->max_nofldtxq_10g);
2117 	iaq->nofldtxq1g = min(nc, p->max_nofldtxq_1g);
2118 	iaq->nofldrxq10g = min(nc, p->max_nofldrxq_10g);
2119 	iaq->nofldrxq1g = min(nc, p->max_nofldrxq_1g);
2120 #endif
2121 
2122 	pfres_rxq = iaq->nrxq10g * n10g + iaq->nrxq1g * n1g;
2123 	pfres_txq = iaq->ntxq10g * n10g + iaq->ntxq1g * n1g;
2124 #ifdef TCP_OFFLOAD_ENABLE
2125 	pfres_rxq += iaq->nofldrxq10g * n10g + iaq->nofldrxq1g * n1g;
2126 	pfres_txq += iaq->nofldtxq10g * n10g + iaq->nofldtxq1g * n1g;
2127 #endif
2128 
2129 	/* If the current configuration of max Rxqs and Txqs exceeds the
2130 	 * max available for all the ports under this PF, then shrink
2131 	 * the queues to what is available.  Reduce them such that each
2132 	 * port under this PF gets an equally distributed number of queues,
2133 	 * and guarantee at least 1 queue for each port for both NIC
2134 	 * and offload queues.
2135 	 *
2136 	 * neq - fixed max number of Egress queues on Tx path and Free List
2137 	 * queues that hold Rx payload data on Rx path. Half are reserved
2138 	 * for Egress queues and the other half for Free List queues.
2139 	 * Hence, the division by 2.
2140 	 *
2141 	 * niqflint - max number of Ingress queues with interrupts on Rx
2142 	 * path to receive completions that indicate Rx payload has been
2143 	 * posted in its associated Free List queue. Also handles Tx
2144 	 * completions for packets successfully transmitted on Tx path.
2145 	 *
2146 	 * nethctrl - max number of Egress queues only for Tx path. This
2147 	 * number is usually half of neq. However, if it became less than
2148 	 * neq due to lack of resources based on firmware configuration,
2149 	 * then take the lower value.
2150 	 */
2151 	while (pfres_rxq >
2152 	    min(sc->params.pfres.neq / 2, sc->params.pfres.niqflint)) {
2153 		pfresq = pfres_rxq;
2154 
2155 		if (iaq->nrxq10g > 1) {
2156 			iaq->nrxq10g--;
2157 			pfres_rxq -= n10g;
2158 		}
2159 
2160 		if (iaq->nrxq1g > 1) {
2161 			iaq->nrxq1g--;
2162 			pfres_rxq -= n1g;
2163 		}
2164 
2165 #ifdef TCP_OFFLOAD_ENABLE
2166 		if (iaq->nofldrxq10g > 1) {
2167 			iaq->nofldrxq10g--;
2168 			pfres_rxq -= n10g;
2169 		}
2170 
2171 		if (iaq->nofldrxq1g > 1) {
2172 			iaq->nofldrxq1g--;
2173 			pfres_rxq -= n1g;
2174 		}
2175 #endif
2176 
2177 		/* Break if nothing changed */
2178 		if (pfresq == pfres_rxq)
2179 			break;
2180 	}
2181 
2182 	while (pfres_txq >
2183 	    min(sc->params.pfres.neq / 2, sc->params.pfres.nethctrl)) {
2184 		pfresq = pfres_txq;
2185 
2186 		if (iaq->ntxq10g > 1) {
2187 			iaq->ntxq10g--;
2188 			pfres_txq -= n10g;
2189 		}
2190 
2191 		if (iaq->ntxq1g > 1) {
2192 			iaq->ntxq1g--;
2193 			pfres_txq -= n1g;
2194 		}
2195 
2196 #ifdef TCP_OFFLOAD_ENABLE
2197 		if (iaq->nofldtxq10g > 1) {
2198 			iaq->nofldtxq10g--;
2199 			pfres_txq -= n10g;
2200 		}
2201 
2202 		if (iaq->nofldtxq1g > 1) {
2203 			iaq->nofldtxq1g--;
2204 			pfres_txq -= n1g;
2205 		}
2206 #endif
2207 
2208 		/* Break if nothing changed */
2209 		if (pfresq == pfres_txq)
2210 			break;
2211 	}
2212 
2213 	rc = ddi_intr_get_supported_types(sc->dip, &itypes);
2214 	if (rc != DDI_SUCCESS) {
2215 		cxgb_printf(sc->dip, CE_WARN,
2216 		    "failed to determine supported interrupt types: %d", rc);
2217 		return (rc);
2218 	}
2219 
2220 	for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) {
2221 		ASSERT(itype == DDI_INTR_TYPE_MSIX ||
2222 		    itype == DDI_INTR_TYPE_MSI ||
2223 		    itype == DDI_INTR_TYPE_FIXED);
2224 
2225 		if ((itype & itypes & p->intr_types) == 0)
2226 			continue;	/* not supported or not allowed */
2227 
2228 		navail = 0;
2229 		rc = ddi_intr_get_navail(sc->dip, itype, &navail);
2230 		if (rc != DDI_SUCCESS || navail == 0) {
2231 			cxgb_printf(sc->dip, CE_WARN,
2232 			    "failed to get # of interrupts for type %d: %d",
2233 			    itype, rc);
2234 			continue;	/* carry on */
2235 		}
2236 
2237 		iaq->intr_type = itype;
2240 
2241 		/*
2242 		 * Best option: an interrupt vector for errors, one for the
2243 		 * firmware event queue, and one for each rxq (NIC as well
2244 		 * as offload).
2245 		 */
2246 		iaq->nirq = T4_EXTRA_INTR;
2247 		iaq->nirq += n10g * iaq->nrxq10g;
2248 		iaq->nirq += n1g * iaq->nrxq1g;
2249 #ifdef TCP_OFFLOAD_ENABLE
2250 		iaq->nirq += n10g * iaq->nofldrxq10g;
2251 		iaq->nirq += n1g * iaq->nofldrxq1g;
2252 #endif
2253 
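		/*
		 * MSI can only be allocated in power-of-2 vector counts,
		 * hence the ISP2() check before committing to a layout.
		 */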
2254 		if (iaq->nirq <= navail &&
2255 		    (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2256 			iaq->intr_fwd = 0;
2257 			goto allocate;
2258 		}
2259 
2260 		/*
2261 		 * Second best option: an interrupt vector for errors, one for
2262 		 * the firmware event queue, and one each for either NIC or
2263 		 * the firmware event queue, and one for each NIC or offload
2264 		 * rxq (a vector is shared where a port has both).
2265 		iaq->nirq = T4_EXTRA_INTR;
2266 #ifdef TCP_OFFLOAD_ENABLE
2267 		iaq->nirq += n10g * max(iaq->nrxq10g, iaq->nofldrxq10g);
2268 		iaq->nirq += n1g * max(iaq->nrxq1g, iaq->nofldrxq1g);
2269 #else
2270 		iaq->nirq += n10g * iaq->nrxq10g;
2271 		iaq->nirq += n1g * iaq->nrxq1g;
2272 #endif
2273 		if (iaq->nirq <= navail &&
2274 		    (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2275 			iaq->intr_fwd = 1;
2276 			goto allocate;
2277 		}
2278 
2279 		/*
2280 		 * Next best option: an interrupt vector for errors, one for the
2281 		 * firmware event queue, and at least one per port.  At this
2282 		 * point we know we'll have to downsize nrxq or nofldrxq to fit
2283 		 * what's available to us.
2284 		 */
2285 		iaq->nirq = T4_EXTRA_INTR;
2286 		iaq->nirq += n10g + n1g;
2287 		if (iaq->nirq <= navail) {
2288 			int leftover = navail - iaq->nirq;
2289 
2290 			if (n10g > 0) {
2291 				int target = iaq->nrxq10g;
2292 
2293 #ifdef TCP_OFFLOAD_ENABLE
2294 				target = max(target, iaq->nofldrxq10g);
2295 #endif
2296 				n = 1;
2297 				while (n < target && leftover >= n10g) {
2298 					leftover -= n10g;
2299 					iaq->nirq += n10g;
2300 					n++;
2301 				}
2302 				iaq->nrxq10g = min(n, iaq->nrxq10g);
2303 #ifdef TCP_OFFLOAD_ENABLE
2304 				iaq->nofldrxq10g = min(n, iaq->nofldrxq10g);
2305 #endif
2306 			}
2307 
2308 			if (n1g > 0) {
2309 				int target = iaq->nrxq1g;
2310 
2311 #ifdef TCP_OFFLOAD_ENABLE
2312 				target = max(target, iaq->nofldrxq1g);
2313 #endif
2314 				n = 1;
2315 				while (n < target && leftover >= n1g) {
2316 					leftover -= n1g;
2317 					iaq->nirq += n1g;
2318 					n++;
2319 				}
2320 				iaq->nrxq1g = min(n, iaq->nrxq1g);
2321 #ifdef TCP_OFFLOAD_ENABLE
2322 				iaq->nofldrxq1g = min(n, iaq->nofldrxq1g);
2323 #endif
2324 			}
2325 
2326 			/* We have arrived at the minimum number of vectors
2327 			 * required to enable per-queue irqs (either NIC or
2328 			 * offload).  In the non-offload case this yields a
2329 			 * vector per queue; in the offload case a vector is
2330 			 * shared by an offload/NIC queue pair.  Hence enable
2331 			 * interrupt forwarding only for the offload case.
2332 			 */
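			/*
			 * Note: both arms of this #ifdef open the "if" whose
			 * body and closing brace are shared below.
			 */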
2333 #ifdef TCP_OFFLOAD_ENABLE
2334 			if (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq)) {
2335 				iaq->intr_fwd = 1;
2336 #else
2337 			if (itype != DDI_INTR_TYPE_MSI) {
2338 #endif
2339 				goto allocate;
2340 			}
2341 		}
2342 
2343 		/*
2344 		 * Least desirable option: one interrupt vector for everything.
2345 		 */
2346 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2347 #ifdef TCP_OFFLOAD_ENABLE
2348 		iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2349 #endif
2350 		iaq->intr_fwd = 1;
2351 
2352 allocate:
2353 		return (0);
2354 	}
2355 
2356 	cxgb_printf(sc->dip, CE_WARN,
2357 	    "failed to find a usable interrupt type.  supported=%d, allowed=%d",
2358 	    itypes, p->intr_types);
2359 	return (DDI_FAILURE);
2360 }
2361 
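/*
 * Allocates (if needed) and binds a child devinfo node for the port at the
 * given index; the node is left in place if it already exists.
 */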
2362 static int
2363 add_child_node(struct adapter *sc, int idx)
2364 {
2365 	int rc;
2366 	struct port_info *pi;
2367 
2368 	if (idx < 0 || idx >= sc->params.nports)
2369 		return (EINVAL);
2370 
2371 	pi = sc->port[idx];
2372 	if (pi == NULL)
2373 		return (ENODEV);	/* t4_port_init failed earlier */
2374 
2375 	PORT_LOCK(pi);
2376 	if (pi->dip != NULL) {
2377 		rc = 0;		/* EEXIST really, but then bus_config fails */
2378 		goto done;
2379 	}
2380 
2381 	rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip);
2382 	if (rc != DDI_SUCCESS || pi->dip == NULL) {
2383 		rc = ENOMEM;
2384 		goto done;
2385 	}
2386 
2387 	(void) ddi_set_parent_data(pi->dip, pi);
2388 	(void) ndi_devi_bind_driver(pi->dip, 0);
2389 	rc = 0;
2390 done:
2391 	PORT_UNLOCK(pi);
2392 	return (rc);
2393 }
2394 
2395 static int
2396 remove_child_node(struct adapter *sc, int idx)
2397 {
2398 	int rc;
2399 	struct port_info *pi;
2400 
2401 	if (idx < 0 || idx >= sc->params.nports)
2402 		return (EINVAL);
2403 
2404 	pi = sc->port[idx];
2405 	if (pi == NULL)
2406 		return (ENODEV);
2407 
2408 	PORT_LOCK(pi);
2409 	if (pi->dip == NULL) {
2410 		rc = ENODEV;
2411 		goto done;
2412 	}
2413 
2414 	rc = ndi_devi_free(pi->dip);
2415 	if (rc == 0)
2416 		pi->dip = NULL;
2417 done:
2418 	PORT_UNLOCK(pi);
2419 	return (rc);
2420 }
2421 
2422 static char *
2423 print_port_speed(const struct port_info *pi)
2424 {
2425 	if (pi == NULL)
2426 		return ("-");
2427 
2428 	if (is_100G_port(pi))
2429 		return ("100G");
2430 	else if (is_50G_port(pi))
2431 		return ("50G");
2432 	else if (is_40G_port(pi))
2433 		return ("40G");
2434 	else if (is_25G_port(pi))
2435 		return ("25G");
2436 	else if (is_10G_port(pi))
2437 		return ("10G");
2438 	else
2439 		return ("1G");
2440 }
2441 
2442 #define	KS_UINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
2443 #define	KS_CINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
2444 #define	KS_U_SET(x, y)	kstatp->x.value.ul = (y)
2445 #define	KS_C_SET(x, ...)	\
2446 			(void) snprintf(kstatp->x.value.c, 16,  __VA_ARGS__)
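/*
 * The "16" above is the size of a kstat_named_t's embedded character value
 * (value.c); longer strings are silently truncated by snprintf.
 */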
2447 
2448 /*
2449  * t4nex:X:config
2450  */
2451 struct t4_kstats {
2452 	kstat_named_t chip_ver;
2453 	kstat_named_t fw_vers;
2454 	kstat_named_t tp_vers;
2455 	kstat_named_t driver_version;
2456 	kstat_named_t serial_number;
2457 	kstat_named_t ec_level;
2458 	kstat_named_t id;
2459 	kstat_named_t bus_type;
2460 	kstat_named_t bus_width;
2461 	kstat_named_t bus_speed;
2462 	kstat_named_t core_clock;
2463 	kstat_named_t port_cnt;
2464 	kstat_named_t port_type;
2465 	kstat_named_t pci_vendor_id;
2466 	kstat_named_t pci_device_id;
2467 };
2468 static kstat_t *
2469 setup_kstats(struct adapter *sc)
2470 {
2471 	kstat_t *ksp;
2472 	struct t4_kstats *kstatp;
2473 	int ndata;
2474 	struct pci_params *p = &sc->params.pci;
2475 	struct vpd_params *v = &sc->params.vpd;
2476 	uint16_t pci_vendor, pci_device;
2477 
2478 	ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t);
2479 
2480 	ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config",
2481 	    "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2482 	if (ksp == NULL) {
2483 		cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2484 		return (NULL);
2485 	}
2486 
2487 	kstatp = (struct t4_kstats *)ksp->ks_data;
2488 
2489 	KS_UINIT(chip_ver);
2490 	KS_CINIT(fw_vers);
2491 	KS_CINIT(tp_vers);
2492 	KS_CINIT(driver_version);
2493 	KS_CINIT(serial_number);
2494 	KS_CINIT(ec_level);
2495 	KS_CINIT(id);
2496 	KS_CINIT(bus_type);
2497 	KS_CINIT(bus_width);
2498 	KS_CINIT(bus_speed);
2499 	KS_UINIT(core_clock);
2500 	KS_UINIT(port_cnt);
2501 	KS_CINIT(port_type);
2502 	KS_CINIT(pci_vendor_id);
2503 	KS_CINIT(pci_device_id);
2504 
2505 	KS_U_SET(chip_ver, sc->params.chip);
2506 	KS_C_SET(fw_vers, "%d.%d.%d.%d",
2507 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2508 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2509 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2510 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2511 	KS_C_SET(tp_vers, "%d.%d.%d.%d",
2512 	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
2513 	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
2514 	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
2515 	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
2516 	KS_C_SET(driver_version, DRV_VERSION);
2517 	KS_C_SET(serial_number, "%s", v->sn);
2518 	KS_C_SET(ec_level, "%s", v->ec);
2519 	KS_C_SET(id, "%s", v->id);
2520 	KS_C_SET(bus_type, "pci-express");
2521 	KS_C_SET(bus_width, "x%d lanes", p->width);
2522 	KS_C_SET(bus_speed, "%d", p->speed);
2523 	KS_U_SET(core_clock, v->cclk);
2524 	KS_U_SET(port_cnt, sc->params.nports);
2525 
2526 	t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor);
2527 	KS_C_SET(pci_vendor_id, "0x%x", pci_vendor);
2528 
2529 	t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device);
2530 	KS_C_SET(pci_device_id, "0x%x", pci_device);
2531 
2532 	KS_C_SET(port_type, "%s/%s/%s/%s",
2533 	    print_port_speed(sc->port[0]),
2534 	    print_port_speed(sc->port[1]),
2535 	    print_port_speed(sc->port[2]),
2536 	    print_port_speed(sc->port[3]));
2537 
2538 	/* Do NOT set ksp->ks_update.  These kstats do not change. */
2539 
2540 	/* Install the kstat */
2541 	ksp->ks_private = (void *)sc;
2542 	kstat_install(ksp);
2543 
2544 	return (ksp);
2545 }
2546 
2547 /*
2548  * t4nex:X:stat
2549  */
2550 struct t4_wc_kstats {
2551 	kstat_named_t write_coal_success;
2552 	kstat_named_t write_coal_failure;
2553 };
2554 static kstat_t *
2555 setup_wc_kstats(struct adapter *sc)
2556 {
2557 	kstat_t *ksp;
2558 	struct t4_wc_kstats *kstatp;
2559 	int ndata;
2560 
2561 	ndata = sizeof (struct t4_wc_kstats) / sizeof (kstat_named_t);
2562 	ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats",
2563 	    "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2564 	if (ksp == NULL) {
2565 		cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2566 		return (NULL);
2567 	}
2568 
2569 	kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2570 
2571 	KS_UINIT(write_coal_success);
2572 	KS_UINIT(write_coal_failure);
2573 
2574 	ksp->ks_update = update_wc_kstats;
2575 	/* Install the kstat */
2576 	ksp->ks_private = (void *)sc;
2577 	kstat_install(ksp);
2578 
2579 	return (ksp);
2580 }
2581 
2582 static int
2583 update_wc_kstats(kstat_t *ksp, int rw)
2584 {
2585 	struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2586 	struct adapter *sc = ksp->ks_private;
2587 	uint32_t wc_total, wc_success, wc_failure;
2588 
2589 	if (rw == KSTAT_WRITE)
2590 		return (0);
2591 
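	/*
	 * Only T5 maintains these SGE statistics.  The driver treats
	 * A_SGE_STAT_MATCH as the count of failed write-coalescing
	 * attempts and derives successes as total minus failures.
	 */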
2592 	if (is_t5(sc->params.chip)) {
2593 		wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL);
2594 		wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH);
2595 		wc_success = wc_total - wc_failure;
2596 	} else {
2597 		wc_success = 0;
2598 		wc_failure = 0;
2599 	}
2600 
2601 	KS_U_SET(write_coal_success, wc_success);
2602 	KS_U_SET(write_coal_failure, wc_failure);
2603 
2604 	return (0);
2605 }
2606 
2607 int
2608 adapter_full_init(struct adapter *sc)
2609 {
2610 	int i, rc = 0;
2611 
2612 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2613 
2614 	rc = t4_setup_adapter_queues(sc);
2615 	if (rc != 0)
2616 		goto done;
2617 
2618 	if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) {
2619 		(void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count);
2620 	} else {
2621 		for (i = 0; i < sc->intr_count; i++)
2622 			(void) ddi_intr_enable(sc->intr_handle[i]);
2623 	}
2624 	t4_intr_enable(sc);
2625 	sc->flags |= FULL_INIT_DONE;
2626 
2627 #ifdef TCP_OFFLOAD_ENABLE
2628 	/* TODO: wrong place to enable TOE capability */
2629 	if (is_offload(sc) != 0) {
2630 		for_each_port(sc, i) {
2631 			struct port_info *pi = sc->port[i];
2632 			rc = toe_capability(pi, 1);
2633 			if (rc != 0) {
2634 				cxgb_printf(pi->dip, CE_WARN,
2635 				    "Failed to activate toe capability: %d",
2636 				    rc);
2637 				rc = 0;		/* not a fatal error */
2638 			}
2639 		}
2640 	}
2641 #endif
2642 
2643 done:
2644 	if (rc != 0)
2645 		(void) adapter_full_uninit(sc);
2646 
2647 	return (rc);
2648 }
2649 
2650 int
2651 adapter_full_uninit(struct adapter *sc)
2652 {
2653 	int i, rc = 0;
2654 
2655 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2656 
2657 	if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) {
2658 		(void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count);
2659 	} else {
2660 		for (i = 0; i < sc->intr_count; i++)
2661 			(void) ddi_intr_disable(sc->intr_handle[i]);
2662 	}
2663 
2664 	rc = t4_teardown_adapter_queues(sc);
2665 	if (rc != 0)
2666 		return (rc);
2667 
2668 	sc->flags &= ~FULL_INIT_DONE;
2669 
2670 	return (0);
2671 }
2672 
2673 int
2674 port_full_init(struct port_info *pi)
2675 {
2676 	struct adapter *sc = pi->adapter;
2677 	uint16_t *rss;
2678 	struct sge_rxq *rxq;
2679 	int rc, i;
2680 
2681 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2682 	ASSERT((pi->flags & PORT_INIT_DONE) == 0);
2683 
2684 	/*
2685 	 * Allocate tx/rx/fl queues for this port.
2686 	 */
2687 	rc = t4_setup_port_queues(pi);
2688 	if (rc != 0)
2689 		goto done;	/* error message displayed already */
2690 
2691 	/*
2692 	 * Setup RSS for this port.
2693 	 */
2694 	rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP);
2695 	for_each_rxq(pi, i, rxq) {
2696 		rss[i] = rxq->iq.abs_id;
2697 	}
2698 	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2699 	    pi->rss_size, rss, pi->nrxq);
2700 	kmem_free(rss, pi->nrxq * sizeof (*rss));
2701 	if (rc != 0) {
2702 		cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc);
2703 		goto done;
2704 	}
2705 
2706 	pi->flags |= PORT_INIT_DONE;
2707 done:
2708 	if (rc != 0)
2709 		(void) port_full_uninit(pi);
2710 
2711 	return (rc);
2712 }
2713 
2714 /*
2715  * Idempotent.
2716  */
2717 int
2718 port_full_uninit(struct port_info *pi)
2719 {
2721 	ASSERT(pi->flags & PORT_INIT_DONE);
2722 
2723 	(void) t4_teardown_port_queues(pi);
2724 	pi->flags &= ~PORT_INIT_DONE;
2725 
2726 	return (0);
2727 }
2728 
2729 void
2730 enable_port_queues(struct port_info *pi)
2731 {
2732 	struct adapter *sc = pi->adapter;
2733 	int i;
2734 	struct sge_iq *iq;
2735 	struct sge_rxq *rxq;
2736 #ifdef TCP_OFFLOAD_ENABLE
2737 	struct sge_ofld_rxq *ofld_rxq;
2738 #endif
2739 
2740 	ASSERT(pi->flags & PORT_INIT_DONE);
2741 
2742 	/*
2743 	 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED
2744 	 * back in disable_port_queues will be processed now, after an unbounded
2745 	 * delay.  This can't be good.
2746 	 */
2747 
2748 #ifdef TCP_OFFLOAD_ENABLE
2749 	for_each_ofld_rxq(pi, i, ofld_rxq) {
2750 		iq = &ofld_rxq->iq;
2751 		if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2752 		    IQS_DISABLED)
2753 			panic("%s: iq %p wasn't disabled", __func__,
2754 			    (void *)iq);
2755 		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2756 		    V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2757 	}
2758 #endif
2759 
2760 	for_each_rxq(pi, i, rxq) {
2761 		iq = &rxq->iq;
2762 		if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2763 		    IQS_DISABLED)
2764 			panic("%s: iq %p wasn't disabled", __func__,
2765 			    (void *)iq);
2766 		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2767 		    V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2768 	}
2769 }
2770 
2771 void
2772 disable_port_queues(struct port_info *pi)
2773 {
2774 	int i;
2775 	struct adapter *sc = pi->adapter;
2776 	struct sge_rxq *rxq;
2777 #ifdef TCP_OFFLOAD_ENABLE
2778 	struct sge_ofld_rxq *ofld_rxq;
2779 #endif
2780 
2781 	ASSERT(pi->flags & PORT_INIT_DONE);
2782 
2783 	/*
2784 	 * TODO: need proper implementation for all tx queues (ctrl, eth, ofld).
2785 	 */
2786 
2787 #ifdef TCP_OFFLOAD_ENABLE
2788 	for_each_ofld_rxq(pi, i, ofld_rxq) {
2789 		while (atomic_cas_uint(&ofld_rxq->iq.state, IQS_IDLE,
2790 		    IQS_DISABLED) != IQS_IDLE)
2791 			msleep(1);
2792 	}
2793 #endif
2794 
2795 	for_each_rxq(pi, i, rxq) {
2796 		while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE,
2797 		    IQS_DISABLED) != IQS_IDLE)
2798 			msleep(1);
2799 	}
2800 
2801 	mutex_enter(&sc->sfl_lock);
2802 #ifdef TCP_OFFLOAD_ENABLE
2803 	for_each_ofld_rxq(pi, i, ofld_rxq)
2804 		ofld_rxq->fl.flags |= FL_DOOMED;
2805 #endif
2806 	for_each_rxq(pi, i, rxq)
2807 		rxq->fl.flags |= FL_DOOMED;
2808 	mutex_exit(&sc->sfl_lock);
2809 	/* TODO: need to wait for all fl's to be removed from sc->sfl */
2810 }
2811 
2812 void
2813 t4_fatal_err(struct adapter *sc)
2814 {
2815 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2816 	t4_intr_disable(sc);
2817 	cxgb_printf(sc->dip, CE_WARN,
2818 	    "encountered fatal error, adapter stopped.");
2819 }
2820 
2821 int
2822 t4_os_find_pci_capability(struct adapter *sc, int cap)
2823 {
2824 	uint16_t stat;
2825 	uint8_t cap_ptr, cap_id;
2826 
2827 	t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat);
2828 	if ((stat & PCI_STAT_CAP) == 0)
2829 		return (0); /* does not implement capabilities */
2830 
2831 	t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr);
2832 	while (cap_ptr) {
2833 		t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id);
2834 		if (cap_id == cap)
2835 			return (cap_ptr); /* found */
2836 		t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr);
2837 	}
2838 
2839 	return (0); /* not found */
2840 }
2841 
2842 void
2843 t4_os_portmod_changed(struct adapter *sc, int idx)
2844 {
2845 	static const char *mod_str[] = {
2846 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
2847 	};
2848 	struct port_info *pi = sc->port[idx];
2849 
2850 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
2851 		cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged.");
2852 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
2853 		cxgb_printf(pi->dip, CE_NOTE,
2854 		    "unknown transceiver inserted.");
2855 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
2856 		cxgb_printf(pi->dip, CE_NOTE,
2857 		    "unsupported transceiver inserted.");
2858 	else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
2859 		cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.",
2860 		    mod_str[pi->mod_type]);
2861 	else
2862 		cxgb_printf(pi->dip, CE_NOTE,
2863 		    "transceiver (type %d) inserted.", pi->mod_type);
2864 
2865 	if ((isset(&sc->open_device_map, pi->port_id) != 0) &&
2866 	    pi->link_cfg.new_module)
2867 		pi->link_cfg.redo_l1cfg = true;
2868 }
2869 
2870 /* ARGSUSED */
2871 static int
2872 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
2873 {
2874 	if (m != NULL)
2875 		freemsg(m);
2876 	return (0);
2877 }
2878 
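/*
 * Installs a handler for the given CPL opcode; passing a NULL handler
 * restores the default (drop and free the message).  The swap is atomic,
 * so it is safe against concurrent dispatch.
 */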
2879 int
2880 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2881 {
2882 	cpl_handler_t *loc, new;
2883 
2884 	if (opcode >= ARRAY_SIZE(sc->cpl_handler))
2885 		return (EINVAL);
2886 
2887 	new = h ? h : cpl_not_handled;
2888 	loc = &sc->cpl_handler[opcode];
2889 	(void) atomic_swap_ptr(loc, (void *)new);
2890 
2891 	return (0);
2892 }
2893 
2894 static int
2895 fw_msg_not_handled(struct adapter *sc, const __be64 *data)
2896 {
2897 	struct cpl_fw6_msg *cpl;
2898 
2899 	cpl = __containerof((void *)data, struct cpl_fw6_msg, data);
2900 
2901 	cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__, cpl->type);
2902 	return (0);
2903 }
2904 
2905 int
2906 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
2907 {
2908 	fw_msg_handler_t *loc, new;
2909 
2910 	if (type >= ARRAY_SIZE(sc->fw_msg_handler))
2911 		return (EINVAL);
2912 
2913 	/*
2914 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
2915 	 * handler dispatch table.  Reject any attempt to install a handler for
2916 	 * this subtype.
2917 	 */
2918 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
2919 		return (EINVAL);
2920 
2921 	new = h ? h : fw_msg_not_handled;
2922 	loc = &sc->fw_msg_handler[type];
2923 	(void) atomic_swap_ptr(loc, (void *)new);
2924 
2925 	return (0);
2926 }
2927 
2928 #ifdef TCP_OFFLOAD_ENABLE
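/*
 * Enables or disables TOE on a port.  The TOM ULD is activated when the
 * first port enables TOE and deactivated when the last one disables it.
 */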
2929 static int
2930 toe_capability(struct port_info *pi, int enable)
2931 {
2932 	int rc;
2933 	struct adapter *sc = pi->adapter;
2934 
2935 	if (!is_offload(sc))
2936 		return (ENODEV);
2937 
2938 	if (enable != 0) {
2939 		if (isset(&sc->offload_map, pi->port_id) != 0)
2940 			return (0);
2941 
2942 		if (sc->offload_map == 0) {
2943 			rc = activate_uld(sc, ULD_TOM, &sc->tom);
2944 			if (rc != 0)
2945 				return (rc);
2946 		}
2947 
2948 		setbit(&sc->offload_map, pi->port_id);
2949 	} else {
2950 		if (!isset(&sc->offload_map, pi->port_id))
2951 			return (0);
2952 
2953 		clrbit(&sc->offload_map, pi->port_id);
2954 
2955 		if (sc->offload_map == 0) {
2956 			rc = deactivate_uld(&sc->tom);
2957 			if (rc != 0) {
2958 				setbit(&sc->offload_map, pi->port_id);
2959 				return (rc);
2960 			}
2961 		}
2962 	}
2963 
2964 	return (0);
2965 }
2966 
2967 /*
2968  * Add an upper layer driver to the global list.
2969  */
2970 int
2971 t4_register_uld(struct uld_info *ui)
2972 {
2973 	int rc = 0;
2974 	struct uld_info *u;
2975 
2976 	mutex_enter(&t4_uld_list_lock);
2977 	SLIST_FOREACH(u, &t4_uld_list, link) {
2978 		if (u->uld_id == ui->uld_id) {
2979 			rc = EEXIST;
2980 			goto done;
2981 		}
2982 	}
2983 
2984 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
2985 	ui->refcount = 0;
2986 done:
2987 	mutex_exit(&t4_uld_list_lock);
2988 	return (rc);
2989 }
2990 
2991 int
2992 t4_unregister_uld(struct uld_info *ui)
2993 {
2994 	int rc = EINVAL;
2995 	struct uld_info *u;
2996 
2997 	mutex_enter(&t4_uld_list_lock);
2998 
2999 	SLIST_FOREACH(u, &t4_uld_list, link) {
3000 		if (u == ui) {
3001 			if (ui->refcount > 0) {
3002 				rc = EBUSY;
3003 				goto done;
3004 			}
3005 
3006 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
3007 			rc = 0;
3008 			goto done;
3009 		}
3010 	}
3011 done:
3012 	mutex_exit(&t4_uld_list_lock);
3013 	return (rc);
3014 }
3015 
3016 static int
3017 activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
3018 {
3019 	int rc = EAGAIN;
3020 	struct uld_info *ui;
3021 
3022 	mutex_enter(&t4_uld_list_lock);
3023 
3024 	SLIST_FOREACH(ui, &t4_uld_list, link) {
3025 		if (ui->uld_id == id) {
3026 			rc = ui->attach(sc, &usc->softc);
3027 			if (rc == 0) {
3028 				ASSERT(usc->softc != NULL);
3029 				ui->refcount++;
3030 				usc->uld = ui;
3031 			}
3032 			goto done;
3033 		}
3034 	}
3035 done:
3036 	mutex_exit(&t4_uld_list_lock);
3037 
3038 	return (rc);
3039 }
3040 
3041 static int
3042 deactivate_uld(struct uld_softc *usc)
3043 {
3044 	int rc;
3045 
3046 	mutex_enter(&t4_uld_list_lock);
3047 
3048 	if (usc->uld == NULL || usc->softc == NULL) {
3049 		rc = EINVAL;
3050 		goto done;
3051 	}
3052 
3053 	rc = usc->uld->detach(usc->softc);
3054 	if (rc == 0) {
3055 		ASSERT(usc->uld->refcount > 0);
3056 		usc->uld->refcount--;
3057 		usc->uld = NULL;
3058 		usc->softc = NULL;
3059 	}
3060 done:
3061 	mutex_exit(&t4_uld_list_lock);
3062 
3063 	return (rc);
3064 }
3065 
3066 void
3067 t4_iterate(void (*func)(int, void *), void *arg)
3068 {
3069 	struct adapter *sc;
3070 
3071 	mutex_enter(&t4_adapter_list_lock);
3072 	SLIST_FOREACH(sc, &t4_adapter_list, link) {
3073 		/*
3074 		 * func should not make any assumptions about what state sc is
3075 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
3076 		 */
3077 		func(ddi_get_instance(sc->dip), arg);
3078 	}
3079 	mutex_exit(&t4_adapter_list_lock);
3080 }
3081 
3082 #endif
3083 
3084 static int
3085 t4_sensor_read(struct adapter *sc, uint32_t diag, uint32_t *valp)
3086 {
3087 	int rc;
3088 	struct port_info *pi = sc->port[0];
3089 	uint32_t param, val;
3090 
3091 	rc = begin_synchronized_op(pi, 1, 1);
3092 	if (rc != 0) {
3093 		return (rc);
3094 	}
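	/*
	 * Ask the firmware for the requested diagnostic (e.g. temperature
	 * or core VDD) via the DEV_DIAG device parameter.
	 */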
3095 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3096 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
3097 	    V_FW_PARAMS_PARAM_Y(diag);
3098 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3099 	end_synchronized_op(pi, 1);
3100 
3101 	if (rc != 0) {
3102 		return (rc);
3103 	}
3104 
3105 	if (val == 0) {
3106 		return (EIO);
3107 	}
3108 
3109 	*valp = val;
3110 	return (0);
3111 }
3112 
3113 static int
3114 t4_temperature_read(void *arg, sensor_ioctl_scalar_t *scalar)
3115 {
3116 	int ret;
3117 	struct adapter *sc = arg;
3118 	uint32_t val;
3119 
3120 	ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_TMP, &val);
3121 	if (ret != 0) {
3122 		return (ret);
3123 	}
3124 
3125 	/*
3126 	 * The device measures temperature in units of 1 degree Celsius. We
3127 	 * don't know its precision.
3128 	 */
3129 	scalar->sis_unit = SENSOR_UNIT_CELSIUS;
3130 	scalar->sis_gran = 1;
3131 	scalar->sis_prec = 0;
3132 	scalar->sis_value = val;
3133 
3134 	return (0);
3135 }
3136 
3137 static int
3138 t4_voltage_read(void *arg, sensor_ioctl_scalar_t *scalar)
3139 {
3140 	int ret;
3141 	struct adapter *sc = arg;
3142 	uint32_t val;
3143 
3144 	ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_VDD, &val);
3145 	if (ret != 0) {
3146 		return (ret);
3147 	}
3148 
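	/*
	 * A granularity of 1000 means sis_value is in millivolts, which is
	 * (we assume) the unit the firmware's VDD diagnostic reports.
	 */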
3149 	scalar->sis_unit = SENSOR_UNIT_VOLTS;
3150 	scalar->sis_gran = 1000;
3151 	scalar->sis_prec = 0;
3152 	scalar->sis_value = val;
3153 
3154 	return (0);
3155 }
3156 
3157 /*
3158  * While the hardware supports the ability to read and write the flash image,
3159  * this is not currently wired up.
3160  */
3161 static int
3162 t4_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
3163 {
3164 	*caps = DDI_UFM_CAP_REPORT;
3165 	return (0);
3166 }
3167 
3168 static int
3169 t4_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
3170     ddi_ufm_image_t *imgp)
3171 {
3172 	if (imgno != 0) {
3173 		return (EINVAL);
3174 	}
3175 
3176 	ddi_ufm_image_set_desc(imgp, "Firmware");
3177 	ddi_ufm_image_set_nslots(imgp, 1);
3178 
3179 	return (0);
3180 }
3181 
3182 static int
3183 t4_ufm_fill_slot_version(nvlist_t *nvl, const char *key, uint32_t vers)
3184 {
3185 	char buf[128];
3186 
3187 	if (vers == 0) {
3188 		return (0);
3189 	}
3190 
3191 	if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u",
3192 	    G_FW_HDR_FW_VER_MAJOR(vers), G_FW_HDR_FW_VER_MINOR(vers),
3193 	    G_FW_HDR_FW_VER_MICRO(vers), G_FW_HDR_FW_VER_BUILD(vers)) >=
3194 	    sizeof (buf)) {
3195 		return (EOVERFLOW);
3196 	}
3197 
3198 	return (nvlist_add_string(nvl, key, buf));
3199 }
3200 
3201 static int
3202 t4_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, uint_t slotno,
3203     ddi_ufm_slot_t *slotp)
3204 {
3205 	int ret;
3206 	struct adapter *sc = arg;
3207 	nvlist_t *misc = NULL;
3208 	char buf[128];
3209 
3210 	if (imgno != 0 || slotno != 0) {
3211 		return (EINVAL);
3212 	}
3213 
3214 	if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u",
3215 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
3216 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
3217 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
3218 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)) >= sizeof (buf)) {
3219 		return (EOVERFLOW);
3220 	}
3221 
3222 	ddi_ufm_slot_set_version(slotp, buf);
3223 
3224 	(void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP);
3225 	if ((ret = t4_ufm_fill_slot_version(misc, "TP Microcode",
3226 	    sc->params.tp_vers)) != 0) {
3227 		goto err;
3228 	}
3229 
3230 	if ((ret = t4_ufm_fill_slot_version(misc, "Bootstrap",
3231 	    sc->params.bs_vers)) != 0) {
3232 		goto err;
3233 	}
3234 
3235 	if ((ret = t4_ufm_fill_slot_version(misc, "Expansion ROM",
3236 	    sc->params.er_vers)) != 0) {
3237 		goto err;
3238 	}
3239 
3240 	if ((ret = nvlist_add_uint32(misc, "Serial Configuration",
3241 	    sc->params.scfg_vers)) != 0) {
3242 		goto err;
3243 	}
3244 
3245 	if ((ret = nvlist_add_uint32(misc, "VPD Version",
3246 	    sc->params.vpd_vers)) != 0) {
3247 		goto err;
3248 	}
3249 
3250 	ddi_ufm_slot_set_misc(slotp, misc);
3251 	ddi_ufm_slot_set_attrs(slotp, DDI_UFM_ATTR_ACTIVE |
3252 	    DDI_UFM_ATTR_WRITEABLE | DDI_UFM_ATTR_READABLE);
3253 	return (0);
3254 
3255 err:
3256 	nvlist_free(misc);
3257 	return (ret);
3259 }
3260