xref: /illumos-gate/usr/src/uts/common/io/cxgbe/t4nex/t4_nexus.c (revision ba52565b00cef6c84d9c58d122f646c8f30b4a38)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source. A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * This file is part of the Chelsio T4 support code.
14  *
15  * Copyright (C) 2010-2013 Chelsio Communications.  All rights reserved.
16  *
17  * This program is distributed in the hope that it will be useful, but WITHOUT
18  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19  * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
20  * release for licensing terms and conditions.
21  */
22 
23 /*
24  * Copyright 2024 Oxide Computer Company
25  */
26 
27 #include <sys/ddi.h>
28 #include <sys/sunddi.h>
29 #include <sys/sunndi.h>
30 #include <sys/modctl.h>
31 #include <sys/conf.h>
32 #include <sys/devops.h>
33 #include <sys/pci.h>
34 #include <sys/atomic.h>
35 #include <sys/types.h>
36 #include <sys/file.h>
37 #include <sys/errno.h>
38 #include <sys/open.h>
39 #include <sys/cred.h>
40 #include <sys/stat.h>
41 #include <sys/mkdev.h>
42 #include <sys/queue.h>
43 #include <sys/containerof.h>
44 #include <sys/sensors.h>
45 #include <sys/firmload.h>
46 #include <sys/mac_provider.h>
47 #include <sys/mac_ether.h>
48 #include <sys/vlan.h>
49 
50 #include "version.h"
51 #include "common/common.h"
52 #include "common/t4_msg.h"
53 #include "common/t4_regs.h"
54 #include "common/t4_extra_regs.h"
55 #include "t4_l2t.h"
56 
/*
 * Character-device entry points (cb_ops(9S)).  Only open/close/ioctl are
 * implemented; the control node exists so userland tools (cxgbetool et al.)
 * can issue ioctls against the nexus.  All other slots are nodev/nochpoll.
 */
static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp);
static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp);
static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp,
    int *rp);
struct cb_ops t4_cb_ops = {
	.cb_open =		t4_cb_open,
	.cb_close =		t4_cb_close,
	.cb_strategy =		nodev,
	.cb_print =		nodev,
	.cb_dump =		nodev,
	.cb_read =		nodev,
	.cb_write =		nodev,
	.cb_ioctl =		t4_cb_ioctl,
	.cb_devmap =		nodev,
	.cb_mmap =		nodev,
	.cb_segmap =		nodev,
	.cb_chpoll =		nochpoll,
	.cb_prop_op =		ddi_prop_op,
	.cb_flag =		D_MP,		/* safe for concurrent entry */
	.cb_rev =		CB_REV,
	.cb_aread =		nodev,
	.cb_awrite =		nodev
};
80 
/*
 * Nexus (bus) entry points (bus_ops(9S)).  This driver is a nexus whose
 * children are the per-port "cxgbe" network driver instances; these ops
 * handle child node naming, configuration and unconfiguration.
 */
static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
    void *arg, void *result);
static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **cdipp);
static int t4_bus_unconfig(dev_info_t *dip, uint_t flags,
    ddi_bus_config_op_t op, void *arg);
struct bus_ops t4_bus_ops = {
	.busops_rev =		BUSO_REV,
	.bus_ctl =		t4_bus_ctl,
	.bus_prop_op =		ddi_bus_prop_op,
	.bus_config =		t4_bus_config,
	.bus_unconfig =		t4_bus_unconfig,
};
94 
/*
 * Device operations (dev_ops(9S)) and module linkage.  The driver supports
 * attach/detach, getinfo, probe and quiesce (for fast reboot); it does not
 * implement devo_reset.
 */
static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **rp);
static int t4_devo_probe(dev_info_t *dip);
static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int t4_devo_quiesce(dev_info_t *dip);
static struct dev_ops t4_dev_ops = {
	.devo_rev =		DEVO_REV,
	.devo_getinfo =		t4_devo_getinfo,
	.devo_identify =	nulldev,
	.devo_probe =		t4_devo_probe,
	.devo_attach =		t4_devo_attach,
	.devo_detach =		t4_devo_detach,
	.devo_reset =		nodev,
	.devo_cb_ops =		&t4_cb_ops,
	.devo_bus_ops =		&t4_bus_ops,
	.devo_quiesce =		&t4_devo_quiesce,
};

static struct modldrv t4nex_modldrv = {
	.drv_modops =		&mod_driverops,
	.drv_linkinfo =		"Chelsio T4-T6 nexus " DRV_VERSION,
	.drv_dev_ops =		&t4_dev_ops
};

static struct modlinkage t4nex_modlinkage = {
	.ml_rev =		MODREV_1,
	.ml_linkage =		{&t4nex_modldrv, NULL},
};
124 
/* Soft-state anchor: one struct adapter per driver instance. */
void *t4_list;

/*
 * Output of cfg_itype_and_nqueues(): the interrupt type/count that was
 * negotiated and the resulting per-port queue counts.
 */
struct intrs_and_queues {
	int intr_type;		/* DDI_INTR_TYPE_* */
	int nirq;		/* Number of vectors */
	int intr_fwd;		/* Interrupts forwarded */
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
};
136 
137 static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss,
138     mblk_t *m);
139 static int fw_msg_not_handled(struct adapter *, const __be64 *);
140 int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h);
141 static unsigned int getpf(struct adapter *sc);
142 static int prep_firmware(struct adapter *sc);
143 static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma);
144 static int partition_resources(struct adapter *sc);
145 static int adap__pre_init_tweaks(struct adapter *sc);
146 static int get_params__pre_init(struct adapter *sc);
147 static int get_params__post_init(struct adapter *sc);
148 static int set_params__post_init(struct adapter *);
149 static void setup_memwin(struct adapter *sc);
150 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
151     uint32_t *);
152 void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
153 uint32_t position_memwin(struct adapter *, int, uint32_t);
154 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
155     uint_t count);
156 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
157     uint_t count);
158 static int init_driver_props(struct adapter *sc, struct driver_properties *p);
159 static int remove_extra_props(struct adapter *sc, int n10g, int n1g);
160 static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
161     struct intrs_and_queues *iaq);
162 static int add_child_node(struct adapter *sc, int idx);
163 static int remove_child_node(struct adapter *sc, int idx);
164 static kstat_t *setup_kstats(struct adapter *sc);
165 static kstat_t *setup_wc_kstats(struct adapter *);
166 static int update_wc_kstats(kstat_t *, int);
167 static kmutex_t t4_adapter_list_lock;
168 static SLIST_HEAD(, adapter) t4_adapter_list;
169 
/*
 * ksensor(9E) hooks exposing the adapter's on-die temperature and core
 * voltage sensors to the OS sensor framework.
 */
static int t4_temperature_read(void *, sensor_ioctl_scalar_t *);
static int t4_voltage_read(void *, sensor_ioctl_scalar_t *);
static const ksensor_ops_t t4_temp_ops = {
	.kso_kind = ksensor_kind_temperature,
	.kso_scalar = t4_temperature_read
};

static const ksensor_ops_t t4_volt_ops = {
	.kso_kind = ksensor_kind_voltage,
	.kso_scalar = t4_voltage_read
};
181 
/*
 * DDI UFM (upgradeable firmware module) callbacks, used to report the
 * adapter's firmware images and slots to the ufm framework.
 */
static int t4_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
static int t4_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
    ddi_ufm_image_t *);
static int t4_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
    ddi_ufm_slot_t *);
static ddi_ufm_ops_t t4_ufm_ops = {
	.ddi_ufm_op_fill_image = t4_ufm_fill_image,
	.ddi_ufm_op_fill_slot = t4_ufm_fill_slot,
	.ddi_ufm_op_getcaps = t4_ufm_getcaps
};
192 
193 int
_init(void)194 _init(void)
195 {
196 	int rc;
197 
198 	rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0);
199 	if (rc != 0)
200 		return (rc);
201 
202 	rc = mod_install(&t4nex_modlinkage);
203 	if (rc != 0)
204 		ddi_soft_state_fini(&t4_list);
205 
206 	mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL);
207 	SLIST_INIT(&t4_adapter_list);
208 	t4_debug_init();
209 
210 	return (0);
211 }
212 
213 int
_fini(void)214 _fini(void)
215 {
216 	int rc;
217 
218 	rc = mod_remove(&t4nex_modlinkage);
219 	if (rc != 0)
220 		return (rc);
221 
222 	ddi_soft_state_fini(&t4_list);
223 	t4_debug_fini();
224 
225 	return (0);
226 }
227 
228 int
_info(struct modinfo * mi)229 _info(struct modinfo *mi)
230 {
231 	return (mod_info(&t4nex_modlinkage, mi));
232 }
233 
234 /* ARGSUSED */
235 static int
t4_devo_getinfo(dev_info_t * dip,ddi_info_cmd_t cmd,void * arg,void ** rp)236 t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
237 {
238 	struct adapter *sc;
239 	minor_t minor;
240 
241 	minor = getminor((dev_t)arg);	/* same as instance# in our case */
242 
243 	if (cmd == DDI_INFO_DEVT2DEVINFO) {
244 		sc = ddi_get_soft_state(t4_list, minor);
245 		if (sc == NULL)
246 			return (DDI_FAILURE);
247 
248 		ASSERT(sc->dev == (dev_t)arg);
249 		*rp = (void *)sc->dip;
250 	} else if (cmd == DDI_INFO_DEVT2INSTANCE)
251 		*rp = (void *) (unsigned long) minor;
252 	else
253 		ASSERT(0);
254 
255 	return (DDI_SUCCESS);
256 }
257 
258 static int
t4_devo_probe(dev_info_t * dip)259 t4_devo_probe(dev_info_t *dip)
260 {
261 	int rc, id, *reg;
262 	uint_t n, pf;
263 
264 	id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
265 	    "device-id", 0xffff);
266 	if (id == 0xffff)
267 		return (DDI_PROBE_DONTCARE);
268 
269 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
270 	    "reg", &reg, &n);
271 	if (rc != DDI_SUCCESS)
272 		return (DDI_PROBE_DONTCARE);
273 
274 	pf = PCI_REG_FUNC_G(reg[0]);
275 	ddi_prop_free(reg);
276 
277 	/* Prevent driver attachment on any PF except 0 on the FPGA */
278 	if (id == 0xa000 && pf != 0)
279 		return (DDI_PROBE_FAILURE);
280 
281 	return (DDI_PROBE_DONTCARE);
282 }
283 
/*
 * attach(9E): bring an adapter instance to a usable state.
 *
 * Broad sequence: allocate soft state and locks; map PCI config space and
 * the register BARs; install default message handlers and per-channel
 * reclaim taskqs; prepare the adapter and its firmware; size and allocate
 * the SGE queues and interrupt vectors; create sensors, UFM state and
 * kstats.  On any failure we jump to "done", where t4_devo_detach() is
 * reused to unwind whatever was set up (detach tolerates partial state).
 */
static int
t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct adapter *sc = NULL;
	struct sge *s;
	int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q;
	int irq = 0, nxg = 0, n1g = 0;
	char name[16];
	struct driver_properties *prp;
	struct intrs_and_queues iaq;
	/* Little-endian, strictly-ordered access for both register BARs. */
	ddi_device_acc_attr_t da = {
		.devacc_attr_version = DDI_DEVICE_ATTR_V0,
		.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
		.devacc_attr_dataorder = DDI_STRICTORDER_ACC
	};
	ddi_device_acc_attr_t da1 = {
		.devacc_attr_version = DDI_DEVICE_ATTR_V0,
		.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
		.devacc_attr_dataorder = DDI_STRICTORDER_ACC
	};

	/* No DDI_RESUME support. */
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Allocate space for soft state.
	 */
	instance = ddi_get_instance(dip);
	rc = ddi_soft_state_zalloc(t4_list, instance);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to allocate soft state: %d", rc);
		return (DDI_FAILURE);
	}

	sc = ddi_get_soft_state(t4_list, instance);
	sc->dip = dip;
	sc->dev = makedevice(ddi_driver_major(dip), instance);
	mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sc->cv, NULL, CV_DRIVER, NULL);
	mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);
	TAILQ_INIT(&sc->sfl);
	mutex_init(&sc->mbox_lock, NULL, MUTEX_DRIVER, NULL);
	STAILQ_INIT(&sc->mbox_list);

	mutex_enter(&t4_adapter_list_lock);
	SLIST_INSERT_HEAD(&t4_adapter_list, sc, link);
	mutex_exit(&t4_adapter_list_lock);

	/*
	 * getpf() returns 0xff on failure, which is caught by the range
	 * check below.  NOTE(review): the check admits pf == 8 even though
	 * PCI functions are 0-7 — confirm whether "> 8" should be ">= 8".
	 */
	sc->pf = getpf(sc);
	if (sc->pf > 8) {
		rc = EINVAL;
		cxgb_printf(dip, CE_WARN,
		    "failed to determine PCI PF# of device");
		goto done;
	}
	sc->mbox = sc->pf;

	/* Initialize the driver properties */
	prp = &sc->props;
	(void) init_driver_props(sc, prp);

	/*
	 * Enable access to the PCI config space.
	 */
	rc = pci_config_setup(dip, &sc->pci_regh);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to enable PCI config space access: %d", rc);
		goto done;
	}

	/* TODO: Set max read request to 4K */

	/*
	 * Enable MMIO access (register set 1 is BAR0).
	 */
	rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to map device registers: %d", rc);
		goto done;
	}

	(void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map));

	/*
	 * Initialize cpl handler.  Every CPL opcode and firmware message
	 * type starts out routed to a "not handled" stub; interested code
	 * registers real handlers later.
	 */
	for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) {
		sc->cpl_handler[i] = cpl_not_handled;
	}

	for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) {
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	}

	/* One single-threaded reclaim taskq per channel. */
	for (i = 0; i < NCHAN; i++) {
		(void) snprintf(name, sizeof (name), "%s-%d", "reclaim", i);
		sc->tq[i] = ddi_taskq_create(sc->dip, name, 1,
		    TASKQ_DEFAULTPRI, 0);

		if (sc->tq[i] == NULL) {
			cxgb_printf(dip, CE_WARN, "failed to create taskqs");
			rc = DDI_FAILURE;
			goto done;
		}
	}

	/*
	 * Prepare the adapter for operation.  (Common code returns negative
	 * errnos; negate back to positive.)
	 */
	rc = -t4_prep_adapter(sc, false);
	if (rc != 0) {
		cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc);
		goto done;
	}

	/*
	 * Enable BAR1 access (register set 2).  Failure to map BAR1 is
	 * fatal; on T5+ with write-combining enabled we switch the
	 * user-doorbell strategy accordingly.
	 */
	sc->doorbells |= DOORBELL_KDB;
	rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to map BAR1 device registers: %d", rc);
		goto done;
	} else {
		if (is_t5(sc->params.chip)) {
			sc->doorbells |= DOORBELL_UDB;
			if (prp->wc) {
				/*
				 * Enable write combining on BAR2.  This is the
				 * userspace doorbell BAR and is split into 128B
				 * (UDBS_SEG_SIZE) doorbell regions, each
				 * associated with an egress queue.  The first
				 * 64B has the doorbell and the second 64B can
				 * be used to submit a tx work request with an
				 * implicit doorbell.
				 */
				sc->doorbells &= ~DOORBELL_UDB;
				sc->doorbells |= (DOORBELL_WCWR |
				    DOORBELL_UDBWC);
				t4_write_reg(sc, A_SGE_STAT_CFG,
				    V_STATSOURCE_T5(7) | V_STATMODE(0));
			}
		}
	}

	/*
	 * Do this really early.  Note that minor number = instance.
	 * Failure to create the minor node is deliberately non-fatal:
	 * the NIC still works, only the control node is missing.
	 */
	(void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance);
	rc = ddi_create_minor_node(dip, name, S_IFCHR, instance,
	    DDI_NT_NEXUS, 0);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to create device node: %d", rc);
		rc = DDI_SUCCESS; /* carry on */
	}

	/* Do this early. Memory window is required for loading config file. */
	setup_memwin(sc);

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = adap__pre_init_tweaks(sc);
	if (rc != 0)
		goto done;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	t4_sge_init(sc);

	/* Only the master PF performs one-time firmware initialization. */
	if (sc->flags & MASTER_PF) {
		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			cxgb_printf(sc->dip, CE_WARN,
			    "early init failed: %d.\n", rc);
			goto done;
		}
	}

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * TODO: This is the place to call t4_set_filter_mode()
	 */

	/* tweak some settings */
	t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
	    V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
	    V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

	/*
	 * Work-around for bug 2619
	 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the
	 * VLAN tag extraction is disabled.
	 */
	t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN);

	/* Store filter mode */
	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
	    A_TP_VLAN_PRI_MAP);

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = kmem_zalloc(sizeof (*pi), KM_SLEEP);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->port_id = i;
	}

	/* Allocate the vi and initialize parameters like mac addr */
	rc = -t4_port_init(sc, sc->mbox, sc->pf, 0);
	if (rc) {
		cxgb_printf(dip, CE_WARN, "unable to initialize port: %d", rc);
		goto done;
	}

	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL);
		pi->mtu = ETHERMTU;

		/* Interrupt coalescing defaults differ by link class. */
		if (is_10XG_port(pi)) {
			nxg++;
			pi->tmr_idx = prp->tmr_idx_10g;
			pi->pktc_idx = prp->pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = prp->tmr_idx_1g;
			pi->pktc_idx = prp->pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		t4_mc_init(pi);

		setbit(&sc->registered_device_map, i);
	}

	(void) remove_extra_props(sc, nxg, n1g);

	if (sc->registered_device_map == 0) {
		cxgb_printf(dip, CE_WARN, "no usable ports");
		rc = DDI_FAILURE;
		goto done;
	}

	rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	/* Multiple rings only make sense with per-queue MSI-X vectors. */
	if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) {
		sc->props.multi_rings = 0;
		cxgb_printf(dip, CE_WARN,
		    "Multiple rings disabled as interrupt type is not MSI-X");
	}

	if (sc->props.multi_rings && iaq.intr_fwd) {
		sc->props.multi_rings = 0;
		cxgb_printf(dip, CE_WARN,
		    "Multiple rings disabled as interrupts are forwarded");
	}

	if (!sc->props.multi_rings) {
		iaq.ntxq10g = 1;
		iaq.ntxq1g = 1;
	}
	s = &sc->sge;
	s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the fl in an rxq is an eq */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
	if (iaq.intr_fwd != 0)
		sc->flags |= INTR_FWD;
	s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP);
	s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP);
	s->iqmap =
	    kmem_zalloc(s->iqmap_sz * sizeof (struct sge_iq *), KM_SLEEP);
	s->eqmap =
	    kmem_zalloc(s->eqmap_sz * sizeof (struct sge_eq *), KM_SLEEP);

	sc->intr_handle =
	    kmem_zalloc(sc->intr_count * sizeof (ddi_intr_handle_t), KM_SLEEP);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		t4_mc_cb_init(pi);
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->first_rxq = rqidx;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->nrxq = (is_10XG_port(pi)) ? iaq.nrxq10g
		    : iaq.nrxq1g;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->first_txq = tqidx;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->ntxq = (is_10XG_port(pi)) ? iaq.ntxq10g
		    : iaq.ntxq1g;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

		/*
		 * Enable hw checksumming and LSO for all ports by default.
		 * They can be disabled using ndd (hw_csum and hw_lso).
		 */
		pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO);
	}

	/*
	 * Setup Interrupts.
	 */

	i = 0;
	rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0,
	    sc->intr_count, &i, DDI_INTR_ALLOC_STRICT);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to allocate %d interrupt(s) of type %d: %d, %d",
		    sc->intr_count, sc->intr_type, rc, i);
		goto done;
	}
	ASSERT(sc->intr_count == i); /* allocation was STRICT */
	(void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap);
	(void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri);
	if (sc->intr_count == 1) {
		ASSERT(sc->flags & INTR_FWD);
		(void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc,
		    &s->fwq);
	} else {
		/* Multiple interrupts.  The first one is always error intr */
		(void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc,
		    NULL);
		irq++;

		/* The second one is always the firmware event queue */
		(void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc,
		    &s->fwq);
		irq++;
		/*
		 * Note that if INTR_FWD is set then either the NIC rx
		 * queues or (exclusive or) the TOE rx queueus will be taking
		 * direct interrupts.
		 *
		 * There is no need to check for is_offload(sc) as nofldrxq
		 * will be 0 if offload is disabled.
		 */
		for_each_port(sc, i) {
			struct port_info *pi = sc->port[i];
			struct sge_rxq *rxq;
			rxq = &s->rxq[pi->first_rxq];
			for (q = 0; q < pi->nrxq; q++, rxq++) {
				(void) ddi_intr_add_handler(
				    sc->intr_handle[irq], t4_intr, sc,
				    &rxq->iq);
				irq++;
			}
		}

	}
	sc->flags |= INTR_ALLOCATED;

	/* Register the temperature and voltage sensors with ksensor. */
	if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_TEMPERATURE,
	    &t4_temp_ops, sc, "temp", &sc->temp_sensor)) != 0) {
		cxgb_printf(dip, CE_WARN, "failed to create temperature "
		    "sensor: %d", rc);
		rc = DDI_FAILURE;
		goto done;
	}

	if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_VOLTAGE,
	    &t4_volt_ops, sc, "vdd", &sc->volt_sensor)) != 0) {
		cxgb_printf(dip, CE_WARN, "failed to create voltage "
		    "sensor: %d", rc);
		rc = DDI_FAILURE;
		goto done;
	}


	if ((rc = ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &t4_ufm_ops,
	    &sc->ufm_hdl, sc)) != 0) {
		cxgb_printf(dip, CE_WARN, "failed to enable UFM ops: %d", rc);
		rc = DDI_FAILURE;
		goto done;
	}
	ddi_ufm_update(sc->ufm_hdl);
	ddi_report_dev(dip);

	/*
	 * Hardware/Firmware/etc. Version/Revision IDs.
	 */
	t4_dump_version_info(sc);

	cxgb_printf(dip, CE_NOTE, "(%d rxq, %d txq total) %d %s.",
	    rqidx, tqidx, sc->intr_count,
	    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
	    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
	    "fixed interrupt");

	sc->ksp = setup_kstats(sc);
	sc->ksp_stat = setup_wc_kstats(sc);
	sc->params.drv_memwin = MEMWIN_NIC;

done:
	/*
	 * NOTE(review): rc holds a mix of errno-style and DDI_* values on
	 * the failure paths above; any non-DDI_SUCCESS value (including a
	 * positive errno) lands here and is normalized to DDI_FAILURE.
	 */
	if (rc != DDI_SUCCESS) {
		(void) t4_devo_detach(dip, DDI_DETACH);

		/* rc may have errno style errors or DDI errors */
		rc = DDI_FAILURE;
	}

	return (rc);
}
734 
/*
 * detach(9E): tear down an adapter instance.
 *
 * Also invoked by t4_devo_attach() on its failure path, so every step
 * must tolerate partially-initialized state (NULL handles, unallocated
 * arrays, cleared flags).  Teardown is roughly the reverse of attach:
 * quiesce queues, unregister sensors/UFM/minor nodes, destroy taskqs and
 * kstats, free the SGE arrays and interrupts, release the ports, say
 * goodbye to the firmware, unmap the BARs, and finally free soft state.
 */
static int
t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance, i;
	struct adapter *sc;
	struct port_info *pi;
	struct sge *s;

	/* No DDI_SUSPEND support. */
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (DDI_SUCCESS);

	/* Quiesce interrupts and uninitialize any fully-set-up ports. */
	if (sc->flags & FULL_INIT_DONE) {
		t4_intr_disable(sc);
		for_each_port(sc, i) {
			pi = sc->port[i];
			if (pi && pi->flags & PORT_INIT_DONE)
				(void) port_full_uninit(pi);
		}
		(void) adapter_full_uninit(sc);
	}

	/* Safe to call no matter what */
	if (sc->ufm_hdl != NULL) {
		ddi_ufm_fini(sc->ufm_hdl);
		sc->ufm_hdl = NULL;
	}
	(void) ksensor_remove(dip, KSENSOR_ALL_IDS);
	ddi_prop_remove_all(dip);
	ddi_remove_minor_node(dip, NULL);

	/* Drain and destroy the per-channel reclaim taskqs. */
	for (i = 0; i < NCHAN; i++) {
		if (sc->tq[i]) {
			ddi_taskq_wait(sc->tq[i]);
			ddi_taskq_destroy(sc->tq[i]);
		}
	}

	if (sc->ksp != NULL)
		kstat_delete(sc->ksp);
	if (sc->ksp_stat != NULL)
		kstat_delete(sc->ksp_stat);

	/* Free the SGE queue arrays sized during attach. */
	s = &sc->sge;
	if (s->rxq != NULL)
		kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq));
	if (s->txq != NULL)
		kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq));
	if (s->iqmap != NULL)
		kmem_free(s->iqmap, s->iqmap_sz * sizeof (struct sge_iq *));
	if (s->eqmap != NULL)
		kmem_free(s->eqmap, s->eqmap_sz * sizeof (struct sge_eq *));

	if (s->rxbuf_cache != NULL)
		kmem_cache_destroy(s->rxbuf_cache);

	if (sc->flags & INTR_ALLOCATED) {
		for (i = 0; i < sc->intr_count; i++) {
			(void) ddi_intr_remove_handler(sc->intr_handle[i]);
			(void) ddi_intr_free(sc->intr_handle[i]);
		}
		sc->flags &= ~INTR_ALLOCATED;
	}

	if (sc->intr_handle != NULL) {
		kmem_free(sc->intr_handle,
		    sc->intr_count * sizeof (*sc->intr_handle));
	}

	for_each_port(sc, i) {
		pi = sc->port[i];
		if (pi != NULL) {
			mutex_destroy(&pi->lock);
			kmem_free(pi, sizeof (*pi));
			clrbit(&sc->registered_device_map, i);
		}
	}

	if (sc->flags & FW_OK)
		(void) t4_fw_bye(sc, sc->mbox);

	if (sc->reg1h != NULL)
		ddi_regs_map_free(&sc->reg1h);

	if (sc->regh != NULL)
		ddi_regs_map_free(&sc->regh);

	if (sc->pci_regh != NULL)
		pci_config_teardown(&sc->pci_regh);

	mutex_enter(&t4_adapter_list_lock);
	SLIST_REMOVE(&t4_adapter_list, sc, adapter, link);
	mutex_exit(&t4_adapter_list_lock);

	mutex_destroy(&sc->mbox_lock);
	mutex_destroy(&sc->lock);
	cv_destroy(&sc->cv);
	mutex_destroy(&sc->sfl_lock);

#ifdef DEBUG
	/* Poison the soft state to catch use-after-detach in DEBUG builds. */
	bzero(sc, sizeof (*sc));
#endif
	ddi_soft_state_free(t4_list, instance);

	return (DDI_SUCCESS);
}
845 
/*
 * quiesce(9E): silence the device for fast reboot.  This runs with the
 * system single-threaded and must not block or allocate — register pokes
 * only: stop the SGE, mask interrupts, then reset the chip.
 */
static int
t4_devo_quiesce(dev_info_t *dip)
{
	int instance;
	struct adapter *sc;

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (DDI_SUCCESS);

	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST);

	return (DDI_SUCCESS);
}
863 
864 static int
t4_bus_ctl(dev_info_t * dip,dev_info_t * rdip,ddi_ctl_enum_t op,void * arg,void * result)865 t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
866     void *result)
867 {
868 	char s[4];
869 	struct port_info *pi;
870 	dev_info_t *child = (dev_info_t *)arg;
871 
872 	switch (op) {
873 	case DDI_CTLOPS_REPORTDEV:
874 		pi = ddi_get_parent_data(rdip);
875 		pi->instance = ddi_get_instance(dip);
876 		pi->child_inst = ddi_get_instance(rdip);
877 		return (DDI_SUCCESS);
878 
879 	case DDI_CTLOPS_INITCHILD:
880 		pi = ddi_get_parent_data(child);
881 		if (pi == NULL)
882 			return (DDI_NOT_WELL_FORMED);
883 		(void) snprintf(s, sizeof (s), "%d", pi->port_id);
884 		ddi_set_name_addr(child, s);
885 		return (DDI_SUCCESS);
886 
887 	case DDI_CTLOPS_UNINITCHILD:
888 		ddi_set_name_addr(child, NULL);
889 		return (DDI_SUCCESS);
890 
891 	case DDI_CTLOPS_ATTACH:
892 	case DDI_CTLOPS_DETACH:
893 		return (DDI_SUCCESS);
894 
895 	default:
896 		return (ddi_ctlops(dip, rdip, op, arg, result));
897 	}
898 }
899 
900 static int
t4_bus_config(dev_info_t * dip,uint_t flags,ddi_bus_config_op_t op,void * arg,dev_info_t ** cdipp)901 t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg,
902     dev_info_t **cdipp)
903 {
904 	int instance, i;
905 	struct adapter *sc;
906 
907 	instance = ddi_get_instance(dip);
908 	sc = ddi_get_soft_state(t4_list, instance);
909 
910 	if (op == BUS_CONFIG_ONE) {
911 		char *c;
912 
913 		/*
914 		 * arg is something like "cxgb@0" where 0 is the port_id hanging
915 		 * off this nexus.
916 		 */
917 
918 		c = arg;
919 		while (*(c + 1))
920 			c++;
921 
922 		/* There should be exactly 1 digit after '@' */
923 		if (*(c - 1) != '@')
924 			return (NDI_FAILURE);
925 
926 		i = *c - '0';
927 
928 		if (add_child_node(sc, i) != 0)
929 			return (NDI_FAILURE);
930 
931 		flags |= NDI_ONLINE_ATTACH;
932 
933 	} else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) {
934 		/* Allocate and bind all child device nodes */
935 		for_each_port(sc, i)
936 		    (void) add_child_node(sc, i);
937 		flags |= NDI_ONLINE_ATTACH;
938 	}
939 
940 	return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0));
941 }
942 
943 static int
t4_bus_unconfig(dev_info_t * dip,uint_t flags,ddi_bus_config_op_t op,void * arg)944 t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
945     void *arg)
946 {
947 	int instance, i, rc;
948 	struct adapter *sc;
949 
950 	instance = ddi_get_instance(dip);
951 	sc = ddi_get_soft_state(t4_list, instance);
952 
953 	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ALL ||
954 	    op == BUS_UNCONFIG_DRIVER)
955 		flags |= NDI_UNCONFIG;
956 
957 	rc = ndi_busop_bus_unconfig(dip, flags, op, arg);
958 	if (rc != 0)
959 		return (rc);
960 
961 	if (op == BUS_UNCONFIG_ONE) {
962 		char *c;
963 
964 		c = arg;
965 		while (*(c + 1))
966 			c++;
967 
968 		if (*(c - 1) != '@')
969 			return (NDI_SUCCESS);
970 
971 		i = *c - '0';
972 
973 		rc = remove_child_node(sc, i);
974 
975 	} else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) {
976 
977 		for_each_port(sc, i)
978 		    (void) remove_child_node(sc, i);
979 	}
980 
981 	return (rc);
982 }
983 
984 /* ARGSUSED */
985 static int
t4_cb_open(dev_t * devp,int flag,int otyp,cred_t * credp)986 t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp)
987 {
988 	struct adapter *sc;
989 
990 	if (otyp != OTYP_CHR)
991 		return (EINVAL);
992 
993 	sc = ddi_get_soft_state(t4_list, getminor(*devp));
994 	if (sc == NULL)
995 		return (ENXIO);
996 
997 	return (atomic_cas_uint(&sc->open, 0, EBUSY));
998 }
999 
1000 /* ARGSUSED */
1001 static int
t4_cb_close(dev_t dev,int flag,int otyp,cred_t * credp)1002 t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp)
1003 {
1004 	struct adapter *sc;
1005 
1006 	sc = ddi_get_soft_state(t4_list, getminor(dev));
1007 	if (sc == NULL)
1008 		return (EINVAL);
1009 
1010 	(void) atomic_swap_uint(&sc->open, 0);
1011 	return (0);
1012 }
1013 
1014 /* ARGSUSED */
1015 static int
t4_cb_ioctl(dev_t dev,int cmd,intptr_t d,int mode,cred_t * credp,int * rp)1016 t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp)
1017 {
1018 	int instance;
1019 	struct adapter *sc;
1020 	void *data = (void *)d;
1021 
1022 	if (crgetuid(credp) != 0)
1023 		return (EPERM);
1024 
1025 	instance = getminor(dev);
1026 	sc = ddi_get_soft_state(t4_list, instance);
1027 	if (sc == NULL)
1028 		return (EINVAL);
1029 
1030 	return (t4_ioctl(sc, cmd, data, mode));
1031 }
1032 
1033 static unsigned int
getpf(struct adapter * sc)1034 getpf(struct adapter *sc)
1035 {
1036 	int rc, *data;
1037 	uint_t n, pf;
1038 
1039 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
1040 	    DDI_PROP_DONTPASS, "reg", &data, &n);
1041 	if (rc != DDI_SUCCESS) {
1042 		cxgb_printf(sc->dip, CE_WARN,
1043 		    "failed to lookup \"reg\" property: %d", rc);
1044 		return (0xff);
1045 	}
1046 
1047 	pf = PCI_REG_FUNC_G(data[0]);
1048 	ddi_prop_free(data);
1049 
1050 	return (pf);
1051 }
1052 
1053 /*
1054  * Install a compatible firmware (if required), establish contact with it,
1055  * become the master, and reset the device.
1056  */
1057 static int
prep_firmware(struct adapter * sc)1058 prep_firmware(struct adapter *sc)
1059 {
1060 	int rc;
1061 	size_t fw_size;
1062 	int reset = 1;
1063 	enum dev_state state;
1064 	unsigned char *fw_data;
1065 	struct fw_hdr *card_fw, *hdr;
1066 	const char *fw_file = NULL;
1067 	firmware_handle_t fw_hdl;
1068 	struct fw_info fi, *fw_info = &fi;
1069 
1070 	struct driver_properties *p = &sc->props;
1071 
1072 	/* Contact firmware, request master */
1073 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
1074 	if (rc < 0) {
1075 		rc = -rc;
1076 		cxgb_printf(sc->dip, CE_WARN,
1077 		    "failed to connect to the firmware: %d.", rc);
1078 		return (rc);
1079 	}
1080 
1081 	if (rc == sc->mbox)
1082 		sc->flags |= MASTER_PF;
1083 
1084 	/* We may need FW version info for later reporting */
1085 	t4_get_version_info(sc);
1086 
1087 	switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
1088 	case CHELSIO_T4:
1089 		fw_file = "t4fw.bin";
1090 		break;
1091 	case CHELSIO_T5:
1092 		fw_file = "t5fw.bin";
1093 		break;
1094 	case CHELSIO_T6:
1095 		fw_file = "t6fw.bin";
1096 		break;
1097 	default:
1098 		cxgb_printf(sc->dip, CE_WARN, "Adapter type not supported\n");
1099 		return (EINVAL);
1100 	}
1101 
1102 	if (firmware_open(T4_PORT_NAME, fw_file, &fw_hdl) != 0) {
1103 		cxgb_printf(sc->dip, CE_WARN, "Could not open %s\n", fw_file);
1104 		return (EINVAL);
1105 	}
1106 
1107 	fw_size = firmware_get_size(fw_hdl);
1108 
1109 	if (fw_size < sizeof (struct fw_hdr)) {
1110 		cxgb_printf(sc->dip, CE_WARN, "%s is too small (%ld bytes)\n",
1111 		    fw_file, fw_size);
1112 		firmware_close(fw_hdl);
1113 		return (EINVAL);
1114 	}
1115 
1116 	if (fw_size > FLASH_FW_MAX_SIZE) {
1117 		cxgb_printf(sc->dip, CE_WARN,
1118 		    "%s is too large (%ld bytes, max allowed is %ld)\n",
1119 		    fw_file, fw_size, FLASH_FW_MAX_SIZE);
1120 		firmware_close(fw_hdl);
1121 		return (EFBIG);
1122 	}
1123 
1124 	fw_data = kmem_zalloc(fw_size, KM_SLEEP);
1125 	if (firmware_read(fw_hdl, 0, fw_data, fw_size) != 0) {
1126 		cxgb_printf(sc->dip, CE_WARN, "Failed to read from %s\n",
1127 		    fw_file);
1128 		firmware_close(fw_hdl);
1129 		kmem_free(fw_data, fw_size);
1130 		return (EINVAL);
1131 	}
1132 	firmware_close(fw_hdl);
1133 
1134 	bzero(fw_info, sizeof (*fw_info));
1135 	fw_info->chip = CHELSIO_CHIP_VERSION(sc->params.chip);
1136 
1137 	hdr = (struct fw_hdr *)fw_data;
1138 	fw_info->fw_hdr.fw_ver = hdr->fw_ver;
1139 	fw_info->fw_hdr.chip = hdr->chip;
1140 	fw_info->fw_hdr.intfver_nic = hdr->intfver_nic;
1141 	fw_info->fw_hdr.intfver_vnic = hdr->intfver_vnic;
1142 	fw_info->fw_hdr.intfver_ofld = hdr->intfver_ofld;
1143 	fw_info->fw_hdr.intfver_ri = hdr->intfver_ri;
1144 	fw_info->fw_hdr.intfver_iscsipdu = hdr->intfver_iscsipdu;
1145 	fw_info->fw_hdr.intfver_iscsi = hdr->intfver_iscsi;
1146 	fw_info->fw_hdr.intfver_fcoepdu = hdr->intfver_fcoepdu;
1147 	fw_info->fw_hdr.intfver_fcoe = hdr->intfver_fcoe;
1148 
1149 	/* allocate memory to read the header of the firmware on the card */
1150 	card_fw = kmem_zalloc(sizeof (*card_fw), KM_SLEEP);
1151 
1152 	rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw,
1153 	    p->t4_fw_install, state, &reset);
1154 
1155 	kmem_free(card_fw, sizeof (*card_fw));
1156 	kmem_free(fw_data, fw_size);
1157 
1158 	if (rc != 0) {
1159 		cxgb_printf(sc->dip, CE_WARN,
1160 		    "failed to install firmware: %d", rc);
1161 		return (rc);
1162 	} else {
1163 		/* refresh */
1164 		(void) t4_check_fw_version(sc);
1165 	}
1166 
1167 	/* Reset device */
1168 	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1169 	if (rc != 0) {
1170 		cxgb_printf(sc->dip, CE_WARN,
1171 		    "firmware reset failed: %d.", rc);
1172 		if (rc != ETIMEDOUT && rc != EIO)
1173 			(void) t4_fw_bye(sc, sc->mbox);
1174 		return (rc);
1175 	}
1176 
1177 	/* Partition adapter resources as specified in the config file. */
1178 	if (sc->flags & MASTER_PF) {
1179 		/* Handle default vs special T4 config file */
1180 
1181 		rc = partition_resources(sc);
1182 		if (rc != 0)
1183 			goto err;	/* error message displayed already */
1184 	}
1185 
1186 	sc->flags |= FW_OK;
1187 	return (0);
1188 err:
1189 	return (rc);
1190 
1191 }
1192 
/* PCIe memory-access window base/aperture pairs for T4 adapters. */
static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE, MEMWIN2_APERTURE }
};
1198 
/* T5 and later chips use a different base/aperture for window 2. */
static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1204 
/*
 * Build the mnemonic/index portion of a firmware parameter query:
 * FW_PARAM_DEV for device-wide parameters, FW_PARAM_PFVF for per-PF/VF
 * parameters.  The token-pasted "param" selects the parameter constant.
 */
#define	FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define	FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
1211 
1212 /*
1213  * Verify that the memory range specified by the memtype/offset/len pair is
1214  * valid and lies entirely within the memtype specified.  The global address of
1215  * the start of the range is returned in addr.
1216  */
1217 int
validate_mt_off_len(struct adapter * sc,int mtype,uint32_t off,int len,uint32_t * addr)1218 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1219     uint32_t *addr)
1220 {
1221 	uint32_t em, addr_len, maddr, mlen;
1222 
1223 	/* Memory can only be accessed in naturally aligned 4 byte units */
1224 	if (off & 3 || len & 3 || len == 0)
1225 		return (EINVAL);
1226 
1227 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1228 	switch (mtype) {
1229 		case MEM_EDC0:
1230 			if (!(em & F_EDRAM0_ENABLE))
1231 				return (EINVAL);
1232 			addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1233 			maddr = G_EDRAM0_BASE(addr_len) << 20;
1234 			mlen = G_EDRAM0_SIZE(addr_len) << 20;
1235 			break;
1236 		case MEM_EDC1:
1237 			if (!(em & F_EDRAM1_ENABLE))
1238 				return (EINVAL);
1239 			addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1240 			maddr = G_EDRAM1_BASE(addr_len) << 20;
1241 			mlen = G_EDRAM1_SIZE(addr_len) << 20;
1242 			break;
1243 		case MEM_MC:
1244 			if (!(em & F_EXT_MEM_ENABLE))
1245 				return (EINVAL);
1246 			addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1247 			maddr = G_EXT_MEM_BASE(addr_len) << 20;
1248 			mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1249 			break;
1250 		case MEM_MC1:
1251 			if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE))
1252 				return (EINVAL);
1253 			addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1254 			maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1255 			mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1256 			break;
1257 		default:
1258 			return (EINVAL);
1259 	}
1260 
1261 	if (mlen > 0 && off < mlen && off + len <= mlen) {
1262 		*addr = maddr + off;    /* global address */
1263 		return (0);
1264 	}
1265 
1266 	return (EFAULT);
1267 }
1268 
1269 void
memwin_info(struct adapter * sc,int win,uint32_t * base,uint32_t * aperture)1270 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1271 {
1272 	const struct memwin *mw;
1273 
1274 	if (is_t4(sc->params.chip)) {
1275 		mw = &t4_memwin[win];
1276 	} else {
1277 		mw = &t5_memwin[win];
1278 	}
1279 
1280 	if (base != NULL)
1281 		*base = mw->base;
1282 	if (aperture != NULL)
1283 		*aperture = mw->aperture;
1284 }
1285 
1286 /*
1287  * Upload configuration file to card's memory.
1288  */
1289 static int
upload_config_file(struct adapter * sc,uint32_t * mt,uint32_t * ma)1290 upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma)
1291 {
1292 	int rc = 0;
1293 	size_t cflen, cfbaselen;
1294 	uint_t i, n;
1295 	uint32_t param, val, addr, mtype, maddr;
1296 	uint32_t off, mw_base, mw_aperture;
1297 	uint32_t *cfdata, *cfbase;
1298 	firmware_handle_t fw_hdl;
1299 	const char *cfg_file = NULL;
1300 
1301 	/* Figure out where the firmware wants us to upload it. */
1302 	param = FW_PARAM_DEV(CF);
1303 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1304 	if (rc != 0) {
1305 		/* Firmwares without config file support will fail this way */
1306 		cxgb_printf(sc->dip, CE_WARN,
1307 		    "failed to query config file location: %d.\n", rc);
1308 		return (rc);
1309 	}
1310 	*mt = mtype = G_FW_PARAMS_PARAM_Y(val);
1311 	*ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
1312 
1313 	switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
1314 	case CHELSIO_T4:
1315 		cfg_file = "t4fw_cfg.txt";
1316 		break;
1317 	case CHELSIO_T5:
1318 		cfg_file = "t5fw_cfg.txt";
1319 		break;
1320 	case CHELSIO_T6:
1321 		cfg_file = "t6fw_cfg.txt";
1322 		break;
1323 	default:
1324 		cxgb_printf(sc->dip, CE_WARN, "Invalid Adapter detected\n");
1325 		return (EINVAL);
1326 	}
1327 
1328 	if (firmware_open(T4_PORT_NAME, cfg_file, &fw_hdl) != 0) {
1329 		cxgb_printf(sc->dip, CE_WARN, "Could not open %s\n", cfg_file);
1330 		return (EINVAL);
1331 	}
1332 
1333 	cflen = firmware_get_size(fw_hdl);
1334 	/*
1335 	 * Truncate the length to a multiple of uint32_ts. The configuration
1336 	 * text files have trailing comments (and hopefully always will) so
1337 	 * nothing important is lost.
1338 	 */
1339 	cflen &= ~3;
1340 
1341 	if (cflen > FLASH_CFG_MAX_SIZE) {
1342 		cxgb_printf(sc->dip, CE_WARN,
1343 		    "config file too long (%d, max allowed is %d).  ",
1344 		    cflen, FLASH_CFG_MAX_SIZE);
1345 		firmware_close(fw_hdl);
1346 		return (EFBIG);
1347 	}
1348 
1349 	rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr);
1350 	if (rc != 0) {
1351 		cxgb_printf(sc->dip, CE_WARN,
1352 		    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
1353 		    "Will try to use the config on the card, if any.\n",
1354 		    __func__, mtype, maddr, cflen, rc);
1355 		firmware_close(fw_hdl);
1356 		return (EFAULT);
1357 	}
1358 
1359 	cfbaselen = cflen;
1360 	cfbase = cfdata = kmem_zalloc(cflen, KM_SLEEP);
1361 	if (firmware_read(fw_hdl, 0, cfdata, cflen) != 0) {
1362 		cxgb_printf(sc->dip, CE_WARN, "Failed to read from %s\n",
1363 		    cfg_file);
1364 		firmware_close(fw_hdl);
1365 		kmem_free(cfbase, cfbaselen);
1366 		return (EINVAL);
1367 	}
1368 	firmware_close(fw_hdl);
1369 
1370 	memwin_info(sc, 2, &mw_base, &mw_aperture);
1371 	while (cflen) {
1372 		off = position_memwin(sc, 2, addr);
1373 		n = min(cflen, mw_aperture - off);
1374 		for (i = 0; i < n; i += 4)
1375 			t4_write_reg(sc, mw_base + off + i, *cfdata++);
1376 		cflen -= n;
1377 		addr += n;
1378 	}
1379 
1380 	kmem_free(cfbase, cfbaselen);
1381 
1382 	return (rc);
1383 }
1384 
/*
 * Partition chip resources for use between various PFs, VFs, etc.  This is done
 * by uploading the firmware configuration file to the adapter and instructing
 * the firmware to process it.
 */
static int
partition_resources(struct adapter *sc)
{
	int rc;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, maddr, finicsum, cfcsum;

	/*
	 * Try to upload our config file; on failure fall back to whatever
	 * config is already present in the adapter's flash.
	 */
	rc = upload_config_file(sc, &mtype, &maddr);
	if (rc != 0) {
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(sc);
	}

	/*
	 * Ask the firmware to pre-process the config file (READ); the reply
	 * carries the capabilities the file requests plus its checksum.
	 * NOTE(review): BE_32 here vs htonl below — inconsistent spellings
	 * of the same host-to-big-endian 32-bit conversion.
	 */
	bzero(&caps, sizeof (caps));
	caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to pre-process config file: %d.\n", rc);
		return (rc);
	}

	/* A checksum mismatch is reported but not treated as fatal. */
	finicsum = ntohl(caps.finicsum);
	cfcsum = ntohl(caps.cfcsum);
	if (finicsum != cfcsum) {
		cxgb_printf(sc->dip, CE_WARN,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

	/* TODO: Need to configure this correctly */
	caps.toecaps = htons(FW_CAPS_CONFIG_TOE);
	caps.iscsicaps = 0;
	caps.rdmacaps = 0;
	caps.fcoecaps = 0;
	/* TODO: Disable VNIC cap for now */
	caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);

	/* Write the (trimmed) capability set back to the firmware (WRITE). */
	caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to process config file: %d.\n", rc);
		return (rc);
	}

	return (0);
}
1445 
1446 /*
1447  * Tweak configuration based on module parameters, etc.  Most of these have
1448  * defaults assigned to them by Firmware Configuration Files (if we're using
1449  * them) but need to be explicitly set if we're using hard-coded
1450  * initialization.  But even in the case of using Firmware Configuration
1451  * Files, we'd like to expose the ability to change these via module
1452  * parameters so these are essentially common tweaks/settings for
1453  * Configuration Files and hard-coded initialization ...
1454  */
1455 static int
adap__pre_init_tweaks(struct adapter * sc)1456 adap__pre_init_tweaks(struct adapter *sc)
1457 {
1458 	int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */
1459 
1460 	/*
1461 	 * Fix up various Host-Dependent Parameters like Page Size, Cache
1462 	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
1463 	 * 64B Cache Line Size ...
1464 	 */
1465 	(void) t4_fixup_host_params_compat(sc, PAGE_SIZE, _CACHE_LINE_SIZE,
1466 	    T5_LAST_REV);
1467 
1468 	t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
1469 	    V_PKTSHIFT(rx_dma_offset));
1470 
1471 	return (0);
1472 }
/*
 * Retrieve parameters that are needed (or nice to have) prior to calling
 * t4_sge_init and t4_fw_initialize.
 */
static int
get_params__pre_init(struct adapter *sc)
{
	int rc;
	uint32_t param[2], val[2];
	struct fw_devlog_cmd cmd;
	struct devlog_params *dlog = &sc->params.devlog;

	/*
	 * Grab the raw VPD parameters.
	 */
	rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query VPD parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	/* Port bit-vector and core clock frequency, fetched in one query. */
	param[0] = FW_PARAM_DEV(PORTVEC);
	param[1] = FW_PARAM_DEV(CCLK);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	sc->params.portvec = val[0];
	/* Population count of portvec: clear the lowest set bit per pass. */
	sc->params.nports = 0;
	while (val[0]) {
		sc->params.nports++;
		val[0] &= val[0] - 1;
	}

	sc->params.vpd.cclk = val[1];

	/* Read device log parameters. */
	bzero(&cmd, sizeof (cmd));
	cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htonl(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = ntohl(cmd.memsize_devlog);
	}

	return (rc);
}
1533 
/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	/* Queue, filter, and L2T ranges the firmware assigned to this PF. */
	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	param[4] = FW_PARAM_PFVF(L2T_START);
	param[5] = FW_PARAM_PFVF(L2T_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	/* LINTED: E_ASSIGN_NARROW_CONV */
	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	sc->tids.ftid_base = val[2];
	sc->tids.nftids = val[3] - val[2] + 1;	/* END values are inclusive */
	sc->vres.l2t.start = val[4];
	sc->vres.l2t.size = val[5] - val[4] + 1;

	param[0] = FW_PARAM_PFVF(IQFLINT_END);
	param[1] = FW_PARAM_PFVF(EQ_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN, "failed to query eq/iq map "
		    "size parameters (post_init): %d.\n", rc);
		return (rc);
	}

	sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
	sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;

	/* get capabilites */
	bzero(&caps, sizeof (caps));
	caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

	if (caps.toecaps != 0) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			cxgb_printf(sc->dip, CE_WARN,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}

	rc = -t4_get_pfres(sc);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query PF resource params: %d.\n", rc);
		return (rc);
	}

	/* These are finalized by FW initialization, load their values now */
	val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
	sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
	sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);

	return (rc);
}
1629 
1630 static int
set_params__post_init(struct adapter * sc)1631 set_params__post_init(struct adapter *sc)
1632 {
1633 	uint32_t param, val;
1634 
1635 	/* ask for encapsulated CPLs */
1636 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
1637 	val = 1;
1638 	(void) t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1639 
1640 	return (0);
1641 }
1642 
/* TODO: verify */
/*
 * Program the three PCIe memory-access windows.  T4 takes absolute bus
 * addresses (hence the BAR0 offset); T5+ takes offsets relative to the BAR.
 */
static void
setup_memwin(struct adapter *sc)
{
	pci_regspec_t *data;
	int rc;
	uint_t n;
	uintptr_t bar0;
	uintptr_t mem_win0_base, mem_win1_base, mem_win2_base;
	uintptr_t mem_win2_aperture;

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
	    DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to lookup \"assigned-addresses\" property: %d", rc);
		return;
	}
	n /= sizeof (*data);	/* NOTE(review): n is not used after this */

	/* Reassemble the 64-bit BAR0 bus address from the first regspec. */
	bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low;
	ddi_prop_free(data);

	if (is_t4(sc->params.chip)) {
		mem_win0_base = bar0 + MEMWIN0_BASE;
		mem_win1_base = bar0 + MEMWIN1_BASE;
		mem_win2_base = bar0 + MEMWIN2_BASE;
		mem_win2_aperture = MEMWIN2_APERTURE;
	} else {
		/* For T5, only relative offset inside the PCIe BAR is passed */
		mem_win0_base = MEMWIN0_BASE;
		mem_win1_base = MEMWIN1_BASE;
		mem_win2_base = MEMWIN2_BASE_T5;
		mem_win2_aperture = MEMWIN2_APERTURE_T5;
	}

	/* The V_WINDOW field carries log2(aperture) - 10. */
	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
	    mem_win0_base | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
	    mem_win1_base | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
	    mem_win2_base | V_BIR(0) |
	    V_WINDOW(ilog2(mem_win2_aperture) - 10));

	/* flush */
	(void) t4_read_reg(sc,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1695 
/*
 * Positions the memory window such that it can be used to access the specified
 * address in the chip's address space.  The return value is the offset of addr
 * from the start of the window.
 */
uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	/*
	 * NOTE(review): on a misaligned address this returns EFAULT, which a
	 * caller cannot distinguish from a valid window offset — callers are
	 * expected to pass 4-byte-aligned addresses.
	 */
	if (addr & 3) {
		cxgb_printf(sc->dip, CE_WARN,
		    "addr (0x%x) is not at a 4B boundary.\n", addr);
		return (EFAULT);
	}

	if (is_t4(sc->params.chip)) {
		pf = 0;
		start = addr & ~0xf;    /* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;   /* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	/* Program the window base, then read it back to flush the write. */
	t4_write_reg(sc, reg, start | pf);
	(void) t4_read_reg(sc, reg);

	return (addr - start);
}
1727 
1728 
1729 /*
1730  * Reads the named property and fills up the "data" array (which has at least
1731  * "count" elements).  We first try and lookup the property for our dev_t and
1732  * then retry with DDI_DEV_T_ANY if it's not found.
1733  *
1734  * Returns non-zero if the property was found and "data" has been updated.
1735  */
1736 static int
prop_lookup_int_array(struct adapter * sc,char * name,int * data,uint_t count)1737 prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count)
1738 {
1739 	dev_info_t *dip = sc->dip;
1740 	dev_t dev = sc->dev;
1741 	int rc, *d;
1742 	uint_t i, n;
1743 
1744 	rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS,
1745 	    name, &d, &n);
1746 	if (rc == DDI_PROP_SUCCESS)
1747 		goto found;
1748 
1749 	if (rc != DDI_PROP_NOT_FOUND) {
1750 		cxgb_printf(dip, CE_WARN,
1751 		    "failed to lookup property %s for minor %d: %d.",
1752 		    name, getminor(dev), rc);
1753 		return (0);
1754 	}
1755 
1756 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1757 	    name, &d, &n);
1758 	if (rc == DDI_PROP_SUCCESS)
1759 		goto found;
1760 
1761 	if (rc != DDI_PROP_NOT_FOUND) {
1762 		cxgb_printf(dip, CE_WARN,
1763 		    "failed to lookup property %s: %d.", name, rc);
1764 		return (0);
1765 	}
1766 
1767 	return (0);
1768 
1769 found:
1770 	if (n > count) {
1771 		cxgb_printf(dip, CE_NOTE,
1772 		    "property %s has too many elements (%d), ignoring extras",
1773 		    name, n);
1774 	}
1775 
1776 	for (i = 0; i < n && i < count; i++)
1777 		data[i] = d[i];
1778 	ddi_prop_free(d);
1779 
1780 	return (1);
1781 }
1782 
1783 static int
prop_lookup_int(struct adapter * sc,char * name,int defval)1784 prop_lookup_int(struct adapter *sc, char *name, int defval)
1785 {
1786 	int rc;
1787 
1788 	rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1);
1789 	if (rc != -1)
1790 		return (rc);
1791 
1792 	return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS,
1793 	    name, defval));
1794 }
1795 
/*
 * Load all tunable driver properties (from driver.conf or built-in
 * defaults), clamp them to sane ranges, and write the effective values
 * back as properties so the in-use configuration is observable.
 */
static int
init_driver_props(struct adapter *sc, struct driver_properties *p)
{
	dev_t dev = sc->dev;
	dev_info_t *dip = sc->dip;
	int i, *data;
	uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200};
	uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */

	/*
	 * Holdoff timer
	 */
	data = &p->timer_val[0];
	for (i = 0; i < SGE_NTIMERS; i++)
		data[i] = tmr[i];
	(void) prop_lookup_int_array(sc, "holdoff-timer-values", data,
	    SGE_NTIMERS);
	for (i = 0; i < SGE_NTIMERS; i++) {
		int limit = 200U;
		if (data[i] > limit) {
			cxgb_printf(dip, CE_WARN,
			    "holdoff timer %d is too high (%d), lowered to %d.",
			    i, data[i], limit);
			data[i] = limit;
		}
	}
	(void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values",
	    data, SGE_NTIMERS);

	/*
	 * Holdoff packet counter
	 */
	data = &p->counter_val[0];
	for (i = 0; i < SGE_NCOUNTERS; i++)
		data[i] = cnt[i];
	(void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data,
	    SGE_NCOUNTERS);
	for (i = 0; i < SGE_NCOUNTERS; i++) {
		int limit = M_THRESHOLD_0;
		if (data[i] > limit) {
			cxgb_printf(dip, CE_WARN,
			    "holdoff pkt-counter %d is too high (%d), "
			    "lowered to %d.", i, data[i], limit);
			data[i] = limit;
		}
	}
	(void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values",
	    data, SGE_NCOUNTERS);

	/*
	 * Maximum # of tx and rx queues to use for each
	 * 100G, 40G, 25G, 10G and 1G port.
	 */
	p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8);
	(void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
	    p->max_ntxq_10g);

	p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8);
	(void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
	    p->max_nrxq_10g);

	p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2);
	(void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
	    p->max_ntxq_1g);

	p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2);
	(void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
	    p->max_nrxq_1g);

	/*
	 * Holdoff parameters for 10G and 1G ports.
	 */
	p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0);
	(void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G",
	    p->tmr_idx_10g);

	p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2);
	(void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G",
	    p->pktc_idx_10g);

	p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0);
	(void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G",
	    p->tmr_idx_1g);

	p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2);
	(void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G",
	    p->pktc_idx_1g);

	/*
	 * Size (number of entries) of each tx and rx queue.
	 */
	i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE);
	p->qsize_txq = max(i, 128);	/* enforce a floor of 128 entries */
	if (p->qsize_txq != i) {
		cxgb_printf(dip, CE_WARN,
		    "using %d instead of %d as the tx queue size",
		    p->qsize_txq, i);
	}
	(void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq);

	i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE);
	p->qsize_rxq = max(i, 128);
	/* round the rx queue size down to a multiple of 8 */
	while (p->qsize_rxq & 7)
		p->qsize_rxq--;
	if (p->qsize_rxq != i) {
		cxgb_printf(dip, CE_WARN,
		    "using %d instead of %d as the rx queue size",
		    p->qsize_rxq, i);
	}
	(void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq);

	/*
	 * Interrupt types allowed.
	 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively.  See sys/ddi_intr.h
	 */
	p->intr_types = prop_lookup_int(sc, "interrupt-types",
	    DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
	(void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types);

	/*
	 * Write combining
	 * 0 to disable, 1 to enable
	 */
	p->wc = prop_lookup_int(sc, "write-combine", 1);
	/*
	 * NOTE(review): this prints at CE_WARN on every attach regardless of
	 * the value; it reads like leftover debug output — consider removing
	 * or demoting it.
	 */
	cxgb_printf(dip, CE_WARN, "write-combine: using of %d", p->wc);
	if (p->wc != 0 && p->wc != 1) {
		cxgb_printf(dip, CE_WARN,
		    "write-combine: using 1 instead of %d", p->wc);
		p->wc = 1;
	}
	(void) ddi_prop_update_int(dev, dip, "write-combine", p->wc);

	/* Firmware install policy; only 0, 1 and 2 are meaningful values. */
	p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1);
	if (p->t4_fw_install != 0 && p->t4_fw_install != 2)
		p->t4_fw_install = 1;
	(void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install);

	/* Multiple Rings */
	p->multi_rings = prop_lookup_int(sc, "multi-rings", 1);
	if (p->multi_rings != 0 && p->multi_rings != 1) {
		cxgb_printf(dip, CE_NOTE,
		    "multi-rings: using value 1 instead of %d", p->multi_rings);
		p->multi_rings = 1;
	}

	(void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings);

	return (0);
}
1945 
1946 static int
remove_extra_props(struct adapter * sc,int n10g,int n1g)1947 remove_extra_props(struct adapter *sc, int n10g, int n1g)
1948 {
1949 	if (n10g == 0) {
1950 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port");
1951 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port");
1952 		(void) ddi_prop_remove(sc->dev, sc->dip,
1953 		    "holdoff-timer-idx-10G");
1954 		(void) ddi_prop_remove(sc->dev, sc->dip,
1955 		    "holdoff-pktc-idx-10G");
1956 	}
1957 
1958 	if (n1g == 0) {
1959 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port");
1960 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port");
1961 		(void) ddi_prop_remove(sc->dev, sc->dip,
1962 		    "holdoff-timer-idx-1G");
1963 		(void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G");
1964 	}
1965 
1966 	return (0);
1967 }
1968 
1969 static int
cfg_itype_and_nqueues(struct adapter * sc,int n10g,int n1g,struct intrs_and_queues * iaq)1970 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1971     struct intrs_and_queues *iaq)
1972 {
1973 	struct driver_properties *p = &sc->props;
1974 	int rc, itype, itypes, navail, nc, n;
1975 	int pfres_rxq, pfres_txq, pfresq;
1976 
1977 	bzero(iaq, sizeof (*iaq));
1978 	nc = ncpus;	/* our snapshot of the number of CPUs */
1979 	iaq->ntxq10g = min(nc, p->max_ntxq_10g);
1980 	iaq->ntxq1g = min(nc, p->max_ntxq_1g);
1981 	iaq->nrxq10g = min(nc, p->max_nrxq_10g);
1982 	iaq->nrxq1g = min(nc, p->max_nrxq_1g);
1983 
1984 	pfres_rxq = iaq->nrxq10g * n10g + iaq->nrxq1g * n1g;
1985 	pfres_txq = iaq->ntxq10g * n10g + iaq->ntxq1g * n1g;
1986 
1987 	/*
1988 	 * If current configuration of max number of Rxqs and Txqs exceed
1989 	 * the max available for all the ports under this PF, then shrink
1990 	 * the queues to max available. Reduce them in a way that each
1991 	 * port under this PF has equally distributed number of queues.
1992 	 * Must guarantee at least 1 queue for each port for both NIC
1993 	 * and Offload queues.
1994 	 *
1995 	 * neq - fixed max number of Egress queues on Tx path and Free List
1996 	 * queues that hold Rx payload data on Rx path. Half are reserved
1997 	 * for Egress queues and the other half for Free List queues.
1998 	 * Hence, the division by 2.
1999 	 *
2000 	 * niqflint - max number of Ingress queues with interrupts on Rx
2001 	 * path to receive completions that indicate Rx payload has been
2002 	 * posted in its associated Free List queue. Also handles Tx
2003 	 * completions for packets successfully transmitted on Tx path.
2004 	 *
2005 	 * nethctrl - max number of Egress queues only for Tx path. This
2006 	 * number is usually half of neq. However, if it became less than
2007 	 * neq due to lack of resources based on firmware configuration,
2008 	 * then take the lower value.
2009 	 */
2010 	const uint_t max_rxq =
2011 	    MIN(sc->params.pfres.neq / 2, sc->params.pfres.niqflint);
2012 	while (pfres_rxq > max_rxq) {
2013 		pfresq = pfres_rxq;
2014 
2015 		if (iaq->nrxq10g > 1) {
2016 			iaq->nrxq10g--;
2017 			pfres_rxq -= n10g;
2018 		}
2019 
2020 		if (iaq->nrxq1g > 1) {
2021 			iaq->nrxq1g--;
2022 			pfres_rxq -= n1g;
2023 		}
2024 
2025 		/* Break if nothing changed */
2026 		if (pfresq == pfres_rxq)
2027 			break;
2028 	}
2029 
2030 	const uint_t max_txq =
2031 	    MIN(sc->params.pfres.neq / 2, sc->params.pfres.nethctrl);
2032 	while (pfres_txq > max_txq) {
2033 		pfresq = pfres_txq;
2034 
2035 		if (iaq->ntxq10g > 1) {
2036 			iaq->ntxq10g--;
2037 			pfres_txq -= n10g;
2038 		}
2039 
2040 		if (iaq->ntxq1g > 1) {
2041 			iaq->ntxq1g--;
2042 			pfres_txq -= n1g;
2043 		}
2044 
2045 		/* Break if nothing changed */
2046 		if (pfresq == pfres_txq)
2047 			break;
2048 	}
2049 
2050 	rc = ddi_intr_get_supported_types(sc->dip, &itypes);
2051 	if (rc != DDI_SUCCESS) {
2052 		cxgb_printf(sc->dip, CE_WARN,
2053 		    "failed to determine supported interrupt types: %d", rc);
2054 		return (rc);
2055 	}
2056 
2057 	for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) {
2058 		ASSERT(itype == DDI_INTR_TYPE_MSIX ||
2059 		    itype == DDI_INTR_TYPE_MSI ||
2060 		    itype == DDI_INTR_TYPE_FIXED);
2061 
2062 		if ((itype & itypes & p->intr_types) == 0)
2063 			continue;	/* not supported or not allowed */
2064 
2065 		navail = 0;
2066 		rc = ddi_intr_get_navail(sc->dip, itype, &navail);
2067 		if (rc != DDI_SUCCESS || navail == 0) {
2068 			cxgb_printf(sc->dip, CE_WARN,
2069 			    "failed to get # of interrupts for type %d: %d",
2070 			    itype, rc);
2071 			continue;	/* carry on */
2072 		}
2073 
2074 		iaq->intr_type = itype;
2075 		if (navail == 0)
2076 			continue;
2077 
2078 		/*
2079 		 * Best option: an interrupt vector for errors, one for the
2080 		 * firmware event queue, and one each for each rxq (NIC as well
2081 		 * as offload).
2082 		 */
2083 		iaq->nirq = T4_EXTRA_INTR;
2084 		iaq->nirq += n10g * iaq->nrxq10g;
2085 		iaq->nirq += n1g * iaq->nrxq1g;
2086 
2087 		if (iaq->nirq <= navail &&
2088 		    (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2089 			iaq->intr_fwd = 0;
2090 			goto allocate;
2091 		}
2092 
2093 		/*
2094 		 * Second best option: an interrupt vector for errors, one for
2095 		 * the firmware event queue, and one each for either NIC or
2096 		 * offload rxq's.
2097 		 */
2098 		iaq->nirq = T4_EXTRA_INTR;
2099 		iaq->nirq += n10g * iaq->nrxq10g;
2100 		iaq->nirq += n1g * iaq->nrxq1g;
2101 		if (iaq->nirq <= navail &&
2102 		    (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2103 			iaq->intr_fwd = 1;
2104 			goto allocate;
2105 		}
2106 
2107 		/*
2108 		 * Next best option: an interrupt vector for errors, one for the
2109 		 * firmware event queue, and at least one per port.  At this
2110 		 * point we know we'll have to downsize nrxq or nofldrxq to fit
2111 		 * what's available to us.
2112 		 */
2113 		iaq->nirq = T4_EXTRA_INTR;
2114 		iaq->nirq += n10g + n1g;
2115 		if (iaq->nirq <= navail) {
2116 			int leftover = navail - iaq->nirq;
2117 
2118 			if (n10g > 0) {
2119 				int target = iaq->nrxq10g;
2120 
2121 				n = 1;
2122 				while (n < target && leftover >= n10g) {
2123 					leftover -= n10g;
2124 					iaq->nirq += n10g;
2125 					n++;
2126 				}
2127 				iaq->nrxq10g = min(n, iaq->nrxq10g);
2128 			}
2129 
2130 			if (n1g > 0) {
2131 				int target = iaq->nrxq1g;
2132 
2133 				n = 1;
2134 				while (n < target && leftover >= n1g) {
2135 					leftover -= n1g;
2136 					iaq->nirq += n1g;
2137 					n++;
2138 				}
2139 				iaq->nrxq1g = min(n, iaq->nrxq1g);
2140 			}
2141 
2142 			/*
2143 			 * We have arrived at a minimum value required to enable
2144 			 * per queue irq(either NIC or offload). Thus for non-
2145 			 * offload case, we will get a vector per queue, while
2146 			 * offload case, we will get a vector per offload/NIC q.
2147 			 * Hence enable Interrupt forwarding only for offload
2148 			 * case.
2149 			 */
2150 			if (itype != DDI_INTR_TYPE_MSI) {
2151 				goto allocate;
2152 			}
2153 		}
2154 
2155 		/*
2156 		 * Least desirable option: one interrupt vector for everything.
2157 		 */
2158 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2159 		iaq->intr_fwd = 1;
2160 
2161 allocate:
2162 		return (0);
2163 	}
2164 
2165 	cxgb_printf(sc->dip, CE_WARN,
2166 	    "failed to find a usable interrupt type.  supported=%d, allowed=%d",
2167 	    itypes, p->intr_types);
2168 	return (DDI_FAILURE);
2169 }
2170 
2171 static int
add_child_node(struct adapter * sc,int idx)2172 add_child_node(struct adapter *sc, int idx)
2173 {
2174 	int rc;
2175 	struct port_info *pi;
2176 
2177 	if (idx < 0 || idx >= sc->params.nports)
2178 		return (EINVAL);
2179 
2180 	pi = sc->port[idx];
2181 	if (pi == NULL)
2182 		return (ENODEV);	/* t4_port_init failed earlier */
2183 
2184 	PORT_LOCK(pi);
2185 	if (pi->dip != NULL) {
2186 		rc = 0;		/* EEXIST really, but then bus_config fails */
2187 		goto done;
2188 	}
2189 
2190 	rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip);
2191 	if (rc != DDI_SUCCESS || pi->dip == NULL) {
2192 		rc = ENOMEM;
2193 		goto done;
2194 	}
2195 
2196 	(void) ddi_set_parent_data(pi->dip, pi);
2197 	(void) ndi_devi_bind_driver(pi->dip, 0);
2198 	rc = 0;
2199 done:
2200 	PORT_UNLOCK(pi);
2201 	return (rc);
2202 }
2203 
2204 static int
remove_child_node(struct adapter * sc,int idx)2205 remove_child_node(struct adapter *sc, int idx)
2206 {
2207 	int rc;
2208 	struct port_info *pi;
2209 
2210 	if (idx < 0 || idx >= sc->params.nports)
2211 		return (EINVAL);
2212 
2213 	pi = sc->port[idx];
2214 	if (pi == NULL)
2215 		return (ENODEV);
2216 
2217 	PORT_LOCK(pi);
2218 	if (pi->dip == NULL) {
2219 		rc = ENODEV;
2220 		goto done;
2221 	}
2222 
2223 	rc = ndi_devi_free(pi->dip);
2224 	if (rc == 0)
2225 		pi->dip = NULL;
2226 done:
2227 	PORT_UNLOCK(pi);
2228 	return (rc);
2229 }
2230 
2231 static char *
print_port_speed(const struct port_info * pi)2232 print_port_speed(const struct port_info *pi)
2233 {
2234 	if (!pi)
2235 		return ("-");
2236 
2237 	if (is_100G_port(pi))
2238 		return ("100G");
2239 	else if (is_50G_port(pi))
2240 		return ("50G");
2241 	else if (is_40G_port(pi))
2242 		return ("40G");
2243 	else if (is_25G_port(pi))
2244 		return ("25G");
2245 	else if (is_10G_port(pi))
2246 		return ("10G");
2247 	else
2248 		return ("1G");
2249 }
2250 
/*
 * Shorthand for initializing and setting kstat_named_t entries.  Each
 * macro expects a local variable named `kstatp' pointing at the kstat's
 * ks_data structure.
 */
#define	KS_UINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
#define	KS_CINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
#define	KS_U64INIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_UINT64)
#define	KS_U_SET(x, y)	kstatp->x.value.ul = (y)
/* KSTAT_DATA_CHAR values hold at most 16 bytes, hence the fixed bound. */
#define	KS_C_SET(x, ...)	\
			(void) snprintf(kstatp->x.value.c, 16,  __VA_ARGS__)
2257 
/*
 * t4nex:X:config
 *
 * Static adapter identification and configuration.  None of these values
 * change after attach, so no ks_update routine is installed.
 */
struct t4_kstats {
	kstat_named_t chip_ver;		/* chip version (params.chip) */
	kstat_named_t fw_vers;		/* firmware version string */
	kstat_named_t tp_vers;		/* TP microcode version string */
	kstat_named_t driver_version;	/* DRV_VERSION */
	kstat_named_t serial_number;	/* VPD serial number */
	kstat_named_t ec_level;		/* VPD EC level */
	kstat_named_t id;		/* VPD id string */
	kstat_named_t bus_type;		/* always "pci-express" */
	kstat_named_t bus_width;	/* PCIe lane count */
	kstat_named_t bus_speed;	/* PCIe speed */
	kstat_named_t core_clock;	/* core clock (VPD cclk) */
	kstat_named_t port_cnt;		/* number of ports */
	kstat_named_t port_type;	/* per-port speed summary */
	kstat_named_t pci_vendor_id;	/* PCI vendor ID from config space */
	kstat_named_t pci_device_id;	/* PCI device ID from config space */
};

/*
 * Create, fill in, and install the t4nex:X:config kstat.  Returns the
 * installed kstat or NULL on failure.
 */
static kstat_t *
setup_kstats(struct adapter *sc)
{
	kstat_t *ksp;
	struct t4_kstats *kstatp;
	int ndata;
	struct pci_params *p = &sc->params.pci;
	struct vpd_params *v = &sc->params.vpd;
	uint16_t pci_vendor, pci_device;

	ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t);

	ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config",
	    "nexus", KSTAT_TYPE_NAMED, ndata, 0);
	if (ksp == NULL) {
		cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
		return (NULL);
	}

	kstatp = (struct t4_kstats *)ksp->ks_data;

	/* First declare the name and type of every entry ... */
	KS_UINIT(chip_ver);
	KS_CINIT(fw_vers);
	KS_CINIT(tp_vers);
	KS_CINIT(driver_version);
	KS_CINIT(serial_number);
	KS_CINIT(ec_level);
	KS_CINIT(id);
	KS_CINIT(bus_type);
	KS_CINIT(bus_width);
	KS_CINIT(bus_speed);
	KS_UINIT(core_clock);
	KS_UINIT(port_cnt);
	KS_CINIT(port_type);
	KS_CINIT(pci_vendor_id);
	KS_CINIT(pci_device_id);

	/* ... then fill in the (static) values. */
	KS_U_SET(chip_ver, sc->params.chip);
	KS_C_SET(fw_vers, "%d.%d.%d.%d",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	KS_C_SET(tp_vers, "%d.%d.%d.%d",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
	KS_C_SET(driver_version, DRV_VERSION);
	KS_C_SET(serial_number, "%s", v->sn);
	KS_C_SET(ec_level, "%s", v->ec);
	KS_C_SET(id, "%s", v->id);
	KS_C_SET(bus_type, "pci-express");
	KS_C_SET(bus_width, "x%d lanes", p->width);
	KS_C_SET(bus_speed, "%d", p->speed);
	KS_U_SET(core_clock, v->cclk);
	KS_U_SET(port_cnt, sc->params.nports);

	t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor);
	KS_C_SET(pci_vendor_id, "0x%x", pci_vendor);

	t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device);
	KS_C_SET(pci_device_id, "0x%x", pci_device);

	/* Summarize up to four ports; absent ports print as "-". */
	KS_C_SET(port_type, "%s/%s/%s/%s",
	    print_port_speed(sc->port[0]),
	    print_port_speed(sc->port[1]),
	    print_port_speed(sc->port[2]),
	    print_port_speed(sc->port[3]));

	/* Do NOT set ksp->ks_update.  These kstats do not change. */

	/* Install the kstat */
	ksp->ks_private = (void *)sc;
	kstat_install(ksp);

	return (ksp);
}
2356 
2357 /*
2358  * t4nex:X:stat
2359  */
2360 struct t4_wc_kstats {
2361 	kstat_named_t write_coal_success;
2362 	kstat_named_t write_coal_failure;
2363 };
2364 static kstat_t *
setup_wc_kstats(struct adapter * sc)2365 setup_wc_kstats(struct adapter *sc)
2366 {
2367 	kstat_t *ksp;
2368 	struct t4_wc_kstats *kstatp;
2369 
2370 	const uint_t ndata =
2371 	    sizeof (struct t4_wc_kstats) / sizeof (kstat_named_t);
2372 	ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats",
2373 	    "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2374 	if (ksp == NULL) {
2375 		cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2376 		return (NULL);
2377 	}
2378 
2379 	kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2380 
2381 	KS_UINIT(write_coal_success);
2382 	KS_UINIT(write_coal_failure);
2383 
2384 	ksp->ks_update = update_wc_kstats;
2385 	/* Install the kstat */
2386 	ksp->ks_private = (void *)sc;
2387 	kstat_install(ksp);
2388 
2389 	return (ksp);
2390 }
2391 
2392 static int
update_wc_kstats(kstat_t * ksp,int rw)2393 update_wc_kstats(kstat_t *ksp, int rw)
2394 {
2395 	struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2396 	struct adapter *sc = ksp->ks_private;
2397 	uint32_t wc_total, wc_success, wc_failure;
2398 
2399 	if (rw == KSTAT_WRITE)
2400 		return (0);
2401 
2402 	if (is_t5(sc->params.chip)) {
2403 		wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL);
2404 		wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH);
2405 		wc_success = wc_total - wc_failure;
2406 	} else {
2407 		wc_success = 0;
2408 		wc_failure = 0;
2409 	}
2410 
2411 	KS_U_SET(write_coal_success, wc_success);
2412 	KS_U_SET(write_coal_failure, wc_failure);
2413 
2414 	return (0);
2415 }
2416 
/*
 * cxgbe:X:fec
 *
 * This provides visibility into the errors that have been found by the
 * different FEC subsystems. While it's tempting to combine the two different
 * FEC types logically, the data that the errors tell us are pretty different
 * between the two. Firecode is strictly per-lane, but RS has parts that are
 * related to symbol distribution to lanes and also to the overall channel.
 */
struct cxgbe_port_fec_kstats {
	kstat_named_t rs_corr;		/* RS: corrected codewords */
	kstat_named_t rs_uncorr;	/* RS: uncorrectable codewords */
	kstat_named_t rs_sym0_corr;	/* RS: corrected symbols, position 0 */
	kstat_named_t rs_sym1_corr;	/* RS: corrected symbols, position 1 */
	kstat_named_t rs_sym2_corr;	/* RS: corrected symbols, position 2 */
	kstat_named_t rs_sym3_corr;	/* RS: corrected symbols, position 3 */
	kstat_named_t fc_lane0_corr;	/* Firecode: corrected, lane 0 */
	kstat_named_t fc_lane0_uncorr;	/* Firecode: uncorrectable, lane 0 */
	kstat_named_t fc_lane1_corr;	/* Firecode: corrected, lane 1 */
	kstat_named_t fc_lane1_uncorr;	/* Firecode: uncorrectable, lane 1 */
	kstat_named_t fc_lane2_corr;	/* Firecode: corrected, lane 2 */
	kstat_named_t fc_lane2_uncorr;	/* Firecode: uncorrectable, lane 2 */
	kstat_named_t fc_lane3_corr;	/* Firecode: corrected, lane 3 */
	kstat_named_t fc_lane3_uncorr;	/* Firecode: uncorrectable, lane 3 */
};
2442 
2443 static uint32_t
read_fec_pair(struct port_info * pi,uint32_t lo_reg,uint32_t high_reg)2444 read_fec_pair(struct port_info *pi, uint32_t lo_reg, uint32_t high_reg)
2445 {
2446 	struct adapter *sc = pi->adapter;
2447 	uint8_t port = pi->tx_chan;
2448 	uint32_t low, high, ret;
2449 
2450 	low = t4_read_reg32(sc, T5_PORT_REG(port, lo_reg));
2451 	high = t4_read_reg32(sc, T5_PORT_REG(port, high_reg));
2452 	ret = low & 0xffff;
2453 	ret |= (high & 0xffff) << 16;
2454 	return (ret);
2455 }
2456 
/*
 * ks_update callback for the cxgbe:X:fec kstat.  Reads every FEC counter
 * pair from the port and folds it into the 64-bit kstat accumulators.
 * NOTE(review): the `+=' accumulation assumes the 32-bit hardware counters
 * reset (or wrap) between reads — TODO confirm against the T6 register
 * documentation.  Read-only: writes are rejected with EACCES.
 */
static int
update_port_fec_kstats(kstat_t *ksp, int rw)
{
	struct cxgbe_port_fec_kstats *fec = ksp->ks_data;
	struct port_info *pi = ksp->ks_private;

	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	/*
	 * First go ahead and gather RS related stats.
	 */
	fec->rs_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_CCW_LO,
	    T6_RS_FEC_CCW_HI);
	fec->rs_uncorr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_NCCW_LO,
	    T6_RS_FEC_NCCW_HI);
	fec->rs_sym0_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR0_LO,
	    T6_RS_FEC_SYMERR0_HI);
	fec->rs_sym1_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR1_LO,
	    T6_RS_FEC_SYMERR1_HI);
	fec->rs_sym2_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR2_LO,
	    T6_RS_FEC_SYMERR2_HI);
	fec->rs_sym3_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR3_LO,
	    T6_RS_FEC_SYMERR3_HI);

	/*
	 * Now go through and try to grab Firecode/BASE-R stats.
	 */
	fec->fc_lane0_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L0_CERR_LO,
	    T6_FC_FEC_L0_CERR_HI);
	fec->fc_lane0_uncorr.value.ui64 += read_fec_pair(pi,
	    T6_FC_FEC_L0_NCERR_LO, T6_FC_FEC_L0_NCERR_HI);
	fec->fc_lane1_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L1_CERR_LO,
	    T6_FC_FEC_L1_CERR_HI);
	fec->fc_lane1_uncorr.value.ui64 += read_fec_pair(pi,
	    T6_FC_FEC_L1_NCERR_LO, T6_FC_FEC_L1_NCERR_HI);
	fec->fc_lane2_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L2_CERR_LO,
	    T6_FC_FEC_L2_CERR_HI);
	fec->fc_lane2_uncorr.value.ui64 += read_fec_pair(pi,
	    T6_FC_FEC_L2_NCERR_LO, T6_FC_FEC_L2_NCERR_HI);
	fec->fc_lane3_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L3_CERR_LO,
	    T6_FC_FEC_L3_CERR_HI);
	fec->fc_lane3_uncorr.value.ui64 += read_fec_pair(pi,
	    T6_FC_FEC_L3_NCERR_LO, T6_FC_FEC_L3_NCERR_HI);

	return (0);
}
2505 
2506 static kstat_t *
setup_port_fec_kstats(struct port_info * pi)2507 setup_port_fec_kstats(struct port_info *pi)
2508 {
2509 	kstat_t *ksp;
2510 	struct cxgbe_port_fec_kstats *kstatp;
2511 
2512 	if (!is_t6(pi->adapter->params.chip)) {
2513 		return (NULL);
2514 	}
2515 
2516 	ksp = kstat_create(T4_PORT_NAME, ddi_get_instance(pi->dip), "fec",
2517 	    "net", KSTAT_TYPE_NAMED, sizeof (struct cxgbe_port_fec_kstats) /
2518 	    sizeof (kstat_named_t), 0);
2519 	if (ksp == NULL) {
2520 		cxgb_printf(pi->dip, CE_WARN, "failed to initialize fec "
2521 		    "kstats.");
2522 		return (NULL);
2523 	}
2524 
2525 	kstatp = ksp->ks_data;
2526 	KS_U64INIT(rs_corr);
2527 	KS_U64INIT(rs_uncorr);
2528 	KS_U64INIT(rs_sym0_corr);
2529 	KS_U64INIT(rs_sym1_corr);
2530 	KS_U64INIT(rs_sym2_corr);
2531 	KS_U64INIT(rs_sym3_corr);
2532 	KS_U64INIT(fc_lane0_corr);
2533 	KS_U64INIT(fc_lane0_uncorr);
2534 	KS_U64INIT(fc_lane1_corr);
2535 	KS_U64INIT(fc_lane1_uncorr);
2536 	KS_U64INIT(fc_lane2_corr);
2537 	KS_U64INIT(fc_lane2_uncorr);
2538 	KS_U64INIT(fc_lane3_corr);
2539 	KS_U64INIT(fc_lane3_uncorr);
2540 
2541 	ksp->ks_update = update_port_fec_kstats;
2542 	ksp->ks_private = pi;
2543 	kstat_install(ksp);
2544 
2545 	return (ksp);
2546 }
2547 
2548 int
adapter_full_init(struct adapter * sc)2549 adapter_full_init(struct adapter *sc)
2550 {
2551 	int i, rc = 0;
2552 
2553 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2554 
2555 	rc = t4_setup_adapter_queues(sc);
2556 	if (rc != 0)
2557 		goto done;
2558 
2559 	if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2560 		(void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count);
2561 	else {
2562 		for (i = 0; i < sc->intr_count; i++)
2563 			(void) ddi_intr_enable(sc->intr_handle[i]);
2564 	}
2565 	t4_intr_enable(sc);
2566 	sc->flags |= FULL_INIT_DONE;
2567 
2568 done:
2569 	if (rc != 0)
2570 		(void) adapter_full_uninit(sc);
2571 
2572 	return (rc);
2573 }
2574 
2575 int
adapter_full_uninit(struct adapter * sc)2576 adapter_full_uninit(struct adapter *sc)
2577 {
2578 	int i, rc = 0;
2579 
2580 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2581 
2582 	if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2583 		(void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count);
2584 	else {
2585 		for (i = 0; i < sc->intr_count; i++)
2586 			(void) ddi_intr_disable(sc->intr_handle[i]);
2587 	}
2588 
2589 	rc = t4_teardown_adapter_queues(sc);
2590 	if (rc != 0)
2591 		return (rc);
2592 
2593 	sc->flags &= ~FULL_INIT_DONE;
2594 
2595 	return (0);
2596 }
2597 
/*
 * Bring a port to a fully initialized state: allocate its tx/rx/fl
 * queues, program the RSS indirection table, create the per-port FEC
 * kstats, and mark PORT_INIT_DONE.  Partial setup is undone on failure.
 */
int
port_full_init(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	ASSERT((pi->flags & PORT_INIT_DONE) == 0);

	/*
	 * Allocate tx/rx/fl queues for this port.
	 */
	rc = t4_setup_port_queues(pi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this port.  The indirection table is filled with
	 * the absolute IDs of this port's rx ingress queues.
	 */
	rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP);
	for_each_rxq(pi, i, rxq) {
		rss[i] = rxq->iq.abs_id;
	}
	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
	    pi->rss_size, rss, pi->nrxq);
	kmem_free(rss, pi->nrxq * sizeof (*rss));
	if (rc != 0) {
		cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc);
		goto done;
	}

	/*
	 * Initialize our per-port FEC kstats (T6 only; NULL otherwise).
	 */
	pi->ksp_fec = setup_port_fec_kstats(pi);

	pi->flags |= PORT_INIT_DONE;
done:
	if (rc != 0)
		(void) port_full_uninit(pi);

	return (rc);
}
2643 
/*
 * Idempotent.  Undo port_full_init(): delete the FEC kstats, tear down
 * the port's queues, and clear PORT_INIT_DONE.  Always returns 0.
 */
int
port_full_uninit(struct port_info *pi)
{

	ASSERT(pi->flags & PORT_INIT_DONE);

	/* FEC kstats may be NULL (non-T6 chips, or creation failure). */
	if (pi->ksp_fec != NULL) {
		kstat_delete(pi->ksp_fec);
		pi->ksp_fec = NULL;
	}
	(void) t4_teardown_port_queues(pi);
	pi->flags &= ~PORT_INIT_DONE;

	return (0);
}
2662 
/*
 * Re-enable the port's rx queues after disable_port_queues(): move each
 * iq from IQS_DISABLED back to IQS_IDLE and write the SGE PF GTS register
 * to re-arm its interrupt state.  Panics if any iq is found in an
 * unexpected state, as that indicates a driver state-machine bug.
 */
void
enable_port_queues(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	int i;
	struct sge_iq *iq;
	struct sge_rxq *rxq;

	ASSERT(pi->flags & PORT_INIT_DONE);

	/*
	 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED
	 * back in disable_port_queues will be processed now, after an unbounded
	 * delay.  This can't be good.
	 */

	for_each_rxq(pi, i, rxq) {
		iq = &rxq->iq;
		/* Only a DISABLED->IDLE transition is legal here. */
		if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
		    IQS_DISABLED)
			panic("%s: iq %p wasn't disabled", __func__,
			    (void *) iq);
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
		    V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
	}
}
2689 
2690 void
disable_port_queues(struct port_info * pi)2691 disable_port_queues(struct port_info *pi)
2692 {
2693 	int i;
2694 	struct adapter *sc = pi->adapter;
2695 	struct sge_rxq *rxq;
2696 
2697 	ASSERT(pi->flags & PORT_INIT_DONE);
2698 
2699 	/*
2700 	 * TODO: need proper implementation for all tx queues (ctrl, eth, ofld).
2701 	 */
2702 
2703 	for_each_rxq(pi, i, rxq) {
2704 		while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE,
2705 		    IQS_DISABLED) != IQS_IDLE)
2706 			msleep(1);
2707 	}
2708 
2709 	mutex_enter(&sc->sfl_lock);
2710 	for_each_rxq(pi, i, rxq)
2711 	    rxq->fl.flags |= FL_DOOMED;
2712 	mutex_exit(&sc->sfl_lock);
2713 	/* TODO: need to wait for all fl's to be removed from sc->sfl */
2714 }
2715 
/*
 * Stop the adapter after a fatal error: clear the SGE global-enable bit
 * and mask all adapter interrupts, then log the event.
 */
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	cxgb_printf(sc->dip, CE_WARN,
	    "encountered fatal error, adapter stopped.");
}
2724 
2725 int
t4_os_find_pci_capability(struct adapter * sc,int cap)2726 t4_os_find_pci_capability(struct adapter *sc, int cap)
2727 {
2728 	uint16_t stat;
2729 	uint8_t cap_ptr, cap_id;
2730 
2731 	t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat);
2732 	if ((stat & PCI_STAT_CAP) == 0)
2733 		return (0); /* does not implement capabilities */
2734 
2735 	t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr);
2736 	while (cap_ptr) {
2737 		t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id);
2738 		if (cap_id == cap)
2739 			return (cap_ptr); /* found */
2740 		t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr);
2741 	}
2742 
2743 	return (0); /* not found */
2744 }
2745 
/*
 * Called when the transceiver module in port `idx' changes.  Logs the new
 * module type and, if the port is open and a new module was inserted,
 * requests that the link configuration (l1cfg) be redone.
 */
void
t4_os_portmod_changed(struct adapter *sc, int idx)
{
	/* Indexed by FW_PORT_MOD_TYPE_*; entry 0 (NONE) is handled below. */
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};
	struct port_info *pi = sc->port[idx];

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged.");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		cxgb_printf(pi->dip, CE_NOTE,
		    "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		cxgb_printf(pi->dip, CE_NOTE,
		    "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
		cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.\n",
		    mod_str[pi->mod_type]);
	else
		cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.",
		    pi->mod_type);

	/* Redo l1cfg only if the port is open and this is a new module. */
	if ((isset(&sc->open_device_map, pi->port_id) != 0) &&
	    pi->link_cfg.new_module)
		pi->link_cfg.redo_l1cfg = true;
}
2773 
2774 /* ARGSUSED */
2775 static int
cpl_not_handled(struct sge_iq * iq,const struct rss_header * rss,mblk_t * m)2776 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
2777 {
2778 	if (m != NULL)
2779 		freemsg(m);
2780 	return (0);
2781 }
2782 
2783 int
t4_register_cpl_handler(struct adapter * sc,int opcode,cpl_handler_t h)2784 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2785 {
2786 	uint_t *loc, new;
2787 
2788 	if (opcode >= ARRAY_SIZE(sc->cpl_handler))
2789 		return (EINVAL);
2790 
2791 	new = (uint_t)(unsigned long) (h ? h : cpl_not_handled);
2792 	loc = (uint_t *)&sc->cpl_handler[opcode];
2793 	(void) atomic_swap_uint(loc, new);
2794 
2795 	return (0);
2796 }
2797 
/*
 * Default firmware-message handler: recover the cpl_fw6_msg header from
 * the embedded data pointer and log the unhandled message type.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *data)
{
	struct cpl_fw6_msg *cpl;

	/* `data' points at the `data' member of a cpl_fw6_msg. */
	cpl = __containerof((void *)data, struct cpl_fw6_msg, data);

	cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__, cpl->type);
	return (0);
}
2808 
2809 int
t4_register_fw_msg_handler(struct adapter * sc,int type,fw_msg_handler_t h)2810 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
2811 {
2812 	fw_msg_handler_t *loc, new;
2813 
2814 	if (type >= ARRAY_SIZE(sc->fw_msg_handler))
2815 		return (EINVAL);
2816 
2817 	/*
2818 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
2819 	 * handler dispatch table.  Reject any attempt to install a handler for
2820 	 * this subtype.
2821 	 */
2822 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
2823 		return (EINVAL);
2824 
2825 	new = h ? h : fw_msg_not_handled;
2826 	loc = &sc->fw_msg_handler[type];
2827 	(void) atomic_swap_ptr(loc, (void *)new);
2828 
2829 	return (0);
2830 }
2831 
2832 static int
t4_sensor_read(struct adapter * sc,uint32_t diag,uint32_t * valp)2833 t4_sensor_read(struct adapter *sc, uint32_t diag, uint32_t *valp)
2834 {
2835 	int rc;
2836 	struct port_info *pi = sc->port[0];
2837 	uint32_t param, val;
2838 
2839 	rc = begin_synchronized_op(pi, 1, 1);
2840 	if (rc != 0) {
2841 		return (rc);
2842 	}
2843 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2844 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
2845 	    V_FW_PARAMS_PARAM_Y(diag);
2846 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2847 	end_synchronized_op(pi, 1);
2848 
2849 	if (rc != 0) {
2850 		return (rc);
2851 	}
2852 
2853 	if (val == 0) {
2854 		return (EIO);
2855 	}
2856 
2857 	*valp = val;
2858 	return (0);
2859 }
2860 
2861 static int
t4_temperature_read(void * arg,sensor_ioctl_scalar_t * scalar)2862 t4_temperature_read(void *arg, sensor_ioctl_scalar_t *scalar)
2863 {
2864 	int ret;
2865 	struct adapter *sc = arg;
2866 	uint32_t val;
2867 
2868 	ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_TMP, &val);
2869 	if (ret != 0) {
2870 		return (ret);
2871 	}
2872 
2873 	/*
2874 	 * The device measures temperature in units of 1 degree Celsius. We
2875 	 * don't know its precision.
2876 	 */
2877 	scalar->sis_unit = SENSOR_UNIT_CELSIUS;
2878 	scalar->sis_gran = 1;
2879 	scalar->sis_prec = 0;
2880 	scalar->sis_value = val;
2881 
2882 	return (0);
2883 }
2884 
2885 static int
t4_voltage_read(void * arg,sensor_ioctl_scalar_t * scalar)2886 t4_voltage_read(void *arg, sensor_ioctl_scalar_t *scalar)
2887 {
2888 	int ret;
2889 	struct adapter *sc = arg;
2890 	uint32_t val;
2891 
2892 	ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_VDD, &val);
2893 	if (ret != 0) {
2894 		return (ret);
2895 	}
2896 
2897 	scalar->sis_unit = SENSOR_UNIT_VOLTS;
2898 	scalar->sis_gran = 1000;
2899 	scalar->sis_prec = 0;
2900 	scalar->sis_value = val;
2901 
2902 	return (0);
2903 }
2904 
/*
 * While the hardware supports the ability to read and write the flash image,
 * this is not currently wired up.
 */
static int
t4_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
{
	/* Report-only; no flash access is exposed through UFM (see above). */
	*caps = DDI_UFM_CAP_REPORT;
	return (0);
}
2915 
2916 static int
t4_ufm_fill_image(ddi_ufm_handle_t * ufmh,void * arg,uint_t imgno,ddi_ufm_image_t * imgp)2917 t4_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
2918     ddi_ufm_image_t *imgp)
2919 {
2920 	if (imgno != 0) {
2921 		return (EINVAL);
2922 	}
2923 
2924 	ddi_ufm_image_set_desc(imgp, "Firmware");
2925 	ddi_ufm_image_set_nslots(imgp, 1);
2926 
2927 	return (0);
2928 }
2929 
2930 static int
t4_ufm_fill_slot_version(nvlist_t * nvl,const char * key,uint32_t vers)2931 t4_ufm_fill_slot_version(nvlist_t *nvl, const char *key, uint32_t vers)
2932 {
2933 	char buf[128];
2934 
2935 	if (vers == 0) {
2936 		return (0);
2937 	}
2938 
2939 	if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u",
2940 	    G_FW_HDR_FW_VER_MAJOR(vers), G_FW_HDR_FW_VER_MINOR(vers),
2941 	    G_FW_HDR_FW_VER_MICRO(vers), G_FW_HDR_FW_VER_BUILD(vers)) >=
2942 	    sizeof (buf)) {
2943 		return (EOVERFLOW);
2944 	}
2945 
2946 	return (nvlist_add_string(nvl, key, buf));
2947 }
2948 
/*
 * DDI UFM callback: describe the single firmware slot.  The slot version
 * is the running firmware version; the misc nvlist carries the versions
 * of the other flash-resident components.
 */
static int
t4_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, uint_t slotno,
    ddi_ufm_slot_t *slotp)
{
	int ret;
	struct adapter *sc = arg;
	nvlist_t *misc = NULL;
	char buf[128];

	/* Only image 0 / slot 0 exists (see t4_ufm_fill_image). */
	if (imgno != 0 || slotno != 0) {
		return (EINVAL);
	}

	if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)) >= sizeof (buf)) {
		return (EOVERFLOW);
	}

	ddi_ufm_slot_set_version(slotp, buf);

	/* Versions of zero are skipped by t4_ufm_fill_slot_version(). */
	(void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP);
	if ((ret = t4_ufm_fill_slot_version(misc, "TP Microcode",
	    sc->params.tp_vers)) != 0) {
		goto err;
	}

	if ((ret = t4_ufm_fill_slot_version(misc, "Bootstrap",
	    sc->params.bs_vers)) != 0) {
		goto err;
	}

	if ((ret = t4_ufm_fill_slot_version(misc, "Expansion ROM",
	    sc->params.er_vers)) != 0) {
		goto err;
	}

	if ((ret = nvlist_add_uint32(misc, "Serial Configuration",
	    sc->params.scfg_vers)) != 0) {
		goto err;
	}

	if ((ret = nvlist_add_uint32(misc, "VPD Version",
	    sc->params.vpd_vers)) != 0) {
		goto err;
	}

	ddi_ufm_slot_set_misc(slotp, misc);
	/*
	 * NOTE(review): WRITEABLE is advertised here although t4_ufm_getcaps
	 * reports DDI_UFM_CAP_REPORT only — confirm whether this attribute
	 * is intentional.
	 */
	ddi_ufm_slot_set_attrs(slotp, DDI_UFM_ATTR_ACTIVE |
	    DDI_UFM_ATTR_WRITEABLE | DDI_UFM_ATTR_READABLE);
	return (0);

err:
	nvlist_free(misc);
	return (ret);

}
3008 
3009 
3010 int
t4_cxgbe_attach(struct port_info * pi,dev_info_t * dip)3011 t4_cxgbe_attach(struct port_info *pi, dev_info_t *dip)
3012 {
3013 	ASSERT(pi != NULL);
3014 
3015 	mac_register_t *mac = mac_alloc(MAC_VERSION);
3016 	if (mac == NULL) {
3017 		return (DDI_FAILURE);
3018 	}
3019 
3020 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
3021 	mac->m_driver = pi;
3022 	mac->m_dip = dip;
3023 	mac->m_src_addr = pi->hw_addr;
3024 	mac->m_callbacks = pi->mc;
3025 	mac->m_max_sdu = pi->mtu;
3026 	mac->m_priv_props = pi->props;
3027 	mac->m_margin = VLAN_TAGSZ;
3028 
3029 	if (!mac->m_callbacks->mc_unicst) {
3030 		/* Multiple rings enabled */
3031 		mac->m_v12n = MAC_VIRT_LEVEL1;
3032 	}
3033 
3034 	mac_handle_t mh = NULL;
3035 	const int rc = mac_register(mac, &mh);
3036 	mac_free(mac);
3037 	if (rc != 0) {
3038 		return (DDI_FAILURE);
3039 	}
3040 
3041 	pi->mh = mh;
3042 
3043 	/*
3044 	 * Link state from this point onwards to the time interface is plumbed,
3045 	 * should be set to LINK_STATE_UNKNOWN. The mac should be updated about
3046 	 * the link state as either LINK_STATE_UP or LINK_STATE_DOWN based on
3047 	 * the actual link state detection after interface plumb.
3048 	 */
3049 	mac_link_update(mh, LINK_STATE_UNKNOWN);
3050 
3051 	return (DDI_SUCCESS);
3052 }
3053 
3054 int
t4_cxgbe_detach(struct port_info * pi)3055 t4_cxgbe_detach(struct port_info *pi)
3056 {
3057 	ASSERT(pi != NULL);
3058 	ASSERT(pi->mh != NULL);
3059 
3060 	if (mac_unregister(pi->mh) == 0) {
3061 		pi->mh = NULL;
3062 		return (DDI_SUCCESS);
3063 	}
3064 
3065 	return (DDI_FAILURE);
3066 }
3067