xref: /illumos-gate/usr/src/uts/common/io/cxgbe/t4nex/t4_nexus.c (revision 338b148b3855a59b1ec7a8a989aa5f0c55e8fc33)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source. A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * This file is part of the Chelsio T4 support code.
14  *
15  * Copyright (C) 2010-2013 Chelsio Communications.  All rights reserved.
16  *
17  * This program is distributed in the hope that it will be useful, but WITHOUT
18  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19  * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
20  * release for licensing terms and conditions.
21  */
22 
23 /*
24  * Copyright 2024 Oxide Computer Company
25  */
26 
27 #include <sys/ddi.h>
28 #include <sys/sunddi.h>
29 #include <sys/sunndi.h>
30 #include <sys/modctl.h>
31 #include <sys/conf.h>
32 #include <sys/devops.h>
33 #include <sys/pci.h>
34 #include <sys/atomic.h>
35 #include <sys/types.h>
36 #include <sys/file.h>
37 #include <sys/errno.h>
38 #include <sys/open.h>
39 #include <sys/cred.h>
40 #include <sys/stat.h>
41 #include <sys/mkdev.h>
42 #include <sys/queue.h>
43 #include <sys/containerof.h>
44 #include <sys/sensors.h>
45 #include <sys/firmload.h>
46 #include <sys/mac_provider.h>
47 #include <sys/mac_ether.h>
48 #include <sys/vlan.h>
49 
50 #include "version.h"
51 #include "common/common.h"
52 #include "common/t4_msg.h"
53 #include "common/t4_regs.h"
54 #include "common/t4_extra_regs.h"
55 #include "t4_l2t.h"
56 
57 static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp);
58 static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp);
59 static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp,
60     int *rp);
61 struct cb_ops t4_cb_ops = {
62 	.cb_open =		t4_cb_open,
63 	.cb_close =		t4_cb_close,
64 	.cb_strategy =		nodev,
65 	.cb_print =		nodev,
66 	.cb_dump =		nodev,
67 	.cb_read =		nodev,
68 	.cb_write =		nodev,
69 	.cb_ioctl =		t4_cb_ioctl,
70 	.cb_devmap =		nodev,
71 	.cb_mmap =		nodev,
72 	.cb_segmap =		nodev,
73 	.cb_chpoll =		nochpoll,
74 	.cb_prop_op =		ddi_prop_op,
75 	.cb_flag =		D_MP,
76 	.cb_rev =		CB_REV,
77 	.cb_aread =		nodev,
78 	.cb_awrite =		nodev
79 };
80 
81 static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
82     void *arg, void *result);
83 static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
84     void *arg, dev_info_t **cdipp);
85 static int t4_bus_unconfig(dev_info_t *dip, uint_t flags,
86     ddi_bus_config_op_t op, void *arg);
87 struct bus_ops t4_bus_ops = {
88 	.busops_rev =		BUSO_REV,
89 	.bus_ctl =		t4_bus_ctl,
90 	.bus_prop_op =		ddi_bus_prop_op,
91 	.bus_config =		t4_bus_config,
92 	.bus_unconfig =		t4_bus_unconfig,
93 };
94 
95 static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
96     void **rp);
97 static int t4_devo_probe(dev_info_t *dip);
98 static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
99 static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
100 static int t4_devo_quiesce(dev_info_t *dip);
101 static struct dev_ops t4_dev_ops = {
102 	.devo_rev =		DEVO_REV,
103 	.devo_getinfo =		t4_devo_getinfo,
104 	.devo_identify =	nulldev,
105 	.devo_probe =		t4_devo_probe,
106 	.devo_attach =		t4_devo_attach,
107 	.devo_detach =		t4_devo_detach,
108 	.devo_reset =		nodev,
109 	.devo_cb_ops =		&t4_cb_ops,
110 	.devo_bus_ops =		&t4_bus_ops,
111 	.devo_quiesce =		t4_devo_quiesce,
112 };
113 
114 static struct modldrv t4nex_modldrv = {
115 	.drv_modops =		&mod_driverops,
116 	.drv_linkinfo =		"Chelsio T4-T6 nexus " DRV_VERSION,
117 	.drv_dev_ops =		&t4_dev_ops
118 };
119 
120 static struct modlinkage t4nex_modlinkage = {
121 	.ml_rev =		MODREV_1,
122 	.ml_linkage =		{&t4nex_modldrv, NULL},
123 };
124 
125 void *t4_list;
126 
127 struct intrs_and_queues {
128 	int intr_type;		/* DDI_INTR_TYPE_* */
129 	int nirq;		/* Number of vectors */
130 	int intr_fwd;		/* Interrupts forwarded */
131 	int ntxq10g;		/* # of NIC txq's for each 10G port */
132 	int nrxq10g;		/* # of NIC rxq's for each 10G port */
133 	int ntxq1g;		/* # of NIC txq's for each 1G port */
134 	int nrxq1g;		/* # of NIC rxq's for each 1G port */
135 };
136 
137 static unsigned int getpf(struct adapter *sc);
138 static int prep_firmware(struct adapter *sc);
139 static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma);
140 static int partition_resources(struct adapter *sc);
141 static int adap__pre_init_tweaks(struct adapter *sc);
142 static int get_params__pre_init(struct adapter *sc);
143 static int get_params__post_init(struct adapter *sc);
144 static int set_params__post_init(struct adapter *);
145 static void setup_memwin(struct adapter *sc);
146 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
147     uint32_t *);
148 void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
149 uint32_t position_memwin(struct adapter *, int, uint32_t);
150 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
151     uint_t count);
154 static int init_driver_props(struct adapter *sc, struct driver_properties *p);
155 static int remove_extra_props(struct adapter *sc, int n10g, int n1g);
156 static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
157     struct intrs_and_queues *iaq);
158 static int add_child_node(struct adapter *sc, int idx);
159 static int remove_child_node(struct adapter *sc, int idx);
160 static kstat_t *setup_kstats(struct adapter *sc);
161 static kstat_t *setup_wc_kstats(struct adapter *);
162 static int update_wc_kstats(kstat_t *, int);
163 static kmutex_t t4_adapter_list_lock;
164 static SLIST_HEAD(, adapter) t4_adapter_list;
165 
166 static int t4_temperature_read(void *, sensor_ioctl_scalar_t *);
167 static int t4_voltage_read(void *, sensor_ioctl_scalar_t *);
168 static const ksensor_ops_t t4_temp_ops = {
169 	.kso_kind = ksensor_kind_temperature,
170 	.kso_scalar = t4_temperature_read
171 };
172 
173 static const ksensor_ops_t t4_volt_ops = {
174 	.kso_kind = ksensor_kind_voltage,
175 	.kso_scalar = t4_voltage_read
176 };
177 
178 static int t4_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
179 static int t4_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
180     ddi_ufm_image_t *);
181 static int t4_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
182     ddi_ufm_slot_t *);
183 static ddi_ufm_ops_t t4_ufm_ops = {
184 	.ddi_ufm_op_fill_image = t4_ufm_fill_image,
185 	.ddi_ufm_op_fill_slot = t4_ufm_fill_slot,
186 	.ddi_ufm_op_getcaps = t4_ufm_getcaps
187 };
188 
189 int
190 _init(void)
191 {
192 	int rc;
193 
194 	rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0);
195 	if (rc != 0)
196 		return (rc);
197 
198 	mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL);
199 	SLIST_INIT(&t4_adapter_list);
200 	t4_debug_init();
201 
202 	rc = mod_install(&t4nex_modlinkage);
203 	if (rc != 0)
204 		ddi_soft_state_fini(&t4_list);
205 
206 	return (rc);
207 }
208 
209 int
210 _fini(void)
211 {
212 	int rc;
213 
214 	rc = mod_remove(&t4nex_modlinkage);
215 	if (rc != 0)
216 		return (rc);
217 
218 	mutex_destroy(&t4_adapter_list_lock);
219 	t4_debug_fini();
220 	ddi_soft_state_fini(&t4_list);
221 	return (0);
222 }
223 
224 int
225 _info(struct modinfo *mi)
226 {
227 	return (mod_info(&t4nex_modlinkage, mi));
228 }
229 
230 /* ARGSUSED */
231 static int
232 t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
233 {
234 	struct adapter *sc;
235 	minor_t minor;
236 
237 	minor = getminor((dev_t)arg);	/* same as instance# in our case */
238 
239 	if (cmd == DDI_INFO_DEVT2DEVINFO) {
240 		sc = ddi_get_soft_state(t4_list, minor);
241 		if (sc == NULL)
242 			return (DDI_FAILURE);
243 
244 		ASSERT(sc->dev == (dev_t)arg);
245 		*rp = (void *)sc->dip;
246 	} else if (cmd == DDI_INFO_DEVT2INSTANCE)
247 		*rp = (void *)(uintptr_t)minor;
248 	else
249 		ASSERT(0);
250 
251 	return (DDI_SUCCESS);
252 }
253 
254 static int
255 t4_devo_probe(dev_info_t *dip)
256 {
257 	int rc, id, *reg;
258 	uint_t n, pf;
259 
260 	id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
261 	    "device-id", 0xffff);
262 	if (id == 0xffff)
263 		return (DDI_PROBE_DONTCARE);
264 
265 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
266 	    "reg", &reg, &n);
267 	if (rc != DDI_SUCCESS)
268 		return (DDI_PROBE_DONTCARE);
269 
270 	pf = PCI_REG_FUNC_G(reg[0]);
271 	ddi_prop_free(reg);
272 
273 	/* Prevent driver attachment on any PF except 0 on the FPGA */
274 	if (id == 0xa000 && pf != 0)
275 		return (DDI_PROBE_FAILURE);
276 
277 	return (DDI_PROBE_DONTCARE);
278 }
279 
280 static int
281 t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
282 {
283 	struct adapter *sc = NULL;
284 	struct sge *s;
285 	int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q;
286 	int irq = 0, nxg = 0, n1g = 0;
287 	char name[16];
288 	struct driver_properties *prp;
289 	struct intrs_and_queues iaq;
290 	ddi_device_acc_attr_t da = {
291 		.devacc_attr_version = DDI_DEVICE_ATTR_V0,
292 		.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
293 		.devacc_attr_dataorder = DDI_STRICTORDER_ACC
294 	};
295 	ddi_device_acc_attr_t da1 = {
296 		.devacc_attr_version = DDI_DEVICE_ATTR_V0,
297 		.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
298 		.devacc_attr_dataorder = DDI_STRICTORDER_ACC
299 	};
300 
301 	if (cmd != DDI_ATTACH)
302 		return (DDI_FAILURE);
303 
304 	/*
305 	 * Allocate space for soft state.
306 	 */
307 	instance = ddi_get_instance(dip);
308 	rc = ddi_soft_state_zalloc(t4_list, instance);
309 	if (rc != DDI_SUCCESS) {
310 		cxgb_printf(dip, CE_WARN,
311 		    "failed to allocate soft state: %d", rc);
312 		return (DDI_FAILURE);
313 	}
314 
315 	sc = ddi_get_soft_state(t4_list, instance);
316 	sc->dip = dip;
317 	sc->dev = makedevice(ddi_driver_major(dip), instance);
318 	mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL);
319 	cv_init(&sc->cv, NULL, CV_DRIVER, NULL);
320 	mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);
321 	TAILQ_INIT(&sc->sfl);
322 	mutex_init(&sc->mbox_lock, NULL, MUTEX_DRIVER, NULL);
323 	STAILQ_INIT(&sc->mbox_list);
324 
325 	mutex_enter(&t4_adapter_list_lock);
326 	SLIST_INSERT_HEAD(&t4_adapter_list, sc, link);
327 	mutex_exit(&t4_adapter_list_lock);
328 
329 	sc->pf = getpf(sc);
330 	if (sc->pf >= 8) {
331 		rc = EINVAL;
332 		cxgb_printf(dip, CE_WARN,
333 		    "failed to determine PCI PF# of device");
334 		goto done;
335 	}
336 	sc->mbox = sc->pf;
337 
338 	/* Initialize the driver properties */
339 	prp = &sc->props;
340 	(void) init_driver_props(sc, prp);
341 
342 	/*
343 	 * Enable access to the PCI config space.
344 	 */
345 	rc = pci_config_setup(dip, &sc->pci_regh);
346 	if (rc != DDI_SUCCESS) {
347 		cxgb_printf(dip, CE_WARN,
348 		    "failed to enable PCI config space access: %d", rc);
349 		goto done;
350 	}
351 
352 	/* TODO: Set max read request to 4K */
353 
354 	/*
355 	 * Enable MMIO access.
356 	 */
357 	rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh);
358 	if (rc != DDI_SUCCESS) {
359 		cxgb_printf(dip, CE_WARN,
360 		    "failed to map device registers: %d", rc);
361 		goto done;
362 	}
363 
364 	(void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map));
365 
366 	for (i = 0; i < NCHAN; i++) {
367 		(void) snprintf(name, sizeof (name), "%s-%d", "reclaim", i);
368 		sc->tq[i] = ddi_taskq_create(sc->dip, name, 1,
369 		    TASKQ_DEFAULTPRI, 0);
370 
371 		if (sc->tq[i] == NULL) {
372 			cxgb_printf(dip, CE_WARN, "failed to create taskqs");
373 			rc = DDI_FAILURE;
374 			goto done;
375 		}
376 	}
377 
378 	/*
379 	 * Prepare the adapter for operation.
380 	 */
381 	rc = -t4_prep_adapter(sc, false);
382 	if (rc != 0) {
383 		cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc);
384 		goto done;
385 	}
386 
387 	/*
388 	 * Enable BAR1 access.
389 	 */
390 	sc->doorbells |= DOORBELL_KDB;
391 	rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h);
392 	if (rc != DDI_SUCCESS) {
393 		cxgb_printf(dip, CE_WARN,
394 		    "failed to map BAR1 device registers: %d", rc);
395 		goto done;
396 	} else {
397 		if (is_t5(sc->params.chip)) {
398 			sc->doorbells |= DOORBELL_UDB;
399 			if (prp->wc) {
400 				/*
401 				 * Enable write combining on BAR2.  This is the
402 				 * userspace doorbell BAR and is split into 128B
403 				 * (UDBS_SEG_SIZE) doorbell regions, each
404 				 * associated with an egress queue.  The first
405 				 * 64B has the doorbell and the second 64B can
406 				 * be used to submit a tx work request with an
407 				 * implicit doorbell.
408 				 */
409 				sc->doorbells &= ~DOORBELL_UDB;
410 				sc->doorbells |= (DOORBELL_WCWR |
411 				    DOORBELL_UDBWC);
412 				t4_write_reg(sc, A_SGE_STAT_CFG,
413 				    V_STATSOURCE_T5(7) | V_STATMODE(0));
414 			}
415 		}
416 	}
417 
418 	/*
419 	 * Do this really early.  Note that minor number = instance.
420 	 */
421 	(void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance);
422 	rc = ddi_create_minor_node(dip, name, S_IFCHR, instance,
423 	    DDI_NT_NEXUS, 0);
424 	if (rc != DDI_SUCCESS) {
425 		cxgb_printf(dip, CE_WARN,
426 		    "failed to create device node: %d", rc);
427 		rc = DDI_SUCCESS; /* carry on */
428 	}
429 
430 	/* Do this early. Memory window is required for loading config file. */
431 	setup_memwin(sc);
432 
433 	/* Prepare the firmware for operation */
434 	rc = prep_firmware(sc);
435 	if (rc != 0)
436 		goto done; /* error message displayed already */
437 
438 	rc = adap__pre_init_tweaks(sc);
439 	if (rc != 0)
440 		goto done;
441 
442 	rc = get_params__pre_init(sc);
443 	if (rc != 0)
444 		goto done; /* error message displayed already */
445 
446 	t4_sge_init(sc);
447 
448 	if (sc->flags & MASTER_PF) {
449 		/* get basic stuff going */
450 		rc = -t4_fw_initialize(sc, sc->mbox);
451 		if (rc != 0) {
452 			cxgb_printf(sc->dip, CE_WARN,
453 			    "early init failed: %d.\n", rc);
454 			goto done;
455 		}
456 	}
457 
458 	rc = get_params__post_init(sc);
459 	if (rc != 0)
460 		goto done; /* error message displayed already */
461 
462 	rc = set_params__post_init(sc);
463 	if (rc != 0)
464 		goto done; /* error message displayed already */
465 
466 	/*
467 	 * TODO: This is the place to call t4_set_filter_mode()
468 	 */
469 
470 	/* tweak some settings */
471 	t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
472 	    V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
473 	    V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
474 	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
475 
476 	/*
477 	 * Work-around for bug 2619
478 	 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the
479 	 * VLAN tag extraction is disabled.
480 	 */
481 	t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN);
482 
483 	/* Store filter mode */
484 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
485 	    A_TP_VLAN_PRI_MAP);
486 
487 	/*
488 	 * First pass over all the ports - allocate VIs and initialize some
489 	 * basic parameters like mac address, port type, etc.  We also figure
490 	 * out whether a port is 10G or 1G and use that information when
491 	 * calculating how many interrupts to attempt to allocate.
492 	 */
493 	for_each_port(sc, i) {
494 		struct port_info *pi;
495 
496 		pi = kmem_zalloc(sizeof (*pi), KM_SLEEP);
497 		sc->port[i] = pi;
498 
499 		/* These must be set before t4_port_init */
500 		pi->adapter = sc;
501 		/* LINTED: E_ASSIGN_NARROW_CONV */
502 		pi->port_id = i;
503 	}
504 
505 	/* Allocate the vi and initialize parameters like mac addr */
506 	rc = -t4_port_init(sc, sc->mbox, sc->pf, 0);
507 	if (rc) {
508 		cxgb_printf(dip, CE_WARN, "unable to initialize port: %d", rc);
509 		goto done;
510 	}
511 
512 	for_each_port(sc, i) {
513 		struct port_info *pi = sc->port[i];
514 
515 		mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL);
516 		pi->mtu = ETHERMTU;
517 
518 		if (is_10XG_port(pi)) {
519 			nxg++;
520 			pi->tmr_idx = prp->tmr_idx_10g;
521 			pi->pktc_idx = prp->pktc_idx_10g;
522 		} else {
523 			n1g++;
524 			pi->tmr_idx = prp->tmr_idx_1g;
525 			pi->pktc_idx = prp->pktc_idx_1g;
526 		}
527 
528 		pi->xact_addr_filt = -1;
529 		t4_mc_init(pi);
530 
531 		setbit(&sc->registered_device_map, i);
532 	}
533 
534 	(void) remove_extra_props(sc, nxg, n1g);
535 
536 	if (sc->registered_device_map == 0) {
537 		cxgb_printf(dip, CE_WARN, "no usable ports");
538 		rc = DDI_FAILURE;
539 		goto done;
540 	}
541 
542 	rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq);
543 	if (rc != 0)
544 		goto done; /* error message displayed already */
545 
546 	sc->intr_type = iaq.intr_type;
547 	sc->intr_count = iaq.nirq;
548 
549 	if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) {
550 		sc->props.multi_rings = 0;
551 		cxgb_printf(dip, CE_WARN,
552 		    "Multiple rings disabled as interrupt type is not MSI-X");
553 	}
554 
555 	if (sc->props.multi_rings && iaq.intr_fwd) {
556 		sc->props.multi_rings = 0;
557 		cxgb_printf(dip, CE_WARN,
558 		    "Multiple rings disabled as interrupts are forwarded");
559 	}
560 
561 	if (!sc->props.multi_rings) {
562 		iaq.ntxq10g = 1;
563 		iaq.ntxq1g = 1;
564 	}
565 	s = &sc->sge;
566 	s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g;
567 	s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g;
568 	s->neq = s->ntxq + s->nrxq;	/* the fl in an rxq is an eq */
569 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
570 	if (iaq.intr_fwd != 0)
571 		sc->flags |= INTR_FWD;
572 	s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP);
573 	s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP);
574 	s->iqmap =
575 	    kmem_zalloc(s->iqmap_sz * sizeof (struct sge_iq *), KM_SLEEP);
576 	s->eqmap =
577 	    kmem_zalloc(s->eqmap_sz * sizeof (struct sge_eq *), KM_SLEEP);
578 
579 	sc->intr_handle =
580 	    kmem_zalloc(sc->intr_count * sizeof (ddi_intr_handle_t), KM_SLEEP);
581 
582 	/*
583 	 * Second pass over the ports.  This time we know the number of rx and
584 	 * tx queues that each port should get.
585 	 */
586 	rqidx = tqidx = 0;
587 	for_each_port(sc, i) {
588 		struct port_info *pi = sc->port[i];
589 
590 		if (pi == NULL)
591 			continue;
592 
593 		t4_mc_cb_init(pi);
594 		/* LINTED: E_ASSIGN_NARROW_CONV */
595 		pi->first_rxq = rqidx;
596 		/* LINTED: E_ASSIGN_NARROW_CONV */
597 		pi->nrxq = (is_10XG_port(pi)) ? iaq.nrxq10g
598 		    : iaq.nrxq1g;
599 		/* LINTED: E_ASSIGN_NARROW_CONV */
600 		pi->first_txq = tqidx;
601 		/* LINTED: E_ASSIGN_NARROW_CONV */
602 		pi->ntxq = (is_10XG_port(pi)) ? iaq.ntxq10g
603 		    : iaq.ntxq1g;
604 
605 		rqidx += pi->nrxq;
606 		tqidx += pi->ntxq;
607 
608 		/*
609 		 * Enable hw checksumming and LSO for all ports by default.
610 		 * They can be disabled using ndd (hw_csum and hw_lso).
611 		 */
612 		pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO);
613 	}
614 
615 	/*
616 	 * Setup Interrupts.
617 	 */
618 
619 	i = 0;
620 	rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0,
621 	    sc->intr_count, &i, DDI_INTR_ALLOC_STRICT);
622 	if (rc != DDI_SUCCESS) {
623 		cxgb_printf(dip, CE_WARN,
624 		    "failed to allocate %d interrupt(s) of type %d: %d, %d",
625 		    sc->intr_count, sc->intr_type, rc, i);
626 		goto done;
627 	}
628 	ASSERT(sc->intr_count == i); /* allocation was STRICT */
629 	(void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap);
630 	(void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri);
631 	if (sc->intr_count == 1) {
632 		ASSERT(sc->flags & INTR_FWD);
633 		(void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc,
634 		    &s->fwq);
635 	} else {
636 		/* Multiple interrupts.  The first one is always error intr */
637 		(void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc,
638 		    NULL);
639 		irq++;
640 
641 		/* The second one is always the firmware event queue */
642 		(void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc,
643 		    &s->fwq);
644 		irq++;
645 		/*
646 		 * Note that if INTR_FWD is set then either the NIC rx
647 		 * queues or the TOE rx queues (but not both) will be taking
648 		 * direct interrupts.
649 		 *
650 		 * There is no need to check for is_offload(sc) as nofldrxq
651 		 * will be 0 if offload is disabled.
652 		 */
653 		for_each_port(sc, i) {
654 			struct port_info *pi = sc->port[i];
655 			struct sge_rxq *rxq;
656 			rxq = &s->rxq[pi->first_rxq];
657 			for (q = 0; q < pi->nrxq; q++, rxq++) {
658 				(void) ddi_intr_add_handler(
659 				    sc->intr_handle[irq], t4_intr, sc,
660 				    &rxq->iq);
661 				irq++;
662 			}
663 		}
664 
665 	}
666 	sc->flags |= INTR_ALLOCATED;
667 
668 	if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_TEMPERATURE,
669 	    &t4_temp_ops, sc, "temp", &sc->temp_sensor)) != 0) {
670 		cxgb_printf(dip, CE_WARN, "failed to create temperature "
671 		    "sensor: %d", rc);
672 		rc = DDI_FAILURE;
673 		goto done;
674 	}
675 
676 	if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_VOLTAGE,
677 	    &t4_volt_ops, sc, "vdd", &sc->volt_sensor)) != 0) {
678 		cxgb_printf(dip, CE_WARN, "failed to create voltage "
679 		    "sensor: %d", rc);
680 		rc = DDI_FAILURE;
681 		goto done;
682 	}
683 
684 
685 	if ((rc = ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &t4_ufm_ops,
686 	    &sc->ufm_hdl, sc)) != 0) {
687 		cxgb_printf(dip, CE_WARN, "failed to enable UFM ops: %d", rc);
688 		rc = DDI_FAILURE;
689 		goto done;
690 	}
691 	ddi_ufm_update(sc->ufm_hdl);
692 	ddi_report_dev(dip);
693 
694 	/*
695 	 * Hardware/Firmware/etc. Version/Revision IDs.
696 	 */
697 	t4_dump_version_info(sc);
698 
699 	cxgb_printf(dip, CE_NOTE, "(%d rxq, %d txq total) %d %s.",
700 	    rqidx, tqidx, sc->intr_count,
701 	    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
702 	    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
703 	    "fixed interrupt");
704 
705 	sc->ksp = setup_kstats(sc);
706 	sc->ksp_stat = setup_wc_kstats(sc);
707 	sc->params.drv_memwin = MEMWIN_NIC;
708 
709 done:
710 	if (rc != DDI_SUCCESS) {
711 		(void) t4_devo_detach(dip, DDI_DETACH);
712 
713 		/* rc may have errno style errors or DDI errors */
714 		rc = DDI_FAILURE;
715 	}
716 
717 	return (rc);
718 }
719 
720 static int
721 t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
722 {
723 	int instance, i;
724 	struct adapter *sc;
725 	struct port_info *pi;
726 	struct sge *s;
727 
728 	if (cmd != DDI_DETACH)
729 		return (DDI_FAILURE);
730 
731 	instance = ddi_get_instance(dip);
732 	sc = ddi_get_soft_state(t4_list, instance);
733 	if (sc == NULL)
734 		return (DDI_SUCCESS);
735 
736 	if (sc->flags & FULL_INIT_DONE) {
737 		t4_intr_disable(sc);
738 		for_each_port(sc, i) {
739 			pi = sc->port[i];
740 			if (pi && pi->flags & PORT_INIT_DONE)
741 				(void) port_full_uninit(pi);
742 		}
743 		(void) adapter_full_uninit(sc);
744 	}
745 
746 	/* Safe to call no matter what */
747 	if (sc->ufm_hdl != NULL) {
748 		ddi_ufm_fini(sc->ufm_hdl);
749 		sc->ufm_hdl = NULL;
750 	}
751 	(void) ksensor_remove(dip, KSENSOR_ALL_IDS);
752 	ddi_prop_remove_all(dip);
753 	ddi_remove_minor_node(dip, NULL);
754 
755 	for (i = 0; i < NCHAN; i++) {
756 		if (sc->tq[i]) {
757 			ddi_taskq_wait(sc->tq[i]);
758 			ddi_taskq_destroy(sc->tq[i]);
759 		}
760 	}
761 
762 	if (sc->ksp != NULL)
763 		kstat_delete(sc->ksp);
764 	if (sc->ksp_stat != NULL)
765 		kstat_delete(sc->ksp_stat);
766 
767 	s = &sc->sge;
768 	if (s->rxq != NULL)
769 		kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq));
770 	if (s->txq != NULL)
771 		kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq));
772 	if (s->iqmap != NULL)
773 		kmem_free(s->iqmap, s->iqmap_sz * sizeof (struct sge_iq *));
774 	if (s->eqmap != NULL)
775 		kmem_free(s->eqmap, s->eqmap_sz * sizeof (struct sge_eq *));
776 
777 	if (s->rxbuf_cache != NULL)
778 		kmem_cache_destroy(s->rxbuf_cache);
779 
780 	if (sc->flags & INTR_ALLOCATED) {
781 		for (i = 0; i < sc->intr_count; i++) {
782 			(void) ddi_intr_remove_handler(sc->intr_handle[i]);
783 			(void) ddi_intr_free(sc->intr_handle[i]);
784 		}
785 		sc->flags &= ~INTR_ALLOCATED;
786 	}
787 
788 	if (sc->intr_handle != NULL) {
789 		kmem_free(sc->intr_handle,
790 		    sc->intr_count * sizeof (*sc->intr_handle));
791 	}
792 
793 	for_each_port(sc, i) {
794 		pi = sc->port[i];
795 		if (pi != NULL) {
796 			mutex_destroy(&pi->lock);
797 			kmem_free(pi, sizeof (*pi));
798 			clrbit(&sc->registered_device_map, i);
799 		}
800 	}
801 
802 	if (sc->flags & FW_OK)
803 		(void) t4_fw_bye(sc, sc->mbox);
804 
805 	if (sc->reg1h != NULL)
806 		ddi_regs_map_free(&sc->reg1h);
807 
808 	if (sc->regh != NULL)
809 		ddi_regs_map_free(&sc->regh);
810 
811 	if (sc->pci_regh != NULL)
812 		pci_config_teardown(&sc->pci_regh);
813 
814 	mutex_enter(&t4_adapter_list_lock);
815 	SLIST_REMOVE(&t4_adapter_list, sc, adapter, link);
816 	mutex_exit(&t4_adapter_list_lock);
817 
818 	mutex_destroy(&sc->mbox_lock);
819 	mutex_destroy(&sc->lock);
820 	cv_destroy(&sc->cv);
821 	mutex_destroy(&sc->sfl_lock);
822 
823 #ifdef DEBUG
824 	bzero(sc, sizeof (*sc));
825 #endif
826 	ddi_soft_state_free(t4_list, instance);
827 
828 	return (DDI_SUCCESS);
829 }
830 
831 static int
832 t4_devo_quiesce(dev_info_t *dip)
833 {
834 	int instance;
835 	struct adapter *sc;
836 
837 	instance = ddi_get_instance(dip);
838 	sc = ddi_get_soft_state(t4_list, instance);
839 	if (sc == NULL)
840 		return (DDI_SUCCESS);
841 
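
	/* Stop SGE DMA, mask interrupts, then reset the chip. */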
842 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
843 	t4_intr_disable(sc);
844 	t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST);
845 
846 	return (DDI_SUCCESS);
847 }
848 
849 static int
850 t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
851     void *result)
852 {
853 	char s[4];
854 	struct port_info *pi;
855 	dev_info_t *child = (dev_info_t *)arg;
856 
857 	switch (op) {
858 	case DDI_CTLOPS_REPORTDEV:
859 		pi = ddi_get_parent_data(rdip);
860 		pi->instance = ddi_get_instance(dip);
861 		pi->child_inst = ddi_get_instance(rdip);
862 		return (DDI_SUCCESS);
863 
864 	case DDI_CTLOPS_INITCHILD:
865 		pi = ddi_get_parent_data(child);
866 		if (pi == NULL)
867 			return (DDI_NOT_WELL_FORMED);
868 		(void) snprintf(s, sizeof (s), "%d", pi->port_id);
869 		ddi_set_name_addr(child, s);
870 		return (DDI_SUCCESS);
871 
872 	case DDI_CTLOPS_UNINITCHILD:
873 		ddi_set_name_addr(child, NULL);
874 		return (DDI_SUCCESS);
875 
876 	case DDI_CTLOPS_ATTACH:
877 	case DDI_CTLOPS_DETACH:
878 		return (DDI_SUCCESS);
879 
880 	default:
881 		return (ddi_ctlops(dip, rdip, op, arg, result));
882 	}
883 }
884 
885 static int
886 t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg,
887     dev_info_t **cdipp)
888 {
889 	int instance, i;
890 	struct adapter *sc;
891 
892 	instance = ddi_get_instance(dip);
893 	sc = ddi_get_soft_state(t4_list, instance);
894 
895 	if (op == BUS_CONFIG_ONE) {
896 		char *c;
897 
898 		/*
899 		 * arg is something like "cxgb@0" where 0 is the port_id hanging
900 		 * off this nexus.
901 		 */
902 
903 		c = arg;
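		/* Walk to the last character of the unit-address string. */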
904 		while (*(c + 1))
905 			c++;
906 
907 		/* There should be exactly 1 digit after '@' */
908 		if (*(c - 1) != '@')
909 			return (NDI_FAILURE);
910 
911 		i = *c - '0';
912 
913 		if (add_child_node(sc, i) != 0)
914 			return (NDI_FAILURE);
915 
916 		flags |= NDI_ONLINE_ATTACH;
917 
918 	} else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) {
919 		/* Allocate and bind all child device nodes */
920 		for_each_port(sc, i)
921 		    (void) add_child_node(sc, i);
922 		flags |= NDI_ONLINE_ATTACH;
923 	}
924 
925 	return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0));
926 }
927 
928 static int
929 t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
930     void *arg)
931 {
932 	int instance, i, rc;
933 	struct adapter *sc;
934 
935 	instance = ddi_get_instance(dip);
936 	sc = ddi_get_soft_state(t4_list, instance);
937 
938 	if (op == BUS_UNCONFIG_ONE || op == BUS_UNCONFIG_ALL ||
939 	    op == BUS_UNCONFIG_DRIVER)
940 		flags |= NDI_UNCONFIG;
941 
942 	rc = ndi_busop_bus_unconfig(dip, flags, op, arg);
943 	if (rc != 0)
944 		return (rc);
945 
946 	if (op == BUS_UNCONFIG_ONE) {
947 		char *c;
948 
949 		c = arg;
950 		while (*(c + 1))
951 			c++;
952 
953 		if (*(c - 1) != '@')
954 			return (NDI_SUCCESS);
955 
956 		i = *c - '0';
957 
958 		rc = remove_child_node(sc, i);
959 
960 	} else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) {
961 
962 		for_each_port(sc, i)
963 		    (void) remove_child_node(sc, i);
964 	}
965 
966 	return (rc);
967 }
968 
969 /* ARGSUSED */
970 static int
971 t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp)
972 {
973 	struct adapter *sc;
974 
975 	if (otyp != OTYP_CHR)
976 		return (EINVAL);
977 
978 	sc = ddi_get_soft_state(t4_list, getminor(*devp));
979 	if (sc == NULL)
980 		return (ENXIO);
981 
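	/*
	 * atomic_cas_uint() returns the previous value of sc->open: 0 (and
	 * the device is now marked busy) for the first opener, EBUSY for any
	 * later one until t4_cb_close() clears it.
	 */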
982 	return (atomic_cas_uint(&sc->open, 0, EBUSY));
983 }
984 
985 /* ARGSUSED */
986 static int
987 t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp)
988 {
989 	struct adapter *sc;
990 
991 	sc = ddi_get_soft_state(t4_list, getminor(dev));
992 	if (sc == NULL)
993 		return (EINVAL);
994 
995 	(void) atomic_swap_uint(&sc->open, 0);
996 	return (0);
997 }
998 
999 /* ARGSUSED */
1000 static int
1001 t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp)
1002 {
1003 	int instance;
1004 	struct adapter *sc;
1005 	void *data = (void *)d;
1006 
1007 	if (crgetuid(credp) != 0)
1008 		return (EPERM);
1009 
1010 	instance = getminor(dev);
1011 	sc = ddi_get_soft_state(t4_list, instance);
1012 	if (sc == NULL)
1013 		return (EINVAL);
1014 
1015 	return (t4_ioctl(sc, cmd, data, mode));
1016 }
1017 
1018 static unsigned int
1019 getpf(struct adapter *sc)
1020 {
1021 	int rc, *data;
1022 	uint_t n, pf;
1023 
1024 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
1025 	    DDI_PROP_DONTPASS, "reg", &data, &n);
1026 	if (rc != DDI_SUCCESS) {
1027 		cxgb_printf(sc->dip, CE_WARN,
1028 		    "failed to lookup \"reg\" property: %d", rc);
1029 		return (0xff);
1030 	}
1031 
1032 	pf = PCI_REG_FUNC_G(data[0]);
1033 	ddi_prop_free(data);
1034 
1035 	return (pf);
1036 }
1037 
1038 /*
1039  * Install a compatible firmware (if required), establish contact with it,
1040  * become the master, and reset the device.
1041  */
1042 static int
1043 prep_firmware(struct adapter *sc)
1044 {
1045 	int rc;
1046 	size_t fw_size;
1047 	int reset = 1;
1048 	enum dev_state state;
1049 	unsigned char *fw_data;
1050 	struct fw_hdr *card_fw, *hdr;
1051 	const char *fw_file = NULL;
1052 	firmware_handle_t fw_hdl;
1053 	struct fw_info fi, *fw_info = &fi;
1054 
1055 	struct driver_properties *p = &sc->props;
1056 
1057 	/* Contact firmware, request master */
1058 	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
1059 	if (rc < 0) {
1060 		rc = -rc;
1061 		cxgb_printf(sc->dip, CE_WARN,
1062 		    "failed to connect to the firmware: %d.", rc);
1063 		return (rc);
1064 	}
1065 
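	/*
	 * t4_fw_hello() returns the mailbox of the PF that the firmware
	 * selected as master; if that is ours, this instance is responsible
	 * for global initialization.
	 */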
1066 	if (rc == sc->mbox)
1067 		sc->flags |= MASTER_PF;
1068 
1069 	/* We may need FW version info for later reporting */
1070 	t4_get_version_info(sc);
1071 
1072 	switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
1073 	case CHELSIO_T4:
1074 		fw_file = "t4fw.bin";
1075 		break;
1076 	case CHELSIO_T5:
1077 		fw_file = "t5fw.bin";
1078 		break;
1079 	case CHELSIO_T6:
1080 		fw_file = "t6fw.bin";
1081 		break;
1082 	default:
1083 		cxgb_printf(sc->dip, CE_WARN, "Adapter type not supported\n");
1084 		return (EINVAL);
1085 	}
1086 
1087 	if (firmware_open(T4_PORT_NAME, fw_file, &fw_hdl) != 0) {
1088 		cxgb_printf(sc->dip, CE_WARN, "Could not open %s\n", fw_file);
1089 		return (EINVAL);
1090 	}
1091 
1092 	fw_size = firmware_get_size(fw_hdl);
1093 
1094 	if (fw_size < sizeof (struct fw_hdr)) {
1095 		cxgb_printf(sc->dip, CE_WARN, "%s is too small (%ld bytes)\n",
1096 		    fw_file, fw_size);
1097 		firmware_close(fw_hdl);
1098 		return (EINVAL);
1099 	}
1100 
1101 	if (fw_size > FLASH_FW_MAX_SIZE) {
1102 		cxgb_printf(sc->dip, CE_WARN,
1103 		    "%s is too large (%ld bytes, max allowed is %ld)\n",
1104 		    fw_file, fw_size, FLASH_FW_MAX_SIZE);
1105 		firmware_close(fw_hdl);
1106 		return (EFBIG);
1107 	}
1108 
1109 	fw_data = kmem_zalloc(fw_size, KM_SLEEP);
1110 	if (firmware_read(fw_hdl, 0, fw_data, fw_size) != 0) {
1111 		cxgb_printf(sc->dip, CE_WARN, "Failed to read from %s\n",
1112 		    fw_file);
1113 		firmware_close(fw_hdl);
1114 		kmem_free(fw_data, fw_size);
1115 		return (EINVAL);
1116 	}
1117 	firmware_close(fw_hdl);
1118 
1119 	bzero(fw_info, sizeof (*fw_info));
1120 	fw_info->chip = CHELSIO_CHIP_VERSION(sc->params.chip);
1121 
1122 	hdr = (struct fw_hdr *)fw_data;
1123 	fw_info->fw_hdr.fw_ver = hdr->fw_ver;
1124 	fw_info->fw_hdr.chip = hdr->chip;
1125 	fw_info->fw_hdr.intfver_nic = hdr->intfver_nic;
1126 	fw_info->fw_hdr.intfver_vnic = hdr->intfver_vnic;
1127 	fw_info->fw_hdr.intfver_ofld = hdr->intfver_ofld;
1128 	fw_info->fw_hdr.intfver_ri = hdr->intfver_ri;
1129 	fw_info->fw_hdr.intfver_iscsipdu = hdr->intfver_iscsipdu;
1130 	fw_info->fw_hdr.intfver_iscsi = hdr->intfver_iscsi;
1131 	fw_info->fw_hdr.intfver_fcoepdu = hdr->intfver_fcoepdu;
1132 	fw_info->fw_hdr.intfver_fcoe = hdr->intfver_fcoe;
1133 
1134 	/* allocate memory to read the header of the firmware on the card */
1135 	card_fw = kmem_zalloc(sizeof (*card_fw), KM_SLEEP);
1136 
1137 	rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw,
1138 	    p->t4_fw_install, state, &reset);
1139 
1140 	kmem_free(card_fw, sizeof (*card_fw));
1141 	kmem_free(fw_data, fw_size);
1142 
1143 	if (rc != 0) {
1144 		cxgb_printf(sc->dip, CE_WARN,
1145 		    "failed to install firmware: %d", rc);
1146 		return (rc);
1147 	} else {
1148 		/* refresh */
1149 		(void) t4_check_fw_version(sc);
1150 	}
1151 
1152 	/* Reset device */
1153 	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1154 	if (rc != 0) {
1155 		cxgb_printf(sc->dip, CE_WARN,
1156 		    "firmware reset failed: %d.", rc);
1157 		if (rc != ETIMEDOUT && rc != EIO)
1158 			(void) t4_fw_bye(sc, sc->mbox);
1159 		return (rc);
1160 	}
1161 
1162 	/* Partition adapter resources as specified in the config file. */
1163 	if (sc->flags & MASTER_PF) {
1164 		/* Handle default vs special T4 config file */
1165 
1166 		rc = partition_resources(sc);
1167 		if (rc != 0)
1168 			goto err;	/* error message displayed already */
1169 	}
1170 
1171 	sc->flags |= FW_OK;
1172 	return (0);
1173 err:
1174 	return (rc);
1175 }
1177 
1178 static const struct memwin t4_memwin[] = {
1179 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1180 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1181 	{ MEMWIN2_BASE, MEMWIN2_APERTURE }
1182 };
1183 
1184 static const struct memwin t5_memwin[] = {
1185 	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
1186 	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
1187 	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1188 };
1189 
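/*
 * Build the 32-bit identifier of a device-wide (DEV) or per-function (PFVF)
 * firmware parameter, as consumed by t4_query_params()/t4_set_params().
 */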
1190 #define	FW_PARAM_DEV(param) \
1191 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1192 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1193 #define	FW_PARAM_PFVF(param) \
1194 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1195 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
1196 
1197 /*
1198  * Verify that the memory range specified by the memtype/offset/len pair is
1199  * valid and lies entirely within the memtype specified.  The global address of
1200  * the start of the range is returned in addr.
1201  */
1202 static int
1203 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1204     uint32_t *addr)
1205 {
1206 	uint32_t em, addr_len, maddr, mlen;
1207 
1208 	/* Memory can only be accessed in naturally aligned 4 byte units */
1209 	if (off & 3 || len & 3 || len == 0)
1210 		return (EINVAL);
1211 
1212 	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
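	/*
	 * Each MA BAR register below encodes its region's base and size in
	 * megabyte units, hence the << 20 shifts.
	 */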
1213 	switch (mtype) {
1214 		case MEM_EDC0:
1215 			if (!(em & F_EDRAM0_ENABLE))
1216 				return (EINVAL);
1217 			addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1218 			maddr = G_EDRAM0_BASE(addr_len) << 20;
1219 			mlen = G_EDRAM0_SIZE(addr_len) << 20;
1220 			break;
1221 		case MEM_EDC1:
1222 			if (!(em & F_EDRAM1_ENABLE))
1223 				return (EINVAL);
1224 			addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1225 			maddr = G_EDRAM1_BASE(addr_len) << 20;
1226 			mlen = G_EDRAM1_SIZE(addr_len) << 20;
1227 			break;
1228 		case MEM_MC:
1229 			if (!(em & F_EXT_MEM_ENABLE))
1230 				return (EINVAL);
1231 			addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1232 			maddr = G_EXT_MEM_BASE(addr_len) << 20;
1233 			mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1234 			break;
1235 		case MEM_MC1:
1236 			if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE))
1237 				return (EINVAL);
1238 			addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1239 			maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1240 			mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1241 			break;
1242 		default:
1243 			return (EINVAL);
1244 	}
1245 
1246 	if (mlen > 0 && off < mlen && off + len <= mlen) {
1247 		*addr = maddr + off;    /* global address */
1248 		return (0);
1249 	}
1250 
1251 	return (EFAULT);
1252 }
1253 
1254 void
1255 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1256 {
1257 	const struct memwin *mw;
1258 
1259 	if (is_t4(sc->params.chip)) {
1260 		mw = &t4_memwin[win];
1261 	} else {
1262 		mw = &t5_memwin[win];
1263 	}
1264 
1265 	if (base != NULL)
1266 		*base = mw->base;
1267 	if (aperture != NULL)
1268 		*aperture = mw->aperture;
1269 }
1270 
1271 /*
1272  * Upload configuration file to card's memory.
1273  */
1274 static int
1275 upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma)
1276 {
1277 	int rc = 0;
1278 	size_t cflen, cfbaselen;
1279 	uint_t i, n;
1280 	uint32_t param, val, addr, mtype, maddr;
1281 	uint32_t off, mw_base, mw_aperture;
1282 	uint32_t *cfdata, *cfbase;
1283 	firmware_handle_t fw_hdl;
1284 	const char *cfg_file = NULL;
1285 
1286 	/* Figure out where the firmware wants us to upload it. */
1287 	param = FW_PARAM_DEV(CF);
1288 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1289 	if (rc != 0) {
1290 		/* Firmwares without config file support will fail this way */
1291 		cxgb_printf(sc->dip, CE_WARN,
1292 		    "failed to query config file location: %d.\n", rc);
1293 		return (rc);
1294 	}
1295 	*mt = mtype = G_FW_PARAMS_PARAM_Y(val);
1296 	*ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
1297 
1298 	switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
1299 	case CHELSIO_T4:
1300 		cfg_file = "t4fw_cfg.txt";
1301 		break;
1302 	case CHELSIO_T5:
1303 		cfg_file = "t5fw_cfg.txt";
1304 		break;
1305 	case CHELSIO_T6:
1306 		cfg_file = "t6fw_cfg.txt";
1307 		break;
1308 	default:
1309 		cxgb_printf(sc->dip, CE_WARN, "Invalid Adapter detected\n");
1310 		return (EINVAL);
1311 	}
1312 
1313 	if (firmware_open(T4_PORT_NAME, cfg_file, &fw_hdl) != 0) {
1314 		cxgb_printf(sc->dip, CE_WARN, "Could not open %s\n", cfg_file);
1315 		return (EINVAL);
1316 	}
1317 
1318 	cflen = firmware_get_size(fw_hdl);
1319 	/*
1320 	 * Truncate the length to a multiple of uint32_ts. The configuration
1321 	 * text files have trailing comments (and hopefully always will) so
1322 	 * nothing important is lost.
1323 	 */
1324 	cflen &= ~3;
1325 
1326 	if (cflen > FLASH_CFG_MAX_SIZE) {
1327 		cxgb_printf(sc->dip, CE_WARN,
1328 		    "config file too long (%d, max allowed is %d).  ",
1329 		    cflen, FLASH_CFG_MAX_SIZE);
1330 		firmware_close(fw_hdl);
1331 		return (EFBIG);
1332 	}
1333 
1334 	rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr);
1335 	if (rc != 0) {
1336 		cxgb_printf(sc->dip, CE_WARN,
1337 		    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
1338 		    "Will try to use the config on the card, if any.\n",
1339 		    __func__, mtype, maddr, cflen, rc);
1340 		firmware_close(fw_hdl);
1341 		return (EFAULT);
1342 	}
1343 
1344 	cfbaselen = cflen;
1345 	cfbase = cfdata = kmem_zalloc(cflen, KM_SLEEP);
1346 	if (firmware_read(fw_hdl, 0, cfdata, cflen) != 0) {
1347 		cxgb_printf(sc->dip, CE_WARN, "Failed to read from %s\n",
1348 		    cfg_file);
1349 		firmware_close(fw_hdl);
1350 		kmem_free(cfbase, cfbaselen);
1351 		return (EINVAL);
1352 	}
1353 	firmware_close(fw_hdl);
1354 
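	/*
	 * Copy the file into adapter memory one word at a time through
	 * memory window 2, repositioning the window whenever the remaining
	 * data crosses the window's aperture.
	 */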
1355 	memwin_info(sc, 2, &mw_base, &mw_aperture);
1356 	while (cflen) {
1357 		off = position_memwin(sc, 2, addr);
1358 		n = min(cflen, mw_aperture - off);
1359 		for (i = 0; i < n; i += 4)
1360 			t4_write_reg(sc, mw_base + off + i, *cfdata++);
1361 		cflen -= n;
1362 		addr += n;
1363 	}
1364 
1365 	kmem_free(cfbase, cfbaselen);
1366 
1367 	return (rc);
1368 }
1369 
1370 /*
1371  * Partition chip resources for use between various PFs, VFs, etc.  This is done
1372  * by uploading the firmware configuration file to the adapter and instructing
1373  * the firmware to process it.
1374  */
1375 static int
1376 partition_resources(struct adapter *sc)
1377 {
1378 	int rc;
1379 	struct fw_caps_config_cmd caps;
1380 	uint32_t mtype, maddr, finicsum, cfcsum;
1381 
1382 	rc = upload_config_file(sc, &mtype, &maddr);
1383 	if (rc != 0) {
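		/* Fall back to the config file stored in on-board flash. */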
1384 		mtype = FW_MEMTYPE_CF_FLASH;
1385 		maddr = t4_flash_cfg_addr(sc);
1386 	}
1387 
1388 	bzero(&caps, sizeof (caps));
1389 	caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1390 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
1391 	caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1392 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1393 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
1394 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
1395 	if (rc != 0) {
1396 		cxgb_printf(sc->dip, CE_WARN,
1397 		    "failed to pre-process config file: %d.\n", rc);
1398 		return (rc);
1399 	}
1400 
1401 	finicsum = ntohl(caps.finicsum);
1402 	cfcsum = ntohl(caps.cfcsum);
1403 	if (finicsum != cfcsum) {
1404 		cxgb_printf(sc->dip, CE_WARN,
1405 		    "WARNING: config file checksum mismatch: %08x %08x\n",
1406 		    finicsum, cfcsum);
1407 	}
1408 	sc->cfcsum = cfcsum;
1409 
1410 	/* TODO: Need to configure this correctly */
1411 	caps.toecaps = htons(FW_CAPS_CONFIG_TOE);
1412 	caps.iscsicaps = 0;
1413 	caps.rdmacaps = 0;
1414 	caps.fcoecaps = 0;
1415 	/* TODO: Disable VNIC cap for now */
1416 	caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
1417 
1418 	caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1419 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1420 	caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
1421 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL);
1422 	if (rc != 0) {
1423 		cxgb_printf(sc->dip, CE_WARN,
1424 		    "failed to process config file: %d.\n", rc);
1425 		return (rc);
1426 	}
1427 
1428 	return (0);
1429 }
1430 
1431 /*
1432  * Tweak configuration based on module parameters, etc.  Most of these have
1433  * defaults assigned to them by Firmware Configuration Files (if we're using
1434  * them) but need to be explicitly set if we're using hard-coded
1435  * initialization.  But even in the case of using Firmware Configuration
1436  * Files, we'd like to expose the ability to change these via module
1437  * parameters so these are essentially common tweaks/settings for
1438  * Configuration Files and hard-coded initialization ...
1439  */
1440 static int
1441 adap__pre_init_tweaks(struct adapter *sc)
1442 {
1443 	int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */
1444 
1445 	/*
1446 	 * Fix up various Host-Dependent Parameters like Page Size, Cache
1447 	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
1448 	 * 64B Cache Line Size ...
1449 	 */
1450 	(void) t4_fixup_host_params_compat(sc, PAGE_SIZE, _CACHE_LINE_SIZE,
1451 	    T5_LAST_REV);
1452 
1453 	t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
1454 	    V_PKTSHIFT(rx_dma_offset));
1455 
1456 	return (0);
1457 }
1458 /*
1459  * Retrieve parameters that are needed (or nice to have) prior to calling
1460  * t4_sge_init and t4_fw_initialize.
1461  */
1462 static int
1463 get_params__pre_init(struct adapter *sc)
1464 {
1465 	int rc;
1466 	uint32_t param[2], val[2];
1467 	struct fw_devlog_cmd cmd;
1468 	struct devlog_params *dlog = &sc->params.devlog;
1469 
1470 	/*
1471 	 * Grab the raw VPD parameters.
1472 	 */
1473 	rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd);
1474 	if (rc != 0) {
1475 		cxgb_printf(sc->dip, CE_WARN,
1476 		    "failed to query VPD parameters (pre_init): %d.\n", rc);
1477 		return (rc);
1478 	}
1479 
1480 	param[0] = FW_PARAM_DEV(PORTVEC);
1481 	param[1] = FW_PARAM_DEV(CCLK);
1482 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1483 	if (rc != 0) {
1484 		cxgb_printf(sc->dip, CE_WARN,
1485 		    "failed to query parameters (pre_init): %d.\n", rc);
1486 		return (rc);
1487 	}
1488 
1489 	sc->params.portvec = val[0];
1490 	sc->params.nports = 0;
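	/* Count the set bits in the port vector (Kernighan's method). */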
1491 	while (val[0]) {
1492 		sc->params.nports++;
1493 		val[0] &= val[0] - 1;
1494 	}
1495 
1496 	sc->params.vpd.cclk = val[1];
1497 
1498 	/* Read device log parameters. */
1499 	bzero(&cmd, sizeof (cmd));
1500 	cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1501 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
1502 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
1503 	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd);
1504 	if (rc != 0) {
1505 		cxgb_printf(sc->dip, CE_WARN,
1506 		    "failed to get devlog parameters: %d.\n", rc);
1507 		bzero(dlog, sizeof (*dlog));
1508 		rc = 0;	/* devlog isn't critical for device operation */
1509 	} else {
1510 		val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog);
1511 		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
1512 		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
1513 		dlog->size = ntohl(cmd.memsize_devlog);
1514 	}
1515 
1516 	return (rc);
1517 }
1518 
1519 /*
1520  * Retrieve various parameters that are of interest to the driver.  The device
1521  * has been initialized by the firmware at this point.
1522  */
1523 static int
1524 get_params__post_init(struct adapter *sc)
1525 {
1526 	int rc;
1527 	uint32_t param[7], val[7];
1528 	struct fw_caps_config_cmd caps;
1529 
1530 	param[0] = FW_PARAM_PFVF(IQFLINT_START);
1531 	param[1] = FW_PARAM_PFVF(EQ_START);
1532 	param[2] = FW_PARAM_PFVF(FILTER_START);
1533 	param[3] = FW_PARAM_PFVF(FILTER_END);
1534 	param[4] = FW_PARAM_PFVF(L2T_START);
1535 	param[5] = FW_PARAM_PFVF(L2T_END);
1536 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1537 	if (rc != 0) {
1538 		cxgb_printf(sc->dip, CE_WARN,
1539 		    "failed to query parameters (post_init): %d.\n", rc);
1540 		return (rc);
1541 	}
1542 
1543 	/* LINTED: E_ASSIGN_NARROW_CONV */
1544 	sc->sge.iq_start = val[0];
1545 	sc->sge.eq_start = val[1];
1546 	sc->tids.ftid_base = val[2];
1547 	sc->tids.nftids = val[3] - val[2] + 1;
1548 	sc->vres.l2t.start = val[4];
1549 	sc->vres.l2t.size = val[5] - val[4] + 1;
1550 
1551 	param[0] = FW_PARAM_PFVF(IQFLINT_END);
1552 	param[1] = FW_PARAM_PFVF(EQ_END);
1553 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1554 	if (rc != 0) {
1555 		cxgb_printf(sc->dip, CE_WARN, "failed to query eq/iq map "
1556 		    "size parameters (post_init): %d.\n", rc);
1557 		return (rc);
1558 	}
1559 
1560 	sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
1561 	sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;
1562 
1563 	/* get capabilities */
1564 	bzero(&caps, sizeof (caps));
1565 	caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1566 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
1567 	caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
1568 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
1569 	if (rc != 0) {
1570 		cxgb_printf(sc->dip, CE_WARN,
1571 		    "failed to get card capabilities: %d.\n", rc);
1572 		return (rc);
1573 	}
1574 
1575 	if (caps.toecaps != 0) {
1576 		/* query offload-related parameters */
1577 		param[0] = FW_PARAM_DEV(NTID);
1578 		param[1] = FW_PARAM_PFVF(SERVER_START);
1579 		param[2] = FW_PARAM_PFVF(SERVER_END);
1580 		param[3] = FW_PARAM_PFVF(TDDP_START);
1581 		param[4] = FW_PARAM_PFVF(TDDP_END);
1582 		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1583 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1584 		if (rc != 0) {
1585 			cxgb_printf(sc->dip, CE_WARN,
1586 			    "failed to query TOE parameters: %d.\n", rc);
1587 			return (rc);
1588 		}
1589 		sc->tids.ntids = val[0];
1590 		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1591 		sc->tids.stid_base = val[1];
1592 		sc->tids.nstids = val[2] - val[1] + 1;
1593 		sc->vres.ddp.start = val[3];
1594 		sc->vres.ddp.size = val[4] - val[3] + 1;
1595 		sc->params.ofldq_wr_cred = val[5];
1596 		sc->params.offload = 1;
1597 	}
1598 
1599 	rc = -t4_get_pfres(sc);
1600 	if (rc != 0) {
1601 		cxgb_printf(sc->dip, CE_WARN,
1602 		    "failed to query PF resource params: %d.\n", rc);
1603 		return (rc);
1604 	}
1605 
1606 	/* These are finalized by FW initialization, load their values now */
1607 	val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
1608 	sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
1609 	sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
1610 	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
1611 
1612 	return (rc);
1613 }
1614 
1615 static int
1616 set_params__post_init(struct adapter *sc)
1617 {
1618 	uint32_t param, val;
1619 
1620 	/* ask for encapsulated CPLs */
1621 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
1622 	val = 1;
1623 	(void) t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1624 
1625 	return (0);
1626 }
1627 
1628 /* TODO: verify */
1629 static void
1630 setup_memwin(struct adapter *sc)
1631 {
1632 	pci_regspec_t *data;
1633 	int rc;
1634 	uint_t n;
1635 	uintptr_t bar0;
1636 	uintptr_t mem_win0_base, mem_win1_base, mem_win2_base;
1637 	uintptr_t mem_win2_aperture;
1638 
1639 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
1640 	    DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n);
1641 	if (rc != DDI_SUCCESS) {
1642 		cxgb_printf(sc->dip, CE_WARN,
1643 		    "failed to lookup \"assigned-addresses\" property: %d", rc);
1644 		return;
1645 	}
1646 	n /= sizeof (*data);
1647 
1648 	bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low;
1649 	ddi_prop_free(data);
1650 
1651 	if (is_t4(sc->params.chip)) {
1652 		mem_win0_base = bar0 + MEMWIN0_BASE;
1653 		mem_win1_base = bar0 + MEMWIN1_BASE;
1654 		mem_win2_base = bar0 + MEMWIN2_BASE;
1655 		mem_win2_aperture = MEMWIN2_APERTURE;
1656 	} else {
1657 		/* For T5, only relative offset inside the PCIe BAR is passed */
1658 		mem_win0_base = MEMWIN0_BASE;
1659 		mem_win1_base = MEMWIN1_BASE;
1660 		mem_win2_base = MEMWIN2_BASE_T5;
1661 		mem_win2_aperture = MEMWIN2_APERTURE_T5;
1662 	}
1663 
1664 	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1665 	    mem_win0_base | V_BIR(0) |
1666 	    V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1667 
1668 	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1669 	    mem_win1_base | V_BIR(0) |
1670 	    V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1671 
1672 	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1673 	    mem_win2_base | V_BIR(0) |
1674 	    V_WINDOW(ilog2(mem_win2_aperture) - 10));
1675 
1676 	/* flush */
1677 	(void) t4_read_reg(sc,
1678 	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1679 }
1680 
1681 /*
1682  * Positions the memory window such that it can be used to access the specified
1683  * address in the chip's address space.  The return value is the offset of addr
1684  * from the start of the window.
1685  */
1686 uint32_t
1687 position_memwin(struct adapter *sc, int n, uint32_t addr)
1688 {
1689 	uint32_t start, pf;
1690 	uint32_t reg;
1691 
1692 	if (addr & 3) {
1693 		cxgb_printf(sc->dip, CE_WARN,
1694 		    "addr (0x%x) is not at a 4B boundary.\n", addr);
1695 		return (EFAULT);
1696 	}
1697 
1698 	if (is_t4(sc->params.chip)) {
1699 		pf = 0;
1700 		start = addr & ~0xf;    /* start must be 16B aligned */
1701 	} else {
1702 		pf = V_PFNUM(sc->pf);
1703 		start = addr & ~0x7f;   /* start must be 128B aligned */
1704 	}
1705 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1706 
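	/* Slide the window; the read-back flushes the posted write. */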
1707 	t4_write_reg(sc, reg, start | pf);
1708 	(void) t4_read_reg(sc, reg);
1709 
1710 	return (addr - start);
1711 }
1712 
1713 
1714 /*
1715  * Reads the named property and fills up the "data" array (which has at least
1716  * "count" elements).  We first try and lookup the property for our dev_t and
1717  * then retry with DDI_DEV_T_ANY if it's not found.
1718  *
1719  * Returns non-zero if the property was found and "data" has been updated.
1720  */
1721 static int
1722 prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count)
1723 {
1724 	dev_info_t *dip = sc->dip;
1725 	dev_t dev = sc->dev;
1726 	int rc, *d;
1727 	uint_t i, n;
1728 
1729 	rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS,
1730 	    name, &d, &n);
1731 	if (rc == DDI_PROP_SUCCESS)
1732 		goto found;
1733 
1734 	if (rc != DDI_PROP_NOT_FOUND) {
1735 		cxgb_printf(dip, CE_WARN,
1736 		    "failed to lookup property %s for minor %d: %d.",
1737 		    name, getminor(dev), rc);
1738 		return (0);
1739 	}
1740 
1741 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1742 	    name, &d, &n);
1743 	if (rc == DDI_PROP_SUCCESS)
1744 		goto found;
1745 
1746 	if (rc != DDI_PROP_NOT_FOUND) {
1747 		cxgb_printf(dip, CE_WARN,
1748 		    "failed to lookup property %s: %d.", name, rc);
1749 		return (0);
1750 	}
1751 
1752 	return (0);
1753 
1754 found:
1755 	if (n > count) {
1756 		cxgb_printf(dip, CE_NOTE,
1757 		    "property %s has too many elements (%d), ignoring extras",
1758 		    name, n);
1759 	}
1760 
1761 	for (i = 0; i < n && i < count; i++)
1762 		data[i] = d[i];
1763 	ddi_prop_free(d);
1764 
1765 	return (1);
1766 }
1767 
1768 static int
1769 prop_lookup_int(struct adapter *sc, char *name, int defval)
1770 {
1771 	int rc;
1772 
1773 	rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1);
1774 	if (rc != -1)
1775 		return (rc);
1776 
1777 	return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS,
1778 	    name, defval));
1779 }
1780 
1781 static int
1782 init_driver_props(struct adapter *sc, struct driver_properties *p)
1783 {
1784 	dev_t dev = sc->dev;
1785 	dev_info_t *dip = sc->dip;
1786 	int i, *data;
1787 	uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200};
1788 	uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
1789 
1790 	/*
1791 	 * Holdoff timer
1792 	 */
1793 	data = &p->timer_val[0];
1794 	for (i = 0; i < SGE_NTIMERS; i++)
1795 		data[i] = tmr[i];
1796 	(void) prop_lookup_int_array(sc, "holdoff-timer-values", data,
1797 	    SGE_NTIMERS);
1798 	for (i = 0; i < SGE_NTIMERS; i++) {
1799 		int limit = 200U;
1800 		if (data[i] > limit) {
1801 			cxgb_printf(dip, CE_WARN,
1802 			    "holdoff timer %d is too high (%d), lowered to %d.",
1803 			    i, data[i], limit);
1804 			data[i] = limit;
1805 		}
1806 	}
1807 	(void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values",
1808 	    data, SGE_NTIMERS);
1809 
1810 	/*
1811 	 * Holdoff packet counter
1812 	 */
1813 	data = &p->counter_val[0];
1814 	for (i = 0; i < SGE_NCOUNTERS; i++)
1815 		data[i] = cnt[i];
1816 	(void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data,
1817 	    SGE_NCOUNTERS);
1818 	for (i = 0; i < SGE_NCOUNTERS; i++) {
1819 		int limit = M_THRESHOLD_0;
1820 		if (data[i] > limit) {
1821 			cxgb_printf(dip, CE_WARN,
1822 			    "holdoff pkt-counter %d is too high (%d), "
1823 			    "lowered to %d.", i, data[i], limit);
1824 			data[i] = limit;
1825 		}
1826 	}
1827 	(void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values",
1828 	    data, SGE_NCOUNTERS);
1829 
1830 	/*
1831 	 * Maximum # of tx and rx queues to use for each
1832 	 * 100G, 40G, 25G, 10G and 1G port.
1833 	 */
1834 	p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8);
1835 	(void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
1836 	    p->max_ntxq_10g);
1837 
1838 	p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8);
1839 	(void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
1840 	    p->max_nrxq_10g);
1841 
1842 	p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2);
1843 	(void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
1844 	    p->max_ntxq_1g);
1845 
1846 	p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2);
1847 	(void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
1848 	    p->max_nrxq_1g);
1849 
1850 	/*
1851 	 * Holdoff parameters for 10G and 1G ports.
1852 	 */
1853 	p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0);
1854 	(void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G",
1855 	    p->tmr_idx_10g);
1856 
1857 	p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2);
1858 	(void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G",
1859 	    p->pktc_idx_10g);
1860 
1861 	p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0);
1862 	(void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G",
1863 	    p->tmr_idx_1g);
1864 
1865 	p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2);
1866 	(void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G",
1867 	    p->pktc_idx_1g);
1868 
1869 	/*
1870 	 * Size (number of entries) of each tx and rx queue.
1871 	 */
1872 	i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE);
1873 	p->qsize_txq = max(i, 128);
1874 	if (p->qsize_txq != i) {
1875 		cxgb_printf(dip, CE_WARN,
1876 		    "using %d instead of %d as the tx queue size",
1877 		    p->qsize_txq, i);
1878 	}
1879 	(void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq);
1880 
1881 	i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE);
1882 	p->qsize_rxq = max(i, 128);
1883 	while (p->qsize_rxq & 7)
1884 		p->qsize_rxq--;
1885 	if (p->qsize_rxq != i) {
1886 		cxgb_printf(dip, CE_WARN,
1887 		    "using %d instead of %d as the rx queue size",
1888 		    p->qsize_rxq, i);
1889 	}
1890 	(void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq);
1891 
1892 	/*
1893 	 * Interrupt types allowed.
1894 	 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively.  See sys/ddi_intr.h
1895 	 */
1896 	p->intr_types = prop_lookup_int(sc, "interrupt-types",
1897 	    DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
1898 	(void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types);
1899 
1900 	/*
1901 	 * Write combining
1902 	 * 0 to disable, 1 to enable
1903 	 */
1904 	p->wc = prop_lookup_int(sc, "write-combine", 1);
1905 	cxgb_printf(dip, CE_WARN, "write-combine: using %d", p->wc);
1906 	if (p->wc != 0 && p->wc != 1) {
1907 		cxgb_printf(dip, CE_WARN,
1908 		    "write-combine: using 1 instead of %d", p->wc);
1909 		p->wc = 1;
1910 	}
1911 	(void) ddi_prop_update_int(dev, dip, "write-combine", p->wc);
1912 
1913 	p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1);
1914 	if (p->t4_fw_install != 0 && p->t4_fw_install != 2)
1915 		p->t4_fw_install = 1;
1916 	(void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install);
1917 
1918 	/* Multiple Rings */
1919 	p->multi_rings = prop_lookup_int(sc, "multi-rings", 1);
1920 	if (p->multi_rings != 0 && p->multi_rings != 1) {
1921 		cxgb_printf(dip, CE_NOTE,
1922 		    "multi-rings: using value 1 instead of %d", p->multi_rings);
1923 		p->multi_rings = 1;
1924 	}
1925 
1926 	(void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings);
1927 
1928 	return (0);
1929 }
1930 
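/*
 * Tuning sketch (hypothetical property values): with qsize-rxq = 1030 in
 * t4nex.conf, the code above clamps to a minimum of 128 and then rounds
 * down to a multiple of 8, so the driver runs with a 1024-entry rx queue
 * and warns about the substitution.  Every sanitized value is written
 * back via ddi_prop_update_int*(), so the effective settings are visible
 * with prtconf(8).
 */
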
1931 static int
1932 remove_extra_props(struct adapter *sc, int n10g, int n1g)
1933 {
1934 	if (n10g == 0) {
1935 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port");
1936 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port");
1937 		(void) ddi_prop_remove(sc->dev, sc->dip,
1938 		    "holdoff-timer-idx-10G");
1939 		(void) ddi_prop_remove(sc->dev, sc->dip,
1940 		    "holdoff-pktc-idx-10G");
1941 	}
1942 
1943 	if (n1g == 0) {
1944 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port");
1945 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port");
1946 		(void) ddi_prop_remove(sc->dev, sc->dip,
1947 		    "holdoff-timer-idx-1G");
1948 		(void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G");
1949 	}
1950 
1951 	return (0);
1952 }
1953 
1954 static int
1955 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1956     struct intrs_and_queues *iaq)
1957 {
1958 	struct driver_properties *p = &sc->props;
1959 	int rc, itype, itypes, navail, nc, n;
1960 	int pfres_rxq, pfres_txq, pfresq;
1961 
1962 	bzero(iaq, sizeof (*iaq));
1963 	nc = ncpus;	/* our snapshot of the number of CPUs */
1964 	iaq->ntxq10g = min(nc, p->max_ntxq_10g);
1965 	iaq->ntxq1g = min(nc, p->max_ntxq_1g);
1966 	iaq->nrxq10g = min(nc, p->max_nrxq_10g);
1967 	iaq->nrxq1g = min(nc, p->max_nrxq_1g);
1968 
1969 	pfres_rxq = iaq->nrxq10g * n10g + iaq->nrxq1g * n1g;
1970 	pfres_txq = iaq->ntxq10g * n10g + iaq->ntxq1g * n1g;
1971 
1972 	/*
1973 	 * If the configured maximum numbers of rxqs and txqs exceed the
1974 	 * maximum available for all the ports under this PF, shrink the
1975 	 * queue counts to fit.  Reduce them so that each port under this
1976 	 * PF ends up with an equally distributed number of queues, while
1977 	 * guaranteeing at least one queue per port.
1979 	 *
1980 	 * neq - fixed max number of Egress queues on Tx path and Free List
1981 	 * queues that hold Rx payload data on Rx path. Half are reserved
1982 	 * for Egress queues and the other half for Free List queues.
1983 	 * Hence, the division by 2.
1984 	 *
1985 	 * niqflint - max number of Ingress queues with interrupts on Rx
1986 	 * path to receive completions that indicate Rx payload has been
1987 	 * posted in its associated Free List queue. Also handles Tx
1988 	 * completions for packets successfully transmitted on Tx path.
1989 	 *
1990 	 * nethctrl - max number of Egress queues for the Tx path alone.
1991 	 * This is usually half of neq, but if the firmware configuration
1992 	 * leaves it with fewer resources than neq / 2, the lower of the
1993 	 * two values is used.
1994 	 */
1995 	const uint_t max_rxq =
1996 	    MIN(sc->params.pfres.neq / 2, sc->params.pfres.niqflint);
1997 	while (pfres_rxq > max_rxq) {
1998 		pfresq = pfres_rxq;
1999 
2000 		if (iaq->nrxq10g > 1) {
2001 			iaq->nrxq10g--;
2002 			pfres_rxq -= n10g;
2003 		}
2004 
2005 		if (iaq->nrxq1g > 1) {
2006 			iaq->nrxq1g--;
2007 			pfres_rxq -= n1g;
2008 		}
2009 
2010 		/* Break if nothing changed */
2011 		if (pfresq == pfres_rxq)
2012 			break;
2013 	}
2014 
2015 	const uint_t max_txq =
2016 	    MIN(sc->params.pfres.neq / 2, sc->params.pfres.nethctrl);
2017 	while (pfres_txq > max_txq) {
2018 		pfresq = pfres_txq;
2019 
2020 		if (iaq->ntxq10g > 1) {
2021 			iaq->ntxq10g--;
2022 			pfres_txq -= n10g;
2023 		}
2024 
2025 		if (iaq->ntxq1g > 1) {
2026 			iaq->ntxq1g--;
2027 			pfres_txq -= n1g;
2028 		}
2029 
2030 		/* Break if nothing changed */
2031 		if (pfresq == pfres_txq)
2032 			break;
2033 	}
2034 
2035 	rc = ddi_intr_get_supported_types(sc->dip, &itypes);
2036 	if (rc != DDI_SUCCESS) {
2037 		cxgb_printf(sc->dip, CE_WARN,
2038 		    "failed to determine supported interrupt types: %d", rc);
2039 		return (rc);
2040 	}
2041 
2042 	for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) {
2043 		ASSERT(itype == DDI_INTR_TYPE_MSIX ||
2044 		    itype == DDI_INTR_TYPE_MSI ||
2045 		    itype == DDI_INTR_TYPE_FIXED);
2046 
2047 		if ((itype & itypes & p->intr_types) == 0)
2048 			continue;	/* not supported or not allowed */
2049 
2050 		navail = 0;
2051 		rc = ddi_intr_get_navail(sc->dip, itype, &navail);
2052 		if (rc != DDI_SUCCESS || navail == 0) {
2053 			cxgb_printf(sc->dip, CE_WARN,
2054 			    "failed to get # of interrupts for type %d: %d",
2055 			    itype, rc);
2056 			continue;	/* carry on */
2057 		}
2058 
2059 		iaq->intr_type = itype;
2062 
2063 		/*
2064 		 * Best option: an interrupt vector for errors, one for the
2065 		 * firmware event queue, and one for each rxq.
2067 		 */
2068 		iaq->nirq = T4_EXTRA_INTR;
2069 		iaq->nirq += n10g * iaq->nrxq10g;
2070 		iaq->nirq += n1g * iaq->nrxq1g;
2071 
2072 		if (iaq->nirq <= navail &&
2073 		    (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2074 			iaq->intr_fwd = 0;
2075 			goto allocate;
2076 		}
2091 
2092 		/*
2093 		 * Next best option: an interrupt vector for errors, one for the
2094 		 * firmware event queue, and at least one per port.  At this
2095 		 * point we know we'll have to downsize nrxq to fit what's
2096 		 * available to us.
2097 		 */
2098 		iaq->nirq = T4_EXTRA_INTR;
2099 		iaq->nirq += n10g + n1g;
2100 		if (iaq->nirq <= navail) {
2101 			int leftover = navail - iaq->nirq;
2102 
2103 			if (n10g > 0) {
2104 				int target = iaq->nrxq10g;
2105 
2106 				n = 1;
2107 				while (n < target && leftover >= n10g) {
2108 					leftover -= n10g;
2109 					iaq->nirq += n10g;
2110 					n++;
2111 				}
2112 				iaq->nrxq10g = min(n, iaq->nrxq10g);
2113 			}
2114 
2115 			if (n1g > 0) {
2116 				int target = iaq->nrxq1g;
2117 
2118 				n = 1;
2119 				while (n < target && leftover >= n1g) {
2120 					leftover -= n1g;
2121 					iaq->nirq += n1g;
2122 					n++;
2123 				}
2124 				iaq->nrxq1g = min(n, iaq->nrxq1g);
2125 			}
2126 
2127 			/*
2128 			 * We have arrived at the minimum number of vectors
2129 			 * needed to give each port at least one rxq
2130 			 * interrupt.  MSI would additionally require a
2131 			 * power-of-2 vector count, so for MSI fall through
2132 			 * to the single-vector option below; other types
2133 			 * allocate exactly what was computed here.
2134 			 */
2135 			if (itype != DDI_INTR_TYPE_MSI) {
2136 				goto allocate;
2137 			}
2138 		}
2139 
2140 		/*
2141 		 * Least desirable option: one interrupt vector for everything.
2142 		 */
2143 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2144 		iaq->intr_fwd = 1;
2145 
2146 allocate:
2147 		return (0);
2148 	}
2149 
2150 	cxgb_printf(sc->dip, CE_WARN,
2151 	    "failed to find a usable interrupt type.  supported=%d, allowed=%d",
2152 	    itypes, p->intr_types);
2153 	return (DDI_FAILURE);
2154 }
2155 
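/*
 * Worked example of the vector accounting above (illustrative): a
 * two-port 10G adapter with nrxq10g = 8 asks for
 *
 *	nirq = T4_EXTRA_INTR + 2 * 8
 *
 * vectors, the extras covering the error and firmware event queue
 * interrupts.  If ddi_intr_get_navail() reports fewer than that, the
 * fallback hands out one vector per port and grows nrxq10g only as far
 * as the leftover vectors allow; the last resort is a single forwarded
 * vector for everything.
 */
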
2156 static int
2157 add_child_node(struct adapter *sc, int idx)
2158 {
2159 	int rc;
2160 	struct port_info *pi;
2161 
2162 	if (idx < 0 || idx >= sc->params.nports)
2163 		return (EINVAL);
2164 
2165 	pi = sc->port[idx];
2166 	if (pi == NULL)
2167 		return (ENODEV);	/* t4_port_init failed earlier */
2168 
2169 	PORT_LOCK(pi);
2170 	if (pi->dip != NULL) {
2171 		rc = 0;		/* EEXIST really, but then bus_config fails */
2172 		goto done;
2173 	}
2174 
2175 	rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip);
2176 	if (rc != DDI_SUCCESS || pi->dip == NULL) {
2177 		rc = ENOMEM;
2178 		goto done;
2179 	}
2180 
2181 	(void) ddi_set_parent_data(pi->dip, pi);
2182 	(void) ndi_devi_bind_driver(pi->dip, 0);
2183 	rc = 0;
2184 done:
2185 	PORT_UNLOCK(pi);
2186 	return (rc);
2187 }
2188 
2189 static int
2190 remove_child_node(struct adapter *sc, int idx)
2191 {
2192 	int rc;
2193 	struct port_info *pi;
2194 
2195 	if (idx < 0 || idx >= sc->params.nports)
2196 		return (EINVAL);
2197 
2198 	pi = sc->port[idx];
2199 	if (pi == NULL)
2200 		return (ENODEV);
2201 
2202 	PORT_LOCK(pi);
2203 	if (pi->dip == NULL) {
2204 		rc = ENODEV;
2205 		goto done;
2206 	}
2207 
2208 	rc = ndi_devi_free(pi->dip);
2209 	if (rc == 0)
2210 		pi->dip = NULL;
2211 done:
2212 	PORT_UNLOCK(pi);
2213 	return (rc);
2214 }
2215 
2216 static char *
2217 print_port_speed(const struct port_info *pi)
2218 {
2219 	if (!pi)
2220 		return ("-");
2221 
2222 	if (is_100G_port(pi))
2223 		return ("100G");
2224 	else if (is_50G_port(pi))
2225 		return ("50G");
2226 	else if (is_40G_port(pi))
2227 		return ("40G");
2228 	else if (is_25G_port(pi))
2229 		return ("25G");
2230 	else if (is_10G_port(pi))
2231 		return ("10G");
2232 	else
2233 		return ("1G");
2234 }
2235 
2236 #define	KS_UINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
2237 #define	KS_CINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
2238 #define	KS_U64INIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_UINT64)
2239 #define	KS_U_SET(x, y)	kstatp->x.value.ul = (y)
2240 #define	KS_C_SET(x, ...)	\
2241 			(void) snprintf(kstatp->x.value.c, 16,  __VA_ARGS__)
2242 
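/*
 * Expansion sketch: for a kstat_named_t field chip_ver in the struct
 * below, KS_UINIT(chip_ver) becomes
 *
 *	kstat_named_init(&kstatp->chip_ver, "chip_ver", KSTAT_DATA_ULONG);
 *
 * and KS_C_SET(fw_vers, "%d.%d.%d.%d", ...) is a bounded snprintf()
 * into the 16-byte character value of the named kstat.
 */
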
2243 /*
2244  * t4nex:X:config
2245  */
2246 struct t4_kstats {
2247 	kstat_named_t chip_ver;
2248 	kstat_named_t fw_vers;
2249 	kstat_named_t tp_vers;
2250 	kstat_named_t driver_version;
2251 	kstat_named_t serial_number;
2252 	kstat_named_t ec_level;
2253 	kstat_named_t id;
2254 	kstat_named_t bus_type;
2255 	kstat_named_t bus_width;
2256 	kstat_named_t bus_speed;
2257 	kstat_named_t core_clock;
2258 	kstat_named_t port_cnt;
2259 	kstat_named_t port_type;
2260 	kstat_named_t pci_vendor_id;
2261 	kstat_named_t pci_device_id;
2262 };
2263 static kstat_t *
2264 setup_kstats(struct adapter *sc)
2265 {
2266 	kstat_t *ksp;
2267 	struct t4_kstats *kstatp;
2268 	int ndata;
2269 	struct pci_params *p = &sc->params.pci;
2270 	struct vpd_params *v = &sc->params.vpd;
2271 	uint16_t pci_vendor, pci_device;
2272 
2273 	ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t);
2274 
2275 	ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config",
2276 	    "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2277 	if (ksp == NULL) {
2278 		cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2279 		return (NULL);
2280 	}
2281 
2282 	kstatp = (struct t4_kstats *)ksp->ks_data;
2283 
2284 	KS_UINIT(chip_ver);
2285 	KS_CINIT(fw_vers);
2286 	KS_CINIT(tp_vers);
2287 	KS_CINIT(driver_version);
2288 	KS_CINIT(serial_number);
2289 	KS_CINIT(ec_level);
2290 	KS_CINIT(id);
2291 	KS_CINIT(bus_type);
2292 	KS_CINIT(bus_width);
2293 	KS_CINIT(bus_speed);
2294 	KS_UINIT(core_clock);
2295 	KS_UINIT(port_cnt);
2296 	KS_CINIT(port_type);
2297 	KS_CINIT(pci_vendor_id);
2298 	KS_CINIT(pci_device_id);
2299 
2300 	KS_U_SET(chip_ver, sc->params.chip);
2301 	KS_C_SET(fw_vers, "%d.%d.%d.%d",
2302 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2303 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2304 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2305 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2306 	KS_C_SET(tp_vers, "%d.%d.%d.%d",
2307 	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
2308 	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
2309 	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
2310 	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
2311 	KS_C_SET(driver_version, DRV_VERSION);
2312 	KS_C_SET(serial_number, "%s", v->sn);
2313 	KS_C_SET(ec_level, "%s", v->ec);
2314 	KS_C_SET(id, "%s", v->id);
2315 	KS_C_SET(bus_type, "pci-express");
2316 	KS_C_SET(bus_width, "x%d lanes", p->width);
2317 	KS_C_SET(bus_speed, "%d", p->speed);
2318 	KS_U_SET(core_clock, v->cclk);
2319 	KS_U_SET(port_cnt, sc->params.nports);
2320 
2321 	t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor);
2322 	KS_C_SET(pci_vendor_id, "0x%x", pci_vendor);
2323 
2324 	t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device);
2325 	KS_C_SET(pci_device_id, "0x%x", pci_device);
2326 
2327 	KS_C_SET(port_type, "%s/%s/%s/%s",
2328 	    print_port_speed(sc->port[0]),
2329 	    print_port_speed(sc->port[1]),
2330 	    print_port_speed(sc->port[2]),
2331 	    print_port_speed(sc->port[3]));
2332 
2333 	/* Do NOT set ksp->ks_update.  These kstats do not change. */
2334 
2335 	/* Install the kstat */
2336 	ksp->ks_private = (void *)sc;
2337 	kstat_install(ksp);
2338 
2339 	return (ksp);
2340 }
2341 
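/*
 * Observability sketch (assuming T4_NEXUS_NAME is "t4nex"): once
 * installed, the configuration kstat can be read from userland with
 *
 *	kstat -m t4nex -i 0 -n config
 *
 * All values are filled in exactly once above, which is why no
 * ks_update routine is registered for this kstat.
 */
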
2342 /*
2343  * t4nex:X:stat
2344  */
2345 struct t4_wc_kstats {
2346 	kstat_named_t write_coal_success;
2347 	kstat_named_t write_coal_failure;
2348 };
2349 static kstat_t *
2350 setup_wc_kstats(struct adapter *sc)
2351 {
2352 	kstat_t *ksp;
2353 	struct t4_wc_kstats *kstatp;
2354 
2355 	const uint_t ndata =
2356 	    sizeof (struct t4_wc_kstats) / sizeof (kstat_named_t);
2357 	ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats",
2358 	    "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2359 	if (ksp == NULL) {
2360 		cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2361 		return (NULL);
2362 	}
2363 
2364 	kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2365 
2366 	KS_UINIT(write_coal_success);
2367 	KS_UINIT(write_coal_failure);
2368 
2369 	ksp->ks_update = update_wc_kstats;
2370 	/* Install the kstat */
2371 	ksp->ks_private = (void *)sc;
2372 	kstat_install(ksp);
2373 
2374 	return (ksp);
2375 }
2376 
2377 static int
2378 update_wc_kstats(kstat_t *ksp, int rw)
2379 {
2380 	struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2381 	struct adapter *sc = ksp->ks_private;
2382 	uint32_t wc_total, wc_success, wc_failure;
2383 
2384 	if (rw == KSTAT_WRITE)
2385 		return (0);
2386 
2387 	if (is_t5(sc->params.chip)) {
2388 		wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL);
2389 		wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH);
2390 		wc_success = wc_total - wc_failure;
2391 	} else {
2392 		wc_success = 0;
2393 		wc_failure = 0;
2394 	}
2395 
2396 	KS_U_SET(write_coal_success, wc_success);
2397 	KS_U_SET(write_coal_failure, wc_failure);
2398 
2399 	return (0);
2400 }
2401 
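/*
 * Arithmetic note for the T5 path above: A_SGE_STAT_TOTAL is read as
 * the count of all write-coalescing attempts and A_SGE_STAT_MATCH as
 * the failures, so successes are reported as the difference of the two
 * raw register values; other chips simply report zeroes.
 */
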
2402 /*
2403  * cxgbe:X:fec
2404  *
2405  * This provides visibility into the errors that have been found by the
2406  * different FEC subsystems. While it's tempting to combine the two different
2407  * FEC types logically, the information the error counters convey differs
2408  * quite a bit between the two: Firecode is strictly per-lane, while RS
2409  * also covers symbol distribution across lanes and the overall channel.
2410  */
2411 struct cxgbe_port_fec_kstats {
2412 	kstat_named_t rs_corr;
2413 	kstat_named_t rs_uncorr;
2414 	kstat_named_t rs_sym0_corr;
2415 	kstat_named_t rs_sym1_corr;
2416 	kstat_named_t rs_sym2_corr;
2417 	kstat_named_t rs_sym3_corr;
2418 	kstat_named_t fc_lane0_corr;
2419 	kstat_named_t fc_lane0_uncorr;
2420 	kstat_named_t fc_lane1_corr;
2421 	kstat_named_t fc_lane1_uncorr;
2422 	kstat_named_t fc_lane2_corr;
2423 	kstat_named_t fc_lane2_uncorr;
2424 	kstat_named_t fc_lane3_corr;
2425 	kstat_named_t fc_lane3_uncorr;
2426 };
2427 
2428 static uint32_t
2429 read_fec_pair(struct port_info *pi, uint32_t lo_reg, uint32_t high_reg)
2430 {
2431 	struct adapter *sc = pi->adapter;
2432 	uint8_t port = pi->tx_chan;
2433 	uint32_t low, high, ret;
2434 
2435 	low = t4_read_reg32(sc, T5_PORT_REG(port, lo_reg));
2436 	high = t4_read_reg32(sc, T5_PORT_REG(port, high_reg));
2437 	ret = low & 0xffff;
2438 	ret |= (high & 0xffff) << 16;
2439 	return (ret);
2440 }
2441 
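/*
 * Worked example: each FEC counter is a 32-bit value split across two
 * 16-bit port registers, so with the LO register reading 0xbeef and
 * the HI register reading 0x00de, read_fec_pair() above returns
 * 0x00debeef.  update_port_fec_kstats() below then accumulates the
 * successive reads into 64-bit kstats.
 */
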
2442 static int
2443 update_port_fec_kstats(kstat_t *ksp, int rw)
2444 {
2445 	struct cxgbe_port_fec_kstats *fec = ksp->ks_data;
2446 	struct port_info *pi = ksp->ks_private;
2447 
2448 	if (rw == KSTAT_WRITE) {
2449 		return (EACCES);
2450 	}
2451 
2452 	/*
2453 	 * First go ahead and gather RS related stats.
2454 	 */
2455 	fec->rs_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_CCW_LO,
2456 	    T6_RS_FEC_CCW_HI);
2457 	fec->rs_uncorr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_NCCW_LO,
2458 	    T6_RS_FEC_NCCW_HI);
2459 	fec->rs_sym0_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR0_LO,
2460 	    T6_RS_FEC_SYMERR0_HI);
2461 	fec->rs_sym1_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR1_LO,
2462 	    T6_RS_FEC_SYMERR1_HI);
2463 	fec->rs_sym2_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR2_LO,
2464 	    T6_RS_FEC_SYMERR2_HI);
2465 	fec->rs_sym3_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR3_LO,
2466 	    T6_RS_FEC_SYMERR3_HI);
2467 
2468 	/*
2469 	 * Now go through and try to grab Firecode/BASE-R stats.
2470 	 */
2471 	fec->fc_lane0_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L0_CERR_LO,
2472 	    T6_FC_FEC_L0_CERR_HI);
2473 	fec->fc_lane0_uncorr.value.ui64 += read_fec_pair(pi,
2474 	    T6_FC_FEC_L0_NCERR_LO, T6_FC_FEC_L0_NCERR_HI);
2475 	fec->fc_lane1_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L1_CERR_LO,
2476 	    T6_FC_FEC_L1_CERR_HI);
2477 	fec->fc_lane1_uncorr.value.ui64 += read_fec_pair(pi,
2478 	    T6_FC_FEC_L1_NCERR_LO, T6_FC_FEC_L1_NCERR_HI);
2479 	fec->fc_lane2_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L2_CERR_LO,
2480 	    T6_FC_FEC_L2_CERR_HI);
2481 	fec->fc_lane2_uncorr.value.ui64 += read_fec_pair(pi,
2482 	    T6_FC_FEC_L2_NCERR_LO, T6_FC_FEC_L2_NCERR_HI);
2483 	fec->fc_lane3_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L3_CERR_LO,
2484 	    T6_FC_FEC_L3_CERR_HI);
2485 	fec->fc_lane3_uncorr.value.ui64 += read_fec_pair(pi,
2486 	    T6_FC_FEC_L3_NCERR_LO, T6_FC_FEC_L3_NCERR_HI);
2487 
2488 	return (0);
2489 }
2490 
2491 static kstat_t *
2492 setup_port_fec_kstats(struct port_info *pi)
2493 {
2494 	kstat_t *ksp;
2495 	struct cxgbe_port_fec_kstats *kstatp;
2496 
2497 	if (!is_t6(pi->adapter->params.chip)) {
2498 		return (NULL);
2499 	}
2500 
2501 	ksp = kstat_create(T4_PORT_NAME, ddi_get_instance(pi->dip), "fec",
2502 	    "net", KSTAT_TYPE_NAMED, sizeof (struct cxgbe_port_fec_kstats) /
2503 	    sizeof (kstat_named_t), 0);
2504 	if (ksp == NULL) {
2505 		cxgb_printf(pi->dip, CE_WARN, "failed to initialize fec "
2506 		    "kstats.");
2507 		return (NULL);
2508 	}
2509 
2510 	kstatp = ksp->ks_data;
2511 	KS_U64INIT(rs_corr);
2512 	KS_U64INIT(rs_uncorr);
2513 	KS_U64INIT(rs_sym0_corr);
2514 	KS_U64INIT(rs_sym1_corr);
2515 	KS_U64INIT(rs_sym2_corr);
2516 	KS_U64INIT(rs_sym3_corr);
2517 	KS_U64INIT(fc_lane0_corr);
2518 	KS_U64INIT(fc_lane0_uncorr);
2519 	KS_U64INIT(fc_lane1_corr);
2520 	KS_U64INIT(fc_lane1_uncorr);
2521 	KS_U64INIT(fc_lane2_corr);
2522 	KS_U64INIT(fc_lane2_uncorr);
2523 	KS_U64INIT(fc_lane3_corr);
2524 	KS_U64INIT(fc_lane3_uncorr);
2525 
2526 	ksp->ks_update = update_port_fec_kstats;
2527 	ksp->ks_private = pi;
2528 	kstat_install(ksp);
2529 
2530 	return (ksp);
2531 }
2532 
2533 int
2534 adapter_full_init(struct adapter *sc)
2535 {
2536 	int i, rc = 0;
2537 
2538 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2539 
2540 	rc = t4_setup_adapter_queues(sc);
2541 	if (rc != 0)
2542 		goto done;
2543 
2544 	if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2545 		(void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count);
2546 	else {
2547 		for (i = 0; i < sc->intr_count; i++)
2548 			(void) ddi_intr_enable(sc->intr_handle[i]);
2549 	}
2550 	t4_intr_enable(sc);
2551 	sc->flags |= FULL_INIT_DONE;
2552 
2553 done:
2554 	if (rc != 0)
2555 		(void) adapter_full_uninit(sc);
2556 
2557 	return (rc);
2558 }
2559 
2560 int
2561 adapter_full_uninit(struct adapter *sc)
2562 {
2563 	int i, rc = 0;
2564 
2565 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2566 
2567 	if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2568 		(void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count);
2569 	else {
2570 		for (i = 0; i < sc->intr_count; i++)
2571 			(void) ddi_intr_disable(sc->intr_handle[i]);
2572 	}
2573 
2574 	rc = t4_teardown_adapter_queues(sc);
2575 	if (rc != 0)
2576 		return (rc);
2577 
2578 	sc->flags &= ~FULL_INIT_DONE;
2579 
2580 	return (0);
2581 }
2582 
2583 int
2584 port_full_init(struct port_info *pi)
2585 {
2586 	struct adapter *sc = pi->adapter;
2587 	uint16_t *rss;
2588 	struct sge_rxq *rxq;
2589 	int rc, i;
2590 
2591 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2592 	ASSERT((pi->flags & PORT_INIT_DONE) == 0);
2593 
2594 	/*
2595 	 * Allocate tx/rx/fl queues for this port.
2596 	 */
2597 	rc = t4_setup_port_queues(pi);
2598 	if (rc != 0)
2599 		goto done;	/* error message displayed already */
2600 
2601 	/*
2602 	 * Setup RSS for this port.
2603 	 */
2604 	rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP);
2605 	for_each_rxq(pi, i, rxq) {
2606 		rss[i] = rxq->iq.abs_id;
2607 	}
2608 	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2609 	    pi->rss_size, rss, pi->nrxq);
2610 	kmem_free(rss, pi->nrxq * sizeof (*rss));
2611 	if (rc != 0) {
2612 		cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc);
2613 		goto done;
2614 	}
2615 
2616 	/*
2617 	 * Initialize our per-port FEC kstats.
2618 	 */
2619 	pi->ksp_fec = setup_port_fec_kstats(pi);
2620 
2621 	pi->flags |= PORT_INIT_DONE;
2622 done:
2623 	if (rc != 0)
2624 		(void) port_full_uninit(pi);
2625 
2626 	return (rc);
2627 }
2628 
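/*
 * RSS sketch for the setup above: with nrxq = 4 and queue abs_ids of,
 * say, {12, 13, 14, 15}, rss[] holds those four ids and
 * t4_config_rss_range() replicates the pattern across all rss_size
 * slots of the VI's indirection table, so hash bucket h lands on
 * rxq h % 4.
 */
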
2629 /*
2630  * Idempotent.
2631  */
2632 int
2633 port_full_uninit(struct port_info *pi)
2634 {
2635 
2636 	ASSERT(pi->flags & PORT_INIT_DONE);
2637 
2638 	if (pi->ksp_fec != NULL) {
2639 		kstat_delete(pi->ksp_fec);
2640 		pi->ksp_fec = NULL;
2641 	}
2642 	(void) t4_teardown_port_queues(pi);
2643 	pi->flags &= ~PORT_INIT_DONE;
2644 
2645 	return (0);
2646 }
2647 
2648 void
2649 enable_port_queues(struct port_info *pi)
2650 {
2651 	struct adapter *sc = pi->adapter;
2652 	int i;
2653 	struct sge_iq *iq;
2654 	struct sge_rxq *rxq;
2655 
2656 	ASSERT(pi->flags & PORT_INIT_DONE);
2657 
2658 	/*
2659 	 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED
2660 	 * back in disable_port_queues will be processed now, after an unbounded
2661 	 * delay.  This can't be good.
2662 	 */
2663 
2664 	for_each_rxq(pi, i, rxq) {
2665 		iq = &rxq->iq;
2666 		if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2667 		    IQS_DISABLED)
2668 			panic("%s: iq %p wasn't disabled", __func__,
2669 			    (void *) iq);
2670 		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2671 		    V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2672 	}
2673 }
2674 
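/*
 * Ingress queue state sketch for the two routines here (only the
 * states visible in this file):
 *
 *	IQS_IDLE     --disable_port_queues()--> IQS_DISABLED
 *	IQS_DISABLED --enable_port_queues()---> IQS_IDLE
 *
 * The CAS loop in disable_port_queues() retries until the queue is
 * observed IQS_IDLE, so a successful transition means no handler is
 * mid-processing; enable_port_queues() asserts the reverse transition
 * and re-arms the queue through the A_SGE_PF_GTS doorbell with its
 * saved interrupt parameters.
 */
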
2675 void
2676 disable_port_queues(struct port_info *pi)
2677 {
2678 	int i;
2679 	struct adapter *sc = pi->adapter;
2680 	struct sge_rxq *rxq;
2681 
2682 	ASSERT(pi->flags & PORT_INIT_DONE);
2683 
2684 	/*
2685 	 * TODO: need proper implementation for all tx queues (ctrl, eth, ofld).
2686 	 */
2687 
2688 	for_each_rxq(pi, i, rxq) {
2689 		while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE,
2690 		    IQS_DISABLED) != IQS_IDLE)
2691 			msleep(1);
2692 	}
2693 
2694 	mutex_enter(&sc->sfl_lock);
2695 	for_each_rxq(pi, i, rxq)
2696 	    rxq->fl.flags |= FL_DOOMED;
2697 	mutex_exit(&sc->sfl_lock);
2698 	/* TODO: need to wait for all fl's to be removed from sc->sfl */
2699 }
2700 
2701 void
2702 t4_fatal_err(struct adapter *sc)
2703 {
2704 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2705 	t4_intr_disable(sc);
2706 	cxgb_printf(sc->dip, CE_WARN,
2707 	    "encountered fatal error, adapter stopped.");
2708 }
2709 
2710 int
2711 t4_os_find_pci_capability(struct adapter *sc, int cap)
2712 {
2713 	uint16_t stat;
2714 	uint8_t cap_ptr, cap_id;
2715 
2716 	t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat);
2717 	if ((stat & PCI_STAT_CAP) == 0)
2718 		return (0); /* does not implement capabilities */
2719 
2720 	t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr);
2721 	while (cap_ptr) {
2722 		t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id);
2723 		if (cap_id == cap)
2724 			return (cap_ptr); /* found */
2725 		t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr);
2726 	}
2727 
2728 	return (0); /* not found */
2729 }
2730 
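/*
 * Usage sketch (PCI_CAP_ID_PCI_E from sys/pci.h assumed): a caller
 * looking for the PCI Express capability would do
 *
 *	int ptr = t4_os_find_pci_capability(sc, PCI_CAP_ID_PCI_E);
 *
 * and receive the config-space offset of that capability header, or 0
 * when the status register advertises no capability list or the ID is
 * not present.
 */
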
2731 void
2732 t4_os_portmod_changed(struct adapter *sc, int idx)
2733 {
2734 	static const char *mod_str[] = {
2735 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
2736 	};
2737 	struct port_info *pi = sc->port[idx];
2738 
2739 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
2740 		cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged.");
2741 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
2742 		cxgb_printf(pi->dip, CE_NOTE,
2743 		    "unknown transceiver inserted.");
2744 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
2745 		cxgb_printf(pi->dip, CE_NOTE,
2746 		    "unsupported transceiver inserted.");
2747 	else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
2748 		cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.",
2749 		    mod_str[pi->mod_type]);
2750 	else
2751 		cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.",
2752 		    pi->mod_type);
2753 
2754 	if ((isset(&sc->open_device_map, pi->port_id) != 0) &&
2755 	    pi->link_cfg.new_module)
2756 		pi->link_cfg.redo_l1cfg = true;
2757 }
2758 
2759 static int
2760 t4_sensor_read(struct adapter *sc, uint32_t diag, uint32_t *valp)
2761 {
2762 	int rc;
2763 	struct port_info *pi = sc->port[0];
2764 	uint32_t param, val;
2765 
2766 	rc = begin_synchronized_op(pi, 1, 1);
2767 	if (rc != 0) {
2768 		return (rc);
2769 	}
2770 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2771 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
2772 	    V_FW_PARAMS_PARAM_Y(diag);
2773 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2774 	end_synchronized_op(pi, 1);
2775 
2776 	if (rc != 0) {
2777 		return (rc);
2778 	}
2779 
2780 	if (val == 0) {
2781 		return (EIO);
2782 	}
2783 
2784 	*valp = val;
2785 	return (0);
2786 }
2787 
2788 static int
2789 t4_temperature_read(void *arg, sensor_ioctl_scalar_t *scalar)
2790 {
2791 	int ret;
2792 	struct adapter *sc = arg;
2793 	uint32_t val;
2794 
2795 	ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_TMP, &val);
2796 	if (ret != 0) {
2797 		return (ret);
2798 	}
2799 
2800 	/*
2801 	 * The device measures temperature in units of 1 degree Celsius. We
2802 	 * don't know its precision.
2803 	 */
2804 	scalar->sis_unit = SENSOR_UNIT_CELSIUS;
2805 	scalar->sis_gran = 1;
2806 	scalar->sis_prec = 0;
2807 	scalar->sis_value = val;
2808 
2809 	return (0);
2810 }
2811 
2812 static int
2813 t4_voltage_read(void *arg, sensor_ioctl_scalar_t *scalar)
2814 {
2815 	int ret;
2816 	struct adapter *sc = arg;
2817 	uint32_t val;
2818 
2819 	ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_VDD, &val);
2820 	if (ret != 0) {
2821 		return (ret);
2822 	}
2823 
2824 	scalar->sis_unit = SENSOR_UNIT_VOLTS;
2825 	scalar->sis_gran = 1000;
2826 	scalar->sis_prec = 0;
2827 	scalar->sis_value = val;
2828 
2829 	return (0);
2830 }
2831 
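/*
 * Unit sketch for the two sensor reads above: sis_gran is the number
 * of increments per unit, so the voltage sensor's granularity of 1000
 * means the firmware reports millivolts (a raw value of 3300 is
 * 3.3 V), while the temperature sensor's granularity of 1 reports
 * whole degrees Celsius.
 */
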
2832 /*
2833  * While the hardware supports the ability to read and write the flash image,
2834  * this is not currently wired up.
2835  */
2836 static int
2837 t4_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
2838 {
2839 	*caps = DDI_UFM_CAP_REPORT;
2840 	return (0);
2841 }
2842 
2843 static int
2844 t4_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
2845     ddi_ufm_image_t *imgp)
2846 {
2847 	if (imgno != 0) {
2848 		return (EINVAL);
2849 	}
2850 
2851 	ddi_ufm_image_set_desc(imgp, "Firmware");
2852 	ddi_ufm_image_set_nslots(imgp, 1);
2853 
2854 	return (0);
2855 }
2856 
2857 static int
2858 t4_ufm_fill_slot_version(nvlist_t *nvl, const char *key, uint32_t vers)
2859 {
2860 	char buf[128];
2861 
2862 	if (vers == 0) {
2863 		return (0);
2864 	}
2865 
2866 	if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u",
2867 	    G_FW_HDR_FW_VER_MAJOR(vers), G_FW_HDR_FW_VER_MINOR(vers),
2868 	    G_FW_HDR_FW_VER_MICRO(vers), G_FW_HDR_FW_VER_BUILD(vers)) >=
2869 	    sizeof (buf)) {
2870 		return (EOVERFLOW);
2871 	}
2872 
2873 	return (nvlist_add_string(nvl, key, buf));
2874 }
2875 
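/*
 * Encoding sketch (assuming the usual byte-packed version word used by
 * the G_FW_HDR_FW_VER_* macros): vers = 0x01190400 decodes to major 1,
 * minor 0x19 (25), micro 4, build 0, so the helper above adds the
 * string "1.25.4.0" under the given key; slots whose version word is
 * zero are silently skipped.
 */
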
2876 static int
2877 t4_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, uint_t slotno,
2878     ddi_ufm_slot_t *slotp)
2879 {
2880 	int ret;
2881 	struct adapter *sc = arg;
2882 	nvlist_t *misc = NULL;
2883 	char buf[128];
2884 
2885 	if (imgno != 0 || slotno != 0) {
2886 		return (EINVAL);
2887 	}
2888 
2889 	if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u",
2890 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2891 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2892 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2893 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)) >= sizeof (buf)) {
2894 		return (EOVERFLOW);
2895 	}
2896 
2897 	ddi_ufm_slot_set_version(slotp, buf);
2898 
2899 	(void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP);
2900 	if ((ret = t4_ufm_fill_slot_version(misc, "TP Microcode",
2901 	    sc->params.tp_vers)) != 0) {
2902 		goto err;
2903 	}
2904 
2905 	if ((ret = t4_ufm_fill_slot_version(misc, "Bootstrap",
2906 	    sc->params.bs_vers)) != 0) {
2907 		goto err;
2908 	}
2909 
2910 	if ((ret = t4_ufm_fill_slot_version(misc, "Expansion ROM",
2911 	    sc->params.er_vers)) != 0) {
2912 		goto err;
2913 	}
2914 
2915 	if ((ret = nvlist_add_uint32(misc, "Serial Configuration",
2916 	    sc->params.scfg_vers)) != 0) {
2917 		goto err;
2918 	}
2919 
2920 	if ((ret = nvlist_add_uint32(misc, "VPD Version",
2921 	    sc->params.vpd_vers)) != 0) {
2922 		goto err;
2923 	}
2924 
2925 	ddi_ufm_slot_set_misc(slotp, misc);
2926 	ddi_ufm_slot_set_attrs(slotp, DDI_UFM_ATTR_ACTIVE |
2927 	    DDI_UFM_ATTR_WRITEABLE | DDI_UFM_ATTR_READABLE);
2928 	return (0);
2929 
2930 err:
2931 	nvlist_free(misc);
2932 	return (ret);
2934 }
2936 
2937 int
2938 t4_cxgbe_attach(struct port_info *pi, dev_info_t *dip)
2939 {
2940 	ASSERT(pi != NULL);
2941 
2942 	mac_register_t *mac = mac_alloc(MAC_VERSION);
2943 	if (mac == NULL) {
2944 		return (DDI_FAILURE);
2945 	}
2946 
2947 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2948 	mac->m_driver = pi;
2949 	mac->m_dip = dip;
2950 	mac->m_src_addr = pi->hw_addr;
2951 	mac->m_callbacks = pi->mc;
2952 	mac->m_max_sdu = pi->mtu;
2953 	mac->m_priv_props = pi->props;
2954 	mac->m_margin = VLAN_TAGSZ;
2955 
2956 	if (!mac->m_callbacks->mc_unicst) {
2957 		/* Multiple rings enabled */
2958 		mac->m_v12n = MAC_VIRT_LEVEL1;
2959 	}
2960 
2961 	mac_handle_t mh = NULL;
2962 	const int rc = mac_register(mac, &mh);
2963 	mac_free(mac);
2964 	if (rc != 0) {
2965 		return (DDI_FAILURE);
2966 	}
2967 
2968 	pi->mh = mh;
2969 
2970 	/*
2971 	 * From this point until the interface is plumbed, the link state
2972 	 * should be LINK_STATE_UNKNOWN.  Once the interface is plumbed and
2973 	 * the actual link state has been detected, the mac layer should be
2974 	 * updated with LINK_STATE_UP or LINK_STATE_DOWN accordingly.
2975 	 */
2976 	mac_link_update(mh, LINK_STATE_UNKNOWN);
2977 
2978 	return (DDI_SUCCESS);
2979 }
2980 
2981 int
2982 t4_cxgbe_detach(struct port_info *pi)
2983 {
2984 	ASSERT(pi != NULL);
2985 	ASSERT(pi->mh != NULL);
2986 
2987 	if (mac_unregister(pi->mh) == 0) {
2988 		pi->mh = NULL;
2989 		return (DDI_SUCCESS);
2990 	}
2991 
2992 	return (DDI_FAILURE);
2993 }
2994