xref: /titanic_41/usr/src/uts/common/io/chxge/ch.c (revision 2449e17f82f6097fd2c665b64723e31ceecbeca6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * This file is part of the Chelsio T1 Ethernet driver.
29  *
30  * Copyright (C) 2003-2005 Chelsio Communications.  All rights reserved.
31  */
32 
33 /*
34  * Solaris Multithreaded STREAMS DLPI Chelsio PCI Ethernet Driver
35  */
36 
37 #pragma ident	"%Z%%M%	%I%	%E% SMI"
38 
39 /* #define CH_DEBUG 1 */
40 #ifdef CH_DEBUG
41 #define	DEBUG_ENTER(a) debug_enter(a)
42 #define	PRINT(a) printf a
43 #else
44 #define	DEBUG_ENTER(a)
45 #define	PRINT(a)
46 #endif
47 
48 #include <sys/types.h>
49 #include <sys/conf.h>
50 #include <sys/debug.h>
51 #include <sys/stropts.h>
52 #include <sys/stream.h>
53 #include <sys/strlog.h>
54 #include <sys/kmem.h>
55 #include <sys/stat.h>
56 #include <sys/kstat.h>
57 #include <sys/modctl.h>
58 #include <sys/errno.h>
59 #include <sys/cmn_err.h>
60 #include <sys/ddi.h>
61 #include <sys/sunddi.h>
62 #include <sys/dlpi.h>
63 #include <sys/ethernet.h>
64 #include <sys/strsun.h>
65 #include <sys/strsubr.h>
66 #include <inet/common.h>
67 #include <inet/nd.h>
68 #include <inet/ip.h>
69 #include <inet/tcp.h>
70 #include <sys/pattr.h>
71 #include <sys/gld.h>
72 #include "ostypes.h"
73 #include "common.h"
74 #include "oschtoe.h"
75 #include "sge.h"
76 #include "ch.h"			/* Chelsio Driver specific parameters */
77 #include "version.h"
78 
79 /*
80  * Function prototypes.
81  */
82 static int ch_attach(dev_info_t *, ddi_attach_cmd_t);
83 static int ch_detach(dev_info_t *, ddi_detach_cmd_t);
84 static void ch_free_dma_handles(ch_t *chp);
85 static void ch_set_name(ch_t *chp, int unit);
86 static void ch_free_name(ch_t *chp);
87 static void ch_get_prop(ch_t *chp);
88 
89 #if defined(__sparc)
90 static void ch_free_dvma_handles(ch_t *chp);
91 #endif
92 
93 /* GLD interfaces */
94 static int ch_reset(gld_mac_info_t *);
95 static int ch_start(gld_mac_info_t *);
96 static int ch_stop(gld_mac_info_t *);
97 static int ch_set_mac_address(gld_mac_info_t *, uint8_t *);
98 static int ch_set_multicast(gld_mac_info_t *, uint8_t *, int);
99 static int ch_ioctl(gld_mac_info_t *, queue_t *, mblk_t *);
100 static int ch_set_promiscuous(gld_mac_info_t *, int);
101 static int ch_get_stats(gld_mac_info_t *, struct gld_stats *);
102 static int ch_send(gld_mac_info_t *, mblk_t *);
103 static uint_t ch_intr(gld_mac_info_t *);
104 
105 /*
106  * Data access requirements.
107  */
108 static struct ddi_device_acc_attr le_attr = {
109 	DDI_DEVICE_ATTR_V0,
110 	DDI_STRUCTURE_LE_ACC,
111 	DDI_STRICTORDER_ACC
112 };
113 
114 /*
115  * No swap mapping device attributes
116  */
117 static struct ddi_device_acc_attr null_attr = {
118 	DDI_DEVICE_ATTR_V0,
119 	DDI_NEVERSWAP_ACC,
120 	DDI_STRICTORDER_ACC
121 };
122 
123 /*
124  * STREAMS driver identification structure module_info(9s)
125  *
126  * driver limit values
127  */
128 
129 static	struct module_info ch_minfo = {
130 	CHIDNUM,	/* mi_idnum */
131 	CHNAME,		/* mi_idname */
132 	CHMINPSZ,	/* mi_minpsz */
133 	CHMAXPSZ,	/* mi_maxpsz */
134 	CHHIWAT,	/* mi_hiwat */
135 	CHLOWAT		/* mi_lowat */
136 };
137 
138 /*
139  * STREAMS queue processing procedures qinit(9s)
140  *
141  * read queue procedures
142  */
143 
144 static struct qinit ch_rinit = {
145 	(int (*)()) NULL, 	/* qi_putp */
146 	gld_rsrv,		/* qi_srvp */
147 	gld_open,		/* qi_qopen */
148 	gld_close,		/* qi_qclose */
149 	(int (*)()) NULL, 	/* qi_qadmin */
150 	&ch_minfo,		/* qi_minfo */
151 	NULL			/* qi_mstat */
152 };
153 
154 /*
155  * STREAMS queue processing procedures qinit(9s)
156  *
157  * write queue procedures
158  */
159 
160 static struct qinit ch_winit = {
161 	gld_wput,		/* qi_putp */
162 	gld_wsrv,		/* qi_srvp */
163 	(int (*)()) NULL, 	/* qi_qopen */
164 	(int (*)()) NULL, 	/* qi_qclose */
165 	(int (*)()) NULL, 	/* qi_qadmin */
166 	&ch_minfo,		/* qi_minfo */
167 	NULL			/* qi_mstat */
168 };
169 
170 /*
171  * STREAMS entity declaration structure - streamtab(9s)
172  */
173 static struct streamtab	chinfo = {
174 	&ch_rinit,	/* read queue information */
175 	&ch_winit,	/* write queue information */
176 	NULL,		/* st_muxrinit */
177 	NULL		/* st_muxwrinit */
178 };
179 
180 /*
181  * Device driver ops vector - cb_ops(9s)
182  *
183  * character/block entry points structure.
184  * chinfo identifies driver as a STREAMS driver.
185  */
186 
187 static struct cb_ops cb_ch_ops = {
188 	nulldev,	/* cb_open */
189 	nulldev,	/* cb_close */
190 	nodev,		/* cb_strategy */
191 	nodev,		/* cb_print */
192 	nodev,		/* cb_dump */
193 	nodev,		/* cb_read */
194 	nodev,		/* cb_write */
195 	nodev,		/* cb_ioctl */
196 	nodev,		/* cb_devmap */
197 	nodev,		/* cb_mmap */
198 	nodev,		/* cb_segmap */
199 	nochpoll,	/* cb_chpoll */
200 	ddi_prop_op,	/* report driver property information - prop_op(9e) */
201 	&chinfo,	/* cb_stream */
202 #if defined(__sparc)
203 	D_MP | D_64BIT,
204 #else
205 	D_MP,		/* cb_flag (supports multi-threading) */
206 #endif
207 	CB_REV,		/* cb_rev */
208 	nodev,		/* cb_aread */
209 	nodev		/* cb_awrite */
210 };
211 
212 /*
213  * dev_ops(9S) structure
214  *
215  * Device Operations table, for autoconfiguration
216  */
217 
218 static	struct dev_ops ch_ops = {
219 	DEVO_REV,	/* dev_ops structure revision - dev_ops(9s) */
220 	0,		/* Initial driver reference count */
221 	gld_getinfo,	/* funcp: get driver information - getinfo(9e) */
222 	nulldev,	/* funcp: entry point obsolete - identify(9e) */
223 	nulldev,	/* funcp: probe for device - probe(9e) */
224 	ch_attach,	/* funcp: attach driver to dev_info - attach(9e) */
225 	ch_detach,	/* funcp: detach driver to unload - detach(9e) */
226 	nodev,		/* funcp: reset device (not supported) - dev_ops(9s) */
227 	&cb_ch_ops,	/* ptr to cb_ops structure */
228 	NULL,		/* ptr to nexus bus operations structure (leaf) */
229 	NULL		/* funcp: change device power level - power(9e) */
230 };
231 
232 /*
233  * modldrv(9s) structure
234  *
235  * Definition for module specific device driver linkage structures (modctl.h)
236  */
237 
238 static struct modldrv modldrv = {
239 	&mod_driverops,		/* driver module */
240 	VERSION,
241 	&ch_ops,		/* driver ops */
242 };
243 
244 /*
245  * modlinkage(9s) structure
246  *
247  * module linkage base structure (modctl.h)
248  */
249 
250 static struct modlinkage modlinkage = {
251 	MODREV_1,		/* revision # of system */
252 	&modldrv,		/* NULL terminated list of linkage structures */
253 	NULL
254 };
255 
256 /* ===================== start of STREAMS driver code ================== */
257 
258 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
259 /*
260  * global pointer to toe per-driver control structure.
261  */
262 #define	MAX_CARDS	4
263 ch_t *gchp[MAX_CARDS];
264 #endif
265 
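/*
 * buffers_in_use[] counts DMA buffers that are still referenced outside
 * the driver; _fini() below sums the counters and refuses to unload the
 * module while any buffer is outstanding.  in_use_l is intended to
 * serialize updates to these counters and to in_use_index.
 */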
266 kmutex_t in_use_l;
267 uint32_t buffers_in_use[SZ_INUSE];
268 uint32_t in_use_index;
269 
270 /*
271  * Ethernet broadcast address definition.
272  */
273 static struct ether_addr etherbroadcastaddr = {
274 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
275 };
276 
277 /*
278  * Module initialization functions.
279  *
280  *      Routine         Called by
281  *      _init(9E)       modload(9F)
282  *      _info(9E)       modinfo(9F)
283  *      _fini(9E)       modunload(9F)
284  */
285 
286 /*
287  * _init(9E):
288  *
289  * Initial, one-time, resource allocation and data initialization.
290  */
291 
292 int
293 _init(void)
294 {
295 	int status;
296 
297 	status = mod_install(&modlinkage);
298 
299 	mutex_init(&in_use_l, NULL, MUTEX_DRIVER, NULL);
300 
301 	return (status);
302 }
303 
304 /*
305  * _fini(9E): It is here that any device information that was allocated
306  * during the _init(9E) routine should be released and the module removed
307  * from the system.  In the case of per-instance information, that information
308  * should be released in the _detach(9E) routine.
309  */
310 
311 int
312 _fini(void)
313 {
314 	int status;
315 	int i;
316 	uint32_t t = 0;
317 
318 	for (i = 0; i < SZ_INUSE; i++)
319 		t += buffers_in_use[i];
320 
321 	if (t != 0)
322 		return (DDI_FAILURE);
323 
324 	status = mod_remove(&modlinkage);
325 
326 	if (status == DDI_SUCCESS)
327 		mutex_destroy(&in_use_l);
328 
329 	return (status);
330 }
331 
332 int
333 _info(struct modinfo *modinfop)
334 {
335 	int status;
336 
337 
338 	status = mod_info(&modlinkage, modinfop);
339 
340 	return (status);
341 }
342 
343 /*
344  * attach(9E) - Called to create an instance of the driver for a
345  * device (at boot or on first access).  In this routine we create the
346  * minor device node.  The routine also initializes all per-unit
347  * mutexes and condition variables.
348  *
349  * If we were resuming a suspended instance of a device due to power
350  * management, then that would be handled here as well.  For more on
351  * that subject see the man page for pm(9E)
352  *
353  * Interface exists: make available by filling in network interface
354  * record.  System will initialize the interface when it is ready
355  * to accept packets.
356  */
357 int chdebug = 0;
358 int ch_abort_debug = 0;
359 
360 static int
361 ch_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
362 {
363 	ch_t *chp;
364 	int rv;
365 	int unit;
366 #ifdef CH_DEBUG
367 	int Version;
368 	int VendorID;
369 	int DeviceID;
370 	int SubDeviceID;
371 	int Command;
372 #endif
373 	gld_mac_info_t *macinfo;		/* GLD stuff follows */
374 	char *driver;
375 
376 	if (ch_abort_debug)
377 		debug_enter("ch_attach");
378 
379 	if (chdebug)
380 		return (DDI_FAILURE);
381 
382 
383 	if (cmd == DDI_ATTACH) {
384 
385 		unit = ddi_get_instance(dip);
386 
387 		driver = (char *)ddi_driver_name(dip);
388 
389 		PRINT(("driver %s unit: %d\n", driver, unit));
390 
391 		macinfo = gld_mac_alloc(dip);
392 		if (macinfo == NULL) {
393 			PRINT(("macinfo allocation failed\n"));
394 			DEBUG_ENTER("ch_attach");
395 			return (DDI_FAILURE);
396 		}
397 
398 		chp = (ch_t *)kmem_zalloc(sizeof (ch_t), KM_SLEEP);
399 
400 		if (chp == NULL) {
401 			PRINT(("zalloc of chp failed\n"));
402 			DEBUG_ENTER("ch_attach");
403 
404 			gld_mac_free(macinfo);
405 
406 			return (DDI_FAILURE);
407 		}
408 
409 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
410 		/* Solaris TOE support */
411 		gchp[unit] = chp;
412 #endif
413 
414 		PRINT(("attach macinfo: %p chp: %p\n", macinfo, chp));
415 
416 		chp->ch_dip  = dip;
417 		chp->ch_macp = macinfo;
418 		chp->ch_unit = unit;
419 		ch_set_name(chp, unit);
420 
421 		/*
422 		 * map in PCI register spaces
423 		 *
424 		 * PCI register set 0 - PCI configuration space
425 		 * PCI register set 1 - T101 card register space #1
426 		 */
427 
428 		/* map in T101 PCI configuration space */
429 		rv = pci_config_setup(
430 			dip,		/* ptr to dev's dev_info struct */
431 			&chp->ch_hpci);	/* ptr to data access handle */
432 
433 		if (rv != DDI_SUCCESS) {
434 			PRINT(("PCI config setup failed\n"));
435 			DEBUG_ENTER("ch_attach");
436 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
437 			gchp[unit] = NULL;
438 #endif
439 			cmn_err(CE_WARN, "%s: pci_config_setup error %d\n",
440 				chp->ch_name, rv);
441 
442 			ch_free_name(chp);
443 			kmem_free(chp, sizeof (ch_t));
444 			gld_mac_free(macinfo);
445 
446 			return (DDI_FAILURE);
447 		}
448 
449 		ch_get_prop(chp);
450 
451 		macinfo->gldm_devinfo = dip;
452 		macinfo->gldm_private = (caddr_t)chp;
453 		macinfo->gldm_reset = ch_reset;
454 		macinfo->gldm_start = ch_start;
455 		macinfo->gldm_stop = ch_stop;
456 		macinfo->gldm_set_mac_addr = ch_set_mac_address;
457 		macinfo->gldm_send = ch_send;
458 		macinfo->gldm_set_promiscuous = ch_set_promiscuous;
459 		macinfo->gldm_get_stats = ch_get_stats;
460 		macinfo->gldm_ioctl = ch_ioctl;
461 		macinfo->gldm_set_multicast = ch_set_multicast;
462 		macinfo->gldm_intr = ch_intr;
463 		macinfo->gldm_mctl = NULL;
464 
465 		macinfo->gldm_ident = driver;
466 		macinfo->gldm_type = DL_ETHER;
467 		macinfo->gldm_minpkt = 0;
468 		macinfo->gldm_maxpkt = chp->ch_mtu;
469 		macinfo->gldm_addrlen = ETHERADDRL;
470 		macinfo->gldm_saplen = -2;
471 		macinfo->gldm_ppa = unit;
472 		macinfo->gldm_broadcast_addr =
473 				etherbroadcastaddr.ether_addr_octet;
474 
475 
476 		/*
477 		 * do a power reset of card
478 		 *
479 		 * 1. set PwrState to D3hot (3)
480 		 * 2. clear PwrState flags
481 		 */
482 		pci_config_put32(chp->ch_hpci, 0x44, 3);
483 		pci_config_put32(chp->ch_hpci, 0x44, 0);
484 
485 		/* delay .5 sec */
486 		DELAY(500000);
487 
488 #ifdef CH_DEBUG
489 		VendorID    = pci_config_get16(chp->ch_hpci, 0);
490 		DeviceID    = pci_config_get16(chp->ch_hpci, 2);
491 		SubDeviceID = pci_config_get16(chp->ch_hpci, 0x2e);
492 		Command = pci_config_get16(chp->ch_hpci, 4);
493 
494 		PRINT(("IDs: %x,%x,%x\n", VendorID, DeviceID, SubDeviceID));
495 		PRINT(("Command: %x\n", Command));
496 #endif
497 		/* map in T101 register space (BAR0) */
498 		rv = ddi_regs_map_setup(
499 			dip,		/* ptr to dev's dev_info struct */
500 			BAR0,		/* register address space */
501 			&chp->ch_bar0,	/* address of offset */
502 			0,		/* offset into register address space */
503 			0,		/* length mapped (everything) */
504 			&le_attr,	/* ptr to device attr structure */
505 			&chp->ch_hbar0);	/* ptr to data access handle */
506 
507 		if (rv != DDI_SUCCESS) {
508 			PRINT(("map registers failed\n"));
509 			DEBUG_ENTER("ch_attach");
510 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
511 			gchp[unit] = NULL;
512 #endif
513 			cmn_err(CE_WARN,
514 				"%s: ddi_regs_map_setup BAR0 error %d\n",
515 				chp->ch_name, rv);
516 
517 			pci_config_teardown(&chp->ch_hpci);
518 			ch_free_name(chp);
519 			kmem_free(chp, sizeof (ch_t));
520 			gld_mac_free(macinfo);
521 
522 			return (DDI_FAILURE);
523 		}
524 
525 #ifdef CH_DEBUG
526 		Version  = ddi_get32(chp->ch_hbar0,
527 			(uint32_t *)(chp->ch_bar0+0x6c));
528 #endif
529 
530 		(void) ddi_dev_regsize(dip, 1, &chp->ch_bar0sz);
531 
532 		PRINT(("PCI BAR0 space addr: %p\n", chp->ch_bar0));
533 		PRINT(("PCI BAR0 space size: %x\n", chp->ch_bar0sz));
534 		PRINT(("PE Version: %x\n", Version));
535 
536 		/*
537 		 * Add interrupt to system.
538 		 */
539 		rv = ddi_get_iblock_cookie(
540 			dip,		   /* ptr to dev's dev_info struct */
541 			0,		   /* interrupt # (0) */
542 			&chp->ch_icookp); /* ptr to interrupt block cookie */
543 
544 		if (rv != DDI_SUCCESS) {
545 			PRINT(("iblock cookie failed\n"));
546 			DEBUG_ENTER("ch_attach");
547 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
548 			gchp[unit] = NULL;
549 #endif
550 			cmn_err(CE_WARN,
551 				"%s: ddi_get_iblock_cookie error %d\n",
552 				chp->ch_name, rv);
553 
554 			ddi_regs_map_free(&chp->ch_hbar0);
555 			pci_config_teardown(&chp->ch_hpci);
556 			ch_free_name(chp);
557 			kmem_free(chp, sizeof (ch_t));
558 			gld_mac_free(macinfo);
559 
560 			return (DDI_FAILURE);
561 		}
562 
563 		/*
564 		 * add interrupt handler before card setup.
565 		 */
566 		rv = ddi_add_intr(
567 			dip,		/* ptr to dev's dev_info struct */
568 			0,		/* interrupt # (0) */
569 			0,		/* iblock cookie ptr (NULL) */
570 			0,		/* idevice cookie ptr (NULL) */
571 			gld_intr,	/* function ptr to interrupt handler */
572 			(caddr_t)macinfo);	/* handler argument */
573 
574 		if (rv != DDI_SUCCESS) {
575 			PRINT(("add_intr failed\n"));
576 			DEBUG_ENTER("ch_attach");
577 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
578 			gchp[unit] = NULL;
579 #endif
580 			cmn_err(CE_WARN, "%s: ddi_add_intr error %d\n",
581 				chp->ch_name, rv);
582 
583 			ddi_regs_map_free(&chp->ch_hbar0);
584 			pci_config_teardown(&chp->ch_hpci);
585 			ch_free_name(chp);
586 			kmem_free(chp, sizeof (ch_t));
587 			gld_mac_free(macinfo);
588 
589 			return (DDI_FAILURE);
590 		}
591 
592 		/* initialize all the remaining per-card locks */
593 		mutex_init(&chp->ch_lock, NULL, MUTEX_DRIVER,
594 					(void *)chp->ch_icookp);
595 		mutex_init(&chp->ch_intr, NULL, MUTEX_DRIVER,
596 					(void *)chp->ch_icookp);
597 		mutex_init(&chp->ch_mc_lck, NULL, MUTEX_DRIVER, NULL);
598 		mutex_init(&chp->ch_dh_lck, NULL, MUTEX_DRIVER, NULL);
599 		mutex_init(&chp->mac_lock, NULL, MUTEX_DRIVER, NULL);
600 
601 		/* ------- initialize Chelsio card ------- */
602 
603 		if (pe_attach(chp)) {
604 			PRINT(("card initialization failed\n"));
605 			DEBUG_ENTER("ch_attach");
606 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
607 			gchp[unit] = NULL;
608 #endif
609 			cmn_err(CE_WARN, "%s: pe_attach failed\n",
610 				chp->ch_name);
611 
612 			mutex_destroy(&chp->ch_lock);
613 			mutex_destroy(&chp->ch_intr);
614 			mutex_destroy(&chp->ch_mc_lck);
615 			mutex_destroy(&chp->ch_dh_lck);
616 			mutex_destroy(&chp->mac_lock);
617 			ddi_remove_intr(dip, 0, chp->ch_icookp);
618 			ddi_regs_map_free(&chp->ch_hbar0);
619 			pci_config_teardown(&chp->ch_hpci);
620 			ch_free_name(chp);
621 			kmem_free(chp, sizeof (ch_t));
622 			gld_mac_free(macinfo);
623 
624 			return (DDI_FAILURE);
625 		}
626 
627 		/* ------- done with Chelsio card ------- */
628 
629 		/* now we can set the MAC address */
630 		macinfo->gldm_vendor_addr = pe_get_mac(chp);
631 
632 		macinfo->gldm_cookie = chp->ch_icookp;
633 
634 		/*
635 		 * We only activate checksum offload for T2 architectures.
636 		 */
637 		if (is_T2(chp)) {
638 			if (chp->ch_config.cksum_enabled)
639 				macinfo->gldm_capabilities |=
640 				    GLD_CAP_CKSUM_FULL_V4;
641 		} else
642 			chp->ch_config.cksum_enabled = 0;
643 
644 		rv = gld_register(
645 			dip,		/* ptr to dev's dev_info struct */
646 			(char *)ddi_driver_name(dip),	/* driver name */
647 			macinfo);	/* ptr to gld macinfo buffer */
648 
649 		/*
650 		 * The Jumbo frames capability is not available in the
651 		 * initial Solaris 10 release, so registration with an
652 		 * MTU > 1500 fails there; it is supported from Update 1 on.
653 		 */
654 		if (rv != DDI_SUCCESS) {
655 			cmn_err(CE_NOTE, "MTU > 1500 not supported by GLD.\n");
656 			cmn_err(CE_NOTE, "Setting MTU to 1500. \n");
657 			macinfo->gldm_maxpkt = chp->ch_mtu = 1500;
658 			rv = gld_register(
659 				dip,	/* ptr to dev's dev_info struct */
660 				(char *)ddi_driver_name(dip), /* driver name */
661 				macinfo); /* ptr to gld macinfo buffer */
662 		}
663 
664 
665 		if (rv != DDI_SUCCESS) {
666 			PRINT(("gld_register failed\n"));
667 			DEBUG_ENTER("ch_attach");
668 
669 			cmn_err(CE_WARN, "%s: gld_register error %d\n",
670 				chp->ch_name, rv);
671 
672 			pe_detach(chp);
673 
674 			mutex_destroy(&chp->ch_lock);
675 			mutex_destroy(&chp->ch_intr);
676 			mutex_destroy(&chp->ch_mc_lck);
677 			mutex_destroy(&chp->ch_dh_lck);
678 			mutex_destroy(&chp->mac_lock);
679 			ddi_remove_intr(dip, 0, chp->ch_icookp);
680 			ddi_regs_map_free(&chp->ch_hbar0);
681 			pci_config_teardown(&chp->ch_hpci);
682 			ch_free_name(chp);
683 			kmem_free(chp, sizeof (ch_t));
684 			gld_mac_free(macinfo);
685 
686 			return (DDI_FAILURE);
687 		}
688 
689 		/*
690 		 * print a banner at boot time (verbose mode), announcing
691 		 * the device pointed to by dip
692 		 */
693 		ddi_report_dev(dip);
694 
695 		if (ch_abort_debug)
696 			debug_enter("ch_attach");
697 
698 		return (DDI_SUCCESS);
699 
700 	} else if (cmd == DDI_RESUME) {
701 		PRINT(("attach resume\n"));
702 		DEBUG_ENTER("ch_attach");
703 		if ((chp = (ch_t *)ddi_get_driver_private(dip)) == NULL)
704 			return (DDI_FAILURE);
705 
706 		mutex_enter(&chp->ch_lock);
707 		chp->ch_flags &= ~PESUSPENDED;
708 		mutex_exit(&chp->ch_lock);
709 		return (DDI_SUCCESS);
710 	} else {
711 		PRINT(("attach: bad command\n"));
712 		DEBUG_ENTER("ch_attach");
713 
714 		return (DDI_FAILURE);
715 	}
716 }
717 
718 static int
719 ch_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
720 {
721 	gld_mac_info_t *macinfo;
722 	ch_t *chp;
723 
724 	if (cmd == DDI_DETACH) {
725 		macinfo = (gld_mac_info_t *)ddi_get_driver_private(dip);
726 		chp = (ch_t *)macinfo->gldm_private;
727 
728 		/*
729 		 * fail detach if there are outstanding mblks still
730 		 * in use somewhere.
731 		 */
732 		DEBUG_ENTER("ch_detach");
733 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
734 		mutex_enter(&chp->ch_lock);
735 		if (chp->ch_refcnt > 0) {
736 			mutex_exit(&chp->ch_lock);
737 			return (DDI_FAILURE);
738 		}
739 		mutex_exit(&chp->ch_lock);
740 		gchp[chp->ch_unit] = NULL;
741 #endif
742 		/*
743 		 * set driver state for this card to IDLE. We're
744 		 * shutting down.
745 		 */
746 		mutex_enter(&chp->ch_lock);
747 		chp->ch_state = PEIDLE;
748 		mutex_exit(&chp->ch_lock);
749 
750 		/*
751 		 * do a power reset of card
752 		 *
753 		 * 1. set PwrState to D3hot (3)
754 		 * 2. clear PwrState flags
755 		 */
756 		pci_config_put32(chp->ch_hpci, 0x44, 3);
757 		pci_config_put32(chp->ch_hpci, 0x44, 0);
758 
759 		/* delay .5 sec */
760 		DELAY(500000);
761 
762 		/* free register resources */
763 		(void) gld_unregister(macinfo);
764 
765 		/* make sure no interrupts while shutting down card */
766 		ddi_remove_intr(dip, 0, chp->ch_icookp);
767 
768 		/*
769 		 * reset device and recover resources
770 		 */
771 		pe_detach(chp);
772 
773 		ddi_regs_map_free(&chp->ch_hbar0);
774 		pci_config_teardown(&chp->ch_hpci);
775 		mutex_destroy(&chp->ch_lock);
776 		mutex_destroy(&chp->ch_intr);
777 		mutex_destroy(&chp->ch_mc_lck);
778 		mutex_destroy(&chp->ch_dh_lck);
779 		mutex_destroy(&chp->mac_lock);
780 		ch_free_dma_handles(chp);
781 #if defined(__sparc)
782 		ch_free_dvma_handles(chp);
783 #endif
784 		ch_free_name(chp);
785 		kmem_free(chp, sizeof (ch_t));
786 		gld_mac_free(macinfo);
787 
788 		DEBUG_ENTER("ch_detach end");
789 
790 		return (DDI_SUCCESS);
791 
792 	} else if ((cmd == DDI_SUSPEND) || (cmd == DDI_PM_SUSPEND)) {
793 		DEBUG_ENTER("suspend");
794 		if ((chp = (ch_t *)ddi_get_driver_private(dip)) == NULL)
795 			return (DDI_FAILURE);
796 		mutex_enter(&chp->ch_lock);
797 		chp->ch_flags |= PESUSPENDED;
798 		mutex_exit(&chp->ch_lock);
799 #ifdef TODO
800 		/* Un-initialize (STOP) T101 */
801 #endif
802 		return (DDI_SUCCESS);
803 	} else
804 		return (DDI_FAILURE);
805 }
806 
807 /*
808  * ch_alloc_dma_mem
809  *
810  * allocates DMA handle
811  * allocates kernel memory
812  * allocates DMA access handle
813  *
814  * chp - per-board descriptor
815  * type - non-zero selects the no-swap access attributes (null_attr); zero selects little-endian (le_attr)
816  * flags - type of mapping
817  * size - # bytes mapped
818  * paddr - physical address
819  * dh - ddi dma handle
820  * ah - ddi access handle
821  */
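/*
 * Illustrative usage sketch (hypothetical caller, not code taken from
 * this driver); the flag names are the DMA_* values tested below:
 *
 *	uint64_t pa;
 *	ulong_t dh, ah;
 *	void *vaddr;
 *
 *	vaddr = ch_alloc_dma_mem(chp, 0, DMA_OUT | DMA_SMALN, len,
 *	    &pa, &dh, &ah);
 *	if (vaddr == NULL)
 *		return (failure);
 *	... hand pa to the hardware, use vaddr from the kernel ...
 *	ch_free_dma_mem(dh, ah);
 */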
822 
823 void *
824 ch_alloc_dma_mem(ch_t *chp, int type, int flags, int size, uint64_t *paddr,
825 	ulong_t *dh, ulong_t *ah)
826 {
827 	ddi_dma_attr_t ch_dma_attr;
828 	ddi_dma_cookie_t cookie;
829 	ddi_dma_handle_t ch_dh;
830 	ddi_acc_handle_t ch_ah;
831 	ddi_device_acc_attr_t *dev_attrp;
832 	caddr_t ch_vaddr;
833 	size_t rlen;
834 	uint_t count;
835 	uint_t mapping;
836 	uint_t align;
837 	uint_t rv;
838 	uint_t direction;
839 
840 	mapping = (flags&DMA_STREAM)?DDI_DMA_STREAMING:DDI_DMA_CONSISTENT;
841 	if (flags & DMA_4KALN)
842 		align = 0x4000;
843 	else if (flags & DMA_SMALN)
844 		align = chp->ch_sm_buf_aln;
845 	else if (flags & DMA_BGALN)
846 		align = chp->ch_bg_buf_aln;
847 	else {
848 		cmn_err(CE_WARN, "ch_alloc_dma_mem(%s): bad alignment flag\n",
849 		    chp->ch_name);
850 		return (0);
851 	}
852 	direction = (flags&DMA_OUT)?DDI_DMA_WRITE:DDI_DMA_READ;
853 
854 	/*
855 	 * dynamically create a dma attribute structure
856 	 */
857 	ch_dma_attr.dma_attr_version = DMA_ATTR_V0;
858 	ch_dma_attr.dma_attr_addr_lo = 0;
859 	ch_dma_attr.dma_attr_addr_hi = 0xffffffffffffffff;
860 	ch_dma_attr.dma_attr_count_max = 0x00ffffff;
861 	ch_dma_attr.dma_attr_align = align;
862 	ch_dma_attr.dma_attr_burstsizes = 0xfff;
863 	ch_dma_attr.dma_attr_minxfer = 1;
864 	ch_dma_attr.dma_attr_maxxfer = 0x00ffffff;
865 	ch_dma_attr.dma_attr_seg = 0xffffffff;
866 	ch_dma_attr.dma_attr_sgllen = 1;
867 	ch_dma_attr.dma_attr_granular = 1;
868 	ch_dma_attr.dma_attr_flags = 0;
869 
870 	rv = ddi_dma_alloc_handle(
871 	    chp->ch_dip,		/* device dev_info structure */
872 	    &ch_dma_attr,		/* DMA attributes */
873 	    DDI_DMA_SLEEP,		/* Wait if no memory */
874 	    NULL,			/* no argument to callback */
875 	    &ch_dh);			/* DMA handle */
876 	if (rv != DDI_SUCCESS) {
877 
878 		cmn_err(CE_WARN,
879 			"%s: ch_alloc_dma_mem: ddi_dma_alloc_handle error %d\n",
880 			chp->ch_name, rv);
881 
882 		return (0);
883 	}
884 
885 	/* set byte order for data xfer */
886 	if (type)
887 		dev_attrp = &null_attr;
888 	else
889 		dev_attrp = &le_attr;
890 
891 	rv = ddi_dma_mem_alloc(
892 	    ch_dh,		/* dma handle */
893 	    size,		/* size desired allocate */
894 	    dev_attrp,		/* access attributes */
895 	    mapping,
896 	    DDI_DMA_SLEEP,	/* wait for resources */
897 	    NULL,		/* no argument */
898 	    &ch_vaddr,		/* allocated memory */
899 	    &rlen,		/* real size allocated */
900 	    &ch_ah);		/* data access handle */
901 	if (rv != DDI_SUCCESS) {
902 		ddi_dma_free_handle(&ch_dh);
903 
904 		cmn_err(CE_WARN,
905 			"%s: ch_alloc_dma_mem: ddi_dma_mem_alloc error %d\n",
906 			chp->ch_name, rv);
907 
908 		return (0);
909 	}
910 
911 	rv = ddi_dma_addr_bind_handle(
912 	    ch_dh,				/* dma handle */
913 	    (struct as *)0,			/* kernel address space */
914 	    ch_vaddr,				/* virtual address */
915 	    rlen,				/* length of object */
916 	    direction|mapping,
917 	    DDI_DMA_SLEEP,			/* Wait for resources */
918 	    NULL,				/* no argument */
919 	    &cookie,				/* dma cookie */
920 	    &count);
921 	if (rv != DDI_DMA_MAPPED) {
922 		ddi_dma_mem_free(&ch_ah);
923 		ddi_dma_free_handle(&ch_dh);
924 
925 		cmn_err(CE_WARN,
926 		    "%s: ch_alloc_dma_mem: ddi_dma_addr_bind_handle error %d\n",
927 			chp->ch_name, rv);
928 
929 		return (0);
930 	}
931 
932 	if (count != 1) {
933 		cmn_err(CE_WARN,
934 		    "%s: ch_alloc_dma_mem: unexpected cookie count %d\n",
935 			chp->ch_name, count);
936 		PRINT(("ch_alloc_dma_mem cookie count %d\n", count));
937 
938 		ddi_dma_mem_free(&ch_ah);
939 		ddi_dma_free_handle(&ch_dh);
940 
941 		return (0);
942 	}
943 
944 	*paddr = cookie.dmac_laddress;
945 
946 	*(ddi_dma_handle_t *)dh = ch_dh;
947 	*(ddi_acc_handle_t *)ah = ch_ah;
948 
949 	return ((void *)ch_vaddr);
950 }
951 
952 /*
953  * ch_free_dma_mem
954  *
955  * frees resources allocated by ch_alloc_dma_mem()
956  *
957  * frees DMA handle
958  * frees kernel memory
959  * frees DMA access handle
960  */
961 
962 void
963 ch_free_dma_mem(ulong_t dh, ulong_t ah)
964 {
965 	ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dh;
966 	ddi_acc_handle_t ch_ah = (ddi_acc_handle_t)ah;
967 
968 	(void) ddi_dma_unbind_handle(ch_dh);
969 	ddi_dma_mem_free(&ch_ah);
970 	ddi_dma_free_handle(&ch_dh);
971 }
972 
973 /*
974  * create a dma handle and return a dma handle entry.
975  */
976 free_dh_t *
977 ch_get_dma_handle(ch_t *chp)
978 {
979 	ddi_dma_handle_t ch_dh;
980 	ddi_dma_attr_t ch_dma_attr;
981 	free_dh_t *dhe;
982 	int rv;
983 
984 	dhe = (free_dh_t *)kmem_zalloc(sizeof (*dhe), KM_SLEEP);
985 
986 	ch_dma_attr.dma_attr_version = DMA_ATTR_V0;
987 	ch_dma_attr.dma_attr_addr_lo = 0;
988 	ch_dma_attr.dma_attr_addr_hi = 0xffffffffffffffff;
989 	ch_dma_attr.dma_attr_count_max = 0x00ffffff;
990 	ch_dma_attr.dma_attr_align = 1;
991 	ch_dma_attr.dma_attr_burstsizes = 0xfff;
992 	ch_dma_attr.dma_attr_minxfer = 1;
993 	ch_dma_attr.dma_attr_maxxfer = 0x00ffffff;
994 	ch_dma_attr.dma_attr_seg = 0xffffffff;
995 	ch_dma_attr.dma_attr_sgllen = 5;
996 	ch_dma_attr.dma_attr_granular = 1;
997 	ch_dma_attr.dma_attr_flags = 0;
998 
999 	rv = ddi_dma_alloc_handle(
1000 	    chp->ch_dip,		/* device dev_info */
1001 	    &ch_dma_attr,		/* DMA attributes */
1002 	    DDI_DMA_SLEEP,		/* Wait if no memory */
1003 	    NULL,			/* no argument */
1004 	    &ch_dh);			/* DMA handle */
1005 	if (rv != DDI_SUCCESS) {
1006 
1007 		cmn_err(CE_WARN,
1008 		    "%s: ch_get_dma_handle: ddi_dma_alloc_handle error %d\n",
1009 			chp->ch_name, rv);
1010 
1011 		kmem_free(dhe, sizeof (*dhe));
1012 
1013 		return ((free_dh_t *)0);
1014 	}
1015 
1016 	dhe->dhe_dh = (ulong_t)ch_dh;
1017 
1018 	return (dhe);
1019 }
1020 
1021 /*
1022  * free the linked list of dma descriptor entries.
1023  */
1024 static void
1025 ch_free_dma_handles(ch_t *chp)
1026 {
1027 	free_dh_t *dhe, *the;
1028 
1029 	dhe = chp->ch_dh;
1030 	while (dhe) {
1031 		ddi_dma_free_handle((ddi_dma_handle_t *)&dhe->dhe_dh);
1032 		the = dhe;
1033 		dhe = dhe->dhe_next;
1034 		kmem_free(the, sizeof (*the));
1035 	}
1036 	chp->ch_dh = NULL;
1037 }
1038 
1039 /*
1040  * ch_bind_dma_handle()
1041  *
1042  * returns # of entries used off of cmdQ_ce_t array to hold physical addrs.
1043  * returns the # of entries used from the cmdQ_ce_t array to hold physical addrs.
1044  * chp - per-board descriptor
1045  * size - # bytes mapped
1046  * vaddr - virtual address
1047  * cmp - array of cmdQ_ce_t entries
1048  * cnt - # free entries in cmp array
1049  */
1050 
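/*
 * The handle entry is popped from the per-board free list (chp->ch_dh),
 * which is stocked with entries created by ch_get_dma_handle().  On
 * success the dhe pointer is stored in the last cmdQ_ce_t's ce_dh field
 * so ch_unbind_dma_handle() can return it to the free list once the
 * transmit completes.  A return of 0 means no mapping was made (no free
 * handle, bind failure, or more cookies than cnt allows).
 */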
1051 uint32_t
1052 ch_bind_dma_handle(ch_t *chp, int size, caddr_t vaddr, cmdQ_ce_t *cmp,
1053 	uint32_t cnt)
1054 {
1055 	ddi_dma_cookie_t cookie;
1056 	ddi_dma_handle_t ch_dh;
1057 	uint_t count;
1058 	uint32_t n = 1;
1059 	free_dh_t *dhe;
1060 	uint_t rv;
1061 
1062 	mutex_enter(&chp->ch_dh_lck);
1063 	if ((dhe = chp->ch_dh) != NULL) {
1064 		chp->ch_dh = dhe->dhe_next;
1065 	}
1066 	mutex_exit(&chp->ch_dh_lck);
1067 
1068 	if (dhe == NULL) {
1069 		return (0);
1070 	}
1071 
1072 	ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1073 
1074 	rv = ddi_dma_addr_bind_handle(
1075 	    ch_dh,		/* dma handle */
1076 	    (struct as *)0,	/* kernel address space */
1077 	    vaddr,		/* virtual address */
1078 	    size,		/* length of object */
1079 	    DDI_DMA_WRITE|DDI_DMA_STREAMING,
1080 	    DDI_DMA_SLEEP,	/* Wait for resources */
1081 	    NULL,		/* no argument */
1082 	    &cookie,	/* dma cookie */
1083 	    &count);
1084 	if (rv != DDI_DMA_MAPPED) {
1085 
1086 		/* return dma header descriptor back to free list */
1087 		mutex_enter(&chp->ch_dh_lck);
1088 		dhe->dhe_next = chp->ch_dh;
1089 		chp->ch_dh = dhe;
1090 		mutex_exit(&chp->ch_dh_lck);
1091 
1092 		cmn_err(CE_WARN,
1093 		    "%s: ch_bind_dma_handle: ddi_dma_addr_bind_handle err %d\n",
1094 			chp->ch_name, rv);
1095 
1096 		return (0);
1097 	}
1098 
1099 	/*
1100 	 * abort if we've run out of space
1101 	 */
1102 	if (count > cnt) {
1103 		/* return dma header descriptor back to free list */
1104 		mutex_enter(&chp->ch_dh_lck);
1105 		dhe->dhe_next = chp->ch_dh;
1106 		chp->ch_dh = dhe;
1107 		mutex_exit(&chp->ch_dh_lck);
1108 
1109 		return (0);
1110 	}
1111 
1112 	cmp->ce_pa = cookie.dmac_laddress;
1113 	cmp->ce_dh = NULL;
1114 	cmp->ce_len = cookie.dmac_size;
1115 	cmp->ce_mp = NULL;
1116 	cmp->ce_flg = DH_DMA;
1117 
1118 	while (--count) {
1119 		cmp++;
1120 		n++;
1121 		ddi_dma_nextcookie(ch_dh, &cookie);
1122 		cmp->ce_pa = cookie.dmac_laddress;
1123 		cmp->ce_dh = NULL;
1124 		cmp->ce_len = cookie.dmac_size;
1125 		cmp->ce_mp = NULL;
1126 		cmp->ce_flg = DH_DMA;
1127 	}
1128 
1129 	cmp->ce_dh = dhe;
1130 
1131 	return (n);
1132 }
1133 
1134 /*
1135  * ch_unbind_dma_handle()
1136  *
1137  * frees resources allocated by ch_bind_dma_handle().
1138  *
1139  * frees DMA handle
1140  */
1141 
1142 void
1143 ch_unbind_dma_handle(ch_t *chp, free_dh_t *dhe)
1144 {
1145 	ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1146 
1147 	if (ddi_dma_unbind_handle(ch_dh))
1148 		cmn_err(CE_WARN, "%s: ddi_dma_unbind_handle failed",
1149 			chp->ch_name);
1150 
1151 	mutex_enter(&chp->ch_dh_lck);
1152 	dhe->dhe_next = chp->ch_dh;
1153 	chp->ch_dh = dhe;
1154 	mutex_exit(&chp->ch_dh_lck);
1155 }
1156 
1157 #if defined(__sparc)
1158 /*
1159  * DVMA stuff. Solaris only.
1160  */
1161 
1162 /*
1163  * create a dvma handle and return a dma handle entry.
1164  * DVMA is on sparc only!
1165  */
1166 
1167 free_dh_t *
1168 ch_get_dvma_handle(ch_t *chp)
1169 {
1170 	ddi_dma_handle_t ch_dh;
1171 	ddi_dma_lim_t ch_dvma_attr;
1172 	free_dh_t *dhe;
1173 	int rv;
1174 
1175 	dhe = (free_dh_t *)kmem_zalloc(sizeof (*dhe), KM_SLEEP);
1176 
1177 	ch_dvma_attr.dlim_addr_lo = 0;
1178 	ch_dvma_attr.dlim_addr_hi = 0xffffffff;
1179 	ch_dvma_attr.dlim_cntr_max = 0xffffffff;
1180 	ch_dvma_attr.dlim_burstsizes = 0xfff;
1181 	ch_dvma_attr.dlim_minxfer = 1;
1182 	ch_dvma_attr.dlim_dmaspeed = 0;
1183 
1184 	rv = dvma_reserve(
1185 		chp->ch_dip,		/* device dev_info */
1186 		&ch_dvma_attr,		/* DVMA attributes */
1187 		3,			/* number of pages */
1188 		&ch_dh);		/* DVMA handle */
1189 
1190 	if (rv != DDI_SUCCESS) {
1191 
1192 		cmn_err(CE_WARN,
1193 		    "%s: ch_get_dvma_handle: dvma_reserve() error %d\n",
1194 			chp->ch_name, rv);
1195 
1196 		kmem_free(dhe, sizeof (*dhe));
1197 
1198 		return ((free_dh_t *)0);
1199 	}
1200 
1201 	dhe->dhe_dh = (ulong_t)ch_dh;
1202 
1203 	return (dhe);
1204 }
1205 
1206 /*
1207  * free the linked list of dvma descriptor entries.
1208  * DVMA is only on sparc!
1209  */
1210 
1211 static void
1212 ch_free_dvma_handles(ch_t *chp)
1213 {
1214 	free_dh_t *dhe, *the;
1215 
1216 	dhe = chp->ch_vdh;
1217 	while (dhe) {
1218 		dvma_release((ddi_dma_handle_t)dhe->dhe_dh);
1219 		the = dhe;
1220 		dhe = dhe->dhe_next;
1221 		kmem_free(the, sizeof (*the));
1222 	}
1223 	chp->ch_vdh = NULL;
1224 }
1225 
1226 /*
1227  * ch_bind_dvma_handle()
1228  *
1229  * returns the # of entries used from the cmdQ_ce_t array to hold physical addrs.
1230  * DVMA in sparc only
1231  *
1232  * chp - per-board descriptor
1233  * size - # bytes mapped
1234  * vaddr - virtual address
1235  * cmp - array of cmdQ_ce_t entries
1236  * cnt - # free entries in cmp array
1237  */
1238 
1239 uint32_t
1240 ch_bind_dvma_handle(ch_t *chp, int size, caddr_t vaddr, cmdQ_ce_t *cmp,
1241 	uint32_t cnt)
1242 {
1243 	ddi_dma_cookie_t cookie;
1244 	ddi_dma_handle_t ch_dh;
1245 	uint32_t n = 1;
1246 	free_dh_t *dhe;
1247 
1248 	mutex_enter(&chp->ch_dh_lck);
1249 	if ((dhe = chp->ch_vdh) != NULL) {
1250 		chp->ch_vdh = dhe->dhe_next;
1251 	}
1252 	mutex_exit(&chp->ch_dh_lck);
1253 
1254 	if (dhe == NULL) {
1255 		return (0);
1256 	}
1257 
1258 	ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1259 	n = cnt;
1260 
1261 	dvma_kaddr_load(
1262 		ch_dh,		/* dvma handle */
1263 		vaddr,		/* virtual address */
1264 		size,		/* length of object */
1265 		0,		/* start at index 0 */
1266 		&cookie);
1267 
1268 	dvma_sync(ch_dh, 0, DDI_DMA_SYNC_FORDEV);
1269 
1270 	cookie.dmac_notused = 0;
1271 	n = 1;
1272 
1273 	cmp->ce_pa = cookie.dmac_laddress;
1274 	cmp->ce_dh = dhe;
1275 	cmp->ce_len = cookie.dmac_size;
1276 	cmp->ce_mp = NULL;
1277 	cmp->ce_flg = DH_DVMA;	/* indicate a dvma descriptor */
1278 
1279 	return (n);
1280 }
1281 
1282 /*
1283  * ch_unbind_dvma_handle()
1284  *
1285  * frees resources allocated by ch_bind_dvma_handle().
1286  *
1287  * frees DMA handle
1288  */
1289 
1290 void
1291 ch_unbind_dvma_handle(ch_t *chp, free_dh_t *dhe)
1292 {
1293 	ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1294 
1295 	dvma_unload(ch_dh, 0, -1);
1296 
1297 	mutex_enter(&chp->ch_dh_lck);
1298 	dhe->dhe_next = chp->ch_vdh;
1299 	chp->ch_vdh = dhe;
1300 	mutex_exit(&chp->ch_dh_lck);
1301 }
1302 
1303 #endif	/* defined(__sparc) */
1304 
1305 /*
1306  * send received packet up stream.
1307  *
1308  * if driver has been stopped, then we drop the message.
1309  */
1310 void
1311 ch_send_up(ch_t *chp, mblk_t *mp, uint32_t cksum, int flg)
1312 {
1313 	/*
1314 	 * probably do not need a lock here. When we set PESTOP in
1315 	 * ch_stop() a packet could have just passed here and gone
1316 	 * upstream. The next one will be dropped.
1317 	 */
1318 	if (chp->ch_state == PERUNNING) {
1319 		/*
1320 		 * note that flg will not be set unless enable_checksum_offload
1321 		 * set in /etc/system (see sge.c).
1322 		 */
1323 		if (flg)
1324 			(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, cksum,
1325 				HCK_FULLCKSUM, 0);
1326 		gld_recv(chp->ch_macp, mp);
1327 	} else {
1328 		freemsg(mp);
1329 	}
1330 }
1331 
1332 /*
1333  * unblock gld driver.
1334  */
1335 void
1336 ch_gld_ok(ch_t *chp)
1337 {
1338 	gld_sched(chp->ch_macp);
1339 }
1340 
1341 
1342 /*
1343  * reset the card.
1344  *
1345  * Note: we only do this after the card has been initialized.
1346  */
1347 static int
1348 ch_reset(gld_mac_info_t *mp)
1349 {
1350 	ch_t *chp;
1351 
1352 	if (mp == NULL) {
1353 		return (GLD_FAILURE);
1354 	}
1355 
1356 	chp = (ch_t *)mp->gldm_private;
1357 
1358 	if (chp == NULL) {
1359 		return (GLD_FAILURE);
1360 	}
1361 
1362 #ifdef NOTYET
1363 	/*
1364 	 * do a reset of card
1365 	 *
1366 	 * 1. set PwrState to D3hot (3)
1367 	 * 2. clear PwrState flags
1368 	 */
1369 	/*
1370 	 * When we did this, the card didn't start. First guess is that
1371 	 * the initialization is not quite correct. For now, we don't
1372 	 * reset things.
1373 	 */
1374 	if (chp->ch_hpci) {
1375 		pci_config_put32(chp->ch_hpci, 0x44, 3);
1376 		pci_config_put32(chp->ch_hpci, 0x44, 0);
1377 
1378 		/* delay .5 sec */
1379 		DELAY(500000);
1380 	}
1381 #endif
1382 
1383 	return (GLD_SUCCESS);
1384 }
1385 
1386 static int
1387 ch_start(gld_mac_info_t *macinfo)
1388 {
1389 	ch_t *chp = (ch_t *)macinfo->gldm_private;
1390 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
1391 	/* only initialize card on first attempt */
1392 	mutex_enter(&chp->ch_lock);
1393 	chp->ch_refcnt++;
1394 	if (chp->ch_refcnt == 1) {
1395 		chp->ch_state = PERUNNING;
1396 		mutex_exit(&chp->ch_lock);
1397 		pe_init((void *)chp);
1398 	} else
1399 		mutex_exit(&chp->ch_lock);
1400 #else
1401 	pe_init((void *)chp);
1402 
1403 	/* go to running state, we're being started */
1404 	mutex_enter(&chp->ch_lock);
1405 	chp->ch_state = PERUNNING;
1406 	mutex_exit(&chp->ch_lock);
1407 #endif
1408 
1409 	return (GLD_SUCCESS);
1410 }
1411 
1412 static int
1413 ch_stop(gld_mac_info_t *mp)
1414 {
1415 	ch_t *chp = (ch_t *)mp->gldm_private;
1416 
1417 	/*
1418 	 * can only stop the chip if it's been initialized
1419 	 */
1420 	mutex_enter(&chp->ch_lock);
1421 	if (chp->ch_state == PEIDLE) {
1422 		mutex_exit(&chp->ch_lock);
1423 		return (GLD_FAILURE);
1424 	}
1425 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
1426 	chp->ch_refcnt--;
1427 	if (chp->ch_refcnt == 0) {
1428 		chp->ch_state = PESTOP;
1429 		mutex_exit(&chp->ch_lock);
1430 		pe_stop(chp);
1431 	} else
1432 		mutex_exit(&chp->ch_lock);
1433 #else
1434 	chp->ch_state = PESTOP;
1435 	mutex_exit(&chp->ch_lock);
1436 	pe_stop(chp);
1437 #endif
1438 	return (GLD_SUCCESS);
1439 }
1440 
1441 static int
1442 ch_set_mac_address(gld_mac_info_t *mp, uint8_t *mac)
1443 {
1444 	ch_t *chp;
1445 
1446 	if (mp) {
1447 		chp = (ch_t *)mp->gldm_private;
1448 	} else {
1449 		return (GLD_FAILURE);
1450 	}
1451 
1452 	pe_set_mac(chp, mac);
1453 
1454 	return (GLD_SUCCESS);
1455 }
1456 
1457 static int
1458 ch_set_multicast(gld_mac_info_t *mp, uint8_t *ep, int flg)
1459 {
1460 	ch_t *chp = (ch_t *)mp->gldm_private;
1461 
1462 	return (pe_set_mc(chp, ep, flg));
1463 }
1464 
1465 static int
1466 ch_ioctl(gld_mac_info_t *macinfo, queue_t *q, mblk_t *mp)
1467 {
1468 	struct iocblk *iocp;
1469 
1470 	switch (mp->b_datap->db_type) {
1471 	case M_IOCTL:
1472 		/* pe_ioctl() does qreply() */
1473 		pe_ioctl((ch_t *)(macinfo->gldm_private), q, mp);
1474 		break;
1475 
1476 	default:
1477 /*
1478  *		cmn_err(CE_NOTE, "ch_ioctl not M_IOCTL\n");
1479  *		debug_enter("bad ch_ioctl");
1480  */
1481 
1482 		iocp = (struct iocblk *)mp->b_rptr;
1483 
1484 		if (mp->b_cont)
1485 			freemsg(mp->b_cont);
1486 		mp->b_cont = NULL;
1487 
1488 		mp->b_datap->db_type = M_IOCNAK;
1489 		iocp->ioc_error = EINVAL;
1490 		qreply(q, mp);
1491 		break;
1492 	}
1493 
1494 	return (GLD_SUCCESS);
1495 }
1496 
1497 static int
1498 ch_set_promiscuous(gld_mac_info_t *mp, int flag)
1499 {
1500 	ch_t *chp = (ch_t *)mp->gldm_private;
1501 
1502 	switch (flag) {
1503 	case GLD_MAC_PROMISC_MULTI:
1504 		pe_set_promiscuous(chp, 2);
1505 		break;
1506 
1507 	case GLD_MAC_PROMISC_NONE:
1508 		pe_set_promiscuous(chp, 0);
1509 		break;
1510 
1511 	case GLD_MAC_PROMISC_PHYS:
1512 	default:
1513 		pe_set_promiscuous(chp, 1);
1514 		break;
1515 	}
1516 
1517 	return (GLD_SUCCESS);
1518 }
1519 
1520 static int
1521 ch_get_stats(gld_mac_info_t *mp, struct gld_stats *gs)
1522 {
1523 	ch_t *chp = (ch_t *)mp->gldm_private;
1524 	uint64_t speed;
1525 	uint32_t intrcnt;
1526 	uint32_t norcvbuf;
1527 	uint32_t oerrors;
1528 	uint32_t ierrors;
1529 	uint32_t underrun;
1530 	uint32_t overrun;
1531 	uint32_t framing;
1532 	uint32_t crc;
1533 	uint32_t carrier;
1534 	uint32_t collisions;
1535 	uint32_t xcollisions;
1536 	uint32_t late;
1537 	uint32_t defer;
1538 	uint32_t xerrs;
1539 	uint32_t rerrs;
1540 	uint32_t toolong;
1541 	uint32_t runt;
1542 	ulong_t multixmt;
1543 	ulong_t multircv;
1544 	ulong_t brdcstxmt;
1545 	ulong_t brdcstrcv;
1546 
1547 	/*
1548 	 * race looks benign here.
1549 	 */
1550 	if (chp->ch_state != PERUNNING) {
1551 		return (GLD_FAILURE);
1552 	}
1553 
1554 	(void) pe_get_stats(chp,
1555 			&speed,
1556 			&intrcnt,
1557 			&norcvbuf,
1558 			&oerrors,
1559 			&ierrors,
1560 			&underrun,
1561 			&overrun,
1562 			&framing,
1563 			&crc,
1564 			&carrier,
1565 			&collisions,
1566 			&xcollisions,
1567 			&late,
1568 			&defer,
1569 			&xerrs,
1570 			&rerrs,
1571 			&toolong,
1572 			&runt,
1573 			&multixmt,
1574 			&multircv,
1575 			&brdcstxmt,
1576 			&brdcstrcv);
1577 
1578 	gs->glds_speed = speed;
1579 	gs->glds_media = GLDM_UNKNOWN;
1580 	gs->glds_intr  = intrcnt;
1581 	gs->glds_norcvbuf = norcvbuf;
1582 	gs->glds_errxmt = oerrors;
1583 	gs->glds_errrcv = ierrors;
1584 	gs->glds_missed = ierrors;	/* ??? */
1585 	gs->glds_underflow = underrun;
1586 	gs->glds_overflow = overrun;
1587 	gs->glds_frame = framing;
1588 	gs->glds_crc = crc;
1589 	gs->glds_duplex = GLD_DUPLEX_FULL;
1590 	gs->glds_nocarrier = carrier;
1591 	gs->glds_collisions = collisions;
1592 	gs->glds_excoll = xcollisions;
1593 	gs->glds_xmtlatecoll = late;
1594 	gs->glds_defer = defer;
1595 	gs->glds_dot3_first_coll = 0;	/* Not available */
1596 	gs->glds_dot3_multi_coll = 0;	/* Not available */
1597 	gs->glds_dot3_sqe_error = 0;	/* Not available */
1598 	gs->glds_dot3_mac_xmt_error = xerrs;
1599 	gs->glds_dot3_mac_rcv_error = rerrs;
1600 	gs->glds_dot3_frame_too_long = toolong;
1601 	gs->glds_short = runt;
1602 
1603 	gs->glds_noxmtbuf = 0;		/* not documented */
1604 	gs->glds_xmtretry = 0;		/* not documented */
1605 	gs->glds_multixmt = multixmt;	/* not documented */
1606 	gs->glds_multircv = multircv;	/* not documented */
1607 	gs->glds_brdcstxmt = brdcstxmt;	/* not documented */
1608 	gs->glds_brdcstrcv = brdcstrcv;	/* not documented */
1609 
1610 	return (GLD_SUCCESS);
1611 }
1612 
1613 
1614 static int
1615 ch_send(gld_mac_info_t *macinfo, mblk_t *mp)
1616 {
1617 	ch_t *chp = (ch_t *)macinfo->gldm_private;
1618 	uint32_t flg;
1619 	uint32_t msg_flg;
1620 
1621 #ifdef TX_CKSUM_FIX
1622 	mblk_t *nmp;
1623 	int frags;
1624 	size_t msg_len;
1625 	struct ether_header *ehdr;
1626 	ipha_t *ihdr;
1627 	int tflg = 0;
1628 #endif	/* TX_CKSUM_FIX */
1629 
1630 	/*
1631 	 * race looks benign here.
1632 	 */
1633 	if (chp->ch_state != PERUNNING) {
1634 		return (GLD_FAILURE);
1635 	}
1636 
1637 	msg_flg = 0;
1638 	if (chp->ch_config.cksum_enabled) {
1639 		if (is_T2(chp)) {
1640 			hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL,
1641 				NULL, &msg_flg);
1642 			flg = (msg_flg & HCK_FULLCKSUM)?
1643 				CH_NO_CPL: CH_NO_HWCKSUM|CH_NO_CPL;
1644 		} else
1645 			flg = CH_NO_CPL;
1646 	} else
1647 		flg = CH_NO_HWCKSUM | CH_NO_CPL;
1648 
1649 #ifdef TX_CKSUM_FIX
1650 	/*
1651 	 * Check if the message spans more than one mblk or
1652 	 * if it does and the ip header is not in the first
1653 	 * fragment then pull up the message. This case is
1654 	 * expected to be rare.
1655 	 */
1656 	frags = 0;
1657 	msg_len = 0;
1658 	nmp = mp;
1659 	do {
1660 		frags++;
1661 		msg_len += MBLKL(nmp);
1662 		nmp = nmp->b_cont;
1663 	} while (nmp);
1664 #define	MAX_ALL_HDRLEN (SZ_CPL_TX_PKT + sizeof (struct ether_header) + \
1665 				TCP_MAX_COMBINED_HEADER_LENGTH)
1666 	/*
1667 	 * If the first mblk has enough space at the beginning of
1668 	 * the data buffer to hold a CPL header, then we'll expand
1669 	 * the front of the buffer so a pullup will leave space for
1670 	 * pe_start() to add the CPL header in line. We need to remember
1671 	 * that we've done this so we can undo it after the pullup.
1672 	 *
1673 	 * Note that if we decide to do an allocb to hold the CPL header,
1674 	 * we need to catch the case where we've added an empty mblk for
1675 	 * the header but never did a pullup. This would result in the
1676 	 * tests for etherheader, etc. being done on the initial, empty,
1677 	 * mblk instead of the one with data. See PR3646 for further
1678 	 * details. (note this PR is closed since it is no longer relevant).
1679 	 *
1680 	 * Another point is that if we do add an allocb to add space for
1681 	 * a CPL header, after a pullup, the initial pointer, mp, in GLD will
1682 	 * no longer point to a valid mblk. When we get the mblk (by allocb),
1683 	 * we need to switch the mblk structure values between it and the
1684 	 * mp structure values referenced by GLD. This handles the case where
1685 	 * we've run out of cmdQ entries and report GLD_NORESOURCES back to
1686 	 * GLD. The pointer to the mblk data will have been modified to hold
1687 	 * an empty 8 bytes for the CPL header, For now, we let the pe_start()
1688 	 * routine prepend an 8 byte mblk.
1689 	 */
1690 	if (MBLKHEAD(mp) >= SZ_CPL_TX_PKT) {
1691 		mp->b_rptr -= SZ_CPL_TX_PKT;
1692 		tflg = 1;
1693 	}
1694 	if (frags > 3) {
1695 		chp->sge->intr_cnt.tx_msg_pullups++;
1696 		if (pullupmsg(mp, -1) == 0) {
1697 			freemsg(mp);
1698 			return (GLD_SUCCESS);
1699 		}
1700 	} else if ((msg_len > MAX_ALL_HDRLEN) &&
1701 			(MBLKL(mp) < MAX_ALL_HDRLEN)) {
1702 		chp->sge->intr_cnt.tx_hdr_pullups++;
1703 		if (pullupmsg(mp, MAX_ALL_HDRLEN) == 0) {
1704 			freemsg(mp);
1705 			return (GLD_SUCCESS);
1706 		}
1707 	}
1708 	if (tflg)
1709 		mp->b_rptr += SZ_CPL_TX_PKT;
1710 
1711 	ehdr = (struct ether_header *)mp->b_rptr;
1712 	if (ehdr->ether_type == htons(ETHERTYPE_IP)) {
1713 		ihdr = (ipha_t *)&mp->b_rptr[sizeof (struct ether_header)];
1714 		if ((ihdr->ipha_fragment_offset_and_flags & IPH_MF)) {
1715 			if (ihdr->ipha_protocol == IPPROTO_UDP) {
1716 				flg |= CH_UDP_MF;
1717 				chp->sge->intr_cnt.tx_udp_ip_frag++;
1718 			} else if (ihdr->ipha_protocol == IPPROTO_TCP) {
1719 				flg |= CH_TCP_MF;
1720 				chp->sge->intr_cnt.tx_tcp_ip_frag++;
1721 			}
1722 		} else if (ihdr->ipha_protocol == IPPROTO_UDP)
1723 			flg |= CH_UDP;
1724 	}
1725 #endif	/* TX_CKSUM_FIX */
1726 
1727 	/*
1728 	 * return 0 - data send successfully
1729 	 * return 1 - no resources, reschedule
1730 	 */
1731 	if (pe_start(chp, mp, flg))
1732 		return (GLD_NORESOURCES);
1733 	else
1734 		return (GLD_SUCCESS);
1735 }
1736 
1737 static uint_t
1738 ch_intr(gld_mac_info_t *mp)
1739 {
1740 	return (pe_intr((ch_t *)mp->gldm_private));
1741 }
1742 
1743 /*
1744  * generate the driver name with the unit # appended.
1745  */
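/*
 * The name buffer is always sizeof ("chxge00") == 8 bytes, which covers
 * both the one- and two-digit unit forms; ch_free_name() below frees
 * the same fixed size.
 */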
1746 void
1747 ch_set_name(ch_t *chp, int unit)
1748 {
1749 	chp->ch_name = (char *)kmem_alloc(sizeof ("chxge00"), KM_SLEEP);
1750 	if (unit > 9) {
1751 		bcopy("chxge00", (void *)chp->ch_name, sizeof ("chxge00"));
1752 		chp->ch_name[5] += unit/10;
1753 		chp->ch_name[6] += unit%10;
1754 	} else {
1755 		bcopy("chxge0", (void *)chp->ch_name, sizeof ("chxge0"));
1756 		chp->ch_name[5] += unit;
1757 	}
1758 }
1759 
1760 void
1761 ch_free_name(ch_t *chp)
1762 {
1763 	if (chp->ch_name)
1764 		kmem_free(chp->ch_name, sizeof ("chxge00"));
1765 	chp->ch_name = NULL;
1766 }
1767 
1768 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
1769 /*
1770  * register toe offload.
1771  */
1772 void *
1773 ch_register(void *instp, void *toe_rcv, void *toe_free, void *toe_tunnel,
1774     kmutex_t *toe_tx_mx, kcondvar_t *toe_of_cv, int unit)
1775 {
1776 	ch_t *chp = gchp[unit];
1777 	if (chp != NULL) {
1778 		mutex_enter(&chp->ch_lock);
1779 
1780 		chp->toe_rcv = (void (*)(void *, mblk_t *))toe_rcv;
1781 		chp->ch_toeinst = instp;
1782 		chp->toe_free = (void (*)(void *, tbuf_t *))toe_free;
1783 		chp->toe_tunnel = (int (*)(void *, mblk_t *))toe_tunnel;
1784 		chp->ch_tx_overflow_mutex = toe_tx_mx;
1785 		chp->ch_tx_overflow_cv = toe_of_cv;
1786 		chp->open_device_map |= TOEDEV_DEVMAP_BIT;
1787 
1788 		/* start up adapter if first user */
1789 		chp->ch_refcnt++;
1790 		if (chp->ch_refcnt == 1) {
1791 			chp->ch_state = PERUNNING;
1792 			mutex_exit(&chp->ch_lock);
1793 			pe_init((void *)chp);
1794 		} else
1795 			mutex_exit(&chp->ch_lock);
1796 	}
1797 	return ((void *)gchp[unit]);
1798 }
1799 
1800 /*
1801  * unregister toe offload.
1802  * XXX Need to fix races here.
1803  *     1. turn off SGE interrupts.
1804  *     2. do update
1805  *     3. re-enable SGE interrupts
1806  *     4. SGE doorbell to make sure things get restarted.
1807  */
1808 void
1809 ch_unregister(void)
1810 {
1811 	int i;
1812 	ch_t *chp;
1813 
1814 	for (i = 0; i < MAX_CARDS; i++) {
1815 		chp = gchp[i];
1816 		if (chp == NULL)
1817 			continue;
1818 
1819 		mutex_enter(&chp->ch_lock);
1820 
1821 		chp->ch_refcnt--;
1822 		if (chp->ch_refcnt == 0) {
1823 			chp->ch_state = PESTOP;
1824 			mutex_exit(&chp->ch_lock);
1825 			pe_stop(chp);
1826 		} else
1827 			mutex_exit(&chp->ch_lock);
1828 
1829 		chp->open_device_map &= ~TOEDEV_DEVMAP_BIT;
1830 		chp->toe_rcv = NULL;
1831 		chp->ch_toeinst =  NULL;
1832 		chp->toe_free = NULL;
1833 		chp->toe_tunnel = NULL;
1834 		chp->ch_tx_overflow_mutex = NULL;
1835 		chp->ch_tx_overflow_cv = NULL;
1836 	}
1837 }
1838 #endif	/* CONFIG_CHELSIO_T1_OFFLOAD */
1839 
1840 /*
1841  * get properties from chxge.conf
1842  */
1843 static void
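/*
 * Each property is looked up under both the underscore spelling
 * (e.g. "enable_dvma") and the hyphenated spelling ("enable-dvma"),
 * so either form may be used in chxge.conf.
 */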
1844 ch_get_prop(ch_t *chp)
1845 {
1846 	int val;
1847 	int tval = 0;
1848 	extern int enable_latency_timer;
1849 	extern uint32_t sge_cmdq0_cnt;
1850 	extern uint32_t sge_cmdq1_cnt;
1851 	extern uint32_t sge_flq0_cnt;
1852 	extern uint32_t sge_flq1_cnt;
1853 	extern uint32_t sge_respq_cnt;
1854 	extern uint32_t sge_cmdq0_cnt_orig;
1855 	extern uint32_t sge_cmdq1_cnt_orig;
1856 	extern uint32_t sge_flq0_cnt_orig;
1857 	extern uint32_t sge_flq1_cnt_orig;
1858 	extern uint32_t sge_respq_cnt_orig;
1859 	dev_info_t *pdip;
1860 	uint32_t vendor_id, device_id, revision_id;
1861 	uint32_t *prop_val = NULL;
1862 	uint32_t prop_len = 0;
1863 
1864 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1865 		"enable_dvma", -1);
1866 	if (val == -1)
1867 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1868 			"enable-dvma", -1);
1869 	if (val != -1) {
1870 		if (val != 0)
1871 			chp->ch_config.enable_dvma = 1;
1872 	}
1873 
1874 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1875 		"amd_bug_workaround", -1);
1876 	if (val == -1)
1877 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1878 			"amd-bug-workaround", -1);
1879 
1880 	if (val != -1) {
1881 		if (val == 0) {
1882 			chp->ch_config.burstsize_set = 0;
1883 			chp->ch_config.transaction_cnt_set = 0;
1884 			goto fail_exit;
1885 		}
1886 	}
1887 	/*
1888 	 * Step up to the parent node, i.e. the node above us in the
1889 	 * device tree, which will typically be the PCI host
1890 	 * controller.
1891 	 */
1892 	pdip = ddi_get_parent(chp->ch_dip);
1893 
1894 	/*
1895 	 * Now get the 'Vendor id' properties
1896 	 */
1897 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "vendor-id",
1898 	    (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1899 		chp->ch_config.burstsize_set = 0;
1900 		chp->ch_config.transaction_cnt_set = 0;
1901 		goto fail_exit;
1902 	}
1903 	vendor_id = *(uint32_t *)prop_val;
1904 	ddi_prop_free(prop_val);
1905 
1906 	/*
1907 	 * Now get the 'Device id' properties
1908 	 */
1909 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "device-id",
1910 	    (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1911 		chp->ch_config.burstsize_set = 0;
1912 		chp->ch_config.transaction_cnt_set = 0;
1913 		goto fail_exit;
1914 	}
1915 	device_id = *(uint32_t *)prop_val;
1916 	ddi_prop_free(prop_val);
1917 
1918 	/*
1919 	 * Now get the 'Revision id' properties
1920 	 */
1921 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "revision-id",
1922 	    (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1923 		chp->ch_config.burstsize_set = 0;
1924 		chp->ch_config.transaction_cnt_set = 0;
1925 		goto fail_exit;
1926 	}
1927 	revision_id = *(uint32_t *)prop_val;
1928 	ddi_prop_free(prop_val);
1929 
1930 	/*
1931 	 * set default values based on node above us.
1932 	 */
1933 	if ((vendor_id == AMD_VENDOR_ID) && (device_id == AMD_BRIDGE) &&
1934 	    (revision_id <= AMD_BRIDGE_REV)) {
1935 		uint32_t v;
1936 		uint32_t burst;
1937 		uint32_t cnt;
1938 
1939 		/* if 133 MHz is not enabled, then do nothing - we're not PCI-X */
1940 		v = pci_config_get32(chp->ch_hpci, 0x64);
1941 		if ((v & 0x20000) == 0) {
1942 			chp->ch_config.burstsize_set = 0;
1943 			chp->ch_config.transaction_cnt_set = 0;
1944 			goto fail_exit;
1945 		}
1946 
1947 		/* check burst size and transaction count */
1948 		v = pci_config_get32(chp->ch_hpci, 0x60);
1949 		burst = (v >> 18) & 3;
1950 		cnt = (v >> 20) & 7;
1951 
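		/*
		 * burst encodes the PCI-X maximum memory read byte count
		 * (0 = 512, 1 = 1024, 2 = 2048, 3 = 4096 bytes) and cnt the
		 * maximum outstanding split transactions (0-7 correspond to
		 * 1, 2, 3, 4, 8, 12, 16, 32); the same encodings are used by
		 * the pci_burstsize and pci_split_transaction_cnt property
		 * handling below.
		 */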
1952 		switch (burst) {
1953 		case 0:	/* 512 */
1954 			/* 512 burst size legal with split cnts 1,2,3 */
1955 			if (cnt <= 2) {
1956 				chp->ch_config.burstsize_set = 0;
1957 				chp->ch_config.transaction_cnt_set = 0;
1958 				goto fail_exit;
1959 			}
1960 			break;
1961 		case 1:	/* 1024 */
1962 			/* 1024 burst size legal with split cnts 1,2 */
1963 			if (cnt <= 1) {
1964 				chp->ch_config.burstsize_set = 0;
1965 				chp->ch_config.transaction_cnt_set = 0;
1966 				goto fail_exit;
1967 			}
1968 			break;
1969 		case 2:	/* 2048 */
1970 			/* 2048 burst size legal with split cnts 1 */
1971 			if (cnt == 0) {
1972 				chp->ch_config.burstsize_set = 0;
1973 				chp->ch_config.transaction_cnt_set = 0;
1974 				goto fail_exit;
1975 			}
1976 			break;
1977 		case 3:	/* 4096 */
1978 			break;
1979 		}
1980 	} else {
1981 		goto fail_exit;
1982 	}
1983 
1984 	/*
1985 	 * if illegal burst size seen, then default to 1024 burst size
1986 	 */
1987 	chp->ch_config.burstsize = 1;
1988 	chp->ch_config.burstsize_set = 1;
1989 	/*
1990 	 * if illegal transaction cnt seen, then default to 2
1991 	 */
1992 	chp->ch_config.transaction_cnt = 1;
1993 	chp->ch_config.transaction_cnt_set = 1;
1994 
1995 
1996 fail_exit:
1997 
1998 	/*
1999 	 * alter the burstsize parameter via an entry
2000 	 * in chxge.conf
2001 	 */
2002 
2003 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2004 		"pci_burstsize", -1);
2005 	if (val == -1)
2006 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2007 			"pci-burstsize", -1);
2008 
2009 	if (val != -1) {
2010 
2011 		switch (val) {
2012 		case 0:	/* use default */
2013 			chp->ch_config.burstsize_set = 0;
2014 			break;
2015 
2016 		case 1024:
2017 			chp->ch_config.burstsize_set = 1;
2018 			chp->ch_config.burstsize = 1;
2019 			break;
2020 
2021 		case 2048:
2022 			chp->ch_config.burstsize_set = 1;
2023 			chp->ch_config.burstsize = 2;
2024 			break;
2025 
2026 		case 4096:
2027 			cmn_err(CE_WARN, "%s: burst size %d not supported\n",
2028 			    chp->ch_name, val);
2029 			break;
2030 
2031 		default:
2032 			cmn_err(CE_WARN, "%s illegal burst size %d\n",
2033 			    chp->ch_name, val);
2034 			break;
2035 		}
2036 	}
2037 
2038 	/*
2039 	 * set transaction count
2040 	 */
2041 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2042 		"pci_split_transaction_cnt", -1);
2043 	if (val == -1)
2044 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2045 			"pci-split-transaction-cnt", -1);
2046 
2047 	if (val != -1) {
2048 		switch (val) {
2049 		case 0:	/* use default */
2050 			chp->ch_config.transaction_cnt_set = 0;
2051 			break;
2052 
2053 		case 1:
2054 			chp->ch_config.transaction_cnt_set = 1;
2055 			chp->ch_config.transaction_cnt = 0;
2056 			break;
2057 
2058 		case 2:
2059 			chp->ch_config.transaction_cnt_set = 1;
2060 			chp->ch_config.transaction_cnt = 1;
2061 			break;
2062 
2063 		case 3:
2064 			chp->ch_config.transaction_cnt_set = 1;
2065 			chp->ch_config.transaction_cnt = 2;
2066 			break;
2067 
2068 		case 4:
2069 			chp->ch_config.transaction_cnt_set = 1;
2070 			chp->ch_config.transaction_cnt = 3;
2071 			break;
2072 
2073 		case 8:
2074 			chp->ch_config.transaction_cnt_set = 1;
2075 			chp->ch_config.transaction_cnt = 4;
2076 			break;
2077 
2078 		case 12:
2079 			chp->ch_config.transaction_cnt_set = 1;
2080 			chp->ch_config.transaction_cnt = 5;
2081 			break;
2082 
2083 		case 16:
2084 			chp->ch_config.transaction_cnt_set = 1;
2085 			chp->ch_config.transaction_cnt = 6;
2086 			break;
2087 
2088 		case 32:
2089 			chp->ch_config.transaction_cnt_set = 1;
2090 			chp->ch_config.transaction_cnt = 7;
2091 			break;
2092 
2093 		default:
2094 			cmn_err(CE_WARN, "%s: illegal transaction cnt %d\n",
2095 			    chp->ch_name, val);
2096 			break;
2097 		}
2098 	}
2099 
2100 	/*
2101 	 * set relaxed ordering bit?
2102 	 */
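	/*
	 * Illustrative chxge.conf entry: pci-relaxed-ordering-on = 1;
	 * Any non-zero value requests relaxed ordering.
	 */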
2103 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2104 		"pci_relaxed_ordering_on", -1);
2105 	if (val == -1)
2106 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2107 			"pci-relaxed-ordering-on", -1);
2108 
2109 	/*
2110 	 * if the property is not specified, use the system default.
2111 	 */
2112 	chp->ch_config.relaxed_ordering = 0;
2113 
2114 	if (val != -1) {
2115 		if (val)
2116 			chp->ch_config.relaxed_ordering = 1;
2117 	}
2118 
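	/*
	 * Optionally enable the PCI latency timer; any non-zero
	 * enable-latency-timer property value turns it on
	 * (illustrative entry: enable-latency-timer = 1;).
	 */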
2119 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2120 		"enable_latency_timer", -1);
2121 	if (val == -1)
2122 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2123 			"enable-latency-timer", -1);
2124 	if (val != -1)
2125 		enable_latency_timer = (val == 0) ? 0 : 1;
2126 
2127 	/*
2128 	 * default maximum Jumbo Frame size.
2129 	 */
2130 	chp->ch_maximum_mtu = 9198;	/* tunable via chxge.conf */
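	/*
	 * An illustrative override: maximum-mtu = 9000;
	 * Values are clamped to the range 1500..9582.
	 */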
2131 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2132 		"maximum_mtu", -1);
2133 	if (val == -1) {
2134 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2135 			"maximum-mtu", -1);
2136 	}
2137 	if (val != -1) {
2138 		if (val > 9582) {
2139 			cmn_err(CE_WARN,
2140 			    "maximum_mtu value %d > 9582. Value set to 9582",
2141 			    val);
2142 			val = 9582;
2143 		} else if (val < 1500) {
2144 			cmn_err(CE_WARN,
2145 			    "maximum_mtu value %d < 1500. Value set to 1500",
2146 			    val);
2147 			val = 1500;
2148 		}
2149 
2150 		if (val)
2151 			chp->ch_maximum_mtu = val;
2152 	}
2153 
2154 	/*
2155 	 * default MTU for this instance
2156 	 */
2157 	chp->ch_mtu = ETHERMTU;
2158 
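	/*
	 * A non-zero accept-jumbo property (illustrative entry:
	 * accept-jumbo = 1;) raises this instance's MTU to ch_maximum_mtu.
	 */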
2159 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2160 		"accept_jumbo", -1);
2161 	if (val == -1) {
2162 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2163 			"accept-jumbo", -1);
2164 	}
2165 	if (val != -1) {
2166 		if (val)
2167 			chp->ch_mtu = chp->ch_maximum_mtu;
2168 	}
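	/*
	 * Size and align the SGE free-list buffers.  With TOE offload
	 * built in, fixed 2KB small and 16KB big buffers are used;
	 * otherwise the small/big buffer sizes scale with the MTU
	 * chosen above.
	 */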
2169 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
2170 	chp->ch_sm_buf_sz = 0x800;
2171 	chp->ch_sm_buf_aln = 0x800;
2172 	chp->ch_bg_buf_sz = 0x4000;
2173 	chp->ch_bg_buf_aln = 0x4000;
2174 #else
2175 	chp->ch_sm_buf_sz = 0x200;
2176 	chp->ch_sm_buf_aln = 0x200;
2177 	chp->ch_bg_buf_sz = 0x800;
2178 	chp->ch_bg_buf_aln = 0x800;
2179 	if ((chp->ch_mtu > 0x800) && (chp->ch_mtu <= 0x1000)) {
2180 		chp->ch_sm_buf_sz = 0x400;
2181 		chp->ch_sm_buf_aln = 0x400;
2182 		chp->ch_bg_buf_sz = 0x1000;
2183 		chp->ch_bg_buf_aln = 0x1000;
2184 	} else if ((chp->ch_mtu > 0x1000) && (chp->ch_mtu <= 0x2000)) {
2185 		chp->ch_sm_buf_sz = 0x400;
2186 		chp->ch_sm_buf_aln = 0x400;
2187 		chp->ch_bg_buf_sz = 0x2000;
2188 		chp->ch_bg_buf_aln = 0x2000;
2189 	} else if (chp->ch_mtu > 0x2000) {
2190 		chp->ch_sm_buf_sz = 0x400;
2191 		chp->ch_sm_buf_aln = 0x400;
2192 		chp->ch_bg_buf_sz = 0x3000;
2193 		chp->ch_bg_buf_aln = 0x4000;
2194 	}
2195 #endif
2196 	chp->ch_config.cksum_enabled = 1;
2197 
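	/*
	 * Hardware checksum offload is on by default; setting
	 * enable-checksum-offload = 0; in chxge.conf (illustrative)
	 * disables it.
	 */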
2198 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2199 		"enable_checksum_offload", -1);
2200 	if (val == -1)
2201 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2202 			"enable-checksum-offload", -1);
2203 	if (val != -1) {
2204 		if (val == 0)
2205 			chp->ch_config.cksum_enabled = 0;
2206 	}
2207 
2208 	/*
2209 	 * Provides a tuning capability for the command queue 0 size.
2210 	 */
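	/*
	 * Illustrative chxge.conf entry: sge-cmdq0-cnt = 1024;
	 * Values of 10 or less are ignored; values above 65535 revert
	 * to the built-in default.
	 */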
2211 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2212 		"sge_cmdq0_cnt", -1);
2213 	if (val == -1)
2214 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2215 			"sge-cmdq0-cnt", -1);
2216 	if (val != -1) {
2217 		if (val > 10)
2218 			sge_cmdq0_cnt = val;
2219 	}
2220 
2221 	if (sge_cmdq0_cnt > 65535) {
2222 		cmn_err(CE_WARN,
2223 		    "%s: sge-cmdQ0-cnt > 65535 - resetting value to default",
2224 			chp->ch_name);
2225 		sge_cmdq0_cnt = sge_cmdq0_cnt_orig;
2226 	}
2227 	tval += sge_cmdq0_cnt;
2228 
2229 	/*
2230 	 * Provides a tuning capability for the command queue 1 size.
2231 	 */
2232 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2233 		"sge_cmdq1_cnt", -1);
2234 	if (val == -1)
2235 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2236 			"sge-cmdq1-cnt", -1);
2237 	if (val != -1) {
2238 		if (val > 10)
2239 			sge_cmdq1_cnt = val;
2240 	}
2241 
2242 	if (sge_cmdq1_cnt > 65535) {
2243 		cmn_err(CE_WARN,
2244 		    "%s: sge-cmdQ1-cnt > 65535 - resetting value to default",
2245 			chp->ch_name);
2246 		sge_cmdq1_cnt = sge_cmdq1_cnt_orig;
2247 	}
2248 
2249 	/*
2250 	 * Provides a tuning capability for the free list 0 size.
2251 	 */
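	/*
	 * Illustrative chxge.conf entry: sge-flq0-cnt = 1024;
	 * Values of 512 or less are ignored; values above 65535 revert
	 * to the built-in default.
	 */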
2252 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2253 		"sge_flq0_cnt", -1);
2254 	if (val == -1)
2255 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2256 			"sge-flq0-cnt", -1);
2257 	if (val != -1) {
2258 		if (val > 512)
2259 			sge_flq0_cnt = val;
2260 	}
2261 
2262 	if (sge_flq0_cnt > 65535) {
2263 		cmn_err(CE_WARN,
2264 		    "%s: sge-flq0-cnt > 65535 - resetting value to default",
2265 			chp->ch_name);
2266 		sge_flq0_cnt = sge_flq0_cnt_orig;
2267 	}
2268 
2269 	tval += sge_flq0_cnt;
2270 
2271 	/*
2272 	 * Provides a tuning capability for the free list 1 size.
2273 	 */
2274 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2275 		"sge_flq1_cnt", -1);
2276 	if (val == -1)
2277 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2278 			"sge-flq1-cnt", -1);
2279 	if (val != -1) {
2280 		if (val > 512)
2281 			sge_flq1_cnt = val;
2282 	}
2283 
2284 	if (sge_flq1_cnt > 65535) {
2285 		cmn_err(CE_WARN,
2286 		    "%s: sge-flq1-cnt > 65535 - resetting value to default",
2287 			chp->ch_name);
2288 		sge_flq1_cnt = sge_flq1_cnt_orig;
2289 	}
2290 
2291 	tval += sge_flq1_cnt;
2292 
2293 	/*
2294 	 * Provides a tuning capability for the response queue size.
2295 	 */
2296 	val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2297 		"sge_respq_cnt", -1);
2298 	if (val == -1)
2299 		val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2300 			"sge-respq-cnt", -1);
2301 	if (val != -1) {
2302 		if (val > 30)
2303 			sge_respq_cnt = val;
2304 	}
2305 
2306 	if (sge_respq_cnt > 65535) {
2307 		cmn_err(CE_WARN,
2308 		    "%s: sge-respq-cnt > 65535 - resetting value to default",
2309 			chp->ch_name);
2310 		sge_respq_cnt = sge_respq_cnt_orig;
2311 	}
2312 
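	/*
	 * The response queue must be able to absorb entries from
	 * cmdQ0, flq0 and flq1 (tval).  If it is smaller, grow it to
	 * tval when tval <= 65535; otherwise fall back to the default
	 * sizes for all queues.
	 */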
2313 	if (tval > sge_respq_cnt) {
2314 		if (tval <= 65535) {
2315 			cmn_err(CE_WARN,
2316 	    "%s: sge-respq-cnt < %d - setting value to %d (cmdQ+flq0+flq1)",
2317 			    chp->ch_name, tval, tval);
2318 
2319 			sge_respq_cnt = tval;
2320 		} else {
2321 			cmn_err(CE_WARN,
2322 			    "%s: Q sizes invalid - resetting to default values",
2323 			    chp->ch_name);
2324 
2325 			sge_cmdq0_cnt = sge_cmdq0_cnt_orig;
2326 			sge_cmdq1_cnt = sge_cmdq1_cnt_orig;
2327 			sge_flq0_cnt = sge_flq0_cnt_orig;
2328 			sge_flq1_cnt = sge_flq1_cnt_orig;
2329 			sge_respq_cnt = sge_respq_cnt_orig;
2330 		}
2331 	}
2332 }
2333