1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2018 Joyent, Inc.
26 */
27
28 /*
29 * This file is part of the Chelsio T1 Ethernet driver.
30 *
31 * Copyright (C) 2003-2005 Chelsio Communications. All rights reserved.
32 */
33
34 /*
35 * Solaris Multithreaded STREAMS DLPI Chelsio PCI Ethernet Driver
36 */
37
38 /* #define CH_DEBUG 1 */
39 #ifdef CH_DEBUG
40 #define DEBUG_ENTER(a) debug_enter(a)
41 #define PRINT(a) printf a
42 #else
43 #define DEBUG_ENTER(a)
44 #define PRINT(a)
45 #endif
46
47 #include <sys/types.h>
48 #include <sys/conf.h>
49 #include <sys/debug.h>
50 #include <sys/stropts.h>
51 #include <sys/stream.h>
52 #include <sys/strlog.h>
53 #include <sys/kmem.h>
54 #include <sys/stat.h>
55 #include <sys/kstat.h>
56 #include <sys/modctl.h>
57 #include <sys/errno.h>
58 #include <sys/cmn_err.h>
59 #include <sys/ddi.h>
60 #include <sys/sunddi.h>
61 #include <sys/dlpi.h>
62 #include <sys/ethernet.h>
63 #include <sys/mac_provider.h>
64 #include <sys/strsun.h>
65 #include <sys/strsubr.h>
66 #include <inet/common.h>
67 #include <inet/nd.h>
68 #include <inet/ip.h>
69 #include <inet/tcp.h>
70 #include <sys/pattr.h>
71 #include <sys/gld.h>
72 #include "ostypes.h"
73 #include "common.h"
74 #include "oschtoe.h"
75 #include "sge.h"
76 #include "regs.h"
77 #include "ch.h" /* Chelsio Driver specific parameters */
78 #include "version.h"
79
80 /*
81 * Function prototypes.
82 */
83 static int ch_attach(dev_info_t *, ddi_attach_cmd_t);
84 static int ch_detach(dev_info_t *, ddi_detach_cmd_t);
85 static int ch_quiesce(dev_info_t *);
86 static void ch_free_dma_handles(ch_t *chp);
87 static void ch_set_name(ch_t *chp, int unit);
88 static void ch_free_name(ch_t *chp);
89 static void ch_get_prop(ch_t *chp);
90
91 #if defined(__sparc)
92 static void ch_free_dvma_handles(ch_t *chp);
93 #endif
94
95 /* GLD interfaces */
96 static int ch_reset(gld_mac_info_t *);
97 static int ch_start(gld_mac_info_t *);
98 static int ch_stop(gld_mac_info_t *);
99 static int ch_set_mac_address(gld_mac_info_t *, uint8_t *);
100 static int ch_set_multicast(gld_mac_info_t *, uint8_t *, int);
101 static int ch_ioctl(gld_mac_info_t *, queue_t *, mblk_t *);
102 static int ch_set_promiscuous(gld_mac_info_t *, int);
103 static int ch_get_stats(gld_mac_info_t *, struct gld_stats *);
104 static int ch_send(gld_mac_info_t *, mblk_t *);
105 static uint_t ch_intr(gld_mac_info_t *);
106
107 /*
108 * Data access requirements.
109 */
110 static struct ddi_device_acc_attr le_attr = {
111 DDI_DEVICE_ATTR_V0,
112 DDI_STRUCTURE_LE_ACC,
113 DDI_STRICTORDER_ACC
114 };
115
116 /*
117 * No swap mapping device attributes
118 */
119 static struct ddi_device_acc_attr null_attr = {
120 DDI_DEVICE_ATTR_V0,
121 DDI_NEVERSWAP_ACC,
122 DDI_STRICTORDER_ACC
123 };
124
125 /*
126 * STREAMS driver identification structure module_info(9s)
127 *
128 * driver limit values
129 */
130
131 static struct module_info ch_minfo = {
132 CHIDNUM, /* mi_idnum */
133 CHNAME, /* mi_idname */
134 CHMINPSZ, /* mi_minpsz */
135 CHMAXPSZ, /* mi_maxpsz */
136 CHHIWAT, /* mi_hiwat */
137 CHLOWAT /* mi_lowat */
138 };
139
140 /*
141 * STREAMS queue processing procedures qinit(9s)
142 *
143 * read queue procedures
144 */
145
146 static struct qinit ch_rinit = {
147 (int (*)()) NULL, /* qi_putp */
148 gld_rsrv, /* qi_srvp */
149 gld_open, /* qi_qopen */
150 gld_close, /* qi_qclose */
151 (int (*)()) NULL, /* qi_qadmin */
152 &ch_minfo, /* qi_minfo */
153 NULL /* qi_mstat */
154 };
155
156 /*
157 * STREAMS queue processing procedures qinit(9s)
158 *
159 * write queue procedures
160 */
161
162 static struct qinit ch_winit = {
163 gld_wput, /* qi_putp */
164 gld_wsrv, /* qi_srvp */
165 (int (*)()) NULL, /* qi_qopen */
166 (int (*)()) NULL, /* qi_qclose */
167 (int (*)()) NULL, /* qi_qadmin */
168 &ch_minfo, /* qi_minfo */
169 NULL /* qi_mstat */
170 };
171
172 /*
173 * STREAMS entity declaration structure - streamtab(9s)
174 */
175 static struct streamtab chinfo = {
176 &ch_rinit, /* read queue information */
177 &ch_winit, /* write queue information */
178 NULL, /* st_muxrinit */
179 NULL /* st_muxwrinit */
180 };
181
182 /*
183 * Device driver ops vector - cb_ops(9s)
184 *
185 * character/block entry points structure.
186 * chinfo identifies driver as a STREAMS driver.
187 */
188
189 static struct cb_ops cb_ch_ops = {
190 nulldev, /* cb_open */
191 nulldev, /* cb_close */
192 nodev, /* cb_strategy */
193 nodev, /* cb_print */
194 nodev, /* cb_dump */
195 nodev, /* cb_read */
196 nodev, /* cb_write */
197 nodev, /* cb_ioctl */
198 nodev, /* cb_devmap */
199 nodev, /* cb_mmap */
200 nodev, /* cb_segmap */
201 nochpoll, /* cb_chpoll */
202 ddi_prop_op, /* report driver property information - prop_op(9e) */
203 &chinfo, /* cb_stream */
204 #if defined(__sparc)
205 D_MP | D_64BIT,
206 #else
207 D_MP, /* cb_flag (supports multi-threading) */
208 #endif
209 CB_REV, /* cb_rev */
210 nodev, /* cb_aread */
211 nodev /* cb_awrite */
212 };
213
214 /*
215 * dev_ops(9S) structure
216 *
217 * Device Operations table, for autoconfiguration
218 */
219
220 static struct dev_ops ch_ops = {
221 DEVO_REV, /* devo_rev: dev_ops structure revision */
222 0, /* Initial driver reference count */
223 gld_getinfo, /* funcp: get driver information - getinfo(9e) */
224 nulldev, /* funcp: entry point obsolete - identify(9e) */
225 nulldev, /* funp: probe for device - probe(9e) */
226 ch_attach, /* funp: attach driver to dev_info - attach(9e) */
227 ch_detach, /* funp: detach driver to unload - detach(9e) */
228 nodev, /* funp: reset device (not supported) - dev_ops(9s) */
229 &cb_ch_ops, /* ptr to cb_ops structure */
230 NULL, /* ptr to nexus bus operations structure (leaf) */
231 NULL, /* funp: change device power level - power(9e) */
232 ch_quiesce, /* devo_quiesce */
233 };
234
235 /*
236 * modldrv(9s) structure
237 *
238 * Definition for module specific device driver linkage structures (modctl.h)
239 */
240
241 static struct modldrv modldrv = {
242 &mod_driverops, /* driver module */
243 VERSION,
244 &ch_ops, /* driver ops */
245 };
246
247 /*
248 * modlinkage(9s) structure
249 *
250 * module linkage base structure (modctl.h)
251 */
252
253 static struct modlinkage modlinkage = {
254 MODREV_1, /* revision # of system */
255 &modldrv, /* NULL terminated list of linkage structures */
256 NULL
257 };
258
259 /* ===================== start of STREAMS driver code ================== */
260
261 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
262 /*
263 * global pointer to toe per-driver control structure.
264 */
265 #define MAX_CARDS 4
266 ch_t *gchp[MAX_CARDS];
267 #endif
268
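/*
 * Global receive-buffer accounting.  _fini() sums buffers_in_use[] and
 * refuses to unload the module while any buffers are still outstanding.
 */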
269 kmutex_t in_use_l;
270 uint32_t buffers_in_use[SZ_INUSE];
271 uint32_t in_use_index;
272
273 /*
274 * Ethernet broadcast address definition.
275 */
276 static struct ether_addr etherbroadcastaddr = {
277 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
278 };
279
280 /*
281 * Module initialization functions.
282 *
283 * Routine Called by
284 * _init(9E) modload(9F)
285 * _info(9E) modinfo(9F)
286 * _fini(9E) modunload(9F)
287 */
288
289 /*
290 * _init(9E):
291 *
292 * Initial, one-time, resource allocation and data initialization.
293 */
294
295 int
296 _init(void)
297 {
298 int status;
299
300 status = mod_install(&modlinkage);
301
302 mutex_init(&in_use_l, NULL, MUTEX_DRIVER, NULL);
303
304 return (status);
305 }
306
307 /*
308 * _fini(9E): It is here that any device information that was allocated
309 * during the _init(9E) routine should be released and the module removed
310 * from the system. In the case of per-instance information, that information
311 * should be released in the _detach(9E) routine.
312 */
313
314 int
315 _fini(void)
316 {
317 int status;
318 int i;
319 uint32_t t = 0;
320
321 for (i = 0; i < SZ_INUSE; i++)
322 t += buffers_in_use[i];
323
324 if (t != 0)
325 return (DDI_FAILURE);
326
327 status = mod_remove(&modlinkage);
328
329 if (status == DDI_SUCCESS)
330 mutex_destroy(&in_use_l);
331
332 return (status);
333 }
334
335 int
336 _info(struct modinfo *modinfop)
337 {
338 int status;
339
340
341 status = mod_info(&modlinkage, modinfop);
342
343 return (status);
344 }
345
346 /*
347 * Attach(9E) - This is called on the open to the device. It creates
348 * an instance of the driver. In this routine we create the minor
349 * device node. The routine also initializes all per-unit
350 * mutexes and condition variables.
351 *
352 * If we were resuming a suspended instance of a device due to power
353 * management, then that would be handled here as well. For more on
354 * that subject see the man page for pm(9E)
355 *
356 * Interface exists: make available by filling in network interface
357 * record. System will initialize the interface when it is ready
358 * to accept packets.
359 */
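/*
 * Debug knobs, patchable with mdb(1): setting chdebug forces attach to
 * fail; setting ch_abort_debug drops into the kernel debugger on attach.
 */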
360 int chdebug = 0;
361 int ch_abort_debug = 0;
362
363 static int
364 ch_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
365 {
366 ch_t *chp;
367 int rv;
368 int unit;
369 #ifdef CH_DEBUG
370 int Version;
371 int VendorID;
372 int DeviceID;
373 int SubDeviceID;
374 int Command;
375 #endif
376 gld_mac_info_t *macinfo; /* GLD stuff follows */
377 char *driver;
378
379 if (ch_abort_debug)
380 debug_enter("ch_attach");
381
382 if (chdebug)
383 return (DDI_FAILURE);
384
385
386 if (cmd == DDI_ATTACH) {
387
388 unit = ddi_get_instance(dip);
389
390 driver = (char *)ddi_driver_name(dip);
391
392 PRINT(("driver %s unit: %d\n", driver, unit));
393
394 macinfo = gld_mac_alloc(dip);
395 if (macinfo == NULL) {
396 PRINT(("macinfo allocation failed\n"));
397 DEBUG_ENTER("ch_attach");
398 return (DDI_FAILURE);
399 }
400
401 chp = (ch_t *)kmem_zalloc(sizeof (ch_t), KM_SLEEP);
402
403 if (chp == NULL) {
404 PRINT(("zalloc of chp failed\n"));
405 DEBUG_ENTER("ch_attach");
406
407 gld_mac_free(macinfo);
408
409 return (DDI_FAILURE);
410 }
411
412 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
413 /* Solaris TOE support */
414 gchp[unit] = chp;
415 #endif
416
417 PRINT(("attach macinfo: %p chp: %p\n", macinfo, chp));
418
419 chp->ch_dip = dip;
420 chp->ch_macp = macinfo;
421 chp->ch_unit = unit;
422 ch_set_name(chp, unit);
423
424 /*
425 * map in PCI register spaces
426 *
427 * PCI register set 0 - PCI configuration space
428 * PCI register set 1 - T101 card register space #1
429 */
430
431 /* map in T101 PCI configuration space */
432 rv = pci_config_setup(
433 dip, /* ptr to dev's dev_info struct */
434 &chp->ch_hpci); /* ptr to data access handle */
435
436 if (rv != DDI_SUCCESS) {
437 PRINT(("PCI config setup failed\n"));
438 DEBUG_ENTER("ch_attach");
439 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
440 gchp[unit] = NULL;
441 #endif
442 cmn_err(CE_WARN, "%s: ddi_config_setup PCI error %d\n",
443 chp->ch_name, rv);
444
445 ch_free_name(chp);
446 kmem_free(chp, sizeof (ch_t));
447 gld_mac_free(macinfo);
448
449 return (DDI_FAILURE);
450 }
451
452 ch_get_prop(chp);
453
454 macinfo->gldm_devinfo = dip;
455 macinfo->gldm_private = (caddr_t)chp;
456 macinfo->gldm_reset = ch_reset;
457 macinfo->gldm_start = ch_start;
458 macinfo->gldm_stop = ch_stop;
459 macinfo->gldm_set_mac_addr = ch_set_mac_address;
460 macinfo->gldm_send = ch_send;
461 macinfo->gldm_set_promiscuous = ch_set_promiscuous;
462 macinfo->gldm_get_stats = ch_get_stats;
463 macinfo->gldm_ioctl = ch_ioctl;
464 macinfo->gldm_set_multicast = ch_set_multicast;
465 macinfo->gldm_intr = ch_intr;
466 macinfo->gldm_mctl = NULL;
467
468 macinfo->gldm_ident = driver;
469 macinfo->gldm_type = DL_ETHER;
470 macinfo->gldm_minpkt = 0;
471 macinfo->gldm_maxpkt = chp->ch_mtu;
472 macinfo->gldm_addrlen = ETHERADDRL;
473 macinfo->gldm_saplen = -2;
474 macinfo->gldm_ppa = unit;
475 macinfo->gldm_broadcast_addr =
476 etherbroadcastaddr.ether_addr_octet;
477
478
479 /*
480 * do a power reset of card
481 *
482 * 1. set PwrState to D3hot (3)
483 * 2. clear PwrState flags
484 */
485 pci_config_put32(chp->ch_hpci, 0x44, 3);
486 pci_config_put32(chp->ch_hpci, 0x44, 0);
487
488 /* delay .5 sec */
489 DELAY(500000);
490
491 #ifdef CH_DEBUG
492 VendorID = pci_config_get16(chp->ch_hpci, 0);
493 DeviceID = pci_config_get16(chp->ch_hpci, 2);
494 SubDeviceID = pci_config_get16(chp->ch_hpci, 0x2e);
495 Command = pci_config_get16(chp->ch_hpci, 4);
496
497 PRINT(("IDs: %x,%x,%x\n", VendorID, DeviceID, SubDeviceID));
498 PRINT(("Command: %x\n", Command));
499 #endif
500 /* map in T101 register space (BAR0) */
501 rv = ddi_regs_map_setup(
502 dip, /* ptr to dev's dev_info struct */
503 BAR0, /* register address space */
504 &chp->ch_bar0, /* address of offset */
505 0, /* offset into register address space */
506 0, /* length mapped (everything) */
507 &le_attr, /* ptr to device attr structure */
508 &chp->ch_hbar0); /* ptr to data access handle */
509
510 if (rv != DDI_SUCCESS) {
511 PRINT(("map registers failed\n"));
512 DEBUG_ENTER("ch_attach");
513 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
514 gchp[unit] = NULL;
515 #endif
516 cmn_err(CE_WARN,
517 "%s: ddi_regs_map_setup BAR0 error %d\n",
518 chp->ch_name, rv);
519
520 pci_config_teardown(&chp->ch_hpci);
521 ch_free_name(chp);
522 kmem_free(chp, sizeof (ch_t));
523 gld_mac_free(macinfo);
524
525 return (DDI_FAILURE);
526 }
527
528 #ifdef CH_DEBUG
529 Version = ddi_get32(chp->ch_hbar0,
530 (uint32_t *)(chp->ch_bar0+0x6c));
531 #endif
532
533 (void) ddi_dev_regsize(dip, 1, &chp->ch_bar0sz);
534
535 PRINT(("PCI BAR0 space addr: %p\n", chp->ch_bar0));
536 PRINT(("PCI BAR0 space size: %x\n", chp->ch_bar0sz));
537 PRINT(("PE Version: %x\n", Version));
538
539 /*
540 * Add interrupt to system.
541 */
542 rv = ddi_get_iblock_cookie(
543 dip, /* ptr to dev's dev_info struct */
544 0, /* interrupt # (0) */
545 &chp->ch_icookp); /* ptr to interrupt block cookie */
546
547 if (rv != DDI_SUCCESS) {
548 PRINT(("iblock cookie failed\n"));
549 DEBUG_ENTER("ch_attach");
550 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
551 gchp[unit] = NULL;
552 #endif
553 cmn_err(CE_WARN,
554 "%s: ddi_get_iblock_cookie error %d\n",
555 chp->ch_name, rv);
556
557 ddi_regs_map_free(&chp->ch_hbar0);
558 pci_config_teardown(&chp->ch_hpci);
559 ch_free_name(chp);
560 kmem_free(chp, sizeof (ch_t));
561 gld_mac_free(macinfo);
562
563 return (DDI_FAILURE);
564 }
565
566 /*
567 * add interrupt handler before card setup.
568 */
569 rv = ddi_add_intr(
570 dip, /* ptr to dev's dev_info struct */
571 0, /* interrupt # (0) */
572 0, /* iblock cookie ptr (NULL) */
573 0, /* idevice cookie ptr (NULL) */
574 gld_intr, /* function ptr to interrupt handler */
575 (caddr_t)macinfo); /* handler argument */
576
577 if (rv != DDI_SUCCESS) {
578 PRINT(("add_intr failed\n"));
579 DEBUG_ENTER("ch_attach");
580 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
581 gchp[unit] = NULL;
582 #endif
583 cmn_err(CE_WARN, "%s: ddi_add_intr error %d\n",
584 chp->ch_name, rv);
585
586 ddi_regs_map_free(&chp->ch_hbar0);
587 pci_config_teardown(&chp->ch_hpci);
588 ch_free_name(chp);
589 kmem_free(chp, sizeof (ch_t));
590 gld_mac_free(macinfo);
591
592 return (DDI_FAILURE);
593 }
594
595 /* initialize all the remaining per-card locks */
596 mutex_init(&chp->ch_lock, NULL, MUTEX_DRIVER,
597 (void *)chp->ch_icookp);
598 mutex_init(&chp->ch_intr, NULL, MUTEX_DRIVER,
599 (void *)chp->ch_icookp);
600 mutex_init(&chp->ch_mc_lck, NULL, MUTEX_DRIVER, NULL);
601 mutex_init(&chp->ch_dh_lck, NULL, MUTEX_DRIVER, NULL);
602 mutex_init(&chp->mac_lock, NULL, MUTEX_DRIVER, NULL);
603
604 /* ------- initialize Chelsio card ------- */
605
606 if (pe_attach(chp)) {
607 PRINT(("card initialization failed\n"));
608 DEBUG_ENTER("ch_attach");
609 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
610 gchp[unit] = NULL;
611 #endif
612 cmn_err(CE_WARN, "%s: pe_attach failed\n",
613 chp->ch_name);
614
615 mutex_destroy(&chp->ch_lock);
616 mutex_destroy(&chp->ch_intr);
617 mutex_destroy(&chp->ch_mc_lck);
618 mutex_destroy(&chp->ch_dh_lck);
619 mutex_destroy(&chp->mac_lock);
620 ddi_remove_intr(dip, 0, chp->ch_icookp);
621 ddi_regs_map_free(&chp->ch_hbar0);
622 pci_config_teardown(&chp->ch_hpci);
623 ch_free_name(chp);
624 kmem_free(chp, sizeof (ch_t));
625 gld_mac_free(macinfo);
626
627 return (DDI_FAILURE);
628 }
629
630 /* ------- done with Chelsio card ------- */
631
632 /* now can set mac address */
633 macinfo->gldm_vendor_addr = pe_get_mac(chp);
634
635 macinfo->gldm_cookie = chp->ch_icookp;
636
637 /*
638 * We only enable checksum offload for T2 architectures.
639 */
640 if (is_T2(chp)) {
641 if (chp->ch_config.cksum_enabled)
642 macinfo->gldm_capabilities |=
643 GLD_CAP_CKSUM_FULL_V4;
644 } else
645 chp->ch_config.cksum_enabled = 0;
646
647 rv = gld_register(
648 dip, /* ptr to dev's dev_info struct */
649 (char *)ddi_driver_name(dip), /* driver name */
650 macinfo); /* ptr to gld macinfo buffer */
651
652 /*
653 * The Jumbo frames capability is not yet available
654 * in Solaris 10 so registration will fail. MTU > 1500 is
655 * supported in Update 1.
656 */
657 if (rv != DDI_SUCCESS) {
658 cmn_err(CE_NOTE, "MTU > 1500 not supported by GLD.\n");
659 cmn_err(CE_NOTE, "Setting MTU to 1500. \n");
660 macinfo->gldm_maxpkt = chp->ch_mtu = 1500;
661 rv = gld_register(
662 dip, /* ptr to dev's dev_info struct */
663 (char *)ddi_driver_name(dip), /* driver name */
664 macinfo); /* ptr to gld macinfo buffer */
665 }
666
667
668 if (rv != DDI_SUCCESS) {
669 PRINT(("gld_register failed\n"));
670 DEBUG_ENTER("ch_attach");
671
672 cmn_err(CE_WARN, "%s: gld_register error %d\n",
673 chp->ch_name, rv);
674
675 pe_detach(chp);
676
677 mutex_destroy(&chp->ch_lock);
678 mutex_destroy(&chp->ch_intr);
679 mutex_destroy(&chp->ch_mc_lck);
680 mutex_destroy(&chp->ch_dh_lck);
681 mutex_destroy(&chp->mac_lock);
682 ddi_remove_intr(dip, 0, chp->ch_icookp);
683 ddi_regs_map_free(&chp->ch_hbar0);
684 pci_config_teardown(&chp->ch_hpci);
685 ch_free_name(chp);
686 kmem_free(chp, sizeof (ch_t));
687 gld_mac_free(macinfo);
688
689 return (DDI_FAILURE);
690 }
691
692 /*
693 * print a banner at boot time (verbose mode), announcing
694 * the device pointed to by dip
695 */
696 ddi_report_dev(dip);
697
698 if (ch_abort_debug)
699 debug_enter("ch_attach");
700
701 return (DDI_SUCCESS);
702
703 } else if (cmd == DDI_RESUME) {
704 PRINT(("attach resume\n"));
705 DEBUG_ENTER("ch_attach");
706 if ((chp = (ch_t *)ddi_get_driver_private(dip)) == NULL)
707 return (DDI_FAILURE);
708
709 mutex_enter(&chp->ch_lock);
710 chp->ch_flags &= ~PESUSPENDED;
711 mutex_exit(&chp->ch_lock);
712 return (DDI_SUCCESS);
713 } else {
714 PRINT(("attach: bad command\n"));
715 DEBUG_ENTER("ch_attach");
716
717 return (DDI_FAILURE);
718 }
719 }
720
721 /*
722 * quiesce(9E) entry point.
723 *
724 * This function is called when the system is single-threaded at high
725 * PIL with preemption disabled. Therefore, this function must not
726 * block.
727 *
728 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
729 * DDI_FAILURE indicates an error condition and should almost never happen.
730 */
731 static int
732 ch_quiesce(dev_info_t *dip)
733 {
734 ch_t *chp;
735 gld_mac_info_t *macinfo =
736 (gld_mac_info_t *)ddi_get_driver_private(dip);
737
738 chp = (ch_t *)macinfo->gldm_private;
739 chdebug = 0;
740 ch_abort_debug = 0;
741
742 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
743 gchp[chp->ch_unit] = NULL;
744 #endif
745
746 /* Set driver state for this card to IDLE */
747 chp->ch_state = PEIDLE;
748
749 /*
750 * Do a power reset of card
751 * 1. set PwrState to D3hot (3)
752 * 2. clear PwrState flags
753 */
754 pci_config_put32(chp->ch_hpci, 0x44, 3);
755 pci_config_put32(chp->ch_hpci, 0x44, 0);
756
757 /* Wait 0.5 sec */
758 drv_usecwait(500000);
759
760 /*
761 * Now stop the chip
762 */
763 chp->ch_refcnt = 0;
764 chp->ch_state = PESTOP;
765
766 /* Disables all interrupts */
767 t1_interrupts_disable(chp);
768
769 /* Disables SGE queues */
770 t1_write_reg_4(chp->sge->obj, A_SG_CONTROL, 0x0);
771 t1_write_reg_4(chp->sge->obj, A_SG_INT_CAUSE, 0x0);
772
773 return (DDI_SUCCESS);
774 }
775
776 static int
777 ch_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
778 {
779 gld_mac_info_t *macinfo;
780 ch_t *chp;
781
782 if (cmd == DDI_DETACH) {
783 macinfo = (gld_mac_info_t *)ddi_get_driver_private(dip);
784 chp = (ch_t *)macinfo->gldm_private;
785
786 /*
787 * fail detach if there are outstanding mblks still
788 * in use somewhere.
789 */
790 DEBUG_ENTER("ch_detach");
791 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
792 mutex_enter(&chp->ch_lock);
793 if (chp->ch_refcnt > 0) {
794 mutex_exit(&chp->ch_lock);
795 return (DDI_FAILURE);
796 }
797 mutex_exit(&chp->ch_lock);
798 gchp[chp->ch_unit] = NULL;
799 #endif
800 /*
801 * set driver state for this card to IDLE. We're
802 * shutting down.
803 */
804 mutex_enter(&chp->ch_lock);
805 chp->ch_state = PEIDLE;
806 mutex_exit(&chp->ch_lock);
807
808 /*
809 * do a power reset of card
810 *
811 * 1. set PwrState to D3hot (3)
812 * 2. clear PwrState flags
813 */
814 pci_config_put32(chp->ch_hpci, 0x44, 3);
815 pci_config_put32(chp->ch_hpci, 0x44, 0);
816
817 /* delay .5 sec */
818 DELAY(500000);
819
820 /* free register resources */
821 (void) gld_unregister(macinfo);
822
823 /* make sure no interrupts while shutting down card */
824 ddi_remove_intr(dip, 0, chp->ch_icookp);
825
826 /*
827 * reset device and recover resources
828 */
829 pe_detach(chp);
830
831 ddi_regs_map_free(&chp->ch_hbar0);
832 pci_config_teardown(&chp->ch_hpci);
833 mutex_destroy(&chp->ch_lock);
834 mutex_destroy(&chp->ch_intr);
835 mutex_destroy(&chp->ch_mc_lck);
836 mutex_destroy(&chp->ch_dh_lck);
837 mutex_destroy(&chp->mac_lock);
838 ch_free_dma_handles(chp);
839 #if defined(__sparc)
840 ch_free_dvma_handles(chp);
841 #endif
842 ch_free_name(chp);
843 kmem_free(chp, sizeof (ch_t));
844 gld_mac_free(macinfo);
845
846 DEBUG_ENTER("ch_detach end");
847
848 return (DDI_SUCCESS);
849
850 } else if ((cmd == DDI_SUSPEND) || (cmd == DDI_PM_SUSPEND)) {
851 DEBUG_ENTER("suspend");
852 if ((chp = (ch_t *)ddi_get_driver_private(dip)) == NULL)
853 return (DDI_FAILURE);
854 mutex_enter(&chp->ch_lock);
855 chp->ch_flags |= PESUSPENDED;
856 mutex_exit(&chp->ch_lock);
857 #ifdef TODO
858 /* Un-initialize (STOP) T101 */
859 #endif
860 return (DDI_SUCCESS);
861 } else
862 return (DDI_FAILURE);
863 }
864
865 /*
866 * ch_alloc_dma_mem
867 *
868 * allocates DMA handle
869 * allocates kernel memory
870 * allocates DMA access handle
871 *
872 * chp - per-board descriptor
873 * type - byteswap mapping?
874 * flags - type of mapping
875 * size - # bytes mapped
876 * paddr - physical address
877 * dh - ddi dma handle
878 * ah - ddi access handle
879 */
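/*
 * Typical call, as a sketch only:
 *
 *	uint64_t pa;
 *	ulong_t dh, ah;
 *	void *va;
 *
 *	va = ch_alloc_dma_mem(chp, 0, DMA_SMALN, len, &pa, &dh, &ah);
 *	if (va == NULL)
 *		return;		(allocation failed)
 *	...
 *	ch_free_dma_mem(dh, ah);
 */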
880
881 void *
882 ch_alloc_dma_mem(ch_t *chp, int type, int flags, int size, uint64_t *paddr,
883 ulong_t *dh, ulong_t *ah)
884 {
885 ddi_dma_attr_t ch_dma_attr;
886 ddi_dma_cookie_t cookie;
887 ddi_dma_handle_t ch_dh;
888 ddi_acc_handle_t ch_ah;
889 ddi_device_acc_attr_t *dev_attrp;
890 caddr_t ch_vaddr;
891 size_t rlen;
892 uint_t count;
893 uint_t mapping;
894 uint_t align;
895 uint_t rv;
896 uint_t direction;
897
898 mapping = (flags&DMA_STREAM)?DDI_DMA_STREAMING:DDI_DMA_CONSISTENT;
899 if (flags & DMA_4KALN)
900 align = 0x4000;
901 else if (flags & DMA_SMALN)
902 align = chp->ch_sm_buf_aln;
903 else if (flags & DMA_BGALN)
904 align = chp->ch_bg_buf_aln;
905 else {
906 cmn_err(CE_WARN, "ch_alloc_dma_mem(%s): bad alignment flag\n",
907 chp->ch_name);
908 return (0);
909 }
910 direction = (flags&DMA_OUT)?DDI_DMA_WRITE:DDI_DMA_READ;
911
912 /*
913 * dynamically create a dma attribute structure
914 */
915 ch_dma_attr.dma_attr_version = DMA_ATTR_V0;
916 ch_dma_attr.dma_attr_addr_lo = 0;
917 ch_dma_attr.dma_attr_addr_hi = 0xffffffffffffffff;
918 ch_dma_attr.dma_attr_count_max = 0x00ffffff;
919 ch_dma_attr.dma_attr_align = align;
920 ch_dma_attr.dma_attr_burstsizes = 0xfff;
921 ch_dma_attr.dma_attr_minxfer = 1;
922 ch_dma_attr.dma_attr_maxxfer = 0x00ffffff;
923 ch_dma_attr.dma_attr_seg = 0xffffffff;
924 ch_dma_attr.dma_attr_sgllen = 1;
925 ch_dma_attr.dma_attr_granular = 1;
926 ch_dma_attr.dma_attr_flags = 0;
927
928 rv = ddi_dma_alloc_handle(
929 chp->ch_dip, /* device dev_info structure */
930 &ch_dma_attr, /* DMA attributes */
931 DDI_DMA_SLEEP, /* Wait if no memory */
932 NULL, /* no argument to callback */
933 &ch_dh); /* DMA handle */
934 if (rv != DDI_SUCCESS) {
935
936 cmn_err(CE_WARN,
937 "%s: ch_alloc_dma_mem: ddi_dma_alloc_handle error %d\n",
938 chp->ch_name, rv);
939
940 return (0);
941 }
942
943 /* set byte order for data xfer */
944 if (type)
945 dev_attrp = &null_attr;
946 else
947 dev_attrp = &le_attr;
948
949 rv = ddi_dma_mem_alloc(
950 ch_dh, /* dma handle */
951 size, /* size desired allocate */
952 dev_attrp, /* access attributes */
953 mapping,
954 DDI_DMA_SLEEP, /* wait for resources */
955 NULL, /* no argument */
956 &ch_vaddr, /* allocated memory */
957 &rlen, /* real size allocated */
958 &ch_ah); /* data access handle */
959 if (rv != DDI_SUCCESS) {
960 ddi_dma_free_handle(&ch_dh);
961
962 cmn_err(CE_WARN,
963 "%s: ch_alloc_dma_mem: ddi_dma_mem_alloc error %d\n",
964 chp->ch_name, rv);
965
966 return (0);
967 }
968
969 rv = ddi_dma_addr_bind_handle(
970 ch_dh, /* dma handle */
971 (struct as *)0, /* kernel address space */
972 ch_vaddr, /* virtual address */
973 rlen, /* length of object */
974 direction|mapping,
975 DDI_DMA_SLEEP, /* Wait for resources */
976 NULL, /* no argument */
977 &cookie, /* dma cookie */
978 &count);
979 if (rv != DDI_DMA_MAPPED) {
980 ddi_dma_mem_free(&ch_ah);
981 ddi_dma_free_handle(&ch_dh);
982
983 cmn_err(CE_WARN,
984 "%s: ch_alloc_dma_mem: ddi_dma_addr_bind_handle error %d\n",
985 chp->ch_name, rv);
986
987 return (0);
988 }
989
990 if (count != 1) {
991 cmn_err(CE_WARN,
992 "%s: ch_alloc_dma_mem: ch_alloc_dma_mem cookie count %d\n",
993 chp->ch_name, count);
994 PRINT(("ch_alloc_dma_mem cookie count %d\n", count));
995
996 ddi_dma_mem_free(&ch_ah);
997 ddi_dma_free_handle(&ch_dh);
998
999 return (0);
1000 }
1001
1002 *paddr = cookie.dmac_laddress;
1003
1004 *(ddi_dma_handle_t *)dh = ch_dh;
1005 *(ddi_acc_handle_t *)ah = ch_ah;
1006
1007 return ((void *)ch_vaddr);
1008 }
1009
1010 /*
1011 * ch_free_dma_mem
1012 *
1013 * frees resources allocated by ch_alloc_dma_mem()
1014 *
1015 * frees DMA handle
1016 * frees kernel memory
1017 * frees DMA access handle
1018 */
1019
1020 void
1021 ch_free_dma_mem(ulong_t dh, ulong_t ah)
1022 {
1023 ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dh;
1024 ddi_acc_handle_t ch_ah = (ddi_acc_handle_t)ah;
1025
1026 (void) ddi_dma_unbind_handle(ch_dh);
1027 ddi_dma_mem_free(&ch_ah);
1028 ddi_dma_free_handle(&ch_dh);
1029 }
1030
1031 /*
1032 * create a dma handle and return a dma handle entry.
1033 */
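/*
 * The returned entry is meant to live on the per-board free list
 * (chp->ch_dh, protected by ch_dh_lck): ch_bind_dma_handle() pops an
 * entry from that list and ch_unbind_dma_handle() pushes it back.
 */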
1034 free_dh_t *
1035 ch_get_dma_handle(ch_t *chp)
1036 {
1037 ddi_dma_handle_t ch_dh;
1038 ddi_dma_attr_t ch_dma_attr;
1039 free_dh_t *dhe;
1040 int rv;
1041
1042 dhe = (free_dh_t *)kmem_zalloc(sizeof (*dhe), KM_SLEEP);
1043
1044 ch_dma_attr.dma_attr_version = DMA_ATTR_V0;
1045 ch_dma_attr.dma_attr_addr_lo = 0;
1046 ch_dma_attr.dma_attr_addr_hi = 0xffffffffffffffff;
1047 ch_dma_attr.dma_attr_count_max = 0x00ffffff;
1048 ch_dma_attr.dma_attr_align = 1;
1049 ch_dma_attr.dma_attr_burstsizes = 0xfff;
1050 ch_dma_attr.dma_attr_minxfer = 1;
1051 ch_dma_attr.dma_attr_maxxfer = 0x00ffffff;
1052 ch_dma_attr.dma_attr_seg = 0xffffffff;
1053 ch_dma_attr.dma_attr_sgllen = 5;
1054 ch_dma_attr.dma_attr_granular = 1;
1055 ch_dma_attr.dma_attr_flags = 0;
1056
1057 rv = ddi_dma_alloc_handle(
1058 chp->ch_dip, /* device dev_info */
1059 &ch_dma_attr, /* DMA attributes */
1060 DDI_DMA_SLEEP, /* Wait if no memory */
1061 NULL, /* no argument */
1062 &ch_dh); /* DMA handle */
1063 if (rv != DDI_SUCCESS) {
1064
1065 cmn_err(CE_WARN,
1066 "%s: ch_get_dma_handle: ddi_dma_alloc_handle error %d\n",
1067 chp->ch_name, rv);
1068
1069 kmem_free(dhe, sizeof (*dhe));
1070
1071 return ((free_dh_t *)0);
1072 }
1073
1074 dhe->dhe_dh = (ulong_t)ch_dh;
1075
1076 return (dhe);
1077 }
1078
1079 /*
1080 * free the linked list of dma descriptor entries.
1081 */
1082 static void
1083 ch_free_dma_handles(ch_t *chp)
1084 {
1085 free_dh_t *dhe, *the;
1086
1087 dhe = chp->ch_dh;
1088 while (dhe) {
1089 ddi_dma_free_handle((ddi_dma_handle_t *)&dhe->dhe_dh);
1090 the = dhe;
1091 dhe = dhe->dhe_next;
1092 kmem_free(the, sizeof (*the));
1093 }
1094 chp->ch_dh = NULL;
1095 }
1096
1097 /*
1098 * ch_bind_dma_handle()
1099 *
1100 * returns # of entries used off of cmdQ_ce_t array to hold physical addrs.
1101 *
1102 * chp - per-board descriptor
1103 * size - # bytes mapped
1104 * vaddr - virtual address
1105 * cmp - array of cmdQ_ce_t entries
1106 * cnt - # free entries in cmp array
1107 */
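/*
 * A return value of 0 means no pre-allocated handle was available, the
 * bind failed, or the cookie count exceeded the space supplied in cmp[].
 * On success the last cmdQ_ce_t filled in carries the free_dh_t pointer
 * (ce_dh) so ch_unbind_dma_handle() can release the mapping later.
 */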
1108
1109 uint32_t
1110 ch_bind_dma_handle(ch_t *chp, int size, caddr_t vaddr, cmdQ_ce_t *cmp,
1111 uint32_t cnt)
1112 {
1113 ddi_dma_cookie_t cookie;
1114 ddi_dma_handle_t ch_dh;
1115 uint_t count;
1116 uint32_t n = 1;
1117 free_dh_t *dhe;
1118 uint_t rv;
1119
1120 mutex_enter(&chp->ch_dh_lck);
1121 if ((dhe = chp->ch_dh) != NULL) {
1122 chp->ch_dh = dhe->dhe_next;
1123 }
1124 mutex_exit(&chp->ch_dh_lck);
1125
1126 if (dhe == NULL) {
1127 return (0);
1128 }
1129
1130 ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1131
1132 rv = ddi_dma_addr_bind_handle(
1133 ch_dh, /* dma handle */
1134 (struct as *)0, /* kernel address space */
1135 vaddr, /* virtual address */
1136 size, /* length of object */
1137 DDI_DMA_WRITE|DDI_DMA_STREAMING,
1138 DDI_DMA_SLEEP, /* Wait for resources */
1139 NULL, /* no argument */
1140 &cookie, /* dma cookie */
1141 &count);
1142 if (rv != DDI_DMA_MAPPED) {
1143
1144 /* return dma header descriptor back to free list */
1145 mutex_enter(&chp->ch_dh_lck);
1146 dhe->dhe_next = chp->ch_dh;
1147 chp->ch_dh = dhe;
1148 mutex_exit(&chp->ch_dh_lck);
1149
1150 cmn_err(CE_WARN,
1151 "%s: ch_bind_dma_handle: ddi_dma_addr_bind_handle err %d\n",
1152 chp->ch_name, rv);
1153
1154 return (0);
1155 }
1156
1157 /*
1158 * abort if we've run out of space
1159 */
1160 if (count > cnt) {
1161 /* return dma header descriptor back to free list */
1162 mutex_enter(&chp->ch_dh_lck);
1163 dhe->dhe_next = chp->ch_dh;
1164 chp->ch_dh = dhe;
1165 mutex_exit(&chp->ch_dh_lck);
1166
1167 return (0);
1168 }
1169
1170 cmp->ce_pa = cookie.dmac_laddress;
1171 cmp->ce_dh = NULL;
1172 cmp->ce_len = cookie.dmac_size;
1173 cmp->ce_mp = NULL;
1174 cmp->ce_flg = DH_DMA;
1175
1176 while (--count) {
1177 cmp++;
1178 n++;
1179 ddi_dma_nextcookie(ch_dh, &cookie);
1180 cmp->ce_pa = cookie.dmac_laddress;
1181 cmp->ce_dh = NULL;
1182 cmp->ce_len = cookie.dmac_size;
1183 cmp->ce_mp = NULL;
1184 cmp->ce_flg = DH_DMA;
1185 }
1186
1187 cmp->ce_dh = dhe;
1188
1189 return (n);
1190 }
1191
1192 /*
1193 * ch_unbind_dma_handle()
1194 *
1195 * frees resources allocated by ch_bind_dma_handle().
1196 *
1197 * frees DMA handle
1198 */
1199
1200 void
1201 ch_unbind_dma_handle(ch_t *chp, free_dh_t *dhe)
1202 {
1203 ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1204
1205 if (ddi_dma_unbind_handle(ch_dh))
1206 cmn_err(CE_WARN, "%s: ddi_dma_unbind_handle failed",
1207 chp->ch_name);
1208
1209 mutex_enter(&chp->ch_dh_lck);
1210 dhe->dhe_next = chp->ch_dh;
1211 chp->ch_dh = dhe;
1212 mutex_exit(&chp->ch_dh_lck);
1213 }
1214
1215 #if defined(__sparc)
1216 /*
1217 * DVMA stuff. Solaris only.
1218 */
1219
1220 /*
1221 * create a dvma handle and return a dma handle entry.
1222 * DVMA is on sparc only!
1223 */
1224
1225 free_dh_t *
1226 ch_get_dvma_handle(ch_t *chp)
1227 {
1228 ddi_dma_handle_t ch_dh;
1229 ddi_dma_lim_t ch_dvma_attr;
1230 free_dh_t *dhe;
1231 int rv;
1232
1233 dhe = (free_dh_t *)kmem_zalloc(sizeof (*dhe), KM_SLEEP);
1234
1235 ch_dvma_attr.dlim_addr_lo = 0;
1236 ch_dvma_attr.dlim_addr_hi = 0xffffffff;
1237 ch_dvma_attr.dlim_cntr_max = 0xffffffff;
1238 ch_dvma_attr.dlim_burstsizes = 0xfff;
1239 ch_dvma_attr.dlim_minxfer = 1;
1240 ch_dvma_attr.dlim_dmaspeed = 0;
1241
1242 rv = dvma_reserve(
1243 chp->ch_dip, /* device dev_info */
1244 &ch_dvma_attr, /* DVMA attributes */
1245 3, /* number of pages */
1246 &ch_dh); /* DVMA handle */
1247
1248 if (rv != DDI_SUCCESS) {
1249
1250 cmn_err(CE_WARN,
1251 "%s: ch_get_dvma_handle: dvma_reserve() error %d\n",
1252 chp->ch_name, rv);
1253
1254 kmem_free(dhe, sizeof (*dhe));
1255
1256 return ((free_dh_t *)0);
1257 }
1258
1259 dhe->dhe_dh = (ulong_t)ch_dh;
1260
1261 return (dhe);
1262 }
1263
1264 /*
1265 * free the linked list of dvma descriptor entries.
1266 * DVMA is only on sparc!
1267 */
1268
1269 static void
1270 ch_free_dvma_handles(ch_t *chp)
1271 {
1272 free_dh_t *dhe, *the;
1273
1274 dhe = chp->ch_vdh;
1275 while (dhe) {
1276 dvma_release((ddi_dma_handle_t)dhe->dhe_dh);
1277 the = dhe;
1278 dhe = dhe->dhe_next;
1279 kmem_free(the, sizeof (*the));
1280 }
1281 chp->ch_vdh = NULL;
1282 }
1283
1284 /*
1285 * ch_bind_dvma_handle()
1286 *
1287 * returns # of entries used off of cmdQ_ce_t array to hold physical addrs.
1288 * DVMA in sparc only
1289 *
1290 * chp - per-board descriptor
1291 * size - # bytes mapped
1292 * vaddr - virtual address
1293 * cmp - array of cmdQ_ce_t entries
1294 * cnt - # free entries in cmp array
1295 */
1296
1297 uint32_t
1298 ch_bind_dvma_handle(ch_t *chp, int size, caddr_t vaddr, cmdQ_ce_t *cmp,
1299 uint32_t cnt)
1300 {
1301 ddi_dma_cookie_t cookie;
1302 ddi_dma_handle_t ch_dh;
1303 uint32_t n = 1;
1304 free_dh_t *dhe;
1305
1306 mutex_enter(&chp->ch_dh_lck);
1307 if ((dhe = chp->ch_vdh) != NULL) {
1308 chp->ch_vdh = dhe->dhe_next;
1309 }
1310 mutex_exit(&chp->ch_dh_lck);
1311
1312 if (dhe == NULL) {
1313 return (0);
1314 }
1315
1316 ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1317 n = cnt;
1318
1319 dvma_kaddr_load(
1320 ch_dh, /* dvma handle */
1321 vaddr, /* virtual address */
1322 size, /* length of object */
1323 0, /* start at index 0 */
1324 &cookie);
1325
1326 dvma_sync(ch_dh, 0, DDI_DMA_SYNC_FORDEV);
1327
1328 cookie.dmac_notused = 0;
1329 n = 1;
1330
1331 cmp->ce_pa = cookie.dmac_laddress;
1332 cmp->ce_dh = dhe;
1333 cmp->ce_len = cookie.dmac_size;
1334 cmp->ce_mp = NULL;
1335 cmp->ce_flg = DH_DVMA; /* indicate a dvma descriptor */
1336
1337 return (n);
1338 }
1339
1340 /*
1341 * ch_unbind_dvma_handle()
1342 *
1343 * frees resources allocated by ch_bind_dvma_handle().
1344 *
1345 * frees DMA handle
1346 */
1347
1348 void
1349 ch_unbind_dvma_handle(ch_t *chp, free_dh_t *dhe)
1350 {
1351 ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dhe->dhe_dh;
1352
1353 dvma_unload(ch_dh, 0, -1);
1354
1355 mutex_enter(&chp->ch_dh_lck);
1356 dhe->dhe_next = chp->ch_vdh;
1357 chp->ch_vdh = dhe;
1358 mutex_exit(&chp->ch_dh_lck);
1359 }
1360
1361 #endif /* defined(__sparc) */
1362
1363 /*
1364 * send received packet up stream.
1365 *
1366 * if driver has been stopped, then we drop the message.
1367 */
1368 void
1369 ch_send_up(ch_t *chp, mblk_t *mp, uint32_t cksum, int flg)
1370 {
1371 /*
1372 * probably do not need a lock here. When we set PESTOP in
1373 * ch_stop() a packet could have just passed here and gone
1374 * upstream. The next one will be dropped.
1375 */
1376 if (chp->ch_state == PERUNNING) {
1377 /*
1378 * note that flg will not be set unless enable_checksum_offload
1379 * set in /etc/system (see sge.c).
1380 */
1381 if (flg)
1382 mac_hcksum_set(mp, 0, 0, 0, cksum, HCK_FULLCKSUM);
1383 gld_recv(chp->ch_macp, mp);
1384 } else {
1385 freemsg(mp);
1386 }
1387 }
1388
1389 /*
1390 * unblock gld driver.
1391 */
1392 void
1393 ch_gld_ok(ch_t *chp)
1394 {
1395 gld_sched(chp->ch_macp);
1396 }
1397
1398
1399 /*
1400 * reset the card.
1401 *
1402 * Note: we only do this after the card has been initialized.
1403 */
1404 static int
1405 ch_reset(gld_mac_info_t *mp)
1406 {
1407 ch_t *chp;
1408
1409 if (mp == NULL) {
1410 return (GLD_FAILURE);
1411 }
1412
1413 chp = (ch_t *)mp->gldm_private;
1414
1415 if (chp == NULL) {
1416 return (GLD_FAILURE);
1417 }
1418
1419 #ifdef NOTYET
1420 /*
1421 * do a reset of card
1422 *
1423 * 1. set PwrState to D3hot (3)
1424 * 2. clear PwrState flags
1425 */
1426 /*
1427 * When we did this, the card didn't start. First guess is that
1428 * the initialization is not quite correct. For now, we don't
1429 * reset things.
1430 */
1431 if (chp->ch_hpci) {
1432 pci_config_put32(chp->ch_hpci, 0x44, 3);
1433 pci_config_put32(chp->ch_hpci, 0x44, 0);
1434
1435 /* delay .5 sec */
1436 DELAY(500000);
1437 }
1438 #endif
1439
1440 return (GLD_SUCCESS);
1441 }
1442
1443 static int
1444 ch_start(gld_mac_info_t *macinfo)
1445 {
1446 ch_t *chp = (ch_t *)macinfo->gldm_private;
1447 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
1448 /* only initialize card on first attempt */
1449 mutex_enter(&chp->ch_lock);
1450 chp->ch_refcnt++;
1451 if (chp->ch_refcnt == 1) {
1452 chp->ch_state = PERUNNING;
1453 mutex_exit(&chp->ch_lock);
1454 pe_init((void *)chp);
1455 } else
1456 mutex_exit(&chp->ch_lock);
1457 #else
1458 pe_init((void *)chp);
1459
1460 /* go to running state, we're being started */
1461 mutex_enter(&chp->ch_lock);
1462 chp->ch_state = PERUNNING;
1463 mutex_exit(&chp->ch_lock);
1464 #endif
1465
1466 return (GLD_SUCCESS);
1467 }
1468
1469 static int
1470 ch_stop(gld_mac_info_t *mp)
1471 {
1472 ch_t *chp = (ch_t *)mp->gldm_private;
1473
1474 /*
1475 * can only stop the chip if it's been initialized
1476 */
1477 mutex_enter(&chp->ch_lock);
1478 if (chp->ch_state == PEIDLE) {
1479 mutex_exit(&chp->ch_lock);
1480 return (GLD_FAILURE);
1481 }
1482 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
1483 chp->ch_refcnt--;
1484 if (chp->ch_refcnt == 0) {
1485 chp->ch_state = PESTOP;
1486 mutex_exit(&chp->ch_lock);
1487 pe_stop(chp);
1488 } else
1489 mutex_exit(&chp->ch_lock);
1490 #else
1491 chp->ch_state = PESTOP;
1492 mutex_exit(&chp->ch_lock);
1493 pe_stop(chp);
1494 #endif
1495 return (GLD_SUCCESS);
1496 }
1497
1498 static int
1499 ch_set_mac_address(gld_mac_info_t *mp, uint8_t *mac)
1500 {
1501 ch_t *chp;
1502
1503 if (mp) {
1504 chp = (ch_t *)mp->gldm_private;
1505 } else {
1506 return (GLD_FAILURE);
1507 }
1508
1509 pe_set_mac(chp, mac);
1510
1511 return (GLD_SUCCESS);
1512 }
1513
1514 static int
1515 ch_set_multicast(gld_mac_info_t *mp, uint8_t *ep, int flg)
1516 {
1517 ch_t *chp = (ch_t *)mp->gldm_private;
1518
1519 return (pe_set_mc(chp, ep, flg));
1520 }
1521
1522 static int
1523 ch_ioctl(gld_mac_info_t *macinfo, queue_t *q, mblk_t *mp)
1524 {
1525 struct iocblk *iocp;
1526
1527 switch (mp->b_datap->db_type) {
1528 case M_IOCTL:
1529 /* pe_ioctl() does qreply() */
1530 pe_ioctl((ch_t *)(macinfo->gldm_private), q, mp);
1531 break;
1532
1533 default:
1534 /*
1535 * cmn_err(CE_NOTE, "ch_ioctl not M_IOCTL\n");
1536 * debug_enter("bad ch_ioctl");
1537 */
1538
1539 iocp = (struct iocblk *)mp->b_rptr;
1540
1541 if (mp->b_cont)
1542 freemsg(mp->b_cont);
1543 mp->b_cont = NULL;
1544
1545 mp->b_datap->db_type = M_IOCNAK;
1546 iocp->ioc_error = EINVAL;
1547 qreply(q, mp);
1548 break;
1549 }
1550
1551 return (GLD_SUCCESS);
1552 }
1553
1554 static int
1555 ch_set_promiscuous(gld_mac_info_t *mp, int flag)
1556 {
1557 ch_t *chp = (ch_t *)mp->gldm_private;
1558
1559 switch (flag) {
1560 case GLD_MAC_PROMISC_MULTI:
1561 pe_set_promiscuous(chp, 2);
1562 break;
1563
1564 case GLD_MAC_PROMISC_NONE:
1565 pe_set_promiscuous(chp, 0);
1566 break;
1567
1568 case GLD_MAC_PROMISC_PHYS:
1569 default:
1570 pe_set_promiscuous(chp, 1);
1571 break;
1572 }
1573
1574 return (GLD_SUCCESS);
1575 }
1576
1577 static int
1578 ch_get_stats(gld_mac_info_t *mp, struct gld_stats *gs)
1579 {
1580 ch_t *chp = (ch_t *)mp->gldm_private;
1581 uint64_t speed;
1582 uint32_t intrcnt;
1583 uint32_t norcvbuf;
1584 uint32_t oerrors;
1585 uint32_t ierrors;
1586 uint32_t underrun;
1587 uint32_t overrun;
1588 uint32_t framing;
1589 uint32_t crc;
1590 uint32_t carrier;
1591 uint32_t collisions;
1592 uint32_t xcollisions;
1593 uint32_t late;
1594 uint32_t defer;
1595 uint32_t xerrs;
1596 uint32_t rerrs;
1597 uint32_t toolong;
1598 uint32_t runt;
1599 ulong_t multixmt;
1600 ulong_t multircv;
1601 ulong_t brdcstxmt;
1602 ulong_t brdcstrcv;
1603
1604 /*
1605 * race looks benign here.
1606 */
1607 if (chp->ch_state != PERUNNING) {
1608 return (GLD_FAILURE);
1609 }
1610
1611 (void) pe_get_stats(chp,
1612 &speed,
1613 &intrcnt,
1614 &norcvbuf,
1615 &oerrors,
1616 &ierrors,
1617 &underrun,
1618 &overrun,
1619 &framing,
1620 &crc,
1621 &carrier,
1622 &collisions,
1623 &xcollisions,
1624 &late,
1625 &defer,
1626 &xerrs,
1627 &rerrs,
1628 &toolong,
1629 &runt,
1630 &multixmt,
1631 &multircv,
1632 &brdcstxmt,
1633 &brdcstrcv);
1634
1635 gs->glds_speed = speed;
1636 gs->glds_media = GLDM_UNKNOWN;
1637 gs->glds_intr = intrcnt;
1638 gs->glds_norcvbuf = norcvbuf;
1639 gs->glds_errxmt = oerrors;
1640 gs->glds_errrcv = ierrors;
1641 gs->glds_missed = ierrors; /* ??? */
1642 gs->glds_underflow = underrun;
1643 gs->glds_overflow = overrun;
1644 gs->glds_frame = framing;
1645 gs->glds_crc = crc;
1646 gs->glds_duplex = GLD_DUPLEX_FULL;
1647 gs->glds_nocarrier = carrier;
1648 gs->glds_collisions = collisions;
1649 gs->glds_excoll = xcollisions;
1650 gs->glds_xmtlatecoll = late;
1651 gs->glds_defer = defer;
1652 gs->glds_dot3_first_coll = 0; /* Not available */
1653 gs->glds_dot3_multi_coll = 0; /* Not available */
1654 gs->glds_dot3_sqe_error = 0; /* Not available */
1655 gs->glds_dot3_mac_xmt_error = xerrs;
1656 gs->glds_dot3_mac_rcv_error = rerrs;
1657 gs->glds_dot3_frame_too_long = toolong;
1658 gs->glds_short = runt;
1659
1660 gs->glds_noxmtbuf = 0; /* not documented */
1661 gs->glds_xmtretry = 0; /* not documented */
1662 gs->glds_multixmt = multixmt; /* not documented */
1663 gs->glds_multircv = multircv; /* not documented */
1664 gs->glds_brdcstxmt = brdcstxmt; /* not documented */
1665 gs->glds_brdcstrcv = brdcstrcv; /* not documented */
1666
1667 return (GLD_SUCCESS);
1668 }
1669
1670
1671 static int
1672 ch_send(gld_mac_info_t *macinfo, mblk_t *mp)
1673 {
1674 ch_t *chp = (ch_t *)macinfo->gldm_private;
1675 uint32_t flg;
1676 uint32_t msg_flg;
1677
1678 #ifdef TX_CKSUM_FIX
1679 mblk_t *nmp;
1680 int frags;
1681 size_t msg_len;
1682 struct ether_header *ehdr;
1683 ipha_t *ihdr;
1684 int tflg = 0;
1685 #endif /* TX_CKSUM_FIX */
1686
1687 /*
1688 * race looks benign here.
1689 */
1690 if (chp->ch_state != PERUNNING) {
1691 return (GLD_FAILURE);
1692 }
1693
1694 msg_flg = 0;
1695 if (chp->ch_config.cksum_enabled) {
1696 if (is_T2(chp)) {
1697 mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &msg_flg);
1698 flg = (msg_flg & HCK_FULLCKSUM)?
1699 CH_NO_CPL: CH_NO_HWCKSUM|CH_NO_CPL;
1700 } else
1701 flg = CH_NO_CPL;
1702 } else
1703 flg = CH_NO_HWCKSUM | CH_NO_CPL;
1704
1705 #ifdef TX_CKSUM_FIX
1706 /*
1707 * Check if the message spans more than one mblk or
1708 * if it does and the ip header is not in the first
1709 * fragment then pull up the message. This case is
1710 * expected to be rare.
1711 */
1712 frags = 0;
1713 msg_len = 0;
1714 nmp = mp;
1715 do {
1716 frags++;
1717 msg_len += MBLKL(nmp);
1718 nmp = nmp->b_cont;
1719 } while (nmp);
1720 #define MAX_ALL_HDRLEN SZ_CPL_TX_PKT + sizeof (struct ether_header) + \
1721 TCP_MAX_COMBINED_HEADER_LENGTH
1722 /*
1723 * If the first mblk has enough space at the beginning of
1724 * the data buffer to hold a CPL header, then we'll expand
1725 * the front of the buffer so a pullup will leave space for
1726 * pe_start() to add the CPL header in line. We need to remember
1727 * that we've done this so we can undo it after the pullup.
1728 *
1729 * Note that if we decide to do an allocb to hold the CPL header,
1730 * we need to catch the case where we've added an empty mblk for
1731 * the header but never did a pullup. This would result in the
1732 * tests for etherheader, etc. being done on the initial, empty,
1733 * mblk instead of the one with data. See PR3646 for further
1734 * details. (note this PR is closed since it is no longer relevant).
1735 *
1736 * Another point is that if we do add an allocb to add space for
1737 * a CPL header, after a pullup, the initial pointer, mp, in GLD will
1738 * no longer point to a valid mblk. When we get the mblk (by allocb),
1739 * we need to switch the mblk structure values between it and the
1740 * mp structure values referenced by GLD. This handles the case where
1741 * we've run out of cmdQ entries and report GLD_NORESOURCES back to
1742 * GLD. The pointer to the mblk data will have been modified to hold
1743 * an empty 8 bytes for the CPL header. For now, we let the pe_start()
1744 * routine prepend an 8 byte mblk.
1745 */
1746 if (MBLKHEAD(mp) >= SZ_CPL_TX_PKT) {
1747 mp->b_rptr -= SZ_CPL_TX_PKT;
1748 tflg = 1;
1749 }
1750 if (frags > 3) {
1751 chp->sge->intr_cnt.tx_msg_pullups++;
1752 if (pullupmsg(mp, -1) == 0) {
1753 freemsg(mp);
1754 return (GLD_SUCCESS);
1755 }
1756 } else if ((msg_len > MAX_ALL_HDRLEN) &&
1757 (MBLKL(mp) < MAX_ALL_HDRLEN)) {
1758 chp->sge->intr_cnt.tx_hdr_pullups++;
1759 if (pullupmsg(mp, MAX_ALL_HDRLEN) == 0) {
1760 freemsg(mp);
1761 return (GLD_SUCCESS);
1762 }
1763 }
1764 if (tflg)
1765 mp->b_rptr += SZ_CPL_TX_PKT;
1766
1767 ehdr = (struct ether_header *)mp->b_rptr;
1768 if (ehdr->ether_type == htons(ETHERTYPE_IP)) {
1769 ihdr = (ipha_t *)&mp->b_rptr[sizeof (struct ether_header)];
1770 if ((ihdr->ipha_fragment_offset_and_flags & IPH_MF)) {
1771 if (ihdr->ipha_protocol == IPPROTO_UDP) {
1772 flg |= CH_UDP_MF;
1773 chp->sge->intr_cnt.tx_udp_ip_frag++;
1774 } else if (ihdr->ipha_protocol == IPPROTO_TCP) {
1775 flg |= CH_TCP_MF;
1776 chp->sge->intr_cnt.tx_tcp_ip_frag++;
1777 }
1778 } else if (ihdr->ipha_protocol == IPPROTO_UDP)
1779 flg |= CH_UDP;
1780 }
1781 #endif /* TX_CKSUM_FIX */
1782
1783 /*
1784 * return 0 - data send successfully
1785 * return 1 - no resources, reschedule
1786 */
1787 if (pe_start(chp, mp, flg))
1788 return (GLD_NORESOURCES);
1789 else
1790 return (GLD_SUCCESS);
1791 }
1792
1793 static uint_t
1794 ch_intr(gld_mac_info_t *mp)
1795 {
1796 return (pe_intr((ch_t *)mp->gldm_private));
1797 }
1798
1799 /*
1800 * generate name of driver with unit # appended.
1801 */
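/*
 * e.g. unit 3 yields "chxge3" and unit 12 yields "chxge12"; units are
 * assumed to be below 100.
 */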
1802 void
1803 ch_set_name(ch_t *chp, int unit)
1804 {
1805 chp->ch_name = (char *)kmem_alloc(sizeof ("chxge00"), KM_SLEEP);
1806 if (unit > 9) {
1807 bcopy("chxge00", (void *)chp->ch_name, sizeof ("chxge00"));
1808 chp->ch_name[5] += unit/10;
1809 chp->ch_name[6] += unit%10;
1810 } else {
1811 bcopy("chxge0", (void *)chp->ch_name, sizeof ("chxge0"));
1812 chp->ch_name[5] += unit;
1813 }
1814 }
1815
1816 void
1817 ch_free_name(ch_t *chp)
1818 {
1819 if (chp->ch_name)
1820 kmem_free(chp->ch_name, sizeof ("chxge00"));
1821 chp->ch_name = NULL;
1822 }
1823
1824 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
1825 /*
1826 * register toe offload.
1827 */
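/*
 * The TOE module passes in its receive/free/tunnel callbacks and its
 * tx-overflow mutex/cv; the adapter is started if this is its first user.
 * Returns the per-instance ch_t pointer, or NULL if the unit is absent.
 */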
1828 void *
1829 ch_register(void *instp, void *toe_rcv, void *toe_free, void *toe_tunnel,
1830 kmutex_t *toe_tx_mx, kcondvar_t *toe_of_cv, int unit)
1831 {
1832 ch_t *chp = gchp[unit];
1833 if (chp != NULL) {
1834 mutex_enter(&chp->ch_lock);
1835
1836 chp->toe_rcv = (void (*)(void *, mblk_t *))toe_rcv;
1837 chp->ch_toeinst = instp;
1838 chp->toe_free = (void (*)(void *, tbuf_t *))toe_free;
1839 chp->toe_tunnel = (int (*)(void *, mblk_t *))toe_tunnel;
1840 chp->ch_tx_overflow_mutex = toe_tx_mx;
1841 chp->ch_tx_overflow_cv = toe_of_cv;
1842 chp->open_device_map |= TOEDEV_DEVMAP_BIT;
1843
1844 /* start up adapter if first user */
1845 chp->ch_refcnt++;
1846 if (chp->ch_refcnt == 1) {
1847 chp->ch_state = PERUNNING;
1848 mutex_exit(&chp->ch_lock);
1849 pe_init((void *)chp);
1850 } else
1851 mutex_exit(&chp->ch_lock);
1852 }
1853 return ((void *)gchp[unit]);
1854 }
1855
1856 /*
1857 * unregister toe offload.
1858 * XXX Need to fix races here.
1859 * 1. turn off SGE interrupts.
1860 * 2. do update
1861 * 3. re-enable SGE interrupts
1862 * 4. SGE doorbell to make sure things get restarted.
1863 */
1864 void
1865 ch_unregister(void)
1866 {
1867 int i;
1868 ch_t *chp;
1869
1870 for (i = 0; i < MAX_CARDS; i++) {
1871 chp = gchp[i];
1872 if (chp == NULL)
1873 continue;
1874
1875 mutex_enter(&chp->ch_lock);
1876
1877 chp->ch_refcnt--;
1878 if (chp->ch_refcnt == 0) {
1879 chp->ch_state = PESTOP;
1880 mutex_exit(&chp->ch_lock);
1881 pe_stop(chp);
1882 } else
1883 mutex_exit(&chp->ch_lock);
1884
1885 chp->open_device_map &= ~TOEDEV_DEVMAP_BIT;
1886 chp->toe_rcv = NULL;
1887 chp->ch_toeinst = NULL;
1888 chp->toe_free = NULL;
1889 chp->toe_tunnel = NULL;
1890 chp->ch_tx_overflow_mutex = NULL;
1891 chp->ch_tx_overflow_cv = NULL;
1892 }
1893 }
1894 #endif /* CONFIG_CHELSIO_T1_OFFLOAD */
1895
1896 /*
1897 * get properties from chxge.conf
1898 */
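/*
 * Example chxge.conf entries (illustrative values only; each property is
 * also accepted with hyphens in place of underscores):
 *
 *	accept_jumbo=1;
 *	maximum_mtu=9000;
 *	enable_checksum_offload=0;
 *	pci_burstsize=1024;
 */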
1899 static void
1900 ch_get_prop(ch_t *chp)
1901 {
1902 int val;
1903 int tval = 0;
1904 extern int enable_latency_timer;
1905 extern uint32_t sge_cmdq0_cnt;
1906 extern uint32_t sge_cmdq1_cnt;
1907 extern uint32_t sge_flq0_cnt;
1908 extern uint32_t sge_flq1_cnt;
1909 extern uint32_t sge_respq_cnt;
1910 extern uint32_t sge_cmdq0_cnt_orig;
1911 extern uint32_t sge_cmdq1_cnt_orig;
1912 extern uint32_t sge_flq0_cnt_orig;
1913 extern uint32_t sge_flq1_cnt_orig;
1914 extern uint32_t sge_respq_cnt_orig;
1915 dev_info_t *pdip;
1916 uint32_t vendor_id, device_id, revision_id;
1917 uint32_t *prop_val = NULL;
1918 uint32_t prop_len = 0;
1919
1920 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1921 "enable_dvma", -1);
1922 if (val == -1)
1923 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1924 "enable-dvma", -1);
1925 if (val != -1) {
1926 if (val != 0)
1927 chp->ch_config.enable_dvma = 1;
1928 }
1929
1930 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1931 "amd_bug_workaround", -1);
1932 if (val == -1)
1933 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
1934 "amd-bug-workaround", -1);
1935
1936 if (val != -1) {
1937 if (val == 0) {
1938 chp->ch_config.burstsize_set = 0;
1939 chp->ch_config.transaction_cnt_set = 0;
1940 goto fail_exit;
1941 }
1942 }
1943 /*
1944 * Step up to the parent node; that's the node above us
1945 * in the device tree, and it will typically be the PCI host
1946 * controller.
1947 */
1948 pdip = ddi_get_parent(chp->ch_dip);
1949
1950 /*
1951 * Now get the 'Vendor id' properties
1952 */
1953 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "vendor-id",
1954 (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1955 chp->ch_config.burstsize_set = 0;
1956 chp->ch_config.transaction_cnt_set = 0;
1957 goto fail_exit;
1958 }
1959 vendor_id = *(uint32_t *)prop_val;
1960 ddi_prop_free(prop_val);
1961
1962 /*
1963 * Now get the 'Device id' properties
1964 */
1965 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "device-id",
1966 (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1967 chp->ch_config.burstsize_set = 0;
1968 chp->ch_config.transaction_cnt_set = 0;
1969 goto fail_exit;
1970 }
1971 device_id = *(uint32_t *)prop_val;
1972 ddi_prop_free(prop_val);
1973
1974 /*
1975 * Now get the 'Revision id' properties
1976 */
1977 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "revision-id",
1978 (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1979 chp->ch_config.burstsize_set = 0;
1980 chp->ch_config.transaction_cnt_set = 0;
1981 goto fail_exit;
1982 }
1983 revision_id = *(uint32_t *)prop_val;
1984 ddi_prop_free(prop_val);
1985
1986 /*
1987 * set default values based on node above us.
1988 */
1989 if ((vendor_id == AMD_VENDOR_ID) && (device_id == AMD_BRIDGE) &&
1990 (revision_id <= AMD_BRIDGE_REV)) {
1991 uint32_t v;
1992 uint32_t burst;
1993 uint32_t cnt;
1994
1995 /* if 133 MHz not enabled, then do nothing - we're not PCI-X */
1996 v = pci_config_get32(chp->ch_hpci, 0x64);
1997 if ((v & 0x20000) == 0) {
1998 chp->ch_config.burstsize_set = 0;
1999 chp->ch_config.transaction_cnt_set = 0;
2000 goto fail_exit;
2001 }
2002
2003 /* check burst size and transaction count */
2004 v = pci_config_get32(chp->ch_hpci, 0x60);
2005 burst = (v >> 18) & 3;
2006 cnt = (v >> 20) & 7;
2007
2008 switch (burst) {
2009 case 0: /* 512 */
2010 /* 512 burst size legal with split cnts 1,2,3 */
2011 if (cnt <= 2) {
2012 chp->ch_config.burstsize_set = 0;
2013 chp->ch_config.transaction_cnt_set = 0;
2014 goto fail_exit;
2015 }
2016 break;
2017 case 1: /* 1024 */
2018 /* 1024 burst size legal with split cnts 1,2 */
2019 if (cnt <= 1) {
2020 chp->ch_config.burstsize_set = 0;
2021 chp->ch_config.transaction_cnt_set = 0;
2022 goto fail_exit;
2023 }
2024 break;
2025 case 2: /* 2048 */
2026 /* 2048 burst size legal with split cnts 1 */
2027 if (cnt == 0) {
2028 chp->ch_config.burstsize_set = 0;
2029 chp->ch_config.transaction_cnt_set = 0;
2030 goto fail_exit;
2031 }
2032 break;
2033 case 3: /* 4096 */
2034 break;
2035 }
2036 } else {
2037 goto fail_exit;
2038 }
2039
2040 /*
2041 * if illegal burst size seen, then default to 1024 burst size
2042 */
2043 chp->ch_config.burstsize = 1;
2044 chp->ch_config.burstsize_set = 1;
2045 /*
2046 * if illegal transaction cnt seen, then default to 2
2047 */
2048 chp->ch_config.transaction_cnt = 1;
2049 chp->ch_config.transaction_cnt_set = 1;
2050
2051
2052 fail_exit:
2053
2054 /*
2055 * alter the burstsize parameter via an entry
2056 * in chxge.conf
2057 */
2058
2059 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2060 "pci_burstsize", -1);
2061 if (val == -1)
2062 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2063 "pci-burstsize", -1);
2064
2065 if (val != -1) {
2066
2067 switch (val) {
2068 case 0: /* use default */
2069 chp->ch_config.burstsize_set = 0;
2070 break;
2071
2072 case 1024:
2073 chp->ch_config.burstsize_set = 1;
2074 chp->ch_config.burstsize = 1;
2075 break;
2076
2077 case 2048:
2078 chp->ch_config.burstsize_set = 1;
2079 chp->ch_config.burstsize = 2;
2080 break;
2081
2082 case 4096:
2083 cmn_err(CE_WARN, "%s not supported %d\n",
2084 chp->ch_name, val);
2085 break;
2086
2087 default:
2088 cmn_err(CE_WARN, "%s illegal burst size %d\n",
2089 chp->ch_name, val);
2090 break;
2091 }
2092 }
2093
2094 /*
2095 * set transaction count
2096 */
2097 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2098 "pci_split_transaction_cnt", -1);
2099 if (val == -1)
2100 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2101 "pci-split-transaction-cnt", -1);
2102
2103 if (val != -1) {
2104 switch (val) {
2105 case 0: /* use default */
2106 chp->ch_config.transaction_cnt_set = 0;
2107 break;
2108
2109 case 1:
2110 chp->ch_config.transaction_cnt_set = 1;
2111 chp->ch_config.transaction_cnt = 0;
2112 break;
2113
2114 case 2:
2115 chp->ch_config.transaction_cnt_set = 1;
2116 chp->ch_config.transaction_cnt = 1;
2117 break;
2118
2119 case 3:
2120 chp->ch_config.transaction_cnt_set = 1;
2121 chp->ch_config.transaction_cnt = 2;
2122 break;
2123
2124 case 4:
2125 chp->ch_config.transaction_cnt_set = 1;
2126 chp->ch_config.transaction_cnt = 3;
2127 break;
2128
2129 case 8:
2130 chp->ch_config.transaction_cnt_set = 1;
2131 chp->ch_config.transaction_cnt = 4;
2132 break;
2133
2134 case 12:
2135 chp->ch_config.transaction_cnt_set = 1;
2136 chp->ch_config.transaction_cnt = 5;
2137 break;
2138
2139 case 16:
2140 chp->ch_config.transaction_cnt_set = 1;
2141 chp->ch_config.transaction_cnt = 6;
2142 break;
2143
2144 case 32:
2145 chp->ch_config.transaction_cnt_set = 1;
2146 chp->ch_config.transaction_cnt = 7;
2147 break;
2148
2149 default:
2150 cmn_err(CE_WARN, "%s illegal transaction cnt %d\n",
2151 chp->ch_name, val);
2152 break;
2153 }
2154 }
2155
2156 /*
2157 * set relaxed ordering bit?
2158 */
2159 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2160 "pci_relaxed_ordering_on", -1);
2161 if (val == -1)
2162 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2163 "pci-relaxed-ordering-on", -1);
2164
2165 /*
2166 * default is to use system default value.
2167 */
2168 chp->ch_config.relaxed_ordering = 0;
2169
2170 if (val != -1) {
2171 if (val)
2172 chp->ch_config.relaxed_ordering = 1;
2173 }
2174
2175 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2176 "enable_latency_timer", -1);
2177 if (val == -1)
2178 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2179 "enable-latency-timer", -1);
2180 if (val != -1)
2181 enable_latency_timer = (val == 0)? 0: 1;
2182
2183 /*
2184 * default maximum Jumbo Frame size.
2185 */
2186 chp->ch_maximum_mtu = 9198; /* tunable via chxge.conf */
2187 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2188 "maximum_mtu", -1);
2189 if (val == -1) {
2190 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2191 "maximum-mtu", -1);
2192 }
2193 if (val != -1) {
2194 if (val > 9582) {
2195 cmn_err(CE_WARN,
2196 "maximum_mtu value %d > 9582. Value set to 9582",
2197 val);
2198 val = 9582;
2199 } else if (val < 1500) {
2200 cmn_err(CE_WARN,
2201 "maximum_mtu value %d < 1500. Value set to 1500",
2202 val);
2203 val = 1500;
2204 }
2205
2206 if (val)
2207 chp->ch_maximum_mtu = val;
2208 }
2209
2210 /*
2211 * default value for this instance mtu
2212 */
2213 chp->ch_mtu = ETHERMTU;
2214
2215 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2216 "accept_jumbo", -1);
2217 if (val == -1) {
2218 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2219 "accept-jumbo", -1);
2220 }
2221 if (val != -1) {
2222 if (val)
2223 chp->ch_mtu = chp->ch_maximum_mtu;
2224 }
2225 #ifdef CONFIG_CHELSIO_T1_OFFLOAD
2226 chp->ch_sm_buf_sz = 0x800;
2227 chp->ch_sm_buf_aln = 0x800;
2228 chp->ch_bg_buf_sz = 0x4000;
2229 chp->ch_bg_buf_aln = 0x4000;
2230 #else
2231 chp->ch_sm_buf_sz = 0x200;
2232 chp->ch_sm_buf_aln = 0x200;
2233 chp->ch_bg_buf_sz = 0x800;
2234 chp->ch_bg_buf_aln = 0x800;
2235 if ((chp->ch_mtu > 0x800) && (chp->ch_mtu <= 0x1000)) {
2236 chp->ch_sm_buf_sz = 0x400;
2237 chp->ch_sm_buf_aln = 0x400;
2238 chp->ch_bg_buf_sz = 0x1000;
2239 chp->ch_bg_buf_aln = 0x1000;
2240 } else if ((chp->ch_mtu > 0x1000) && (chp->ch_mtu <= 0x2000)) {
2241 chp->ch_sm_buf_sz = 0x400;
2242 chp->ch_sm_buf_aln = 0x400;
2243 chp->ch_bg_buf_sz = 0x2000;
2244 chp->ch_bg_buf_aln = 0x2000;
2245 } else if (chp->ch_mtu > 0x2000) {
2246 chp->ch_sm_buf_sz = 0x400;
2247 chp->ch_sm_buf_aln = 0x400;
2248 chp->ch_bg_buf_sz = 0x3000;
2249 chp->ch_bg_buf_aln = 0x4000;
2250 }
2251 #endif
2252 chp->ch_config.cksum_enabled = 1;
2253
2254 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2255 "enable_checksum_offload", -1);
2256 if (val == -1)
2257 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2258 "enable-checksum-offload", -1);
2259 if (val != -1) {
2260 if (val == 0)
2261 chp->ch_config.cksum_enabled = 0;
2262 }
2263
2264 /*
2265 * Provides a tuning capability for the command queue 0 size.
2266 */
2267 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2268 "sge_cmdq0_cnt", -1);
2269 if (val == -1)
2270 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2271 "sge-cmdq0-cnt", -1);
2272 if (val != -1) {
2273 if (val > 10)
2274 sge_cmdq0_cnt = val;
2275 }
2276
2277 if (sge_cmdq0_cnt > 65535) {
2278 cmn_err(CE_WARN,
2279 "%s: sge-cmdQ0-cnt > 65535 - resetting value to default",
2280 chp->ch_name);
2281 sge_cmdq0_cnt = sge_cmdq0_cnt_orig;
2282 }
2283 tval += sge_cmdq0_cnt;
2284
2285 /*
2286 * Provides a tuning capability for the command queue 1 size.
2287 */
2288 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2289 "sge_cmdq1_cnt", -1);
2290 if (val == -1)
2291 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2292 "sge-cmdq1-cnt", -1);
2293 if (val != -1) {
2294 if (val > 10)
2295 sge_cmdq1_cnt = val;
2296 }
2297
2298 if (sge_cmdq1_cnt > 65535) {
2299 cmn_err(CE_WARN,
2300 "%s: sge-cmdQ0-cnt > 65535 - resetting value to default",
2301 chp->ch_name);
2302 sge_cmdq1_cnt = sge_cmdq1_cnt_orig;
2303 }
2304
2305 /*
2306 * Provides a tuning capability for the free list 0 size.
2307 */
2308 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2309 "sge_flq0_cnt", -1);
2310 if (val == -1)
2311 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2312 "sge-flq0-cnt", -1);
2313 if (val != -1) {
2314 if (val > 512)
2315 sge_flq0_cnt = val;
2316 }
2317
2318 if (sge_flq0_cnt > 65535) {
2319 cmn_err(CE_WARN,
2320 "%s: sge-flq0-cnt > 65535 - resetting value to default",
2321 chp->ch_name);
2322 sge_flq0_cnt = sge_flq0_cnt_orig;
2323 }
2324
2325 tval += sge_flq0_cnt;
2326
2327 /*
2328 * Provides a tuning capability for the free list 1 size.
2329 */
2330 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2331 "sge_flq1_cnt", -1);
2332 if (val == -1)
2333 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2334 "sge-flq1-cnt", -1);
2335 if (val != -1) {
2336 if (val > 512)
2337 sge_flq1_cnt = val;
2338 }
2339
2340 if (sge_flq1_cnt > 65535) {
2341 cmn_err(CE_WARN,
2342 "%s: sge-flq1-cnt > 65535 - resetting value to default",
2343 chp->ch_name);
2344 sge_flq1_cnt = sge_flq1_cnt_orig;
2345 }
2346
2347 tval += sge_flq1_cnt;
2348
2349 /*
2350 * Provides a tuning capability for the response queue size.
2351 */
2352 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2353 "sge_respq_cnt", -1);
2354 if (val == -1)
2355 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS,
2356 "sge-respq-cnt", -1);
2357 if (val != -1) {
2358 if (val > 30)
2359 sge_respq_cnt = val;
2360 }
2361
2362 if (sge_respq_cnt > 65535) {
2363 cmn_err(CE_WARN,
2364 "%s: sge-respq-cnt > 65535 - resetting value to default",
2365 chp->ch_name);
2366 sge_respq_cnt = sge_respq_cnt_orig;
2367 }
2368
2369 if (tval > sge_respq_cnt) {
2370 if (tval <= 65535) {
2371 cmn_err(CE_WARN,
2372 "%s: sge-respq-cnt < %d - setting value to %d (cmdQ+flq0+flq1)",
2373 chp->ch_name, tval, tval);
2374
2375 sge_respq_cnt = tval;
2376 } else {
2377 cmn_err(CE_WARN,
2378 "%s: Q sizes invalid - resetting to default values",
2379 chp->ch_name);
2380
2381 sge_cmdq0_cnt = sge_cmdq0_cnt_orig;
2382 sge_cmdq1_cnt = sge_cmdq1_cnt_orig;
2383 sge_flq0_cnt = sge_flq0_cnt_orig;
2384 sge_flq1_cnt = sge_flq1_cnt_orig;
2385 sge_respq_cnt = sge_respq_cnt_orig;
2386 }
2387 }
2388 }
2389