/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file is part of the Chelsio T1 Ethernet driver.
 *
 * Copyright (C) 2003-2005 Chelsio Communications. All rights reserved.
 */

/*
 * Solaris Multithreaded STREAMS DLPI Chelsio PCI Ethernet Driver
 */

/* #define CH_DEBUG 1 */
#ifdef CH_DEBUG
#define	DEBUG_ENTER(a)	debug_enter(a)
#define	PRINT(a)	printf a
#else
#define	DEBUG_ENTER(a)
#define	PRINT(a)
#endif

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strlog.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/dlpi.h>
#include <sys/ethernet.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <inet/common.h>
#include <inet/nd.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <sys/pattr.h>
#include <sys/gld.h>
#include "ostypes.h"
#include "common.h"
#include "oschtoe.h"
#include "sge.h"
#include "regs.h"
#include "ch.h"			/* Chelsio Driver specific parameters */
#include "version.h"

/*
 * Function prototypes.
 */
static int ch_attach(dev_info_t *, ddi_attach_cmd_t);
static int ch_detach(dev_info_t *, ddi_detach_cmd_t);
static int ch_quiesce(dev_info_t *);
static void ch_free_dma_handles(ch_t *chp);
static void ch_set_name(ch_t *chp, int unit);
static void ch_free_name(ch_t *chp);
static void ch_get_prop(ch_t *chp);

#if defined(__sparc)
static void ch_free_dvma_handles(ch_t *chp);
#endif

/* GLD interfaces */
static int ch_reset(gld_mac_info_t *);
static int ch_start(gld_mac_info_t *);
static int ch_stop(gld_mac_info_t *);
static int ch_set_mac_address(gld_mac_info_t *, uint8_t *);
static int ch_set_multicast(gld_mac_info_t *, uint8_t *, int);
static int ch_ioctl(gld_mac_info_t *, queue_t *, mblk_t *);
static int ch_set_promiscuous(gld_mac_info_t *, int);
static int ch_get_stats(gld_mac_info_t *, struct gld_stats *);
static int ch_send(gld_mac_info_t *, mblk_t *);
static uint_t ch_intr(gld_mac_info_t *);

/*
 * Data access requirements.
 */
static struct ddi_device_acc_attr le_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * No swap mapping device attributes
 */
static struct ddi_device_acc_attr null_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * STREAMS driver identification structure - module_info(9S)
 *
 * driver limit values
 */

static struct module_info ch_minfo = {
	CHIDNUM,	/* mi_idnum */
	CHNAME,		/* mi_idname */
	CHMINPSZ,	/* mi_minpsz */
	CHMAXPSZ,	/* mi_maxpsz */
	CHHIWAT,	/* mi_hiwat */
	CHLOWAT		/* mi_lowat */
};

/*
 * STREAMS queue processing procedures - qinit(9S)
 *
 * read queue procedures
 */

static struct qinit ch_rinit = {
	(int (*)()) NULL,	/* qi_putp */
	gld_rsrv,		/* qi_srvp */
	gld_open,		/* qi_qopen */
	gld_close,		/* qi_qclose */
	(int (*)()) NULL,	/* qi_qadmin */
	&ch_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

/*
 * STREAMS queue processing procedures - qinit(9S)
 *
 * write queue procedures
 */

static struct qinit ch_winit = {
	gld_wput,		/* qi_putp */
	gld_wsrv,		/* qi_srvp */
	(int (*)()) NULL,	/* qi_qopen */
	(int (*)()) NULL,	/* qi_qclose */
	(int (*)()) NULL,	/* qi_qadmin */
	&ch_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

/*
 * STREAMS entity declaration structure - streamtab(9S)
 */
static struct streamtab chinfo = {
	&ch_rinit,	/* read queue information */
	&ch_winit,	/* write queue information */
	NULL,		/* st_muxrinit */
	NULL		/* st_muxwrinit */
};
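
/*
 * Note that the read-side qinit above has no put procedure; inbound
 * frames are handed to GLD directly via gld_recv() (see ch_send_up()
 * below), while gld_wput()/gld_wsrv() service the write side.  The
 * streamtab is hooked into the character driver through the cb_stream
 * field of the cb_ops structure that follows.
 */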

/*
 * Device driver ops vector - cb_ops(9S)
 *
 * character/block entry points structure.
 * chinfo identifies the driver as a STREAMS driver.
 */

static struct cb_ops cb_ch_ops = {
	nulldev,	/* cb_open */
	nulldev,	/* cb_close */
	nodev,		/* cb_strategy */
	nodev,		/* cb_print */
	nodev,		/* cb_dump */
	nodev,		/* cb_read */
	nodev,		/* cb_write */
	nodev,		/* cb_ioctl */
	nodev,		/* cb_devmap */
	nodev,		/* cb_mmap */
	nodev,		/* cb_segmap */
	nochpoll,	/* cb_chpoll */
	ddi_prop_op,	/* report driver property information - prop_op(9E) */
	&chinfo,	/* cb_stream */
#if defined(__sparc)
	D_MP | D_64BIT,
#else
	D_MP,		/* cb_flag (supports multi-threading) */
#endif
	CB_REV,		/* cb_rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

/*
 * dev_ops(9S) structure
 *
 * Device Operations table, for autoconfiguration
 */

static struct dev_ops ch_ops = {
	DEVO_REV,	/* Driver build version */
	0,		/* Initial driver reference count */
	gld_getinfo,	/* funcp: get driver information - getinfo(9E) */
	nulldev,	/* funcp: obsolete entry point - identify(9E) */
	nulldev,	/* funcp: probe for device - probe(9E) */
	ch_attach,	/* funcp: attach driver to dev_info - attach(9E) */
	ch_detach,	/* funcp: detach driver to unload - detach(9E) */
	nodev,		/* funcp: reset device (not supported) - dev_ops(9S) */
	&cb_ch_ops,	/* ptr to cb_ops structure */
	NULL,		/* ptr to nexus bus operations structure (leaf) */
	NULL,		/* funcp: change device power level - power(9E) */
	ch_quiesce,	/* devo_quiesce */
};

/*
 * modldrv(9S) structure
 *
 * Definition for module specific device driver linkage structures (modctl.h)
 */

static struct modldrv modldrv = {
	&mod_driverops,	/* driver module */
	VERSION,
	&ch_ops,	/* driver ops */
};

/*
 * modlinkage(9S) structure
 *
 * module linkage base structure (modctl.h)
 */

static struct modlinkage modlinkage = {
	MODREV_1,	/* revision # of system */
	&modldrv,	/* NULL terminated list of linkage structures */
	NULL
};

/* ===================== start of STREAMS driver code ================== */

#ifdef CONFIG_CHELSIO_T1_OFFLOAD
/*
 * global pointer to toe per-driver control structure.
 */
#define	MAX_CARDS	4
ch_t *gchp[MAX_CARDS];
#endif

kmutex_t in_use_l;
uint32_t buffers_in_use[SZ_INUSE];
uint32_t in_use_index;

/*
 * Ethernet broadcast address definition.
 */
static struct ether_addr etherbroadcastaddr = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/*
 * Module initialization functions.
 *
 *	Routine		Called by
 *	_init(9E)	modload(9F)
 *	_info(9E)	modinfo(9F)
 *	_fini(9E)	modunload(9F)
 */

/*
 * _init(9E):
 *
 * Initial, one-time, resource allocation and data initialization.
 */

int
_init(void)
{
	int status;

	status = mod_install(&modlinkage);

	mutex_init(&in_use_l, NULL, MUTEX_DRIVER, NULL);

	return (status);
}

/*
 * _fini(9E): It is here that any device information that was allocated
 * during the _init(9E) routine should be released and the module removed
 * from the system.  In the case of per-instance information, that information
 * should be released in the _detach(9E) routine.
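 * As an additional guard, _fini() below refuses to unload while any
 * receive buffers are still accounted for in buffers_in_use[], i.e.
 * still loaned upstream.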
310 */ 311 312 int 313 _fini(void) 314 { 315 int status; 316 int i; 317 uint32_t t = 0; 318 319 for (i = 0; i < SZ_INUSE; i++) 320 t += buffers_in_use[i]; 321 322 if (t != 0) 323 return (DDI_FAILURE); 324 325 status = mod_remove(&modlinkage); 326 327 if (status == DDI_SUCCESS) 328 mutex_destroy(&in_use_l); 329 330 return (status); 331 } 332 333 int 334 _info(struct modinfo *modinfop) 335 { 336 int status; 337 338 339 status = mod_info(&modlinkage, modinfop); 340 341 return (status); 342 } 343 344 /* 345 * Attach(9E) - This is called on the open to the device. It creates 346 * an instance of the driver. In this routine we create the minor 347 * device node. The routine also initializes all per-unit 348 * mutex's and conditional variables. 349 * 350 * If we were resuming a suspended instance of a device due to power 351 * management, then that would be handled here as well. For more on 352 * that subject see the man page for pm(9E) 353 * 354 * Interface exists: make available by filling in network interface 355 * record. System will initialize the interface when it is ready 356 * to accept packets. 357 */ 358 int chdebug = 0; 359 int ch_abort_debug = 0; 360 361 static int 362 ch_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 363 { 364 ch_t *chp; 365 int rv; 366 int unit; 367 #ifdef CH_DEBUG 368 int Version; 369 int VendorID; 370 int DeviceID; 371 int SubDeviceID; 372 int Command; 373 #endif 374 gld_mac_info_t *macinfo; /* GLD stuff follows */ 375 char *driver; 376 377 if (ch_abort_debug) 378 debug_enter("ch_attach"); 379 380 if (chdebug) 381 return (DDI_FAILURE); 382 383 384 if (cmd == DDI_ATTACH) { 385 386 unit = ddi_get_instance(dip); 387 388 driver = (char *)ddi_driver_name(dip); 389 390 PRINT(("driver %s unit: %d\n", driver, unit)); 391 392 macinfo = gld_mac_alloc(dip); 393 if (macinfo == NULL) { 394 PRINT(("macinfo allocation failed\n")); 395 DEBUG_ENTER("ch_attach"); 396 return (DDI_FAILURE); 397 } 398 399 chp = (ch_t *)kmem_zalloc(sizeof (ch_t), KM_SLEEP); 400 401 if (chp == NULL) { 402 PRINT(("zalloc of chp failed\n")); 403 DEBUG_ENTER("ch_attach"); 404 405 gld_mac_free(macinfo); 406 407 return (DDI_FAILURE); 408 } 409 410 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 411 /* Solaris TOE support */ 412 gchp[unit] = chp; 413 #endif 414 415 PRINT(("attach macinfo: %p chp: %p\n", macinfo, chp)); 416 417 chp->ch_dip = dip; 418 chp->ch_macp = macinfo; 419 chp->ch_unit = unit; 420 ch_set_name(chp, unit); 421 422 /* 423 * map in PCI register spaces 424 * 425 * PCI register set 0 - PCI configuration space 426 * PCI register set 1 - T101 card register space #1 427 */ 428 429 /* map in T101 PCI configuration space */ 430 rv = pci_config_setup( 431 dip, /* ptr to dev's dev_info struct */ 432 &chp->ch_hpci); /* ptr to data access handle */ 433 434 if (rv != DDI_SUCCESS) { 435 PRINT(("PCI config setup failed\n")); 436 DEBUG_ENTER("ch_attach"); 437 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 438 gchp[unit] = NULL; 439 #endif 440 cmn_err(CE_WARN, "%s: ddi_config_setup PCI error %d\n", 441 chp->ch_name, rv); 442 443 ch_free_name(chp); 444 kmem_free(chp, sizeof (ch_t)); 445 gld_mac_free(macinfo); 446 447 return (DDI_FAILURE); 448 } 449 450 ch_get_prop(chp); 451 452 macinfo->gldm_devinfo = dip; 453 macinfo->gldm_private = (caddr_t)chp; 454 macinfo->gldm_reset = ch_reset; 455 macinfo->gldm_start = ch_start; 456 macinfo->gldm_stop = ch_stop; 457 macinfo->gldm_set_mac_addr = ch_set_mac_address; 458 macinfo->gldm_send = ch_send; 459 macinfo->gldm_set_promiscuous = ch_set_promiscuous; 460 macinfo->gldm_get_stats = ch_get_stats; 461 
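		/*
		 * Remaining GLD(7D) entry points and link-layer parameters
		 * follow.  The gldm_saplen value of -2 used below is the
		 * usual GLD convention for a two-byte Ethertype SAP that
		 * trails the MAC address in DLSAP addresses.
		 */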
macinfo->gldm_ioctl = ch_ioctl; 462 macinfo->gldm_set_multicast = ch_set_multicast; 463 macinfo->gldm_intr = ch_intr; 464 macinfo->gldm_mctl = NULL; 465 466 macinfo->gldm_ident = driver; 467 macinfo->gldm_type = DL_ETHER; 468 macinfo->gldm_minpkt = 0; 469 macinfo->gldm_maxpkt = chp->ch_mtu; 470 macinfo->gldm_addrlen = ETHERADDRL; 471 macinfo->gldm_saplen = -2; 472 macinfo->gldm_ppa = unit; 473 macinfo->gldm_broadcast_addr = 474 etherbroadcastaddr.ether_addr_octet; 475 476 477 /* 478 * do a power reset of card 479 * 480 * 1. set PwrState to D3hot (3) 481 * 2. clear PwrState flags 482 */ 483 pci_config_put32(chp->ch_hpci, 0x44, 3); 484 pci_config_put32(chp->ch_hpci, 0x44, 0); 485 486 /* delay .5 sec */ 487 DELAY(500000); 488 489 #ifdef CH_DEBUG 490 VendorID = pci_config_get16(chp->ch_hpci, 0); 491 DeviceID = pci_config_get16(chp->ch_hpci, 2); 492 SubDeviceID = pci_config_get16(chp->ch_hpci, 0x2e); 493 Command = pci_config_get16(chp->ch_hpci, 4); 494 495 PRINT(("IDs: %x,%x,%x\n", VendorID, DeviceID, SubDeviceID)); 496 PRINT(("Command: %x\n", Command)); 497 #endif 498 /* map in T101 register space (BAR0) */ 499 rv = ddi_regs_map_setup( 500 dip, /* ptr to dev's dev_info struct */ 501 BAR0, /* register address space */ 502 &chp->ch_bar0, /* address of offset */ 503 0, /* offset into register address space */ 504 0, /* length mapped (everything) */ 505 &le_attr, /* ptr to device attr structure */ 506 &chp->ch_hbar0); /* ptr to data access handle */ 507 508 if (rv != DDI_SUCCESS) { 509 PRINT(("map registers failed\n")); 510 DEBUG_ENTER("ch_attach"); 511 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 512 gchp[unit] = NULL; 513 #endif 514 cmn_err(CE_WARN, 515 "%s: ddi_regs_map_setup BAR0 error %d\n", 516 chp->ch_name, rv); 517 518 pci_config_teardown(&chp->ch_hpci); 519 ch_free_name(chp); 520 kmem_free(chp, sizeof (ch_t)); 521 gld_mac_free(macinfo); 522 523 return (DDI_FAILURE); 524 } 525 526 #ifdef CH_DEBUG 527 Version = ddi_get32(chp->ch_hbar0, 528 (uint32_t *)(chp->ch_bar0+0x6c)); 529 #endif 530 531 (void) ddi_dev_regsize(dip, 1, &chp->ch_bar0sz); 532 533 PRINT(("PCI BAR0 space addr: %p\n", chp->ch_bar0)); 534 PRINT(("PCI BAR0 space size: %x\n", chp->ch_bar0sz)); 535 PRINT(("PE Version: %x\n", Version)); 536 537 /* 538 * Add interrupt to system. 539 */ 540 rv = ddi_get_iblock_cookie( 541 dip, /* ptr to dev's dev_info struct */ 542 0, /* interrupt # (0) */ 543 &chp->ch_icookp); /* ptr to interrupt block cookie */ 544 545 if (rv != DDI_SUCCESS) { 546 PRINT(("iblock cookie failed\n")); 547 DEBUG_ENTER("ch_attach"); 548 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 549 gchp[unit] = NULL; 550 #endif 551 cmn_err(CE_WARN, 552 "%s: ddi_get_iblock_cookie error %d\n", 553 chp->ch_name, rv); 554 555 ddi_regs_map_free(&chp->ch_hbar0); 556 pci_config_teardown(&chp->ch_hpci); 557 ch_free_name(chp); 558 kmem_free(chp, sizeof (ch_t)); 559 gld_mac_free(macinfo); 560 561 return (DDI_FAILURE); 562 } 563 564 /* 565 * add interrupt handler before card setup. 
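		 * gld_intr() is the GLD-supplied ISR wrapper; it in turn
		 * invokes the gldm_intr entry point (ch_intr -> pe_intr)
		 * registered above.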
566 */ 567 rv = ddi_add_intr( 568 dip, /* ptr to dev's dev_info struct */ 569 0, /* interrupt # (0) */ 570 0, /* iblock cookie ptr (NULL) */ 571 0, /* idevice cookie ptr (NULL) */ 572 gld_intr, /* function ptr to interrupt handler */ 573 (caddr_t)macinfo); /* handler argument */ 574 575 if (rv != DDI_SUCCESS) { 576 PRINT(("add_intr failed\n")); 577 DEBUG_ENTER("ch_attach"); 578 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 579 gchp[unit] = NULL; 580 #endif 581 cmn_err(CE_WARN, "%s: ddi_add_intr error %d\n", 582 chp->ch_name, rv); 583 584 ddi_regs_map_free(&chp->ch_hbar0); 585 pci_config_teardown(&chp->ch_hpci); 586 ch_free_name(chp); 587 kmem_free(chp, sizeof (ch_t)); 588 gld_mac_free(macinfo); 589 590 return (DDI_FAILURE); 591 } 592 593 /* initalize all the remaining per-card locks */ 594 mutex_init(&chp->ch_lock, NULL, MUTEX_DRIVER, 595 (void *)chp->ch_icookp); 596 mutex_init(&chp->ch_intr, NULL, MUTEX_DRIVER, 597 (void *)chp->ch_icookp); 598 mutex_init(&chp->ch_mc_lck, NULL, MUTEX_DRIVER, NULL); 599 mutex_init(&chp->ch_dh_lck, NULL, MUTEX_DRIVER, NULL); 600 mutex_init(&chp->mac_lock, NULL, MUTEX_DRIVER, NULL); 601 602 /* ------- initialize Chelsio card ------- */ 603 604 if (pe_attach(chp)) { 605 PRINT(("card initialization failed\n")); 606 DEBUG_ENTER("ch_attach"); 607 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 608 gchp[unit] = NULL; 609 #endif 610 cmn_err(CE_WARN, "%s: pe_attach failed\n", 611 chp->ch_name); 612 613 mutex_destroy(&chp->ch_lock); 614 mutex_destroy(&chp->ch_intr); 615 mutex_destroy(&chp->ch_mc_lck); 616 mutex_destroy(&chp->ch_dh_lck); 617 mutex_destroy(&chp->mac_lock); 618 ddi_remove_intr(dip, 0, chp->ch_icookp); 619 ddi_regs_map_free(&chp->ch_hbar0); 620 pci_config_teardown(&chp->ch_hpci); 621 ch_free_name(chp); 622 kmem_free(chp, sizeof (ch_t)); 623 gld_mac_free(macinfo); 624 625 return (DDI_FAILURE); 626 } 627 628 /* ------- done with Chelsio card ------- */ 629 630 /* now can set mac address */ 631 macinfo->gldm_vendor_addr = pe_get_mac(chp); 632 633 macinfo->gldm_cookie = chp->ch_icookp; 634 635 /* 636 * We only active checksum offload for T2 architectures. 637 */ 638 if (is_T2(chp)) { 639 if (chp->ch_config.cksum_enabled) 640 macinfo->gldm_capabilities |= 641 GLD_CAP_CKSUM_FULL_V4; 642 } else 643 chp->ch_config.cksum_enabled = 0; 644 645 rv = gld_register( 646 dip, /* ptr to dev's dev_info struct */ 647 (char *)ddi_driver_name(dip), /* driver name */ 648 macinfo); /* ptr to gld macinfo buffer */ 649 650 /* 651 * The Jumbo frames capability is not yet available 652 * in Solaris 10 so registration will fail. MTU > 1500 is 653 * supported in Update 1. 654 */ 655 if (rv != DDI_SUCCESS) { 656 cmn_err(CE_NOTE, "MTU > 1500 not supported by GLD.\n"); 657 cmn_err(CE_NOTE, "Setting MTU to 1500. 
\n"); 658 macinfo->gldm_maxpkt = chp->ch_mtu = 1500; 659 rv = gld_register( 660 dip, /* ptr to dev's dev_info struct */ 661 (char *)ddi_driver_name(dip), /* driver name */ 662 macinfo); /* ptr to gld macinfo buffer */ 663 } 664 665 666 if (rv != DDI_SUCCESS) { 667 PRINT(("gld_register failed\n")); 668 DEBUG_ENTER("ch_attach"); 669 670 cmn_err(CE_WARN, "%s: gld_register error %d\n", 671 chp->ch_name, rv); 672 673 pe_detach(chp); 674 675 mutex_destroy(&chp->ch_lock); 676 mutex_destroy(&chp->ch_intr); 677 mutex_destroy(&chp->ch_mc_lck); 678 mutex_destroy(&chp->ch_dh_lck); 679 mutex_destroy(&chp->mac_lock); 680 ddi_remove_intr(dip, 0, chp->ch_icookp); 681 ddi_regs_map_free(&chp->ch_hbar0); 682 pci_config_teardown(&chp->ch_hpci); 683 ch_free_name(chp); 684 kmem_free(chp, sizeof (ch_t)); 685 gld_mac_free(macinfo); 686 687 return (DDI_FAILURE); 688 } 689 690 /* 691 * print a banner at boot time (verbose mode), announcing 692 * the device pointed to by dip 693 */ 694 ddi_report_dev(dip); 695 696 if (ch_abort_debug) 697 debug_enter("ch_attach"); 698 699 return (DDI_SUCCESS); 700 701 } else if (cmd == DDI_RESUME) { 702 PRINT(("attach resume\n")); 703 DEBUG_ENTER("ch_attach"); 704 if ((chp = (ch_t *)ddi_get_driver_private(dip)) == NULL) 705 return (DDI_FAILURE); 706 707 mutex_enter(&chp->ch_lock); 708 chp->ch_flags &= ~PESUSPENDED; 709 mutex_exit(&chp->ch_lock); 710 return (DDI_SUCCESS); 711 } else { 712 PRINT(("attach: bad command\n")); 713 DEBUG_ENTER("ch_attach"); 714 715 return (DDI_FAILURE); 716 } 717 } 718 719 /* 720 * quiesce(9E) entry point. 721 * 722 * This function is called when the system is single-threaded at high 723 * PIL with preemption disabled. Therefore, this function must not be 724 * blocked. 725 * 726 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 727 * DDI_FAILURE indicates an error condition and should almost never happen. 728 */ 729 static int 730 ch_quiesce(dev_info_t *dip) 731 { 732 ch_t *chp; 733 gld_mac_info_t *macinfo = 734 (gld_mac_info_t *)ddi_get_driver_private(dip); 735 736 chp = (ch_t *)macinfo->gldm_private; 737 chdebug = 0; 738 ch_abort_debug = 0; 739 740 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 741 gchp[chp->ch_unit] = NULL; 742 #endif 743 744 /* Set driver state for this card to IDLE */ 745 chp->ch_state = PEIDLE; 746 747 /* 748 * Do a power reset of card 749 * 1. set PwrState to D3hot (3) 750 * 2. clear PwrState flags 751 */ 752 pci_config_put32(chp->ch_hpci, 0x44, 3); 753 pci_config_put32(chp->ch_hpci, 0x44, 0); 754 755 /* Wait 0.5 sec */ 756 drv_usecwait(500000); 757 758 /* 759 * Now stop the chip 760 */ 761 chp->ch_refcnt = 0; 762 chp->ch_state = PESTOP; 763 764 /* Disables all interrupts */ 765 t1_interrupts_disable(chp); 766 767 /* Disables SGE queues */ 768 t1_write_reg_4(chp->sge->obj, A_SG_CONTROL, 0x0); 769 t1_write_reg_4(chp->sge->obj, A_SG_INT_CAUSE, 0x0); 770 771 return (DDI_SUCCESS); 772 } 773 774 static int 775 ch_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 776 { 777 gld_mac_info_t *macinfo; 778 ch_t *chp; 779 780 if (cmd == DDI_DETACH) { 781 macinfo = (gld_mac_info_t *)ddi_get_driver_private(dip); 782 chp = (ch_t *)macinfo->gldm_private; 783 784 /* 785 * fail detach if there are outstanding mblks still 786 * in use somewhere. 
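		 * When TOE offload is configured this is tracked through
		 * the per-card ch_refcnt, which is checked under ch_lock
		 * just below.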
787 */ 788 DEBUG_ENTER("ch_detach"); 789 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 790 mutex_enter(&chp->ch_lock); 791 if (chp->ch_refcnt > 0) { 792 mutex_exit(&chp->ch_lock); 793 return (DDI_FAILURE); 794 } 795 mutex_exit(&chp->ch_lock); 796 gchp[chp->ch_unit] = NULL; 797 #endif 798 /* 799 * set driver state for this card to IDLE. We're 800 * shutting down. 801 */ 802 mutex_enter(&chp->ch_lock); 803 chp->ch_state = PEIDLE; 804 mutex_exit(&chp->ch_lock); 805 806 /* 807 * do a power reset of card 808 * 809 * 1. set PwrState to D3hot (3) 810 * 2. clear PwrState flags 811 */ 812 pci_config_put32(chp->ch_hpci, 0x44, 3); 813 pci_config_put32(chp->ch_hpci, 0x44, 0); 814 815 /* delay .5 sec */ 816 DELAY(500000); 817 818 /* free register resources */ 819 (void) gld_unregister(macinfo); 820 821 /* make sure no interrupts while shutting down card */ 822 ddi_remove_intr(dip, 0, chp->ch_icookp); 823 824 /* 825 * reset device and recover resources 826 */ 827 pe_detach(chp); 828 829 ddi_regs_map_free(&chp->ch_hbar0); 830 pci_config_teardown(&chp->ch_hpci); 831 mutex_destroy(&chp->ch_lock); 832 mutex_destroy(&chp->ch_intr); 833 mutex_destroy(&chp->ch_mc_lck); 834 mutex_destroy(&chp->ch_dh_lck); 835 mutex_destroy(&chp->mac_lock); 836 ch_free_dma_handles(chp); 837 #if defined(__sparc) 838 ch_free_dvma_handles(chp); 839 #endif 840 ch_free_name(chp); 841 kmem_free(chp, sizeof (ch_t)); 842 gld_mac_free(macinfo); 843 844 DEBUG_ENTER("ch_detach end"); 845 846 return (DDI_SUCCESS); 847 848 } else if ((cmd == DDI_SUSPEND) || (cmd == DDI_PM_SUSPEND)) { 849 DEBUG_ENTER("suspend"); 850 if ((chp = (ch_t *)ddi_get_driver_private(dip)) == NULL) 851 return (DDI_FAILURE); 852 mutex_enter(&chp->ch_lock); 853 chp->ch_flags |= PESUSPENDED; 854 mutex_exit(&chp->ch_lock); 855 #ifdef TODO 856 /* Un-initialize (STOP) T101 */ 857 #endif 858 return (DDI_SUCCESS); 859 } else 860 return (DDI_FAILURE); 861 } 862 863 /* 864 * ch_alloc_dma_mem 865 * 866 * allocates DMA handle 867 * allocates kernel memory 868 * allocates DMA access handle 869 * 870 * chp - per-board descriptor 871 * type - byteswap mapping? 
872 * flags - type of mapping 873 * size - # bytes mapped 874 * paddr - physical address 875 * dh - ddi dma handle 876 * ah - ddi access handle 877 */ 878 879 void * 880 ch_alloc_dma_mem(ch_t *chp, int type, int flags, int size, uint64_t *paddr, 881 ulong_t *dh, ulong_t *ah) 882 { 883 ddi_dma_attr_t ch_dma_attr; 884 ddi_dma_cookie_t cookie; 885 ddi_dma_handle_t ch_dh; 886 ddi_acc_handle_t ch_ah; 887 ddi_device_acc_attr_t *dev_attrp; 888 caddr_t ch_vaddr; 889 size_t rlen; 890 uint_t count; 891 uint_t mapping; 892 uint_t align; 893 uint_t rv; 894 uint_t direction; 895 896 mapping = (flags&DMA_STREAM)?DDI_DMA_STREAMING:DDI_DMA_CONSISTENT; 897 if (flags & DMA_4KALN) 898 align = 0x4000; 899 else if (flags & DMA_SMALN) 900 align = chp->ch_sm_buf_aln; 901 else if (flags & DMA_BGALN) 902 align = chp->ch_bg_buf_aln; 903 else { 904 cmn_err(CE_WARN, "ch_alloc_dma_mem(%s): bad alignment flag\n", 905 chp->ch_name); 906 return (0); 907 } 908 direction = (flags&DMA_OUT)?DDI_DMA_WRITE:DDI_DMA_READ; 909 910 /* 911 * dynamically create a dma attribute structure 912 */ 913 ch_dma_attr.dma_attr_version = DMA_ATTR_V0; 914 ch_dma_attr.dma_attr_addr_lo = 0; 915 ch_dma_attr.dma_attr_addr_hi = 0xffffffffffffffff; 916 ch_dma_attr.dma_attr_count_max = 0x00ffffff; 917 ch_dma_attr.dma_attr_align = align; 918 ch_dma_attr.dma_attr_burstsizes = 0xfff; 919 ch_dma_attr.dma_attr_minxfer = 1; 920 ch_dma_attr.dma_attr_maxxfer = 0x00ffffff; 921 ch_dma_attr.dma_attr_seg = 0xffffffff; 922 ch_dma_attr.dma_attr_sgllen = 1; 923 ch_dma_attr.dma_attr_granular = 1; 924 ch_dma_attr.dma_attr_flags = 0; 925 926 rv = ddi_dma_alloc_handle( 927 chp->ch_dip, /* device dev_info structure */ 928 &ch_dma_attr, /* DMA attributes */ 929 DDI_DMA_SLEEP, /* Wait if no memory */ 930 NULL, /* no argument to callback */ 931 &ch_dh); /* DMA handle */ 932 if (rv != DDI_SUCCESS) { 933 934 cmn_err(CE_WARN, 935 "%s: ch_alloc_dma_mem: ddi_dma_alloc_handle error %d\n", 936 chp->ch_name, rv); 937 938 return (0); 939 } 940 941 /* set byte order for data xfer */ 942 if (type) 943 dev_attrp = &null_attr; 944 else 945 dev_attrp = &le_attr; 946 947 rv = ddi_dma_mem_alloc( 948 ch_dh, /* dma handle */ 949 size, /* size desired allocate */ 950 dev_attrp, /* access attributes */ 951 mapping, 952 DDI_DMA_SLEEP, /* wait for resources */ 953 NULL, /* no argument */ 954 &ch_vaddr, /* allocated memory */ 955 &rlen, /* real size allocated */ 956 &ch_ah); /* data access handle */ 957 if (rv != DDI_SUCCESS) { 958 ddi_dma_free_handle(&ch_dh); 959 960 cmn_err(CE_WARN, 961 "%s: ch_alloc_dma_mem: ddi_dma_mem_alloc error %d\n", 962 chp->ch_name, rv); 963 964 return (0); 965 } 966 967 rv = ddi_dma_addr_bind_handle( 968 ch_dh, /* dma handle */ 969 (struct as *)0, /* kernel address space */ 970 ch_vaddr, /* virtual address */ 971 rlen, /* length of object */ 972 direction|mapping, 973 DDI_DMA_SLEEP, /* Wait for resources */ 974 NULL, /* no argument */ 975 &cookie, /* dma cookie */ 976 &count); 977 if (rv != DDI_DMA_MAPPED) { 978 ddi_dma_mem_free(&ch_ah); 979 ddi_dma_free_handle(&ch_dh); 980 981 cmn_err(CE_WARN, 982 "%s: ch_alloc_dma_mem: ddi_dma_addr_bind_handle error %d\n", 983 chp->ch_name, rv); 984 985 return (0); 986 } 987 988 if (count != 1) { 989 cmn_err(CE_WARN, 990 "%s: ch_alloc_dma_mem: ch_alloc_dma_mem cookie count %d\n", 991 chp->ch_name, count); 992 PRINT(("ch_alloc_dma_mem cookie count %d\n", count)); 993 994 ddi_dma_mem_free(&ch_ah); 995 ddi_dma_free_handle(&ch_dh); 996 997 return (0); 998 } 999 1000 *paddr = cookie.dmac_laddress; 1001 1002 *(ddi_dma_handle_t *)dh = 
ch_dh; 1003 *(ddi_acc_handle_t *)ah = ch_ah; 1004 1005 return ((void *)ch_vaddr); 1006 } 1007 1008 /* 1009 * ch_free_dma_mem 1010 * 1011 * frees resources allocated by ch_alloc_dma_mem() 1012 * 1013 * frees DMA handle 1014 * frees kernel memory 1015 * frees DMA access handle 1016 */ 1017 1018 void 1019 ch_free_dma_mem(ulong_t dh, ulong_t ah) 1020 { 1021 ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dh; 1022 ddi_acc_handle_t ch_ah = (ddi_acc_handle_t)ah; 1023 1024 (void) ddi_dma_unbind_handle(ch_dh); 1025 ddi_dma_mem_free(&ch_ah); 1026 ddi_dma_free_handle(&ch_dh); 1027 } 1028 1029 /* 1030 * create a dma handle and return a dma handle entry. 1031 */ 1032 free_dh_t * 1033 ch_get_dma_handle(ch_t *chp) 1034 { 1035 ddi_dma_handle_t ch_dh; 1036 ddi_dma_attr_t ch_dma_attr; 1037 free_dh_t *dhe; 1038 int rv; 1039 1040 dhe = (free_dh_t *)kmem_zalloc(sizeof (*dhe), KM_SLEEP); 1041 1042 ch_dma_attr.dma_attr_version = DMA_ATTR_V0; 1043 ch_dma_attr.dma_attr_addr_lo = 0; 1044 ch_dma_attr.dma_attr_addr_hi = 0xffffffffffffffff; 1045 ch_dma_attr.dma_attr_count_max = 0x00ffffff; 1046 ch_dma_attr.dma_attr_align = 1; 1047 ch_dma_attr.dma_attr_burstsizes = 0xfff; 1048 ch_dma_attr.dma_attr_minxfer = 1; 1049 ch_dma_attr.dma_attr_maxxfer = 0x00ffffff; 1050 ch_dma_attr.dma_attr_seg = 0xffffffff; 1051 ch_dma_attr.dma_attr_sgllen = 5; 1052 ch_dma_attr.dma_attr_granular = 1; 1053 ch_dma_attr.dma_attr_flags = 0; 1054 1055 rv = ddi_dma_alloc_handle( 1056 chp->ch_dip, /* device dev_info */ 1057 &ch_dma_attr, /* DMA attributes */ 1058 DDI_DMA_SLEEP, /* Wait if no memory */ 1059 NULL, /* no argument */ 1060 &ch_dh); /* DMA handle */ 1061 if (rv != DDI_SUCCESS) { 1062 1063 cmn_err(CE_WARN, 1064 "%s: ch_get_dma_handle: ddi_dma_alloc_handle error %d\n", 1065 chp->ch_name, rv); 1066 1067 kmem_free(dhe, sizeof (*dhe)); 1068 1069 return ((free_dh_t *)0); 1070 } 1071 1072 dhe->dhe_dh = (ulong_t)ch_dh; 1073 1074 return (dhe); 1075 } 1076 1077 /* 1078 * free the linked list of dma descriptor entries. 1079 */ 1080 static void 1081 ch_free_dma_handles(ch_t *chp) 1082 { 1083 free_dh_t *dhe, *the; 1084 1085 dhe = chp->ch_dh; 1086 while (dhe) { 1087 ddi_dma_free_handle((ddi_dma_handle_t *)&dhe->dhe_dh); 1088 the = dhe; 1089 dhe = dhe->dhe_next; 1090 kmem_free(the, sizeof (*the)); 1091 } 1092 chp->ch_dh = NULL; 1093 } 1094 1095 /* 1096 * ch_bind_dma_handle() 1097 * 1098 * returns # of entries used off of cmdQ_ce_t array to hold physical addrs. 
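 * A return value of 0 means no free DMA handle was available, the bind
 * failed, or the cookie count exceeded the caller's array; in the latter
 * two cases the handle is returned to the free list before returning.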
1099 * 1100 * chp - per-board descriptor 1101 * size - # bytes mapped 1102 * vaddr - virtual address 1103 * cmp - array of cmdQ_ce_t entries 1104 * cnt - # free entries in cmp array 1105 */ 1106 1107 uint32_t 1108 ch_bind_dma_handle(ch_t *chp, int size, caddr_t vaddr, cmdQ_ce_t *cmp, 1109 uint32_t cnt) 1110 { 1111 ddi_dma_cookie_t cookie; 1112 ddi_dma_handle_t ch_dh; 1113 uint_t count; 1114 uint32_t n = 1; 1115 free_dh_t *dhe; 1116 uint_t rv; 1117 1118 mutex_enter(&chp->ch_dh_lck); 1119 if ((dhe = chp->ch_dh) != NULL) { 1120 chp->ch_dh = dhe->dhe_next; 1121 } 1122 mutex_exit(&chp->ch_dh_lck); 1123 1124 if (dhe == NULL) { 1125 return (0); 1126 } 1127 1128 ch_dh = (ddi_dma_handle_t)dhe->dhe_dh; 1129 1130 rv = ddi_dma_addr_bind_handle( 1131 ch_dh, /* dma handle */ 1132 (struct as *)0, /* kernel address space */ 1133 vaddr, /* virtual address */ 1134 size, /* length of object */ 1135 DDI_DMA_WRITE|DDI_DMA_STREAMING, 1136 DDI_DMA_SLEEP, /* Wait for resources */ 1137 NULL, /* no argument */ 1138 &cookie, /* dma cookie */ 1139 &count); 1140 if (rv != DDI_DMA_MAPPED) { 1141 1142 /* return dma header descriptor back to free list */ 1143 mutex_enter(&chp->ch_dh_lck); 1144 dhe->dhe_next = chp->ch_dh; 1145 chp->ch_dh = dhe; 1146 mutex_exit(&chp->ch_dh_lck); 1147 1148 cmn_err(CE_WARN, 1149 "%s: ch_bind_dma_handle: ddi_dma_addr_bind_handle err %d\n", 1150 chp->ch_name, rv); 1151 1152 return (0); 1153 } 1154 1155 /* 1156 * abort if we've run out of space 1157 */ 1158 if (count > cnt) { 1159 /* return dma header descriptor back to free list */ 1160 mutex_enter(&chp->ch_dh_lck); 1161 dhe->dhe_next = chp->ch_dh; 1162 chp->ch_dh = dhe; 1163 mutex_exit(&chp->ch_dh_lck); 1164 1165 return (0); 1166 } 1167 1168 cmp->ce_pa = cookie.dmac_laddress; 1169 cmp->ce_dh = NULL; 1170 cmp->ce_len = cookie.dmac_size; 1171 cmp->ce_mp = NULL; 1172 cmp->ce_flg = DH_DMA; 1173 1174 while (--count) { 1175 cmp++; 1176 n++; 1177 ddi_dma_nextcookie(ch_dh, &cookie); 1178 cmp->ce_pa = cookie.dmac_laddress; 1179 cmp->ce_dh = NULL; 1180 cmp->ce_len = cookie.dmac_size; 1181 cmp->ce_mp = NULL; 1182 cmp->ce_flg = DH_DMA; 1183 } 1184 1185 cmp->ce_dh = dhe; 1186 1187 return (n); 1188 } 1189 1190 /* 1191 * ch_unbind_dma_handle() 1192 * 1193 * frees resources alloacted by ch_bind_dma_handle(). 1194 * 1195 * frees DMA handle 1196 */ 1197 1198 void 1199 ch_unbind_dma_handle(ch_t *chp, free_dh_t *dhe) 1200 { 1201 ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dhe->dhe_dh; 1202 1203 if (ddi_dma_unbind_handle(ch_dh)) 1204 cmn_err(CE_WARN, "%s: ddi_dma_unbind_handle failed", 1205 chp->ch_name); 1206 1207 mutex_enter(&chp->ch_dh_lck); 1208 dhe->dhe_next = chp->ch_dh; 1209 chp->ch_dh = dhe; 1210 mutex_exit(&chp->ch_dh_lck); 1211 } 1212 1213 #if defined(__sparc) 1214 /* 1215 * DVMA stuff. Solaris only. 1216 */ 1217 1218 /* 1219 * create a dvma handle and return a dma handle entry. 1220 * DVMA is on sparc only! 
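 * Each handle reserves a small fixed window (three pages) of DVMA space
 * up front via dvma_reserve(); per-packet buffers are later loaded into
 * that window with dvma_kaddr_load() in ch_bind_dvma_handle().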
1221 */ 1222 1223 free_dh_t * 1224 ch_get_dvma_handle(ch_t *chp) 1225 { 1226 ddi_dma_handle_t ch_dh; 1227 ddi_dma_lim_t ch_dvma_attr; 1228 free_dh_t *dhe; 1229 int rv; 1230 1231 dhe = (free_dh_t *)kmem_zalloc(sizeof (*dhe), KM_SLEEP); 1232 1233 ch_dvma_attr.dlim_addr_lo = 0; 1234 ch_dvma_attr.dlim_addr_hi = 0xffffffff; 1235 ch_dvma_attr.dlim_cntr_max = 0xffffffff; 1236 ch_dvma_attr.dlim_burstsizes = 0xfff; 1237 ch_dvma_attr.dlim_minxfer = 1; 1238 ch_dvma_attr.dlim_dmaspeed = 0; 1239 1240 rv = dvma_reserve( 1241 chp->ch_dip, /* device dev_info */ 1242 &ch_dvma_attr, /* DVMA attributes */ 1243 3, /* number of pages */ 1244 &ch_dh); /* DVMA handle */ 1245 1246 if (rv != DDI_SUCCESS) { 1247 1248 cmn_err(CE_WARN, 1249 "%s: ch_get_dvma_handle: dvma_reserve() error %d\n", 1250 chp->ch_name, rv); 1251 1252 kmem_free(dhe, sizeof (*dhe)); 1253 1254 return ((free_dh_t *)0); 1255 } 1256 1257 dhe->dhe_dh = (ulong_t)ch_dh; 1258 1259 return (dhe); 1260 } 1261 1262 /* 1263 * free the linked list of dvma descriptor entries. 1264 * DVMA is only on sparc! 1265 */ 1266 1267 static void 1268 ch_free_dvma_handles(ch_t *chp) 1269 { 1270 free_dh_t *dhe, *the; 1271 1272 dhe = chp->ch_vdh; 1273 while (dhe) { 1274 dvma_release((ddi_dma_handle_t)dhe->dhe_dh); 1275 the = dhe; 1276 dhe = dhe->dhe_next; 1277 kmem_free(the, sizeof (*the)); 1278 } 1279 chp->ch_vdh = NULL; 1280 } 1281 1282 /* 1283 * ch_bind_dvma_handle() 1284 * 1285 * returns # of entries used off of cmdQ_ce_t array to hold physical addrs. 1286 * DVMA in sparc only 1287 * 1288 * chp - per-board descriptor 1289 * size - # bytes mapped 1290 * vaddr - virtual address 1291 * cmp - array of cmdQ_ce_t entries 1292 * cnt - # free entries in cmp array 1293 */ 1294 1295 uint32_t 1296 ch_bind_dvma_handle(ch_t *chp, int size, caddr_t vaddr, cmdQ_ce_t *cmp, 1297 uint32_t cnt) 1298 { 1299 ddi_dma_cookie_t cookie; 1300 ddi_dma_handle_t ch_dh; 1301 uint32_t n = 1; 1302 free_dh_t *dhe; 1303 1304 mutex_enter(&chp->ch_dh_lck); 1305 if ((dhe = chp->ch_vdh) != NULL) { 1306 chp->ch_vdh = dhe->dhe_next; 1307 } 1308 mutex_exit(&chp->ch_dh_lck); 1309 1310 if (dhe == NULL) { 1311 return (0); 1312 } 1313 1314 ch_dh = (ddi_dma_handle_t)dhe->dhe_dh; 1315 n = cnt; 1316 1317 dvma_kaddr_load( 1318 ch_dh, /* dvma handle */ 1319 vaddr, /* virtual address */ 1320 size, /* length of object */ 1321 0, /* start at index 0 */ 1322 &cookie); 1323 1324 dvma_sync(ch_dh, 0, DDI_DMA_SYNC_FORDEV); 1325 1326 cookie.dmac_notused = 0; 1327 n = 1; 1328 1329 cmp->ce_pa = cookie.dmac_laddress; 1330 cmp->ce_dh = dhe; 1331 cmp->ce_len = cookie.dmac_size; 1332 cmp->ce_mp = NULL; 1333 cmp->ce_flg = DH_DVMA; /* indicate a dvma descriptor */ 1334 1335 return (n); 1336 } 1337 1338 /* 1339 * ch_unbind_dvma_handle() 1340 * 1341 * frees resources alloacted by ch_bind_dvma_handle(). 1342 * 1343 * frees DMA handle 1344 */ 1345 1346 void 1347 ch_unbind_dvma_handle(ch_t *chp, free_dh_t *dhe) 1348 { 1349 ddi_dma_handle_t ch_dh = (ddi_dma_handle_t)dhe->dhe_dh; 1350 1351 dvma_unload(ch_dh, 0, -1); 1352 1353 mutex_enter(&chp->ch_dh_lck); 1354 dhe->dhe_next = chp->ch_vdh; 1355 chp->ch_vdh = dhe; 1356 mutex_exit(&chp->ch_dh_lck); 1357 } 1358 1359 #endif /* defined(__sparc) */ 1360 1361 /* 1362 * send received packet up stream. 1363 * 1364 * if driver has been stopped, then we drop the message. 1365 */ 1366 void 1367 ch_send_up(ch_t *chp, mblk_t *mp, uint32_t cksum, int flg) 1368 { 1369 /* 1370 * probably do not need a lock here. 
When we set PESTOP in 1371 * ch_stop() a packet could have just passed here and gone 1372 * upstream. The next one will be dropped. 1373 */ 1374 if (chp->ch_state == PERUNNING) { 1375 /* 1376 * note that flg will not be set unless enable_checksum_offload 1377 * set in /etc/system (see sge.c). 1378 */ 1379 if (flg) 1380 (void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, cksum, 1381 HCK_FULLCKSUM, 0); 1382 gld_recv(chp->ch_macp, mp); 1383 } else { 1384 freemsg(mp); 1385 } 1386 } 1387 1388 /* 1389 * unblock gld driver. 1390 */ 1391 void 1392 ch_gld_ok(ch_t *chp) 1393 { 1394 gld_sched(chp->ch_macp); 1395 } 1396 1397 1398 /* 1399 * reset the card. 1400 * 1401 * Note: we only do this after the card has been initialized. 1402 */ 1403 static int 1404 ch_reset(gld_mac_info_t *mp) 1405 { 1406 ch_t *chp; 1407 1408 if (mp == NULL) { 1409 return (GLD_FAILURE); 1410 } 1411 1412 chp = (ch_t *)mp->gldm_private; 1413 1414 if (chp == NULL) { 1415 return (GLD_FAILURE); 1416 } 1417 1418 #ifdef NOTYET 1419 /* 1420 * do a reset of card 1421 * 1422 * 1. set PwrState to D3hot (3) 1423 * 2. clear PwrState flags 1424 */ 1425 /* 1426 * When we did this, the card didn't start. First guess is that 1427 * the initialization is not quite correct. For now, we don't 1428 * reset things. 1429 */ 1430 if (chp->ch_hpci) { 1431 pci_config_put32(chp->ch_hpci, 0x44, 3); 1432 pci_config_put32(chp->ch_hpci, 0x44, 0); 1433 1434 /* delay .5 sec */ 1435 DELAY(500000); 1436 } 1437 #endif 1438 1439 return (GLD_SUCCESS); 1440 } 1441 1442 static int 1443 ch_start(gld_mac_info_t *macinfo) 1444 { 1445 ch_t *chp = (ch_t *)macinfo->gldm_private; 1446 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 1447 /* only initialize card on first attempt */ 1448 mutex_enter(&chp->ch_lock); 1449 chp->ch_refcnt++; 1450 if (chp->ch_refcnt == 1) { 1451 chp->ch_state = PERUNNING; 1452 mutex_exit(&chp->ch_lock); 1453 pe_init((void *)chp); 1454 } else 1455 mutex_exit(&chp->ch_lock); 1456 #else 1457 pe_init((void *)chp); 1458 1459 /* go to running state, we're being started */ 1460 mutex_enter(&chp->ch_lock); 1461 chp->ch_state = PERUNNING; 1462 mutex_exit(&chp->ch_lock); 1463 #endif 1464 1465 return (GLD_SUCCESS); 1466 } 1467 1468 static int 1469 ch_stop(gld_mac_info_t *mp) 1470 { 1471 ch_t *chp = (ch_t *)mp->gldm_private; 1472 1473 /* 1474 * can only stop the chip if it's been initialized 1475 */ 1476 mutex_enter(&chp->ch_lock); 1477 if (chp->ch_state == PEIDLE) { 1478 mutex_exit(&chp->ch_lock); 1479 return (GLD_FAILURE); 1480 } 1481 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 1482 chp->ch_refcnt--; 1483 if (chp->ch_refcnt == 0) { 1484 chp->ch_state = PESTOP; 1485 mutex_exit(&chp->ch_lock); 1486 pe_stop(chp); 1487 } else 1488 mutex_exit(&chp->ch_lock); 1489 #else 1490 chp->ch_state = PESTOP; 1491 mutex_exit(&chp->ch_lock); 1492 pe_stop(chp); 1493 #endif 1494 return (GLD_SUCCESS); 1495 } 1496 1497 static int 1498 ch_set_mac_address(gld_mac_info_t *mp, uint8_t *mac) 1499 { 1500 ch_t *chp; 1501 1502 if (mp) { 1503 chp = (ch_t *)mp->gldm_private; 1504 } else { 1505 return (GLD_FAILURE); 1506 } 1507 1508 pe_set_mac(chp, mac); 1509 1510 return (GLD_SUCCESS); 1511 } 1512 1513 static int 1514 ch_set_multicast(gld_mac_info_t *mp, uint8_t *ep, int flg) 1515 { 1516 ch_t *chp = (ch_t *)mp->gldm_private; 1517 1518 return (pe_set_mc(chp, ep, flg)); 1519 } 1520 1521 static int 1522 ch_ioctl(gld_mac_info_t *macinfo, queue_t *q, mblk_t *mp) 1523 { 1524 struct iocblk *iocp; 1525 1526 switch (mp->b_datap->db_type) { 1527 case M_IOCTL: 1528 /* pe_ioctl() does qreply() */ 1529 pe_ioctl((ch_t 
*)(macinfo->gldm_private), q, mp); 1530 break; 1531 1532 default: 1533 /* 1534 * cmn_err(CE_NOTE, "ch_ioctl not M_IOCTL\n"); 1535 * debug_enter("bad ch_ioctl"); 1536 */ 1537 1538 iocp = (struct iocblk *)mp->b_rptr; 1539 1540 if (mp->b_cont) 1541 freemsg(mp->b_cont); 1542 mp->b_cont = NULL; 1543 1544 mp->b_datap->db_type = M_IOCNAK; 1545 iocp->ioc_error = EINVAL; 1546 qreply(q, mp); 1547 break; 1548 } 1549 1550 return (GLD_SUCCESS); 1551 } 1552 1553 static int 1554 ch_set_promiscuous(gld_mac_info_t *mp, int flag) 1555 { 1556 ch_t *chp = (ch_t *)mp->gldm_private; 1557 1558 switch (flag) { 1559 case GLD_MAC_PROMISC_MULTI: 1560 pe_set_promiscuous(chp, 2); 1561 break; 1562 1563 case GLD_MAC_PROMISC_NONE: 1564 pe_set_promiscuous(chp, 0); 1565 break; 1566 1567 case GLD_MAC_PROMISC_PHYS: 1568 default: 1569 pe_set_promiscuous(chp, 1); 1570 break; 1571 } 1572 1573 return (GLD_SUCCESS); 1574 } 1575 1576 static int 1577 ch_get_stats(gld_mac_info_t *mp, struct gld_stats *gs) 1578 { 1579 ch_t *chp = (ch_t *)mp->gldm_private; 1580 uint64_t speed; 1581 uint32_t intrcnt; 1582 uint32_t norcvbuf; 1583 uint32_t oerrors; 1584 uint32_t ierrors; 1585 uint32_t underrun; 1586 uint32_t overrun; 1587 uint32_t framing; 1588 uint32_t crc; 1589 uint32_t carrier; 1590 uint32_t collisions; 1591 uint32_t xcollisions; 1592 uint32_t late; 1593 uint32_t defer; 1594 uint32_t xerrs; 1595 uint32_t rerrs; 1596 uint32_t toolong; 1597 uint32_t runt; 1598 ulong_t multixmt; 1599 ulong_t multircv; 1600 ulong_t brdcstxmt; 1601 ulong_t brdcstrcv; 1602 1603 /* 1604 * race looks benign here. 1605 */ 1606 if (chp->ch_state != PERUNNING) { 1607 return (GLD_FAILURE); 1608 } 1609 1610 (void) pe_get_stats(chp, 1611 &speed, 1612 &intrcnt, 1613 &norcvbuf, 1614 &oerrors, 1615 &ierrors, 1616 &underrun, 1617 &overrun, 1618 &framing, 1619 &crc, 1620 &carrier, 1621 &collisions, 1622 &xcollisions, 1623 &late, 1624 &defer, 1625 &xerrs, 1626 &rerrs, 1627 &toolong, 1628 &runt, 1629 &multixmt, 1630 &multircv, 1631 &brdcstxmt, 1632 &brdcstrcv); 1633 1634 gs->glds_speed = speed; 1635 gs->glds_media = GLDM_UNKNOWN; 1636 gs->glds_intr = intrcnt; 1637 gs->glds_norcvbuf = norcvbuf; 1638 gs->glds_errxmt = oerrors; 1639 gs->glds_errrcv = ierrors; 1640 gs->glds_missed = ierrors; /* ??? 
*/ 1641 gs->glds_underflow = underrun; 1642 gs->glds_overflow = overrun; 1643 gs->glds_frame = framing; 1644 gs->glds_crc = crc; 1645 gs->glds_duplex = GLD_DUPLEX_FULL; 1646 gs->glds_nocarrier = carrier; 1647 gs->glds_collisions = collisions; 1648 gs->glds_excoll = xcollisions; 1649 gs->glds_xmtlatecoll = late; 1650 gs->glds_defer = defer; 1651 gs->glds_dot3_first_coll = 0; /* Not available */ 1652 gs->glds_dot3_multi_coll = 0; /* Not available */ 1653 gs->glds_dot3_sqe_error = 0; /* Not available */ 1654 gs->glds_dot3_mac_xmt_error = xerrs; 1655 gs->glds_dot3_mac_rcv_error = rerrs; 1656 gs->glds_dot3_frame_too_long = toolong; 1657 gs->glds_short = runt; 1658 1659 gs->glds_noxmtbuf = 0; /* not documented */ 1660 gs->glds_xmtretry = 0; /* not documented */ 1661 gs->glds_multixmt = multixmt; /* not documented */ 1662 gs->glds_multircv = multircv; /* not documented */ 1663 gs->glds_brdcstxmt = brdcstxmt; /* not documented */ 1664 gs->glds_brdcstrcv = brdcstrcv; /* not documented */ 1665 1666 return (GLD_SUCCESS); 1667 } 1668 1669 1670 static int 1671 ch_send(gld_mac_info_t *macinfo, mblk_t *mp) 1672 { 1673 ch_t *chp = (ch_t *)macinfo->gldm_private; 1674 uint32_t flg; 1675 uint32_t msg_flg; 1676 1677 #ifdef TX_CKSUM_FIX 1678 mblk_t *nmp; 1679 int frags; 1680 size_t msg_len; 1681 struct ether_header *ehdr; 1682 ipha_t *ihdr; 1683 int tflg = 0; 1684 #endif /* TX_CKSUM_FIX */ 1685 1686 /* 1687 * race looks benign here. 1688 */ 1689 if (chp->ch_state != PERUNNING) { 1690 return (GLD_FAILURE); 1691 } 1692 1693 msg_flg = 0; 1694 if (chp->ch_config.cksum_enabled) { 1695 if (is_T2(chp)) { 1696 hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, 1697 NULL, &msg_flg); 1698 flg = (msg_flg & HCK_FULLCKSUM)? 1699 CH_NO_CPL: CH_NO_HWCKSUM|CH_NO_CPL; 1700 } else 1701 flg = CH_NO_CPL; 1702 } else 1703 flg = CH_NO_HWCKSUM | CH_NO_CPL; 1704 1705 #ifdef TX_CKSUM_FIX 1706 /* 1707 * Check if the message spans more than one mblk or 1708 * if it does and the ip header is not in the first 1709 * fragment then pull up the message. This case is 1710 * expected to be rare. 1711 */ 1712 frags = 0; 1713 msg_len = 0; 1714 nmp = mp; 1715 do { 1716 frags++; 1717 msg_len += MBLKL(nmp); 1718 nmp = nmp->b_cont; 1719 } while (nmp); 1720 #define MAX_ALL_HDRLEN SZ_CPL_TX_PKT + sizeof (struct ether_header) + \ 1721 TCP_MAX_COMBINED_HEADER_LENGTH 1722 /* 1723 * If the first mblk has enough space at the beginning of 1724 * the data buffer to hold a CPL header, then, we'll expancd 1725 * the front of the buffer so a pullup will leave space for 1726 * pe_start() to add the CPL header in line. We need to remember 1727 * that we've done this so we can undo it after the pullup. 1728 * 1729 * Note that if we decide to do an allocb to hold the CPL header, 1730 * we need to catch the case where we've added an empty mblk for 1731 * the header but never did a pullup. This would result in the 1732 * tests for etherheader, etc. being done on the initial, empty, 1733 * mblk instead of the one with data. See PR3646 for further 1734 * details. (note this PR is closed since it is no longer relevant). 1735 * 1736 * Another point is that if we do add an allocb to add space for 1737 * a CPL header, after a pullup, the initial pointer, mp, in GLD will 1738 * no longer point to a valid mblk. When we get the mblk (by allocb), 1739 * we need to switch the mblk structure values between it and the 1740 * mp structure values referenced by GLD. This handles the case where 1741 * we've run out of cmdQ entries and report GLD_NORESOURCES back to 1742 * GLD. 
The pointer to the mblk data will have been modified to hold 1743 * an empty 8 bytes for the CPL header, For now, we let the pe_start() 1744 * routine prepend an 8 byte mblk. 1745 */ 1746 if (MBLKHEAD(mp) >= SZ_CPL_TX_PKT) { 1747 mp->b_rptr -= SZ_CPL_TX_PKT; 1748 tflg = 1; 1749 } 1750 if (frags > 3) { 1751 chp->sge->intr_cnt.tx_msg_pullups++; 1752 if (pullupmsg(mp, -1) == 0) { 1753 freemsg(mp); 1754 return (GLD_SUCCESS); 1755 } 1756 } else if ((msg_len > MAX_ALL_HDRLEN) && 1757 (MBLKL(mp) < MAX_ALL_HDRLEN)) { 1758 chp->sge->intr_cnt.tx_hdr_pullups++; 1759 if (pullupmsg(mp, MAX_ALL_HDRLEN) == 0) { 1760 freemsg(mp); 1761 return (GLD_SUCCESS); 1762 } 1763 } 1764 if (tflg) 1765 mp->b_rptr += SZ_CPL_TX_PKT; 1766 1767 ehdr = (struct ether_header *)mp->b_rptr; 1768 if (ehdr->ether_type == htons(ETHERTYPE_IP)) { 1769 ihdr = (ipha_t *)&mp->b_rptr[sizeof (struct ether_header)]; 1770 if ((ihdr->ipha_fragment_offset_and_flags & IPH_MF)) { 1771 if (ihdr->ipha_protocol == IPPROTO_UDP) { 1772 flg |= CH_UDP_MF; 1773 chp->sge->intr_cnt.tx_udp_ip_frag++; 1774 } else if (ihdr->ipha_protocol == IPPROTO_TCP) { 1775 flg |= CH_TCP_MF; 1776 chp->sge->intr_cnt.tx_tcp_ip_frag++; 1777 } 1778 } else if (ihdr->ipha_protocol == IPPROTO_UDP) 1779 flg |= CH_UDP; 1780 } 1781 #endif /* TX_CKSUM_FIX */ 1782 1783 /* 1784 * return 0 - data send successfully 1785 * return 1 - no resources, reschedule 1786 */ 1787 if (pe_start(chp, mp, flg)) 1788 return (GLD_NORESOURCES); 1789 else 1790 return (GLD_SUCCESS); 1791 } 1792 1793 static uint_t 1794 ch_intr(gld_mac_info_t *mp) 1795 { 1796 return (pe_intr((ch_t *)mp->gldm_private)); 1797 } 1798 1799 /* 1800 * generate name of driver with unit# postpended. 1801 */ 1802 void 1803 ch_set_name(ch_t *chp, int unit) 1804 { 1805 chp->ch_name = (char *)kmem_alloc(sizeof ("chxge00"), KM_SLEEP); 1806 if (unit > 9) { 1807 bcopy("chxge00", (void *)chp->ch_name, sizeof ("chxge00")); 1808 chp->ch_name[5] += unit/10; 1809 chp->ch_name[6] += unit%10; 1810 } else { 1811 bcopy("chxge0", (void *)chp->ch_name, sizeof ("chxge0")); 1812 chp->ch_name[5] += unit; 1813 } 1814 } 1815 1816 void 1817 ch_free_name(ch_t *chp) 1818 { 1819 if (chp->ch_name) 1820 kmem_free(chp->ch_name, sizeof ("chxge00")); 1821 chp->ch_name = NULL; 1822 } 1823 1824 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 1825 /* 1826 * register toe offload. 1827 */ 1828 void * 1829 ch_register(void *instp, void *toe_rcv, void *toe_free, void *toe_tunnel, 1830 kmutex_t *toe_tx_mx, kcondvar_t *toe_of_cv, int unit) 1831 { 1832 ch_t *chp = gchp[unit]; 1833 if (chp != NULL) { 1834 mutex_enter(&chp->ch_lock); 1835 1836 chp->toe_rcv = (void (*)(void *, mblk_t *))toe_rcv; 1837 chp->ch_toeinst = instp; 1838 chp->toe_free = (void (*)(void *, tbuf_t *))toe_free; 1839 chp->toe_tunnel = (int (*)(void *, mblk_t *))toe_tunnel; 1840 chp->ch_tx_overflow_mutex = toe_tx_mx; 1841 chp->ch_tx_overflow_cv = toe_of_cv; 1842 chp->open_device_map |= TOEDEV_DEVMAP_BIT; 1843 1844 /* start up adapter if first user */ 1845 chp->ch_refcnt++; 1846 if (chp->ch_refcnt == 1) { 1847 chp->ch_state = PERUNNING; 1848 mutex_exit(&chp->ch_lock); 1849 pe_init((void *)chp); 1850 } else 1851 mutex_exit(&chp->ch_lock); 1852 } 1853 return ((void *)gchp[unit]); 1854 } 1855 1856 /* 1857 * unregister toe offload. 1858 * XXX Need to fix races here. 1859 * 1. turn off SGE interrupts. 1860 * 2. do update 1861 * 3. re-enable SGE interrupts 1862 * 4. SGE doorbell to make sure things get restarted. 
1863 */ 1864 void 1865 ch_unregister(void) 1866 { 1867 int i; 1868 ch_t *chp; 1869 1870 for (i = 0; i < MAX_CARDS; i++) { 1871 chp = gchp[i]; 1872 if (chp == NULL) 1873 continue; 1874 1875 mutex_enter(&chp->ch_lock); 1876 1877 chp->ch_refcnt--; 1878 if (chp->ch_refcnt == 0) { 1879 chp->ch_state = PESTOP; 1880 mutex_exit(&chp->ch_lock); 1881 pe_stop(chp); 1882 } else 1883 mutex_exit(&chp->ch_lock); 1884 1885 chp->open_device_map &= ~TOEDEV_DEVMAP_BIT; 1886 chp->toe_rcv = NULL; 1887 chp->ch_toeinst = NULL; 1888 chp->toe_free = NULL; 1889 chp->toe_tunnel = NULL; 1890 chp->ch_tx_overflow_mutex = NULL; 1891 chp->ch_tx_overflow_cv = NULL; 1892 } 1893 } 1894 #endif /* CONFIG_CHELSIO_T1_OFFLOAD */ 1895 1896 /* 1897 * get properties from chxge.conf 1898 */ 1899 static void 1900 ch_get_prop(ch_t *chp) 1901 { 1902 int val; 1903 int tval = 0; 1904 extern int enable_latency_timer; 1905 extern uint32_t sge_cmdq0_cnt; 1906 extern uint32_t sge_cmdq1_cnt; 1907 extern uint32_t sge_flq0_cnt; 1908 extern uint32_t sge_flq1_cnt; 1909 extern uint32_t sge_respq_cnt; 1910 extern uint32_t sge_cmdq0_cnt_orig; 1911 extern uint32_t sge_cmdq1_cnt_orig; 1912 extern uint32_t sge_flq0_cnt_orig; 1913 extern uint32_t sge_flq1_cnt_orig; 1914 extern uint32_t sge_respq_cnt_orig; 1915 dev_info_t *pdip; 1916 uint32_t vendor_id, device_id, revision_id; 1917 uint32_t *prop_val = NULL; 1918 uint32_t prop_len = 0; 1919 1920 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 1921 "enable_dvma", -1); 1922 if (val == -1) 1923 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 1924 "enable-dvma", -1); 1925 if (val != -1) { 1926 if (val != 0) 1927 chp->ch_config.enable_dvma = 1; 1928 } 1929 1930 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 1931 "amd_bug_workaround", -1); 1932 if (val == -1) 1933 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 1934 "amd-bug-workaround", -1); 1935 1936 if (val != -1) { 1937 if (val == 0) { 1938 chp->ch_config.burstsize_set = 0; 1939 chp->ch_config.transaction_cnt_set = 0; 1940 goto fail_exit; 1941 } 1942 } 1943 /* 1944 * Step up to the parent node, That's the node above us 1945 * in the device tree. And will typically be the PCI host 1946 * Controller. 1947 */ 1948 pdip = ddi_get_parent(chp->ch_dip); 1949 1950 /* 1951 * Now get the 'Vendor id' properties 1952 */ 1953 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "vendor-id", 1954 (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) { 1955 chp->ch_config.burstsize_set = 0; 1956 chp->ch_config.transaction_cnt_set = 0; 1957 goto fail_exit; 1958 } 1959 vendor_id = *(uint32_t *)prop_val; 1960 ddi_prop_free(prop_val); 1961 1962 /* 1963 * Now get the 'Device id' properties 1964 */ 1965 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "device-id", 1966 (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) { 1967 chp->ch_config.burstsize_set = 0; 1968 chp->ch_config.transaction_cnt_set = 0; 1969 goto fail_exit; 1970 } 1971 device_id = *(uint32_t *)prop_val; 1972 ddi_prop_free(prop_val); 1973 1974 /* 1975 * Now get the 'Revision id' properties 1976 */ 1977 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pdip, 0, "revision-id", 1978 (int **)&prop_val, &prop_len) != DDI_PROP_SUCCESS) { 1979 chp->ch_config.burstsize_set = 0; 1980 chp->ch_config.transaction_cnt_set = 0; 1981 goto fail_exit; 1982 } 1983 revision_id = *(uint32_t *)prop_val; 1984 ddi_prop_free(prop_val); 1985 1986 /* 1987 * set default values based on node above us. 
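	 * In practice this means capping the PCI-X burst size and split
	 * transaction count when the parent node is an affected AMD bridge
	 * (AMD_VENDOR_ID/AMD_BRIDGE at revision <= AMD_BRIDGE_REV).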
1988 */ 1989 if ((vendor_id == AMD_VENDOR_ID) && (device_id == AMD_BRIDGE) && 1990 (revision_id <= AMD_BRIDGE_REV)) { 1991 uint32_t v; 1992 uint32_t burst; 1993 uint32_t cnt; 1994 1995 /* if 133 Mhz not enabled, then do nothing - we're not PCIx */ 1996 v = pci_config_get32(chp->ch_hpci, 0x64); 1997 if ((v & 0x20000) == 0) { 1998 chp->ch_config.burstsize_set = 0; 1999 chp->ch_config.transaction_cnt_set = 0; 2000 goto fail_exit; 2001 } 2002 2003 /* check burst size and transaction count */ 2004 v = pci_config_get32(chp->ch_hpci, 0x60); 2005 burst = (v >> 18) & 3; 2006 cnt = (v >> 20) & 7; 2007 2008 switch (burst) { 2009 case 0: /* 512 */ 2010 /* 512 burst size legal with split cnts 1,2,3 */ 2011 if (cnt <= 2) { 2012 chp->ch_config.burstsize_set = 0; 2013 chp->ch_config.transaction_cnt_set = 0; 2014 goto fail_exit; 2015 } 2016 break; 2017 case 1: /* 1024 */ 2018 /* 1024 burst size legal with split cnts 1,2 */ 2019 if (cnt <= 1) { 2020 chp->ch_config.burstsize_set = 0; 2021 chp->ch_config.transaction_cnt_set = 0; 2022 goto fail_exit; 2023 } 2024 break; 2025 case 2: /* 2048 */ 2026 /* 2048 burst size legal with split cnts 1 */ 2027 if (cnt == 0) { 2028 chp->ch_config.burstsize_set = 0; 2029 chp->ch_config.transaction_cnt_set = 0; 2030 goto fail_exit; 2031 } 2032 break; 2033 case 3: /* 4096 */ 2034 break; 2035 } 2036 } else { 2037 goto fail_exit; 2038 } 2039 2040 /* 2041 * if illegal burst size seen, then default to 1024 burst size 2042 */ 2043 chp->ch_config.burstsize = 1; 2044 chp->ch_config.burstsize_set = 1; 2045 /* 2046 * if illegal transaction cnt seen, then default to 2 2047 */ 2048 chp->ch_config.transaction_cnt = 1; 2049 chp->ch_config.transaction_cnt_set = 1; 2050 2051 2052 fail_exit: 2053 2054 /* 2055 * alter the burstsize parameter via an entry 2056 * in chxge.conf 2057 */ 2058 2059 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2060 "pci_burstsize", -1); 2061 if (val == -1) 2062 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2063 "pci-burstsize", -1); 2064 2065 if (val != -1) { 2066 2067 switch (val) { 2068 case 0: /* use default */ 2069 chp->ch_config.burstsize_set = 0; 2070 break; 2071 2072 case 1024: 2073 chp->ch_config.burstsize_set = 1; 2074 chp->ch_config.burstsize = 1; 2075 break; 2076 2077 case 2048: 2078 chp->ch_config.burstsize_set = 1; 2079 chp->ch_config.burstsize = 2; 2080 break; 2081 2082 case 4096: 2083 cmn_err(CE_WARN, "%s not supported %d\n", 2084 chp->ch_name, val); 2085 break; 2086 2087 default: 2088 cmn_err(CE_WARN, "%s illegal burst size %d\n", 2089 chp->ch_name, val); 2090 break; 2091 } 2092 } 2093 2094 /* 2095 * set transaction count 2096 */ 2097 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2098 "pci_split_transaction_cnt", -1); 2099 if (val == -1) 2100 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2101 "pci-split-transaction-cnt", -1); 2102 2103 if (val != -1) { 2104 switch (val) { 2105 case 0: /* use default */ 2106 chp->ch_config.transaction_cnt_set = 0; 2107 break; 2108 2109 case 1: 2110 chp->ch_config.transaction_cnt_set = 1; 2111 chp->ch_config.transaction_cnt = 0; 2112 break; 2113 2114 case 2: 2115 chp->ch_config.transaction_cnt_set = 1; 2116 chp->ch_config.transaction_cnt = 1; 2117 break; 2118 2119 case 3: 2120 chp->ch_config.transaction_cnt_set = 1; 2121 chp->ch_config.transaction_cnt = 2; 2122 break; 2123 2124 case 4: 2125 chp->ch_config.transaction_cnt_set = 1; 2126 chp->ch_config.transaction_cnt = 3; 2127 break; 2128 2129 case 8: 2130 chp->ch_config.transaction_cnt_set = 1; 
2131 chp->ch_config.transaction_cnt = 4; 2132 break; 2133 2134 case 12: 2135 chp->ch_config.transaction_cnt_set = 1; 2136 chp->ch_config.transaction_cnt = 5; 2137 break; 2138 2139 case 16: 2140 chp->ch_config.transaction_cnt_set = 1; 2141 chp->ch_config.transaction_cnt = 6; 2142 break; 2143 2144 case 32: 2145 chp->ch_config.transaction_cnt_set = 1; 2146 chp->ch_config.transaction_cnt = 7; 2147 break; 2148 2149 default: 2150 cmn_err(CE_WARN, "%s illegal transaction cnt %d\n", 2151 chp->ch_name, val); 2152 break; 2153 } 2154 } 2155 2156 /* 2157 * set relaxed ordering bit? 2158 */ 2159 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2160 "pci_relaxed_ordering_on", -1); 2161 if (val == -1) 2162 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2163 "pci-relaxed-ordering-on", -1); 2164 2165 /* 2166 * default is to use system default value. 2167 */ 2168 chp->ch_config.relaxed_ordering = 0; 2169 2170 if (val != -1) { 2171 if (val) 2172 chp->ch_config.relaxed_ordering = 1; 2173 } 2174 2175 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2176 "enable_latency_timer", -1); 2177 if (val == -1) 2178 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2179 "enable-latency-timer", -1); 2180 if (val != -1) 2181 enable_latency_timer = (val == 0)? 0: 1; 2182 2183 /* 2184 * default maximum Jumbo Frame size. 2185 */ 2186 chp->ch_maximum_mtu = 9198; /* tunable via chxge.conf */ 2187 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2188 "maximum_mtu", -1); 2189 if (val == -1) { 2190 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2191 "maximum-mtu", -1); 2192 } 2193 if (val != -1) { 2194 if (val > 9582) { 2195 cmn_err(CE_WARN, 2196 "maximum_mtu value %d > 9582. Value set to 9582", 2197 val); 2198 val = 9582; 2199 } else if (val < 1500) { 2200 cmn_err(CE_WARN, 2201 "maximum_mtu value %d < 1500. 
Value set to 1500", 2202 val); 2203 val = 1500; 2204 } 2205 2206 if (val) 2207 chp->ch_maximum_mtu = val; 2208 } 2209 2210 /* 2211 * default value for this instance mtu 2212 */ 2213 chp->ch_mtu = ETHERMTU; 2214 2215 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2216 "accept_jumbo", -1); 2217 if (val == -1) { 2218 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2219 "accept-jumbo", -1); 2220 } 2221 if (val != -1) { 2222 if (val) 2223 chp->ch_mtu = chp->ch_maximum_mtu; 2224 } 2225 #ifdef CONFIG_CHELSIO_T1_OFFLOAD 2226 chp->ch_sm_buf_sz = 0x800; 2227 chp->ch_sm_buf_aln = 0x800; 2228 chp->ch_bg_buf_sz = 0x4000; 2229 chp->ch_bg_buf_aln = 0x4000; 2230 #else 2231 chp->ch_sm_buf_sz = 0x200; 2232 chp->ch_sm_buf_aln = 0x200; 2233 chp->ch_bg_buf_sz = 0x800; 2234 chp->ch_bg_buf_aln = 0x800; 2235 if ((chp->ch_mtu > 0x800) && (chp->ch_mtu <= 0x1000)) { 2236 chp->ch_sm_buf_sz = 0x400; 2237 chp->ch_sm_buf_aln = 0x400; 2238 chp->ch_bg_buf_sz = 0x1000; 2239 chp->ch_bg_buf_aln = 0x1000; 2240 } else if ((chp->ch_mtu > 0x1000) && (chp->ch_mtu <= 0x2000)) { 2241 chp->ch_sm_buf_sz = 0x400; 2242 chp->ch_sm_buf_aln = 0x400; 2243 chp->ch_bg_buf_sz = 0x2000; 2244 chp->ch_bg_buf_aln = 0x2000; 2245 } else if (chp->ch_mtu > 0x2000) { 2246 chp->ch_sm_buf_sz = 0x400; 2247 chp->ch_sm_buf_aln = 0x400; 2248 chp->ch_bg_buf_sz = 0x3000; 2249 chp->ch_bg_buf_aln = 0x4000; 2250 } 2251 #endif 2252 chp->ch_config.cksum_enabled = 1; 2253 2254 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2255 "enable_checksum_offload", -1); 2256 if (val == -1) 2257 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2258 "enable-checksum-offload", -1); 2259 if (val != -1) { 2260 if (val == 0) 2261 chp->ch_config.cksum_enabled = 0; 2262 } 2263 2264 /* 2265 * Provides a tuning capability for the command queue 0 size. 2266 */ 2267 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2268 "sge_cmdq0_cnt", -1); 2269 if (val == -1) 2270 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2271 "sge-cmdq0-cnt", -1); 2272 if (val != -1) { 2273 if (val > 10) 2274 sge_cmdq0_cnt = val; 2275 } 2276 2277 if (sge_cmdq0_cnt > 65535) { 2278 cmn_err(CE_WARN, 2279 "%s: sge-cmdQ0-cnt > 65535 - resetting value to default", 2280 chp->ch_name); 2281 sge_cmdq0_cnt = sge_cmdq0_cnt_orig; 2282 } 2283 tval += sge_cmdq0_cnt; 2284 2285 /* 2286 * Provides a tuning capability for the command queue 1 size. 2287 */ 2288 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2289 "sge_cmdq1_cnt", -1); 2290 if (val == -1) 2291 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2292 "sge-cmdq1-cnt", -1); 2293 if (val != -1) { 2294 if (val > 10) 2295 sge_cmdq1_cnt = val; 2296 } 2297 2298 if (sge_cmdq1_cnt > 65535) { 2299 cmn_err(CE_WARN, 2300 "%s: sge-cmdQ0-cnt > 65535 - resetting value to default", 2301 chp->ch_name); 2302 sge_cmdq1_cnt = sge_cmdq1_cnt_orig; 2303 } 2304 2305 /* 2306 * Provides a tuning capability for the free list 0 size. 
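	 *
	 * For example, a chxge.conf entry of the form
	 *
	 *	sge_flq0_cnt=1024;
	 *
	 * (or the equivalent "sge-flq0-cnt" spelling) is picked up by the
	 * ddi_getprop() calls below; values of 512 or less are ignored and
	 * counts above 65535 fall back to the built-in default.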
2307 */ 2308 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2309 "sge_flq0_cnt", -1); 2310 if (val == -1) 2311 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2312 "sge-flq0-cnt", -1); 2313 if (val != -1) { 2314 if (val > 512) 2315 sge_flq0_cnt = val; 2316 } 2317 2318 if (sge_flq0_cnt > 65535) { 2319 cmn_err(CE_WARN, 2320 "%s: sge-flq0-cnt > 65535 - resetting value to default", 2321 chp->ch_name); 2322 sge_flq0_cnt = sge_flq0_cnt_orig; 2323 } 2324 2325 tval += sge_flq0_cnt; 2326 2327 /* 2328 * Provides a tuning capability for the free list 1 size. 2329 */ 2330 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2331 "sge_flq1_cnt", -1); 2332 if (val == -1) 2333 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2334 "sge-flq1-cnt", -1); 2335 if (val != -1) { 2336 if (val > 512) 2337 sge_flq1_cnt = val; 2338 } 2339 2340 if (sge_flq1_cnt > 65535) { 2341 cmn_err(CE_WARN, 2342 "%s: sge-flq1-cnt > 65535 - resetting value to default", 2343 chp->ch_name); 2344 sge_flq1_cnt = sge_flq1_cnt_orig; 2345 } 2346 2347 tval += sge_flq1_cnt; 2348 2349 /* 2350 * Provides a tuning capability for the responce queue size. 2351 */ 2352 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2353 "sge_respq_cnt", -1); 2354 if (val == -1) 2355 val = ddi_getprop(DDI_DEV_T_ANY, chp->ch_dip, DDI_PROP_DONTPASS, 2356 "sge-respq-cnt", -1); 2357 if (val != -1) { 2358 if (val > 30) 2359 sge_respq_cnt = val; 2360 } 2361 2362 if (sge_respq_cnt > 65535) { 2363 cmn_err(CE_WARN, 2364 "%s: sge-respq-cnt > 65535 - resetting value to default", 2365 chp->ch_name); 2366 sge_respq_cnt = sge_respq_cnt_orig; 2367 } 2368 2369 if (tval > sge_respq_cnt) { 2370 if (tval <= 65535) { 2371 cmn_err(CE_WARN, 2372 "%s: sge-respq-cnt < %d - setting value to %d (cmdQ+flq0+flq1)", 2373 chp->ch_name, tval, tval); 2374 2375 sge_respq_cnt = tval; 2376 } else { 2377 cmn_err(CE_WARN, 2378 "%s: Q sizes invalid - resetting to default values", 2379 chp->ch_name); 2380 2381 sge_cmdq0_cnt = sge_cmdq0_cnt_orig; 2382 sge_cmdq1_cnt = sge_cmdq1_cnt_orig; 2383 sge_flq0_cnt = sge_flq0_cnt_orig; 2384 sge_flq1_cnt = sge_flq1_cnt_orig; 2385 sge_respq_cnt = sge_respq_cnt_orig; 2386 } 2387 } 2388 } 2389