/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/errno.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/sysmacros.h>

#include <sys/ioat.h>

static int ioat_open(dev_t *devp, int flag, int otyp, cred_t *cred);
static int ioat_close(dev_t devp, int flag, int otyp, cred_t *cred);
static int ioat_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int ioat_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int ioat_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **result);

static struct cb_ops ioat_cb_ops = {
        ioat_open,              /* cb_open */
        ioat_close,             /* cb_close */
        nodev,                  /* cb_strategy */
        nodev,                  /* cb_print */
        nodev,                  /* cb_dump */
        nodev,                  /* cb_read */
        nodev,                  /* cb_write */
        ioat_ioctl,             /* cb_ioctl */
        nodev,                  /* cb_devmap */
        nodev,                  /* cb_mmap */
        nodev,                  /* cb_segmap */
        nochpoll,               /* cb_chpoll */
        ddi_prop_op,            /* cb_prop_op */
        NULL,                   /* cb_stream */
        D_NEW | D_MP | D_64BIT | D_DEVMAP,      /* cb_flag */
        CB_REV
};

static struct dev_ops ioat_dev_ops = {
        DEVO_REV,               /* devo_rev */
        0,                      /* devo_refcnt */
        ioat_getinfo,           /* devo_getinfo */
        nulldev,                /* devo_identify */
        nulldev,                /* devo_probe */
        ioat_attach,            /* devo_attach */
        ioat_detach,            /* devo_detach */
        nodev,                  /* devo_reset */
        &ioat_cb_ops,           /* devo_cb_ops */
        NULL,                   /* devo_bus_ops */
        NULL                    /* power */
};

static struct modldrv ioat_modldrv = {
        &mod_driverops,         /* Type of module.  This one is a driver */
        "ioat driver v%I%",     /* Name of the module. */
        &ioat_dev_ops,          /* driver ops */
};

static struct modlinkage ioat_modlinkage = {
        MODREV_1,
        (void *) &ioat_modldrv,
        NULL
};


void *ioat_statep;

static int ioat_chip_init(ioat_state_t *state);
static void ioat_chip_fini(ioat_state_t *state);
static int ioat_drv_init(ioat_state_t *state);
static void ioat_drv_fini(ioat_state_t *state);
static uint_t ioat_isr(caddr_t parm);
static void ioat_intr_enable(ioat_state_t *state);
static void ioat_intr_disable(ioat_state_t *state);
void ioat_detach_finish(ioat_state_t *state);


ddi_device_acc_attr_t ioat_acc_attr = {
        DDI_DEVICE_ATTR_V0,             /* devacc_attr_version */
        DDI_NEVERSWAP_ACC,              /* devacc_attr_endian_flags */
        DDI_STORECACHING_OK_ACC,        /* devacc_attr_dataorder */
        DDI_DEFAULT_ACC                 /* devacc_attr_access */
};

/* dcopy callback interface */
dcopy_device_cb_t ioat_cb = {
        DCOPY_DEVICECB_V0,
        0,                      /* reserved */
        ioat_channel_alloc,
        ioat_channel_free,
        ioat_cmd_alloc,
        ioat_cmd_free,
        ioat_cmd_post,
        ioat_cmd_poll,
        ioat_unregister_complete
};
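
/*
 * A note on how the callback table above is used (a summary of this file,
 * not new behavior): ioat_attach() hands ioat_cb to dcopy_device_register(),
 * after which the dcopy framework invokes these channel and command entry
 * points on behalf of its consumers. Most of the callbacks are implemented
 * elsewhere in the driver; ioat_unregister_complete() is defined below and
 * completes an asynchronous detach.
 */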

/*
 * _init()
 */
int
_init(void)
{
        int e;

        e = ddi_soft_state_init(&ioat_statep, sizeof (ioat_state_t), 1);
        if (e != 0) {
                return (e);
        }

        e = mod_install(&ioat_modlinkage);
        if (e != 0) {
                ddi_soft_state_fini(&ioat_statep);
                return (e);
        }

        return (0);
}

/*
 * _info()
 */
int
_info(struct modinfo *modinfop)
{
        return (mod_info(&ioat_modlinkage, modinfop));
}

/*
 * _fini()
 */
int
_fini(void)
{
        int e;

        e = mod_remove(&ioat_modlinkage);
        if (e != 0) {
                return (e);
        }

        ddi_soft_state_fini(&ioat_statep);

        return (0);
}

/*
 * ioat_attach()
 */
static int
ioat_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        ioat_state_t *state;
        int instance;
        int e;


        switch (cmd) {
        case DDI_ATTACH:
                break;

        case DDI_RESUME:
                instance = ddi_get_instance(dip);
                state = ddi_get_soft_state(ioat_statep, instance);
                if (state == NULL) {
                        return (DDI_FAILURE);
                }
                e = ioat_channel_resume(state);
                if (e != DDI_SUCCESS) {
                        return (DDI_FAILURE);
                }
                ioat_intr_enable(state);
                return (DDI_SUCCESS);

        default:
                return (DDI_FAILURE);
        }

        instance = ddi_get_instance(dip);
        e = ddi_soft_state_zalloc(ioat_statep, instance);
        if (e != DDI_SUCCESS) {
                return (DDI_FAILURE);
        }
        state = ddi_get_soft_state(ioat_statep, instance);
        if (state == NULL) {
                goto attachfail_get_soft_state;
        }

        state->is_dip = dip;
        state->is_instance = instance;

        /* setup the registers, save away some device info */
        e = ioat_chip_init(state);
        if (e != DDI_SUCCESS) {
                goto attachfail_chip_init;
        }

        /* initialize driver state, must be after chip init */
        e = ioat_drv_init(state);
        if (e != DDI_SUCCESS) {
                goto attachfail_drv_init;
        }

        /* create the minor node (for the ioctl) */
        e = ddi_create_minor_node(dip, "ioat", S_IFCHR, instance, DDI_PSEUDO,
            0);
        if (e != DDI_SUCCESS) {
                goto attachfail_minor_node;
        }

        /* Enable device interrupts */
        ioat_intr_enable(state);

        /* Report that driver was loaded */
        ddi_report_dev(dip);

        /* register with dcopy */
        e = dcopy_device_register(state, &state->is_deviceinfo,
            &state->is_device_handle);
        if (e != DCOPY_SUCCESS) {
                goto attachfail_register;
        }

        return (DDI_SUCCESS);

attachfail_register:
        ioat_intr_disable(state);
        ddi_remove_minor_node(dip, NULL);
attachfail_minor_node:
        ioat_drv_fini(state);
attachfail_drv_init:
        ioat_chip_fini(state);
attachfail_chip_init:
attachfail_get_soft_state:
        (void) ddi_soft_state_free(ioat_statep, instance);

        return (DDI_FAILURE);
}

/*
 * ioat_detach()
 */
static int
ioat_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        ioat_state_t *state;
        int instance;
        int e;


        instance = ddi_get_instance(dip);
        state = ddi_get_soft_state(ioat_statep, instance);
        if (state == NULL) {
                return (DDI_FAILURE);
        }

        switch (cmd) {
        case DDI_DETACH:
                break;

        case DDI_SUSPEND:
                ioat_channel_suspend(state);
                return (DDI_SUCCESS);

        default:
                return (DDI_FAILURE);
        }

        /*
         * try to unregister from dcopy. Since this driver doesn't follow the
         * traditional parent/child model, we may still be in use so we can't
         * detach yet.
         */
        e = dcopy_device_unregister(&state->is_device_handle);
        if (e != DCOPY_SUCCESS) {
                if (e == DCOPY_PENDING) {
                        cmn_err(CE_NOTE, "device busy, performing asynchronous"
                            " detach\n");
                }
                return (DDI_FAILURE);
        }

        ioat_detach_finish(state);

        return (DDI_SUCCESS);
}
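
/*
 * The asynchronous detach path, summarized: if dcopy_device_unregister()
 * returns DCOPY_PENDING above, a dcopy consumer still holds a channel, so
 * ioat_detach() fails the DDI_DETACH without tearing anything down. When the
 * last consumer releases the device, the dcopy framework calls
 * ioat_unregister_complete() (registered in ioat_cb), which performs the
 * deferred teardown via ioat_detach_finish() below.
 */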

/*
 * ioat_getinfo()
 */
/*ARGSUSED*/
static int
ioat_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
        ioat_state_t *state;
        int instance;
        dev_t dev;
        int e;


        dev = (dev_t)arg;
        instance = getminor(dev);

        switch (cmd) {
        case DDI_INFO_DEVT2DEVINFO:
                state = ddi_get_soft_state(ioat_statep, instance);
                if (state == NULL) {
                        return (DDI_FAILURE);
                }
                *result = (void *)state->is_dip;
                e = DDI_SUCCESS;
                break;

        case DDI_INFO_DEVT2INSTANCE:
                *result = (void *)(uintptr_t)instance;
                e = DDI_SUCCESS;
                break;

        default:
                e = DDI_FAILURE;
                break;
        }

        return (e);
}


/*
 * ioat_open()
 */
/*ARGSUSED*/
static int
ioat_open(dev_t *devp, int flag, int otyp, cred_t *cred)
{
        ioat_state_t *state;
        int instance;

        instance = getminor(*devp);
        state = ddi_get_soft_state(ioat_statep, instance);
        if (state == NULL) {
                return (ENXIO);
        }

        return (0);
}


/*
 * ioat_close()
 */
/*ARGSUSED*/
static int
ioat_close(dev_t devp, int flag, int otyp, cred_t *cred)
{
        return (0);
}


/*
 * ioat_chip_init()
 */
static int
ioat_chip_init(ioat_state_t *state)
{
        ddi_device_acc_attr_t attr;
        int e;


        attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
        attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
        attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

        e = ddi_regs_map_setup(state->is_dip, 1, (caddr_t *)&state->is_genregs,
            0, 0, &attr, &state->is_reg_handle);
        if (e != DDI_SUCCESS) {
                goto chipinitfail_regsmap;
        }

        /* save away ioat chip info */
        state->is_num_channels = (uint_t)ddi_get8(state->is_reg_handle,
            &state->is_genregs[IOAT_CHANCNT]);
        state->is_maxxfer = (uint_t)ddi_get8(state->is_reg_handle,
            &state->is_genregs[IOAT_XFERCAP]);
        state->is_chanoff = (uintptr_t)ddi_get16(state->is_reg_handle,
            (uint16_t *)&state->is_genregs[IOAT_PERPORT_OFF]);
        state->is_cbver = (uint_t)ddi_get8(state->is_reg_handle,
            &state->is_genregs[IOAT_CBVER]);
        state->is_intrdelay = (uint_t)ddi_get16(state->is_reg_handle,
            (uint16_t *)&state->is_genregs[IOAT_INTRDELAY]);
        state->is_status = (uint_t)ddi_get16(state->is_reg_handle,
            (uint16_t *)&state->is_genregs[IOAT_CSSTATUS]);
        state->is_capabilities = (uint_t)ddi_get32(state->is_reg_handle,
            (uint32_t *)&state->is_genregs[IOAT_DMACAPABILITY]);

        if (state->is_cbver & 0x10) {
                state->is_ver = IOAT_CBv1;
        } else if (state->is_cbver & 0x20) {
                state->is_ver = IOAT_CBv2;
        } else {
                goto chipinitfail_version;
        }

        return (DDI_SUCCESS);

chipinitfail_version:
        ddi_regs_map_free(&state->is_reg_handle);
chipinitfail_regsmap:
        return (DDI_FAILURE);
}


/*
 * ioat_chip_fini()
 */
static void
ioat_chip_fini(ioat_state_t *state)
{
        ddi_regs_map_free(&state->is_reg_handle);
}


/*
 * ioat_drv_init()
 */
static int
ioat_drv_init(ioat_state_t *state)
{
        ddi_acc_handle_t handle;
        int e;


        mutex_init(&state->is_mutex, NULL, MUTEX_DRIVER, NULL);

        state->is_deviceinfo.di_dip = state->is_dip;
        state->is_deviceinfo.di_num_dma = state->is_num_channels;
        state->is_deviceinfo.di_maxxfer = state->is_maxxfer;
        state->is_deviceinfo.di_capabilities = state->is_capabilities;
        state->is_deviceinfo.di_cb = &ioat_cb;

        e = pci_config_setup(state->is_dip, &handle);
        if (e != DDI_SUCCESS) {
                goto drvinitfail_config_setup;
        }

        /* read in Vendor ID */
        state->is_deviceinfo.di_id = (uint64_t)pci_config_get16(handle, 0);
        state->is_deviceinfo.di_id = state->is_deviceinfo.di_id << 16;

        /* read in Device ID */
        state->is_deviceinfo.di_id |= (uint64_t)pci_config_get16(handle, 2);
        state->is_deviceinfo.di_id = state->is_deviceinfo.di_id << 32;

        /* Add in chipset version */
        state->is_deviceinfo.di_id |= (uint64_t)state->is_cbver;
        pci_config_teardown(&handle);

        e = ddi_intr_hilevel(state->is_dip, 0);
        if (e != 0) {
                cmn_err(CE_WARN, "hilevel interrupt not supported\n");
                goto drvinitfail_hilevel;
        }

        /* we don't support MSIs for v2 yet */
        e = ddi_add_intr(state->is_dip, 0, NULL, NULL, ioat_isr,
            (caddr_t)state);
        if (e != DDI_SUCCESS) {
                goto drvinitfail_add_intr;
        }

        e = ddi_get_iblock_cookie(state->is_dip, 0, &state->is_iblock_cookie);
        if (e != DDI_SUCCESS) {
                goto drvinitfail_iblock_cookie;
        }

        e = ioat_channel_init(state);
        if (e != DDI_SUCCESS) {
                goto drvinitfail_channel_init;
        }

        return (DDI_SUCCESS);

drvinitfail_channel_init:
drvinitfail_iblock_cookie:
        ddi_remove_intr(state->is_dip, 0, state->is_iblock_cookie);
drvinitfail_add_intr:
drvinitfail_hilevel:
drvinitfail_config_setup:
        mutex_destroy(&state->is_mutex);

        return (DDI_FAILURE);
}
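
/*
 * Layout of is_deviceinfo.di_id, as composed by the shifts in
 * ioat_drv_init() above: PCI vendor ID in bits 63:48, PCI device ID in
 * bits 47:32, and the chipset (CB) version register in the low byte.
 */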

/*
 * ioat_drv_fini()
 */
static void
ioat_drv_fini(ioat_state_t *state)
{
        ioat_channel_fini(state);
        ddi_remove_intr(state->is_dip, 0, state->is_iblock_cookie);
        mutex_destroy(&state->is_mutex);
}


/*
 * ioat_unregister_complete()
 */
void
ioat_unregister_complete(void *device_private, int status)
{
        ioat_state_t *state;


        state = device_private;

        if (status != DCOPY_SUCCESS) {
                cmn_err(CE_WARN, "asynchronous detach aborted\n");
                return;
        }

        cmn_err(CE_CONT, "detach completing\n");
        ioat_detach_finish(state);
}


/*
 * ioat_detach_finish()
 */
void
ioat_detach_finish(ioat_state_t *state)
{
        ioat_intr_disable(state);
        ddi_remove_minor_node(state->is_dip, NULL);
        ioat_drv_fini(state);
        ioat_chip_fini(state);
        (void) ddi_soft_state_free(ioat_statep, state->is_instance);
}


/*
 * ioat_intr_enable()
 */
static void
ioat_intr_enable(ioat_state_t *state)
{
        uint32_t intr_status;


        /* Clear any pending interrupts */
        intr_status = ddi_get32(state->is_reg_handle,
            (uint32_t *)&state->is_genregs[IOAT_ATTNSTATUS]);
        if (intr_status != 0) {
                ddi_put32(state->is_reg_handle,
                    (uint32_t *)&state->is_genregs[IOAT_ATTNSTATUS],
                    intr_status);
        }

        /* Enable interrupts on the device */
        ddi_put8(state->is_reg_handle, &state->is_genregs[IOAT_INTRCTL],
            IOAT_INTRCTL_MASTER_EN);
}


/*
 * ioat_intr_disable()
 */
static void
ioat_intr_disable(ioat_state_t *state)
{
        /*
         * disable interrupts on the device. A read of the interrupt control
         * register clears the enable bit.
         */
        (void) ddi_get8(state->is_reg_handle,
            &state->is_genregs[IOAT_INTRCTL]);
}


/*
 * ioat_isr()
 */
static uint_t
ioat_isr(caddr_t parm)
{
        uint32_t intr_status;
        ioat_state_t *state;
        uint8_t intrctrl;
        uint32_t chan;
        uint_t r;
        int i;

        state = (ioat_state_t *)parm;

        intrctrl = ddi_get8(state->is_reg_handle,
            &state->is_genregs[IOAT_INTRCTL]);
        /* master interrupt enable should always be set */
        ASSERT(intrctrl & IOAT_INTRCTL_MASTER_EN);

        /* If the interrupt status bit isn't set, it's not ours */
        if (!(intrctrl & IOAT_INTRCTL_INTR_STAT)) {
                /* re-set master interrupt enable (since it clears on read) */
                ddi_put8(state->is_reg_handle,
                    &state->is_genregs[IOAT_INTRCTL], intrctrl);
                return (DDI_INTR_UNCLAIMED);
        }

        /* see which channels generated the interrupt */
        intr_status = ddi_get32(state->is_reg_handle,
            (uint32_t *)&state->is_genregs[IOAT_ATTNSTATUS]);

        /* call the intr handler for the channels */
        r = DDI_INTR_UNCLAIMED;
        chan = 1;
        for (i = 0; i < state->is_num_channels; i++) {
                if (intr_status & chan) {
                        ioat_channel_intr(&state->is_channel[i]);
                        r = DDI_INTR_CLAIMED;
                }
                chan = chan << 1;
        }

        /*
         * if interrupt status bit was set, there should have been an
         * attention status bit set too.
         */
        ASSERT(r == DDI_INTR_CLAIMED);

        /* re-set master interrupt enable (since it clears on read) */
        ddi_put8(state->is_reg_handle, &state->is_genregs[IOAT_INTRCTL],
            intrctrl);

        return (r);
}