1 /*- 2 * Copyright (c) 1997-2006 by Matthew Jacob 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 /* 28 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 #include <dev/isp/isp_freebsd.h> 33 #include <sys/unistd.h> 34 #include <sys/kthread.h> 35 #include <machine/stdarg.h> /* for use by isp_prt below */ 36 #include <sys/conf.h> 37 #include <sys/module.h> 38 #include <sys/ioccom.h> 39 #include <dev/isp/isp_ioctl.h> 40 #if __FreeBSD_version >= 500000 41 #include <sys/sysctl.h> 42 #else 43 #include <sys/devicestat.h> 44 #endif 45 #include <cam/cam_periph.h> 46 #include <cam/cam_xpt_periph.h> 47 48 #if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025 49 #define CAM_NEW_TRAN_CODE 1 50 #endif 51 52 53 MODULE_VERSION(isp, 1); 54 MODULE_DEPEND(isp, cam, 1, 1, 1); 55 int isp_announced = 0; 56 int isp_fabric_hysteresis = 5; 57 int isp_loop_down_limit = 300; /* default loop down limit */ 58 int isp_change_is_bad = 0; /* "changed" devices are bad */ 59 int isp_quickboot_time = 15; /* don't wait more than N secs for loop up */ 60 int isp_gone_device_time = 30; /* grace time before reporting device lost */ 61 static const char *roles[4] = { 62 "(none)", "Target", "Initiator", "Target/Initiator" 63 }; 64 static const char prom3[] = 65 "PortID 0x%06x Departed from Target %u because of %s"; 66 67 static void isp_freeze_loopdown(ispsoftc_t *, char *); 68 static d_ioctl_t ispioctl; 69 static void isp_intr_enable(void *); 70 static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); 71 static void isp_poll(struct cam_sim *); 72 static timeout_t isp_watchdog; 73 static timeout_t isp_ldt; 74 static void isp_kthread(void *); 75 static void isp_action(struct cam_sim *, union ccb *); 76 77 #if __FreeBSD_version < 700000 78 ispfwfunc *isp_get_firmware_p = NULL; 79 #endif 80 81 #if __FreeBSD_version < 500000 82 #define ISP_CDEV_MAJOR 248 83 static struct cdevsw isp_cdevsw = { 84 /* open */ nullopen, 85 /* close */ nullclose, 86 /* read */ noread, 87 /* write */ nowrite, 88 /* ioctl */ ispioctl, 89 /* poll */ nopoll, 90 /* mmap */ nommap, 91 /* strategy */ nostrategy, 92 /* name */ "isp", 93 /* maj */ ISP_CDEV_MAJOR, 94 /* dump */ 
nodump, 95 /* psize */ nopsize, 96 /* flags */ D_TAPE, 97 }; 98 #define isp_sysctl_update(x) do { ; } while (0) 99 #else 100 static struct cdevsw isp_cdevsw = { 101 .d_version = D_VERSION, 102 #if __FreeBSD_version < 700037 103 .d_flags = D_NEEDGIANT, 104 #endif 105 .d_ioctl = ispioctl, 106 .d_name = "isp", 107 }; 108 static void isp_sysctl_update(ispsoftc_t *); 109 #endif 110 111 static ispsoftc_t *isplist = NULL; 112 113 void 114 isp_attach(ispsoftc_t *isp) 115 { 116 int primary, secondary; 117 struct ccb_setasync csa; 118 struct cam_devq *devq; 119 struct cam_sim *sim; 120 struct cam_path *path; 121 122 /* 123 * Establish (in case of 12X0) which bus is the primary. 124 */ 125 126 primary = 0; 127 secondary = 1; 128 129 /* 130 * Create the device queue for our SIM(s). 131 */ 132 devq = cam_simq_alloc(isp->isp_maxcmds); 133 if (devq == NULL) { 134 return; 135 } 136 137 /* 138 * Construct our SIM entry. 139 */ 140 sim = isp_sim_alloc(isp_action, isp_poll, "isp", isp, 141 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 142 if (sim == NULL) { 143 cam_simq_free(devq); 144 return; 145 } 146 147 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 148 isp->isp_osinfo.ehook.ich_arg = isp; 149 ISP_UNLOCK(isp); 150 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 151 ISP_LOCK(isp); 152 cam_sim_free(sim, TRUE); 153 isp_prt(isp, ISP_LOGERR, 154 "could not establish interrupt enable hook"); 155 return; 156 } 157 ISP_LOCK(isp); 158 159 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 160 cam_sim_free(sim, TRUE); 161 return; 162 } 163 164 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 165 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 166 xpt_bus_deregister(cam_sim_path(sim)); 167 cam_sim_free(sim, TRUE); 168 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 169 return; 170 } 171 172 xpt_setup_ccb(&csa.ccb_h, path, 5); 173 csa.ccb_h.func_code = XPT_SASYNC_CB; 174 csa.event_enable = AC_LOST_DEVICE; 175 csa.callback = isp_cam_async; 176 csa.callback_arg = sim; 177 xpt_action((union ccb *)&csa); 178 isp->isp_sim = sim; 179 isp->isp_path = path; 180 181 /* 182 * If we have a second channel, construct SIM entry for that. 
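 * The second SIM shares the devq allocated above; it just gets its own
 * wildcard path and its own AC_LOST_DEVICE async callback registration.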
183 */ 184 if (IS_DUALBUS(isp)) { 185 sim = isp_sim_alloc(isp_action, isp_poll, "isp", isp, 186 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 187 if (sim == NULL) { 188 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 189 xpt_free_path(isp->isp_path); 190 cam_simq_free(devq); 191 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 192 return; 193 } 194 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 195 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 196 xpt_free_path(isp->isp_path); 197 cam_sim_free(sim, TRUE); 198 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 199 return; 200 } 201 202 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 203 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 204 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 205 xpt_free_path(isp->isp_path); 206 xpt_bus_deregister(cam_sim_path(sim)); 207 cam_sim_free(sim, TRUE); 208 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 209 return; 210 } 211 212 xpt_setup_ccb(&csa.ccb_h, path, 5); 213 csa.ccb_h.func_code = XPT_SASYNC_CB; 214 csa.event_enable = AC_LOST_DEVICE; 215 csa.callback = isp_cam_async; 216 csa.callback_arg = sim; 217 xpt_action((union ccb *)&csa); 218 isp->isp_sim2 = sim; 219 isp->isp_path2 = path; 220 } 221 222 /* 223 * Create device nodes 224 */ 225 ISP_UNLOCK(isp); 226 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 227 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 228 isp_sysctl_update(isp); 229 ISP_LOCK(isp); 230 231 if (isp->isp_role != ISP_ROLE_NONE) { 232 isp->isp_state = ISP_RUNSTATE; 233 ISP_ENABLE_INTS(isp); 234 } 235 if (isplist == NULL) { 236 isplist = isp; 237 } else { 238 ispsoftc_t *tmp = isplist; 239 while (tmp->isp_osinfo.next) { 240 tmp = tmp->isp_osinfo.next; 241 } 242 tmp->isp_osinfo.next = isp; 243 } 244 245 /* 246 * Create a kernel thread for fibre channel instances. 
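 * The kthread (isp_kthread, below) watches fibre channel loop state and
 * unfreezes the SIM queue once the loop/fabric has settled.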
247 */ 248 if (IS_FC(isp)) { 249 isp_callout_init(&isp->isp_osinfo.ldt); 250 isp_callout_init(&isp->isp_osinfo.gdt); 251 ISP_UNLOCK(isp); 252 #if __FreeBSD_version >= 500000 253 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 254 RFHIGHPID, 0, "%s: fc_thrd", 255 device_get_nameunit(isp->isp_dev))) 256 #else 257 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 258 "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) 259 #endif 260 { 261 ISP_LOCK(isp); 262 xpt_bus_deregister(cam_sim_path(sim)); 263 cam_sim_free(sim, TRUE); 264 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 265 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 266 return; 267 } 268 ISP_LOCK(isp); 269 /* 270 * We start by being "loop down" if we have an initiator role 271 */ 272 if (isp->isp_role & ISP_ROLE_INITIATOR) { 273 isp_freeze_loopdown(isp, "isp_attach"); 274 isp->isp_osinfo.ldt_running = 1; 275 callout_reset(&isp->isp_osinfo.ldt, 276 isp_quickboot_time * hz, isp_ldt, isp); 277 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 278 "Starting Initial Loop Down Timer"); 279 } 280 } 281 } 282 283 static void 284 isp_freeze_loopdown(ispsoftc_t *isp, char *msg) 285 { 286 if (isp->isp_osinfo.simqfrozen == 0) { 287 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 288 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 289 xpt_freeze_simq(isp->isp_sim, 1); 290 } else { 291 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 292 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 293 } 294 } 295 296 297 #if __FreeBSD_version < 500000 298 #define _DEV dev_t 299 #define _IOP struct proc 300 #else 301 #define _IOP struct thread 302 #define _DEV struct cdev * 303 #endif 304 305 static int 306 ispioctl(_DEV dev, u_long c, caddr_t addr, int flags, _IOP *td) 307 { 308 ispsoftc_t *isp; 309 int nr, retval = ENOTTY; 310 #if __FreeBSD_version < 500000 311 int s = splcam(); 312 #else 313 GIANT_REQUIRED; 314 #endif 315 316 isp = isplist; 317 while (isp) { 318 if (minor(dev) == device_get_unit(isp->isp_dev)) { 319 break; 320 } 321 isp = isp->isp_osinfo.next; 322 } 323 if (isp == NULL) { 324 #if __FreeBSD_version < 500000 325 splx(s); 326 #endif 327 return (ENXIO); 328 } 329 330 switch (c) { 331 #ifdef ISP_FW_CRASH_DUMP 332 case ISP_GET_FW_CRASH_DUMP: 333 if (IS_FC(isp)) { 334 uint16_t *ptr = FCPARAM(isp)->isp_dump_data; 335 size_t sz; 336 337 retval = 0; 338 if (IS_2200(isp)) { 339 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 340 } else { 341 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 342 } 343 if (ptr && *ptr) { 344 void *uaddr = *((void **) addr); 345 if (copyout(ptr, uaddr, sz)) { 346 retval = EFAULT; 347 } else { 348 *ptr = 0; 349 } 350 } else { 351 retval = ENXIO; 352 } 353 } 354 break; 355 case ISP_FORCE_CRASH_DUMP: 356 if (IS_FC(isp)) { 357 isp_freeze_loopdown(isp, 358 "ispioctl(ISP_FORCE_CRASH_DUMP)"); 359 isp_fw_dump(isp); 360 isp_reinit(isp); 361 retval = 0; 362 } 363 break; 364 #endif 365 case ISP_SDBLEV: 366 { 367 int olddblev = isp->isp_dblev; 368 isp->isp_dblev = *(int *)addr; 369 *(int *)addr = olddblev; 370 retval = 0; 371 break; 372 } 373 case ISP_GETROLE: 374 *(int *)addr = isp->isp_role; 375 retval = 0; 376 break; 377 case ISP_SETROLE: 378 nr = *(int *)addr; 379 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 380 retval = EINVAL; 381 break; 382 } 383 /* 384 * XXX: Current 385 */ 386 if (nr == ISP_ROLE_BOTH) { 387 isp_prt(isp, ISP_LOGERR, "dual roles not supported"); 388 retval = EINVAL; 389 break; 390 } 391 *(int *)addr = isp->isp_role; 392 isp->isp_role = nr; 393 /* FALLTHROUGH */ 394 case 
ISP_RESETHBA: 395 isp_reinit(isp); 396 retval = 0; 397 break; 398 case ISP_RESCAN: 399 if (IS_FC(isp)) { 400 if (isp_fc_runstate(isp, 5 * 1000000)) { 401 retval = EIO; 402 } else { 403 retval = 0; 404 } 405 } 406 break; 407 case ISP_FC_LIP: 408 if (IS_FC(isp)) { 409 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 410 retval = EIO; 411 } else { 412 retval = 0; 413 } 414 } 415 break; 416 case ISP_FC_GETDINFO: 417 { 418 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 419 fcportdb_t *lp; 420 421 if (IS_SCSI(isp)) { 422 break; 423 } 424 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 425 retval = EINVAL; 426 break; 427 } 428 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 429 if (lp->state == FC_PORTDB_STATE_VALID) { 430 ifc->role = lp->roles; 431 ifc->loopid = lp->handle; 432 ifc->portid = lp->portid; 433 ifc->node_wwn = lp->node_wwn; 434 ifc->port_wwn = lp->port_wwn; 435 retval = 0; 436 } else { 437 retval = ENODEV; 438 } 439 break; 440 } 441 case ISP_GET_STATS: 442 { 443 isp_stats_t *sp = (isp_stats_t *) addr; 444 445 MEMZERO(sp, sizeof (*sp)); 446 sp->isp_stat_version = ISP_STATS_VERSION; 447 sp->isp_type = isp->isp_type; 448 sp->isp_revision = isp->isp_revision; 449 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 450 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 451 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 452 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 453 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 454 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 455 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 456 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 457 retval = 0; 458 break; 459 } 460 case ISP_CLR_STATS: 461 isp->isp_intcnt = 0; 462 isp->isp_intbogus = 0; 463 isp->isp_intmboxc = 0; 464 isp->isp_intoasync = 0; 465 isp->isp_rsltccmplt = 0; 466 isp->isp_fphccmplt = 0; 467 isp->isp_rscchiwater = 0; 468 isp->isp_fpcchiwater = 0; 469 retval = 0; 470 break; 471 case ISP_FC_GETHINFO: 472 { 473 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 474 MEMZERO(hba, sizeof (*hba)); 475 476 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 477 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 478 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 479 if (IS_FC(isp)) { 480 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 481 hba->fc_scsi_supported = 1; 482 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 483 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 484 hba->nvram_node_wwn = FCPARAM(isp)->isp_wwnn_nvram; 485 hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram; 486 hba->active_node_wwn = ISP_NODEWWN(isp); 487 hba->active_port_wwn = ISP_PORTWWN(isp); 488 } 489 retval = 0; 490 break; 491 } 492 case ISP_GET_FC_PARAM: 493 { 494 struct isp_fc_param *f = (struct isp_fc_param *) addr; 495 496 if (IS_SCSI(isp)) { 497 break; 498 } 499 f->parameter = 0; 500 if (strcmp(f->param_name, "framelength") == 0) { 501 f->parameter = FCPARAM(isp)->isp_maxfrmlen; 502 retval = 0; 503 break; 504 } 505 if (strcmp(f->param_name, "exec_throttle") == 0) { 506 f->parameter = FCPARAM(isp)->isp_execthrottle; 507 retval = 0; 508 break; 509 } 510 if (strcmp(f->param_name, "fullduplex") == 0) { 511 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX) 512 f->parameter = 1; 513 retval = 0; 514 break; 515 } 516 if (strcmp(f->param_name, "loopid") == 0) { 517 f->parameter = FCPARAM(isp)->isp_loopid; 518 retval = 0; 519 break; 520 } 521 retval = EINVAL; 522 break; 523 } 524 case ISP_SET_FC_PARAM: 525 { 526 struct isp_fc_param *f = (struct isp_fc_param *) addr; 527 uint32_t param = f->parameter; 
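		/*
		 * Illustrative userland usage of this ioctl (a sketch only;
		 * it assumes the /dev/isp<unit> node created in isp_attach
		 * and the definitions from isp_ioctl.h, with "fd" an open
		 * descriptor on that node):
		 *
		 *	struct isp_fc_param p;
		 *	memset(&p, 0, sizeof (p));
		 *	strlcpy(p.param_name, "framelength", sizeof (p.param_name));
		 *	p.parameter = 2048;
		 *	if (ioctl(fd, ISP_SET_FC_PARAM, &p) < 0)
		 *		warn("ISP_SET_FC_PARAM");
		 */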
528 529 if (IS_SCSI(isp)) { 530 break; 531 } 532 f->parameter = 0; 533 if (strcmp(f->param_name, "framelength") == 0) { 534 if (param != 512 && param != 1024 && param != 2048) { 535 retval = EINVAL; 536 break; 537 } 538 FCPARAM(isp)->isp_maxfrmlen = param; 539 retval = 0; 540 break; 541 } 542 if (strcmp(f->param_name, "exec_throttle") == 0) { 543 if (param < 16 || param > 255) { 544 retval = EINVAL; 545 break; 546 } 547 FCPARAM(isp)->isp_execthrottle = param; 548 retval = 0; 549 break; 550 } 551 if (strcmp(f->param_name, "fullduplex") == 0) { 552 if (param != 0 && param != 1) { 553 retval = EINVAL; 554 break; 555 } 556 if (param) { 557 FCPARAM(isp)->isp_fwoptions |= 558 ICBOPT_FULL_DUPLEX; 559 } else { 560 FCPARAM(isp)->isp_fwoptions &= 561 ~ICBOPT_FULL_DUPLEX; 562 } 563 retval = 0; 564 break; 565 } 566 if (strcmp(f->param_name, "loopid") == 0) { 567 if (param < 0 || param > 125) { 568 retval = EINVAL; 569 break; 570 } 571 FCPARAM(isp)->isp_loopid = param; 572 retval = 0; 573 break; 574 } 575 retval = EINVAL; 576 break; 577 } 578 case ISP_TSK_MGMT: 579 { 580 int needmarker; 581 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; 582 uint16_t loopid; 583 mbreg_t mbs; 584 585 if (IS_SCSI(isp)) { 586 break; 587 } 588 589 memset(&mbs, 0, sizeof (mbs)); 590 needmarker = retval = 0; 591 loopid = fct->loopid; 592 if (FCPARAM(isp)->isp_2klogin == 0) { 593 loopid <<= 8; 594 } 595 switch (fct->action) { 596 case IPT_CLEAR_ACA: 597 mbs.param[0] = MBOX_CLEAR_ACA; 598 mbs.param[1] = loopid; 599 mbs.param[2] = fct->lun; 600 break; 601 case IPT_TARGET_RESET: 602 mbs.param[0] = MBOX_TARGET_RESET; 603 mbs.param[1] = loopid; 604 needmarker = 1; 605 break; 606 case IPT_LUN_RESET: 607 mbs.param[0] = MBOX_LUN_RESET; 608 mbs.param[1] = loopid; 609 mbs.param[2] = fct->lun; 610 needmarker = 1; 611 break; 612 case IPT_CLEAR_TASK_SET: 613 mbs.param[0] = MBOX_CLEAR_TASK_SET; 614 mbs.param[1] = loopid; 615 mbs.param[2] = fct->lun; 616 needmarker = 1; 617 break; 618 case IPT_ABORT_TASK_SET: 619 mbs.param[0] = MBOX_ABORT_TASK_SET; 620 mbs.param[1] = loopid; 621 mbs.param[2] = fct->lun; 622 needmarker = 1; 623 break; 624 default: 625 retval = EINVAL; 626 break; 627 } 628 if (retval == 0) { 629 if (needmarker) { 630 isp->isp_sendmarker |= 1; 631 } 632 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); 633 if (retval) 634 retval = EIO; 635 } 636 break; 637 } 638 default: 639 break; 640 } 641 #if __FreeBSD_version < 500000 642 splx(s); 643 #endif 644 return (retval); 645 } 646 647 #if __FreeBSD_version >= 500000 648 static void 649 isp_sysctl_update(ispsoftc_t *isp) 650 { 651 struct sysctl_ctx_list *ctx = 652 device_get_sysctl_ctx(isp->isp_osinfo.dev); 653 struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); 654 655 if (IS_SCSI(isp)) { 656 return; 657 } 658 659 snprintf(isp->isp_osinfo.sysctl_info.fc.wwnn, 660 sizeof (isp->isp_osinfo.sysctl_info.fc.wwnn), "0x%08x%08x", 661 (uint32_t) (ISP_NODEWWN(isp) >> 32), (uint32_t) ISP_NODEWWN(isp)); 662 663 snprintf(isp->isp_osinfo.sysctl_info.fc.wwpn, 664 sizeof (isp->isp_osinfo.sysctl_info.fc.wwpn), "0x%08x%08x", 665 (uint32_t) (ISP_PORTWWN(isp) >> 32), (uint32_t) ISP_PORTWWN(isp)); 666 667 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 668 "wwnn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwnn, 0, 669 "World Wide Node Name"); 670 671 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 672 "wwpn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwpn, 0, 673 "World Wide Port Name"); 674 675 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
676 "loop_down_limit", 677 CTLFLAG_RW, &isp->isp_osinfo.loop_down_limit, 0, 678 "How long to wait for loop to come back up"); 679 680 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 681 "gone_device_time", 682 CTLFLAG_RW, &isp->isp_osinfo.gone_device_time, 0, 683 "How long to wait for a device to reappear"); 684 } 685 #endif 686 687 static void 688 isp_intr_enable(void *arg) 689 { 690 ispsoftc_t *isp = arg; 691 ISP_LOCK(isp); 692 if (isp->isp_role != ISP_ROLE_NONE) { 693 ISP_ENABLE_INTS(isp); 694 } 695 ISP_UNLOCK(isp); 696 /* Release our hook so that the boot can continue. */ 697 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 698 } 699 700 /* 701 * Put the target mode functions here, because some are inlines 702 */ 703 704 #ifdef ISP_TARGET_MODE 705 706 static __inline int is_lun_enabled(ispsoftc_t *, int, lun_id_t); 707 static __inline int are_any_luns_enabled(ispsoftc_t *, int); 708 static __inline tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); 709 static __inline void rls_lun_statep(ispsoftc_t *, tstate_t *); 710 static __inline atio_private_data_t *isp_get_atpd(ispsoftc_t *, int); 711 static cam_status 712 create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); 713 static void destroy_lun_state(ispsoftc_t *, tstate_t *); 714 static int isp_en_lun(ispsoftc_t *, union ccb *); 715 static void isp_ledone(ispsoftc_t *, lun_entry_t *); 716 static cam_status isp_abort_tgt_ccb(ispsoftc_t *, union ccb *); 717 static timeout_t isp_refire_putback_atio; 718 static void isp_complete_ctio(union ccb *); 719 static void isp_target_putback_atio(union ccb *); 720 static void isp_target_start_ctio(ispsoftc_t *, union ccb *); 721 static int isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); 722 static int isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); 723 static int isp_handle_platform_ctio(ispsoftc_t *, void *); 724 static int isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *); 725 static int isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *); 726 727 static __inline int 728 is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun) 729 { 730 tstate_t *tptr; 731 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 732 if (tptr == NULL) { 733 return (0); 734 } 735 do { 736 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { 737 return (1); 738 } 739 } while ((tptr = tptr->next) != NULL); 740 return (0); 741 } 742 743 static __inline int 744 are_any_luns_enabled(ispsoftc_t *isp, int port) 745 { 746 int lo, hi; 747 if (IS_DUALBUS(isp)) { 748 lo = (port * (LUN_HASH_SIZE >> 1)); 749 hi = lo + (LUN_HASH_SIZE >> 1); 750 } else { 751 lo = 0; 752 hi = LUN_HASH_SIZE; 753 } 754 for (lo = 0; lo < hi; lo++) { 755 if (isp->isp_osinfo.lun_hash[lo]) { 756 return (1); 757 } 758 } 759 return (0); 760 } 761 762 static __inline tstate_t * 763 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) 764 { 765 tstate_t *tptr = NULL; 766 767 if (lun == CAM_LUN_WILDCARD) { 768 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 769 tptr = &isp->isp_osinfo.tsdflt[bus]; 770 tptr->hold++; 771 return (tptr); 772 } 773 return (NULL); 774 } else { 775 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 776 if (tptr == NULL) { 777 return (NULL); 778 } 779 } 780 781 do { 782 if (tptr->lun == lun && tptr->bus == bus) { 783 tptr->hold++; 784 return (tptr); 785 } 786 } while ((tptr = tptr->next) != NULL); 787 return (tptr); 788 } 789 790 static __inline void 791 rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr) 792 { 793 if (tptr->hold) 794 tptr->hold--; 
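	/*
	 * A nonzero hold count (taken in get_lun_statep/create_lun_state)
	 * is what keeps destroy_lun_state() from freeing this state.
	 */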
795 } 796 797 static __inline atio_private_data_t * 798 isp_get_atpd(ispsoftc_t *isp, int tag) 799 { 800 atio_private_data_t *atp; 801 for (atp = isp->isp_osinfo.atpdp; 802 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 803 if (atp->tag == tag) 804 return (atp); 805 } 806 return (NULL); 807 } 808 809 static cam_status 810 create_lun_state(ispsoftc_t *isp, int bus, 811 struct cam_path *path, tstate_t **rslt) 812 { 813 cam_status status; 814 lun_id_t lun; 815 int hfx; 816 tstate_t *tptr, *new; 817 818 lun = xpt_path_lun_id(path); 819 if (lun < 0) { 820 return (CAM_LUN_INVALID); 821 } 822 if (is_lun_enabled(isp, bus, lun)) { 823 return (CAM_LUN_ALRDY_ENA); 824 } 825 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 826 if (new == NULL) { 827 return (CAM_RESRC_UNAVAIL); 828 } 829 830 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 831 xpt_path_target_id(path), xpt_path_lun_id(path)); 832 if (status != CAM_REQ_CMP) { 833 free(new, M_DEVBUF); 834 return (status); 835 } 836 new->bus = bus; 837 new->lun = lun; 838 SLIST_INIT(&new->atios); 839 SLIST_INIT(&new->inots); 840 new->hold = 1; 841 842 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 843 tptr = isp->isp_osinfo.lun_hash[hfx]; 844 if (tptr == NULL) { 845 isp->isp_osinfo.lun_hash[hfx] = new; 846 } else { 847 while (tptr->next) 848 tptr = tptr->next; 849 tptr->next = new; 850 } 851 *rslt = new; 852 return (CAM_REQ_CMP); 853 } 854 855 static __inline void 856 destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr) 857 { 858 int hfx; 859 tstate_t *lw, *pw; 860 861 if (tptr->hold) { 862 return; 863 } 864 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 865 pw = isp->isp_osinfo.lun_hash[hfx]; 866 if (pw == NULL) { 867 return; 868 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 869 isp->isp_osinfo.lun_hash[hfx] = pw->next; 870 } else { 871 lw = pw; 872 pw = lw->next; 873 while (pw) { 874 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 875 lw->next = pw->next; 876 break; 877 } 878 lw = pw; 879 pw = pw->next; 880 } 881 if (pw == NULL) { 882 return; 883 } 884 } 885 free(tptr, M_DEVBUF); 886 } 887 888 /* 889 * Enable luns. 890 */ 891 static int 892 isp_en_lun(ispsoftc_t *isp, union ccb *ccb) 893 { 894 struct ccb_en_lun *cel = &ccb->cel; 895 tstate_t *tptr; 896 uint32_t seq; 897 int bus, cmd, av, wildcard, tm_on; 898 lun_id_t lun; 899 target_id_t tgt; 900 901 bus = XS_CHANNEL(ccb); 902 if (bus > 1) { 903 xpt_print(ccb->ccb_h.path, "illegal bus %d\n", bus); 904 ccb->ccb_h.status = CAM_PATH_INVALID; 905 return (-1); 906 } 907 tgt = ccb->ccb_h.target_id; 908 lun = ccb->ccb_h.target_lun; 909 910 if (isp->isp_dblev & ISP_LOGTDEBUG0) { 911 xpt_print(ccb->ccb_h.path, "%sabling lun 0x%x on channel %d\n", 912 cel->enable? "en" : "dis", lun, bus); 913 } 914 915 if ((lun != CAM_LUN_WILDCARD) && 916 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 917 ccb->ccb_h.status = CAM_LUN_INVALID; 918 return (-1); 919 } 920 921 if (IS_SCSI(isp)) { 922 sdparam *sdp = isp->isp_param; 923 sdp += bus; 924 if (tgt != CAM_TARGET_WILDCARD && 925 tgt != sdp->isp_initiator_id) { 926 ccb->ccb_h.status = CAM_TID_INVALID; 927 return (-1); 928 } 929 } else { 930 /* 931 * There's really no point in doing this yet w/o multi-tid 932 * capability. Even then, it's problematic. 933 */ 934 #if 0 935 if (tgt != CAM_TARGET_WILDCARD && 936 tgt != FCPARAM(isp)->isp_iid) { 937 ccb->ccb_h.status = CAM_TID_INVALID; 938 return (-1); 939 } 940 #endif 941 /* 942 * This is as a good a place as any to check f/w capabilities. 
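 * (specifically, target mode support and SCCLUN-capable firmware, both of
 * which are checked just below)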
943 */ 944 if (FCPARAM(isp)->isp_tmode == 0) { 945 xpt_print(ccb->ccb_h.path, 946 "firmware does not support target mode\n"); 947 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 948 return (-1); 949 } 950 /* 951 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 952 * XXX: dork with our already fragile enable/disable code. 953 */ 954 if (FCPARAM(isp)->isp_sccfw == 0) { 955 xpt_print(ccb->ccb_h.path, 956 "firmware not SCCLUN capable\n"); 957 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 958 return (-1); 959 } 960 } 961 962 if (tgt == CAM_TARGET_WILDCARD) { 963 if (lun == CAM_LUN_WILDCARD) { 964 wildcard = 1; 965 } else { 966 ccb->ccb_h.status = CAM_LUN_INVALID; 967 return (-1); 968 } 969 } else { 970 wildcard = 0; 971 } 972 973 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0; 974 975 /* 976 * Next check to see whether this is a target/lun wildcard action. 977 * 978 * If so, we know that we can accept commands for luns that haven't 979 * been enabled yet and send them upstream. Otherwise, we have to 980 * handle them locally (if we see them at all). 981 */ 982 983 if (wildcard) { 984 tptr = &isp->isp_osinfo.tsdflt[bus]; 985 if (cel->enable) { 986 if (tm_on) { 987 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 988 return (-1); 989 } 990 ccb->ccb_h.status = 991 xpt_create_path(&tptr->owner, NULL, 992 xpt_path_path_id(ccb->ccb_h.path), 993 xpt_path_target_id(ccb->ccb_h.path), 994 xpt_path_lun_id(ccb->ccb_h.path)); 995 if (ccb->ccb_h.status != CAM_REQ_CMP) { 996 return (-1); 997 } 998 SLIST_INIT(&tptr->atios); 999 SLIST_INIT(&tptr->inots); 1000 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 1001 } else { 1002 if (tm_on == 0) { 1003 ccb->ccb_h.status = CAM_REQ_CMP; 1004 return (-1); 1005 } 1006 if (tptr->hold) { 1007 ccb->ccb_h.status = CAM_SCSI_BUSY; 1008 return (-1); 1009 } 1010 xpt_free_path(tptr->owner); 1011 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 1012 } 1013 } 1014 1015 /* 1016 * Now check to see whether this bus needs to be 1017 * enabled/disabled with respect to target mode. 
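 * The argument passed to ISPCTL_TOGGLE_TMODE carries the bus number in
 * its high bit (bus << 31), plus ENABLE_TARGET_FLAG when enabling.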
1018 */ 1019 av = bus << 31; 1020 if (cel->enable && tm_on == 0) { 1021 av |= ENABLE_TARGET_FLAG; 1022 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1023 if (av) { 1024 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1025 if (wildcard) { 1026 isp->isp_osinfo.tmflags[bus] &= 1027 ~TM_WILDCARD_ENABLED; 1028 xpt_free_path(tptr->owner); 1029 } 1030 return (-1); 1031 } 1032 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 1033 xpt_print(ccb->ccb_h.path, "Target Mode Enabled\n"); 1034 } else if (cel->enable == 0 && tm_on && wildcard) { 1035 if (are_any_luns_enabled(isp, bus)) { 1036 ccb->ccb_h.status = CAM_SCSI_BUSY; 1037 return (-1); 1038 } 1039 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1040 if (av) { 1041 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1042 return (-1); 1043 } 1044 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1045 xpt_print(ccb->ccb_h.path, "Target Mode Disabled\n"); 1046 } 1047 1048 if (wildcard) { 1049 ccb->ccb_h.status = CAM_REQ_CMP; 1050 return (-1); 1051 } 1052 1053 /* 1054 * Find an empty slot 1055 */ 1056 for (seq = 0; seq < NLEACT; seq++) { 1057 if (isp->isp_osinfo.leact[seq] == 0) { 1058 break; 1059 } 1060 } 1061 if (seq >= NLEACT) { 1062 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1063 return (-1); 1064 1065 } 1066 isp->isp_osinfo.leact[seq] = ccb; 1067 1068 if (cel->enable) { 1069 ccb->ccb_h.status = 1070 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 1071 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1072 isp->isp_osinfo.leact[seq] = 0; 1073 return (-1); 1074 } 1075 } else { 1076 tptr = get_lun_statep(isp, bus, lun); 1077 if (tptr == NULL) { 1078 ccb->ccb_h.status = CAM_LUN_INVALID; 1079 return (-1); 1080 } 1081 } 1082 1083 if (cel->enable) { 1084 int c, n, ulun = lun; 1085 1086 cmd = RQSTYPE_ENABLE_LUN; 1087 c = DFLT_CMND_CNT; 1088 n = DFLT_INOT_CNT; 1089 if (IS_FC(isp) && lun != 0) { 1090 cmd = RQSTYPE_MODIFY_LUN; 1091 n = 0; 1092 /* 1093 * For SCC firmware, we only deal with setting 1094 * (enabling or modifying) lun 0. 1095 */ 1096 ulun = 0; 1097 } 1098 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1099 rls_lun_statep(isp, tptr); 1100 ccb->ccb_h.status = CAM_REQ_INPROG; 1101 return (seq); 1102 } 1103 } else { 1104 int c, n, ulun = lun; 1105 1106 cmd = -RQSTYPE_MODIFY_LUN; 1107 c = DFLT_CMND_CNT; 1108 n = DFLT_INOT_CNT; 1109 if (IS_FC(isp) && lun != 0) { 1110 n = 0; 1111 /* 1112 * For SCC firmware, we only deal with setting 1113 * (enabling or modifying) lun 0. 
1114 */ 1115 ulun = 0; 1116 } 1117 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1118 rls_lun_statep(isp, tptr); 1119 ccb->ccb_h.status = CAM_REQ_INPROG; 1120 return (seq); 1121 } 1122 } 1123 rls_lun_statep(isp, tptr); 1124 xpt_print(ccb->ccb_h.path, "isp_lun_cmd failed\n"); 1125 isp->isp_osinfo.leact[seq] = 0; 1126 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1127 return (-1); 1128 } 1129 1130 static void 1131 isp_ledone(ispsoftc_t *isp, lun_entry_t *lep) 1132 { 1133 const char lfmt[] = "now %sabled for target mode\n"; 1134 union ccb *ccb; 1135 uint32_t seq; 1136 tstate_t *tptr; 1137 int av; 1138 struct ccb_en_lun *cel; 1139 1140 seq = lep->le_reserved - 1; 1141 if (seq >= NLEACT) { 1142 isp_prt(isp, ISP_LOGERR, 1143 "seq out of range (%u) in isp_ledone", seq); 1144 return; 1145 } 1146 ccb = isp->isp_osinfo.leact[seq]; 1147 if (ccb == 0) { 1148 isp_prt(isp, ISP_LOGERR, 1149 "no ccb for seq %u in isp_ledone", seq); 1150 return; 1151 } 1152 cel = &ccb->cel; 1153 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); 1154 if (tptr == NULL) { 1155 xpt_print(ccb->ccb_h.path, "null tptr in isp_ledone\n"); 1156 isp->isp_osinfo.leact[seq] = 0; 1157 return; 1158 } 1159 1160 if (lep->le_status != LUN_OK) { 1161 xpt_print(ccb->ccb_h.path, 1162 "ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status); 1163 err: 1164 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1165 rls_lun_statep(isp, tptr); 1166 isp->isp_osinfo.leact[seq] = 0; 1167 xpt_done(ccb); 1168 return; 1169 } else { 1170 isp_prt(isp, ISP_LOGTDEBUG0, 1171 "isp_ledone: ENABLE/MODIFY done okay"); 1172 } 1173 1174 1175 if (cel->enable) { 1176 ccb->ccb_h.status = CAM_REQ_CMP; 1177 xpt_print(ccb->ccb_h.path, lfmt, "en"); 1178 rls_lun_statep(isp, tptr); 1179 isp->isp_osinfo.leact[seq] = 0; 1180 xpt_done(ccb); 1181 return; 1182 } 1183 1184 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) { 1185 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb), 1186 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) { 1187 xpt_print(ccb->ccb_h.path, 1188 "isp_ledone: isp_lun_cmd failed\n"); 1189 goto err; 1190 } 1191 rls_lun_statep(isp, tptr); 1192 return; 1193 } 1194 1195 xpt_print(ccb->ccb_h.path, lfmt, "dis"); 1196 rls_lun_statep(isp, tptr); 1197 destroy_lun_state(isp, tptr); 1198 ccb->ccb_h.status = CAM_REQ_CMP; 1199 isp->isp_osinfo.leact[seq] = 0; 1200 xpt_done(ccb); 1201 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) { 1202 int bus = XS_CHANNEL(ccb); 1203 av = bus << 31; 1204 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1205 if (av) { 1206 isp_prt(isp, ISP_LOGWARN, 1207 "disable target mode on channel %d failed", bus); 1208 } 1209 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1210 } 1211 } 1212 1213 1214 static cam_status 1215 isp_abort_tgt_ccb(ispsoftc_t *isp, union ccb *ccb) 1216 { 1217 tstate_t *tptr; 1218 struct ccb_hdr_slist *lp; 1219 struct ccb_hdr *curelm; 1220 int found, *ctr; 1221 union ccb *accb = ccb->cab.abort_ccb; 1222 1223 xpt_print(ccb->ccb_h.path, "aborting ccb %p\n", accb); 1224 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 1225 int badpath = 0; 1226 if (IS_FC(isp) && (accb->ccb_h.target_id != 1227 ((fcparam *) isp->isp_param)->isp_loopid)) { 1228 badpath = 1; 1229 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 1230 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 1231 badpath = 1; 1232 } 1233 if (badpath) { 1234 /* 1235 * Being restrictive about target ids is really about 1236 * making sure we're aborting for the right multi-tid 1237 * path. This doesn't really make much sense at present. 
1238 */ 1239 #if 0 1240 return (CAM_PATH_INVALID); 1241 #endif 1242 } 1243 } 1244 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 1245 if (tptr == NULL) { 1246 xpt_print(ccb->ccb_h.path, "can't get statep\n"); 1247 return (CAM_PATH_INVALID); 1248 } 1249 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1250 lp = &tptr->atios; 1251 ctr = &tptr->atio_count; 1252 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 1253 lp = &tptr->inots; 1254 ctr = &tptr->inot_count; 1255 } else { 1256 rls_lun_statep(isp, tptr); 1257 xpt_print(ccb->ccb_h.path, "bad function code %d\n", 1258 accb->ccb_h.func_code); 1259 return (CAM_UA_ABORT); 1260 } 1261 curelm = SLIST_FIRST(lp); 1262 found = 0; 1263 if (curelm == &accb->ccb_h) { 1264 found = 1; 1265 SLIST_REMOVE_HEAD(lp, sim_links.sle); 1266 } else { 1267 while(curelm != NULL) { 1268 struct ccb_hdr *nextelm; 1269 1270 nextelm = SLIST_NEXT(curelm, sim_links.sle); 1271 if (nextelm == &accb->ccb_h) { 1272 found = 1; 1273 SLIST_NEXT(curelm, sim_links.sle) = 1274 SLIST_NEXT(nextelm, sim_links.sle); 1275 break; 1276 } 1277 curelm = nextelm; 1278 } 1279 } 1280 rls_lun_statep(isp, tptr); 1281 if (found) { 1282 (*ctr)--; 1283 accb->ccb_h.status = CAM_REQ_ABORTED; 1284 xpt_done(accb); 1285 return (CAM_REQ_CMP); 1286 } 1287 xpt_print(ccb->ccb_h.path, "ccb %p not found\n", accb); 1288 return (CAM_PATH_INVALID); 1289 } 1290 1291 static void 1292 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb) 1293 { 1294 void *qe; 1295 struct ccb_scsiio *cso = &ccb->csio; 1296 uint32_t nxti, optr, handle; 1297 uint8_t local[QENTRY_LEN]; 1298 1299 1300 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1301 xpt_print(ccb->ccb_h.path, 1302 "Request Queue Overflow in isp_target_start_ctio\n"); 1303 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1304 goto out; 1305 } 1306 memset(local, 0, QENTRY_LEN); 1307 1308 /* 1309 * We're either moving data or completing a command here. 
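 * A final CTIO (CAM_SEND_STATUS set) carries SCSI status, and possibly
 * sense, back to the initiator; otherwise this is an intermediate data
 * transfer and further CTIOs will follow for the same tag.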
1310 */ 1311 1312 if (IS_FC(isp)) { 1313 atio_private_data_t *atp; 1314 ct2_entry_t *cto = (ct2_entry_t *) local; 1315 1316 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1317 cto->ct_header.rqs_entry_count = 1; 1318 if (FCPARAM(isp)->isp_2klogin) { 1319 ((ct2e_entry_t *)cto)->ct_iid = cso->init_id; 1320 } else { 1321 cto->ct_iid = cso->init_id; 1322 if (FCPARAM(isp)->isp_sccfw == 0) { 1323 cto->ct_lun = ccb->ccb_h.target_lun; 1324 } 1325 } 1326 1327 atp = isp_get_atpd(isp, cso->tag_id); 1328 if (atp == NULL) { 1329 xpt_print(ccb->ccb_h.path, 1330 "cannot find private data adjunct for tag %x\n", 1331 cso->tag_id); 1332 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 1333 goto out; 1334 } 1335 1336 cto->ct_rxid = cso->tag_id; 1337 if (cso->dxfer_len == 0) { 1338 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1339 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1340 cto->ct_flags |= CT2_SENDSTATUS; 1341 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1342 cto->ct_resid = 1343 atp->orig_datalen - atp->bytes_xfered; 1344 if (cto->ct_resid < 0) { 1345 cto->rsp.m1.ct_scsi_status |= 1346 CT2_DATA_OVER; 1347 } else if (cto->ct_resid > 0) { 1348 cto->rsp.m1.ct_scsi_status |= 1349 CT2_DATA_UNDER; 1350 } 1351 } 1352 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1353 int m = min(cso->sense_len, MAXRESPLEN); 1354 memcpy(cto->rsp.m1.ct_resp, 1355 &cso->sense_data, m); 1356 cto->rsp.m1.ct_senselen = m; 1357 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1358 } 1359 } else { 1360 cto->ct_flags |= CT2_FLAG_MODE0; 1361 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1362 cto->ct_flags |= CT2_DATA_IN; 1363 } else { 1364 cto->ct_flags |= CT2_DATA_OUT; 1365 } 1366 cto->ct_reloff = atp->bytes_xfered; 1367 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1368 cto->ct_flags |= CT2_SENDSTATUS; 1369 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1370 cto->ct_resid = 1371 atp->orig_datalen - 1372 (atp->bytes_xfered + cso->dxfer_len); 1373 if (cto->ct_resid < 0) { 1374 cto->rsp.m0.ct_scsi_status |= 1375 CT2_DATA_OVER; 1376 } else if (cto->ct_resid > 0) { 1377 cto->rsp.m0.ct_scsi_status |= 1378 CT2_DATA_UNDER; 1379 } 1380 } else { 1381 atp->last_xframt = cso->dxfer_len; 1382 } 1383 /* 1384 * If we're sending data and status back together, 1385 * we can't also send back sense data as well. 
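 * Sense can only be carried by a mode 1 (status-only) CTIO2, so for the
 * data case CAM_SEND_SENSE is simply cleared below.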
1386 */ 1387 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1388 } 1389 1390 if (cto->ct_flags & CT2_SENDSTATUS) { 1391 isp_prt(isp, ISP_LOGTDEBUG0, 1392 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1393 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1394 cso->dxfer_len, cto->ct_resid); 1395 cto->ct_flags |= CT2_CCINCR; 1396 atp->state = ATPD_STATE_LAST_CTIO; 1397 } else { 1398 atp->state = ATPD_STATE_CTIO; 1399 } 1400 cto->ct_timeout = 10; 1401 } else { 1402 ct_entry_t *cto = (ct_entry_t *) local; 1403 1404 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1405 cto->ct_header.rqs_entry_count = 1; 1406 cto->ct_iid = cso->init_id; 1407 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1408 cto->ct_tgt = ccb->ccb_h.target_id; 1409 cto->ct_lun = ccb->ccb_h.target_lun; 1410 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1411 if (AT_HAS_TAG(cso->tag_id)) { 1412 cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id); 1413 cto->ct_flags |= CT_TQAE; 1414 } 1415 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1416 cto->ct_flags |= CT_NODISC; 1417 } 1418 if (cso->dxfer_len == 0) { 1419 cto->ct_flags |= CT_NO_DATA; 1420 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1421 cto->ct_flags |= CT_DATA_IN; 1422 } else { 1423 cto->ct_flags |= CT_DATA_OUT; 1424 } 1425 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1426 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1427 cto->ct_scsi_status = cso->scsi_status; 1428 cto->ct_resid = cso->resid; 1429 isp_prt(isp, ISP_LOGTDEBUG0, 1430 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1431 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1432 cso->tag_id); 1433 } 1434 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1435 cto->ct_timeout = 10; 1436 } 1437 1438 if (isp_save_xs_tgt(isp, ccb, &handle)) { 1439 xpt_print(ccb->ccb_h.path, 1440 "No XFLIST pointers for isp_target_start_ctio\n"); 1441 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1442 goto out; 1443 } 1444 1445 1446 /* 1447 * Call the dma setup routines for this entry (and any subsequent 1448 * CTIOs) if there's data to move, and then tell the f/w it's got 1449 * new things to play with. As with isp_start's usage of DMA setup, 1450 * any swizzling is done in the machine dependent layer. Because 1451 * of this, we put the request onto the queue area first in native 1452 * format. 
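 * The ct_syshandle stored just below is what isp_handle_platform_ctio()
 * later uses (via isp_find_xs_tgt) to recover this CCB on completion.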
1453 */ 1454 1455 if (IS_FC(isp)) { 1456 ct2_entry_t *cto = (ct2_entry_t *) local; 1457 cto->ct_syshandle = handle; 1458 } else { 1459 ct_entry_t *cto = (ct_entry_t *) local; 1460 cto->ct_syshandle = handle; 1461 } 1462 1463 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1464 case CMD_QUEUED: 1465 ISP_ADD_REQUEST(isp, nxti); 1466 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1467 return; 1468 1469 case CMD_EAGAIN: 1470 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1471 break; 1472 1473 default: 1474 break; 1475 } 1476 isp_destroy_tgt_handle(isp, handle); 1477 1478 out: 1479 xpt_done(ccb); 1480 } 1481 1482 static void 1483 isp_refire_putback_atio(void *arg) 1484 { 1485 int s = splcam(); 1486 isp_target_putback_atio(arg); 1487 splx(s); 1488 } 1489 1490 static void 1491 isp_target_putback_atio(union ccb *ccb) 1492 { 1493 ispsoftc_t *isp; 1494 struct ccb_scsiio *cso; 1495 uint32_t nxti, optr; 1496 void *qe; 1497 1498 isp = XS_ISP(ccb); 1499 1500 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1501 xpt_print(ccb->ccb_h.path, 1502 "isp_target_putback_atio: Request Queue Overflow\n"); 1503 (void) timeout(isp_refire_putback_atio, ccb, 10); 1504 return; 1505 } 1506 memset(qe, 0, QENTRY_LEN); 1507 cso = &ccb->csio; 1508 if (IS_FC(isp)) { 1509 at2_entry_t local, *at = &local; 1510 MEMZERO(at, sizeof (at2_entry_t)); 1511 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1512 at->at_header.rqs_entry_count = 1; 1513 if (FCPARAM(isp)->isp_sccfw) { 1514 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1515 } else { 1516 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1517 } 1518 at->at_status = CT_OK; 1519 at->at_rxid = cso->tag_id; 1520 at->at_iid = cso->ccb_h.target_id; 1521 isp_put_atio2(isp, at, qe); 1522 } else { 1523 at_entry_t local, *at = &local; 1524 MEMZERO(at, sizeof (at_entry_t)); 1525 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1526 at->at_header.rqs_entry_count = 1; 1527 at->at_iid = cso->init_id; 1528 at->at_iid |= XS_CHANNEL(ccb) << 7; 1529 at->at_tgt = cso->ccb_h.target_id; 1530 at->at_lun = cso->ccb_h.target_lun; 1531 at->at_status = CT_OK; 1532 at->at_tag_val = AT_GET_TAG(cso->tag_id); 1533 at->at_handle = AT_GET_HANDLE(cso->tag_id); 1534 isp_put_atio(isp, at, qe); 1535 } 1536 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); 1537 ISP_ADD_REQUEST(isp, nxti); 1538 isp_complete_ctio(ccb); 1539 } 1540 1541 static void 1542 isp_complete_ctio(union ccb *ccb) 1543 { 1544 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1545 ccb->ccb_h.status |= CAM_REQ_CMP; 1546 } 1547 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1548 xpt_done(ccb); 1549 } 1550 1551 /* 1552 * Handle ATIO stuff that the generic code can't. 1553 * This means handling CDBs. 1554 */ 1555 1556 static int 1557 isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep) 1558 { 1559 tstate_t *tptr; 1560 int status, bus, iswildcard; 1561 struct ccb_accept_tio *atiop; 1562 1563 /* 1564 * The firmware status (except for the QLTM_SVALID bit) 1565 * indicates why this ATIO was sent to us. 1566 * 1567 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1568 * 1569 * If the DISCONNECTS DISABLED bit is set in the flags field, 1570 * we're still connected on the SCSI bus. 1571 */ 1572 status = aep->at_status; 1573 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 1574 /* 1575 * Bus Phase Sequence error. We should have sense data 1576 * suggested by the f/w. I'm not sure quite yet what 1577 * to do about this for CAM. 
1578 */ 1579 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1580 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1581 return (0); 1582 } 1583 if ((status & ~QLTM_SVALID) != AT_CDB) { 1584 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1585 status); 1586 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1587 return (0); 1588 } 1589 1590 bus = GET_BUS_VAL(aep->at_iid); 1591 tptr = get_lun_statep(isp, bus, aep->at_lun); 1592 if (tptr == NULL) { 1593 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1594 if (tptr == NULL) { 1595 /* 1596 * Because we can't autofeed sense data back with 1597 * a command for parallel SCSI, we can't give back 1598 * a CHECK CONDITION. We'll give back a BUSY status 1599 * instead. This works out okay because the only 1600 * time we should, in fact, get this, is in the 1601 * case that somebody configured us without the 1602 * blackhole driver, so they get what they deserve. 1603 */ 1604 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1605 return (0); 1606 } 1607 iswildcard = 1; 1608 } else { 1609 iswildcard = 0; 1610 } 1611 1612 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1613 if (atiop == NULL) { 1614 /* 1615 * Because we can't autofeed sense data back with 1616 * a command for parallel SCSI, we can't give back 1617 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1618 * instead. This works out okay because the only time we 1619 * should, in fact, get this, is in the case that we've 1620 * run out of ATIOS. 1621 */ 1622 xpt_print(tptr->owner, 1623 "no ATIOS for lun %d from initiator %d on channel %d\n", 1624 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1625 if (aep->at_flags & AT_TQAE) 1626 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1627 else 1628 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1629 rls_lun_statep(isp, tptr); 1630 return (0); 1631 } 1632 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1633 tptr->atio_count--; 1634 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1635 aep->at_lun, tptr->atio_count); 1636 if (iswildcard) { 1637 atiop->ccb_h.target_id = aep->at_tgt; 1638 atiop->ccb_h.target_lun = aep->at_lun; 1639 } 1640 if (aep->at_flags & AT_NODISC) { 1641 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1642 } else { 1643 atiop->ccb_h.flags = 0; 1644 } 1645 1646 if (status & QLTM_SVALID) { 1647 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1648 atiop->sense_len = amt; 1649 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1650 } else { 1651 atiop->sense_len = 0; 1652 } 1653 1654 atiop->init_id = GET_IID_VAL(aep->at_iid); 1655 atiop->cdb_len = aep->at_cdblen; 1656 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1657 atiop->ccb_h.status = CAM_CDB_RECVD; 1658 /* 1659 * Construct a tag 'id' based upon tag value (which may be 0..255) 1660 * and the handle (which we have to preserve). 1661 */ 1662 AT_MAKE_TAGID(atiop->tag_id, bus, device_get_unit(isp->isp_dev), aep); 1663 if (aep->at_flags & AT_TQAE) { 1664 atiop->tag_action = aep->at_tag_type; 1665 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1666 } 1667 xpt_done((union ccb*)atiop); 1668 isp_prt(isp, ISP_LOGTDEBUG0, 1669 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", 1670 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), 1671 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, 1672 aep->at_tag_type, (aep->at_flags & AT_NODISC)? 
1673 "nondisc" : "disconnecting"); 1674 rls_lun_statep(isp, tptr); 1675 return (0); 1676 } 1677 1678 static int 1679 isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) 1680 { 1681 lun_id_t lun; 1682 tstate_t *tptr; 1683 struct ccb_accept_tio *atiop; 1684 atio_private_data_t *atp; 1685 1686 /* 1687 * The firmware status (except for the QLTM_SVALID bit) 1688 * indicates why this ATIO was sent to us. 1689 * 1690 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1691 */ 1692 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1693 isp_prt(isp, ISP_LOGWARN, 1694 "bogus atio (0x%x) leaked to platform", aep->at_status); 1695 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1696 return (0); 1697 } 1698 1699 if (FCPARAM(isp)->isp_sccfw) { 1700 lun = aep->at_scclun; 1701 } else { 1702 lun = aep->at_lun; 1703 } 1704 tptr = get_lun_statep(isp, 0, lun); 1705 if (tptr == NULL) { 1706 isp_prt(isp, ISP_LOGTDEBUG0, 1707 "[0x%x] no state pointer for lun %d", aep->at_rxid, lun); 1708 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1709 if (tptr == NULL) { 1710 isp_endcmd(isp, aep, 1711 SCSI_STATUS_CHECK_COND | ECMD_SVALID | 1712 (0x5 << 12) | (0x25 << 16), 0); 1713 return (0); 1714 } 1715 } 1716 1717 atp = isp_get_atpd(isp, 0); 1718 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1719 if (atiop == NULL || atp == NULL) { 1720 1721 /* 1722 * Because we can't autofeed sense data back with 1723 * a command for parallel SCSI, we can't give back 1724 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1725 * instead. This works out okay because the only time we 1726 * should, in fact, get this, is in the case that we've 1727 * run out of ATIOS. 1728 */ 1729 xpt_print(tptr->owner, 1730 "no %s for lun %d from initiator %d\n", 1731 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : 1732 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid); 1733 rls_lun_statep(isp, tptr); 1734 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1735 return (0); 1736 } 1737 atp->state = ATPD_STATE_ATIO; 1738 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1739 tptr->atio_count--; 1740 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1741 lun, tptr->atio_count); 1742 1743 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1744 atiop->ccb_h.target_id = FCPARAM(isp)->isp_loopid; 1745 atiop->ccb_h.target_lun = lun; 1746 } 1747 /* 1748 * We don't get 'suggested' sense data as we do with SCSI cards. 1749 */ 1750 atiop->sense_len = 0; 1751 1752 atiop->init_id = aep->at_iid; 1753 atiop->cdb_len = ATIO2_CDBLEN; 1754 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1755 atiop->ccb_h.status = CAM_CDB_RECVD; 1756 atiop->tag_id = aep->at_rxid; 1757 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1758 case ATIO2_TC_ATTR_SIMPLEQ: 1759 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1760 break; 1761 case ATIO2_TC_ATTR_HEADOFQ: 1762 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1763 break; 1764 case ATIO2_TC_ATTR_ORDERED: 1765 atiop->tag_action = MSG_ORDERED_Q_TAG; 1766 break; 1767 case ATIO2_TC_ATTR_ACAQ: /* ?? 
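 no CAM tag action maps to ACA, so fall through and treat as untagged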
*/ 1768 case ATIO2_TC_ATTR_UNTAGGED: 1769 default: 1770 atiop->tag_action = 0; 1771 break; 1772 } 1773 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 1774 1775 atp->tag = atiop->tag_id; 1776 atp->lun = lun; 1777 atp->orig_datalen = aep->at_datalen; 1778 atp->last_xframt = 0; 1779 atp->bytes_xfered = 0; 1780 atp->state = ATPD_STATE_CAM; 1781 xpt_done((union ccb*)atiop); 1782 1783 isp_prt(isp, ISP_LOGTDEBUG0, 1784 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u", 1785 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid, 1786 lun, aep->at_taskflags, aep->at_datalen); 1787 rls_lun_statep(isp, tptr); 1788 return (0); 1789 } 1790 1791 static int 1792 isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) 1793 { 1794 union ccb *ccb; 1795 int sentstatus, ok, notify_cam, resid = 0; 1796 uint16_t tval; 1797 1798 /* 1799 * CTIO and CTIO2 are close enough.... 1800 */ 1801 1802 ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle); 1803 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio")); 1804 isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle); 1805 1806 if (IS_FC(isp)) { 1807 ct2_entry_t *ct = arg; 1808 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid); 1809 if (atp == NULL) { 1810 isp_prt(isp, ISP_LOGERR, 1811 "cannot find adjunct for %x after I/O", 1812 ct->ct_rxid); 1813 return (0); 1814 } 1815 sentstatus = ct->ct_flags & CT2_SENDSTATUS; 1816 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1817 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { 1818 ccb->ccb_h.status |= CAM_SENT_SENSE; 1819 } 1820 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1821 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { 1822 resid = ct->ct_resid; 1823 atp->bytes_xfered += (atp->last_xframt - resid); 1824 atp->last_xframt = 0; 1825 } 1826 if (sentstatus || !ok) { 1827 atp->tag = 0; 1828 } 1829 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, 1830 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", 1831 ct->ct_rxid, ct->ct_status, ct->ct_flags, 1832 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, 1833 resid, sentstatus? "FIN" : "MID"); 1834 tval = ct->ct_rxid; 1835 1836 /* XXX: should really come after isp_complete_ctio */ 1837 atp->state = ATPD_STATE_PDON; 1838 } else { 1839 ct_entry_t *ct = arg; 1840 sentstatus = ct->ct_flags & CT_SENDSTATUS; 1841 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1842 /* 1843 * We *ought* to be able to get back to the original ATIO 1844 * here, but for some reason this gets lost. It's just as 1845 * well because it's squirrelled away as part of periph 1846 * private data. 1847 * 1848 * We can live without it as long as we continue to use 1849 * the auto-replenish feature for CTIOs. 1850 */ 1851 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1852 if (ct->ct_status & QLTM_SVALID) { 1853 char *sp = (char *)ct; 1854 sp += CTIO_SENSE_OFFSET; 1855 ccb->csio.sense_len = 1856 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); 1857 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); 1858 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1859 } 1860 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 1861 resid = ct->ct_resid; 1862 } 1863 isp_prt(isp, ISP_LOGTDEBUG0, 1864 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", 1865 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, 1866 ct->ct_status, ct->ct_flags, resid, 1867 sentstatus? 
"FIN" : "MID"); 1868 tval = ct->ct_fwhandle; 1869 } 1870 ccb->csio.resid += resid; 1871 1872 /* 1873 * We're here either because intermediate data transfers are done 1874 * and/or the final status CTIO (which may have joined with a 1875 * Data Transfer) is done. 1876 * 1877 * In any case, for this platform, the upper layers figure out 1878 * what to do next, so all we do here is collect status and 1879 * pass information along. Any DMA handles have already been 1880 * freed. 1881 */ 1882 if (notify_cam == 0) { 1883 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1884 return (0); 1885 } 1886 1887 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1888 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1889 1890 if (!ok) { 1891 isp_target_putback_atio(ccb); 1892 } else { 1893 isp_complete_ctio(ccb); 1894 1895 } 1896 return (0); 1897 } 1898 1899 static int 1900 isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inp) 1901 { 1902 return (0); /* XXXX */ 1903 } 1904 1905 static int 1906 isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp) 1907 { 1908 1909 switch (inp->in_status) { 1910 case IN_PORT_LOGOUT: 1911 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1912 inp->in_iid); 1913 break; 1914 case IN_PORT_CHANGED: 1915 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1916 inp->in_iid); 1917 break; 1918 case IN_GLOBAL_LOGO: 1919 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1920 break; 1921 case IN_ABORT_TASK: 1922 { 1923 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1924 struct ccb_immed_notify *inot = NULL; 1925 1926 if (atp) { 1927 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1928 if (tptr) { 1929 inot = (struct ccb_immed_notify *) 1930 SLIST_FIRST(&tptr->inots); 1931 if (inot) { 1932 tptr->inot_count--; 1933 SLIST_REMOVE_HEAD(&tptr->inots, 1934 sim_links.sle); 1935 isp_prt(isp, ISP_LOGTDEBUG0, 1936 "Take FREE INOT count now %d", 1937 tptr->inot_count); 1938 } 1939 } 1940 isp_prt(isp, ISP_LOGWARN, 1941 "abort task RX_ID %x IID %d state %d", 1942 inp->in_seqid, inp->in_iid, atp->state); 1943 } else { 1944 isp_prt(isp, ISP_LOGWARN, 1945 "abort task RX_ID %x from iid %d, state unknown", 1946 inp->in_seqid, inp->in_iid); 1947 } 1948 if (inot) { 1949 inot->initiator_id = inp->in_iid; 1950 inot->sense_len = 0; 1951 inot->message_args[0] = MSG_ABORT_TAG; 1952 inot->message_args[1] = inp->in_seqid & 0xff; 1953 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; 1954 inot->ccb_h.status = CAM_MESSAGE_RECV; 1955 xpt_done((union ccb *)inot); 1956 } 1957 break; 1958 } 1959 default: 1960 break; 1961 } 1962 return (0); 1963 } 1964 #endif 1965 1966 static void 1967 isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) 1968 { 1969 struct cam_sim *sim; 1970 ispsoftc_t *isp; 1971 1972 sim = (struct cam_sim *)cbarg; 1973 isp = (ispsoftc_t *) cam_sim_softc(sim); 1974 switch (code) { 1975 case AC_LOST_DEVICE: 1976 if (IS_SCSI(isp)) { 1977 uint16_t oflags, nflags; 1978 sdparam *sdp = isp->isp_param; 1979 int tgt; 1980 1981 tgt = xpt_path_target_id(path); 1982 if (tgt >= 0) { 1983 sdp += cam_sim_bus(sim); 1984 nflags = sdp->isp_devparam[tgt].nvrm_flags; 1985 #ifndef ISP_TARGET_MODE 1986 nflags &= DPARM_SAFE_DFLT; 1987 if (isp->isp_loaded_fw) { 1988 nflags |= DPARM_NARROW | DPARM_ASYNC; 1989 } 1990 #else 1991 nflags = DPARM_DEFAULT; 1992 #endif 1993 oflags = sdp->isp_devparam[tgt].goal_flags; 1994 sdp->isp_devparam[tgt].goal_flags = nflags; 1995 sdp->isp_devparam[tgt].dev_update = 1; 1996 isp->isp_update |= (1 << cam_sim_bus(sim)); 
1997 (void) isp_control(isp, 1998 ISPCTL_UPDATE_PARAMS, NULL); 1999 sdp->isp_devparam[tgt].goal_flags = oflags; 2000 } 2001 } 2002 break; 2003 default: 2004 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); 2005 break; 2006 } 2007 } 2008 2009 static void 2010 isp_poll(struct cam_sim *sim) 2011 { 2012 ispsoftc_t *isp = cam_sim_softc(sim); 2013 uint32_t isr; 2014 uint16_t sema, mbox; 2015 2016 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2017 isp_intr(isp, isr, sema, mbox); 2018 } 2019 } 2020 2021 2022 static int isp_watchdog_work(ispsoftc_t *, XS_T *); 2023 2024 static int 2025 isp_watchdog_work(ispsoftc_t *isp, XS_T *xs) 2026 { 2027 uint32_t handle; 2028 2029 /* 2030 * We've decided this command is dead. Make sure we're not trying 2031 * to kill a command that's already dead by getting its handle and 2032 * seeing whether it's still alive. 2033 */ 2034 handle = isp_find_handle(isp, xs); 2035 if (handle) { 2036 uint32_t isr; 2037 uint16_t sema, mbox; 2038 2039 if (XS_CMD_DONE_P(xs)) { 2040 isp_prt(isp, ISP_LOGDEBUG1, 2041 "watchdog found done cmd (handle 0x%x)", handle); 2042 return (1); 2043 } 2044 2045 if (XS_CMD_WDOG_P(xs)) { 2046 isp_prt(isp, ISP_LOGDEBUG2, 2047 "recursive watchdog (handle 0x%x)", handle); 2048 return (1); 2049 } 2050 2051 XS_CMD_S_WDOG(xs); 2052 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2053 isp_intr(isp, isr, sema, mbox); 2054 } 2055 if (XS_CMD_DONE_P(xs)) { 2056 isp_prt(isp, ISP_LOGDEBUG2, 2057 "watchdog cleanup for handle 0x%x", handle); 2058 isp_free_pcmd(isp, (union ccb *)xs); 2059 xpt_done((union ccb *) xs); 2060 } else if (XS_CMD_GRACE_P(xs)) { 2061 /* 2062 * Make sure the command is *really* dead before we 2063 * release the handle (and DMA resources) for reuse. 2064 */ 2065 (void) isp_control(isp, ISPCTL_ABORT_CMD, xs); 2066 2067 /* 2068 * After this point, the command is really dead. 2069 */ 2070 if (XS_XFRLEN(xs)) { 2071 ISP_DMAFREE(isp, xs, handle); 2072 } 2073 isp_destroy_handle(isp, handle); 2074 xpt_print(xs->ccb_h.path, 2075 "watchdog timeout for handle 0x%x\n", handle); 2076 XS_SETERR(xs, CAM_CMD_TIMEOUT); 2077 XS_CMD_C_WDOG(xs); 2078 isp_done(xs); 2079 } else { 2080 XS_CMD_C_WDOG(xs); 2081 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 2082 XS_CMD_S_GRACE(xs); 2083 isp->isp_sendmarker |= 1 << XS_CHANNEL(xs); 2084 } 2085 return (1); 2086 } 2087 return (0); 2088 } 2089 2090 static void 2091 isp_watchdog(void *arg) 2092 { 2093 ispsoftc_t *isp; 2094 XS_T *xs = arg; 2095 int r; 2096 2097 for (r = 0, isp = isplist; r == 0 && isp; isp = isp->isp_osinfo.next) { 2098 ISP_LOCK(isp); 2099 r = isp_watchdog_work(isp, xs); 2100 ISP_UNLOCK(isp); 2101 } 2102 if (isp == NULL) { 2103 printf("isp_watchdog: nobody had %p active\n", arg); 2104 } 2105 } 2106 2107 2108 #if __FreeBSD_version >= 600000 2109 static void 2110 isp_make_here(ispsoftc_t *isp, int tgt) 2111 { 2112 union ccb *ccb; 2113 /* 2114 * Allocate a CCB, create a wildcard path for this bus, 2115 * and schedule a rescan.
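 * The CCB is handed off to xpt_rescan(); it is only freed here on the
 * error path where path creation fails.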
2116 */
2117 ccb = xpt_alloc_ccb_nowait();
2118 if (ccb == NULL) {
2119 isp_prt(isp, ISP_LOGWARN, "unable to alloc CCB for rescan");
2120 return;
2121 }
2122 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2123 cam_sim_path(isp->isp_sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2124 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan");
2125 xpt_free_ccb(ccb);
2126 return;
2127 }
2128 xpt_rescan(ccb);
2129 }
2130
2131 static void
2132 isp_make_gone(ispsoftc_t *isp, int tgt)
2133 {
2134 struct cam_path *tp;
2135 if (xpt_create_path(&tp, NULL, cam_sim_path(isp->isp_sim), tgt,
2136 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2137 xpt_async(AC_LOST_DEVICE, tp, NULL);
2138 xpt_free_path(tp);
2139 }
2140 }
2141 #else
2142 #define isp_make_here(isp, tgt) do { ; } while (0)
2143 #define isp_make_gone(isp, tgt) do { ; } while (0)
2144 #endif
2145
2146
2147 /*
2148 * Gone Device Timer Function- when we have decided that a device has gone
2149 * away, we wait a specific period of time before telling the OS that it
2150 * has departed.
2151 *
2152 * This timer function fires once a second and then scans the port database
2153 * for devices that are marked dead but still have a virtual target assigned.
2154 * We decrement a counter for that port database entry, and when it hits zero,
2155 * we tell the OS the device has gone away.
2156 */
2157 static void
2158 isp_gdt(void *arg)
2159 {
2160 ispsoftc_t *isp = arg;
2161 fcportdb_t *lp;
2162 int dbidx, tgt, more_to_do = 0;
2163
2164 ISP_LOCK(isp);
2165 isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
2166 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2167 lp = &FCPARAM(isp)->portdb[dbidx];
2168
2169 if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
2170 continue;
2171 }
2172 if (lp->ini_map_idx == 0) {
2173 continue;
2174 }
2175 if (lp->new_reserved == 0) {
2176 continue;
2177 }
2178 lp->new_reserved -= 1;
2179 if (lp->new_reserved != 0) {
2180 more_to_do++;
2181 continue;
2182 }
2183 tgt = lp->ini_map_idx - 1;
2184 FCPARAM(isp)->isp_ini_map[tgt] = 0;
2185 lp->ini_map_idx = 0;
2186 lp->state = FC_PORTDB_STATE_NIL;
2187 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
2188 "Gone Device Timeout");
2189 isp_make_gone(isp, tgt);
2190 }
2191 if (more_to_do) {
2192 isp->isp_osinfo.gdt_running = 1;
2193 callout_reset(&isp->isp_osinfo.gdt, hz, isp_gdt, isp);
2194 } else {
2195 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
2196 "stopping Gone Device Timer");
2197 isp->isp_osinfo.gdt_running = 0;
2198 }
2199 ISP_UNLOCK(isp);
2200 }
2201
2202 /*
2203 * Loop Down Timer Function- when the loop goes down, a timer is started,
2204 * and after it expires we come here, take all probational devices that
2205 * the OS knows about, and tell the OS that they've gone away.
2206 *
2207 * We don't clear the devices out of our port database because, when the
2208 * loop comes back up, we have to do some actual cleanup with the chip at
2209 * that point (implicit PLOGO, e.g., to get the chip's port database state right).
2210 */
2211 static void
2212 isp_ldt(void *arg)
2213 {
2214 ispsoftc_t *isp = arg;
2215 fcportdb_t *lp;
2216 int dbidx, tgt;
2217
2218 ISP_LOCK(isp);
2219
2220 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
2221
2222 /*
2223 * Notify the OS of all targets that we now consider to have departed.
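 * (Only entries with a target id assigned are announced: ini_map_idx is
 * one-based, so a non-zero value means the entry maps to target
 * ini_map_idx - 1, and zero means no target id has been assigned.)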
2224 */ 2225 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2226 lp = &FCPARAM(isp)->portdb[dbidx]; 2227 2228 if (lp->state != FC_PORTDB_STATE_PROBATIONAL) { 2229 continue; 2230 } 2231 if (lp->ini_map_idx == 0) { 2232 continue; 2233 } 2234 2235 /* 2236 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST! 2237 */ 2238 2239 /* 2240 * Mark that we've announced that this device is gone.... 2241 */ 2242 lp->reserved = 1; 2243 2244 /* 2245 * but *don't* change the state of the entry. Just clear 2246 * any target id stuff and announce to CAM that the 2247 * device is gone. This way any necessary PLOGO stuff 2248 * will happen when loop comes back up. 2249 */ 2250 2251 tgt = lp->ini_map_idx - 1; 2252 FCPARAM(isp)->isp_ini_map[tgt] = 0; 2253 lp->ini_map_idx = 0; 2254 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, 2255 "Loop Down Timeout"); 2256 isp_make_gone(isp, tgt); 2257 } 2258 2259 /* 2260 * The loop down timer has expired. Wake up the kthread 2261 * to notice that fact (or make it false). 2262 */ 2263 isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1; 2264 wakeup(ISP_KT_WCHAN(isp)); 2265 ISP_UNLOCK(isp); 2266 } 2267 2268 static void 2269 isp_kthread(void *arg) 2270 { 2271 ispsoftc_t *isp = arg; 2272 int slp = 0; 2273 #if __FreeBSD_version < 500000 2274 int s = splcam(); 2275 #elif __FreeBSD_version < 700037 2276 mtx_lock(&Giant); 2277 #else 2278 mtx_lock(&isp->isp_osinfo.lock); 2279 #endif 2280 /* 2281 * The first loop is for our usage where we have yet to have 2282 * gotten good fibre channel state. 2283 */ 2284 for (;;) { 2285 int wasfrozen, lb, lim; 2286 2287 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2288 "isp_kthread: checking FC state"); 2289 isp->isp_osinfo.mbox_sleep_ok = 1; 2290 lb = isp_fc_runstate(isp, 250000); 2291 isp->isp_osinfo.mbox_sleep_ok = 0; 2292 if (lb) { 2293 /* 2294 * Increment loop down time by the last sleep interval 2295 */ 2296 isp->isp_osinfo.loop_down_time += slp; 2297 2298 if (lb < 0) { 2299 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2300 "kthread: FC loop not up (down count %d)", 2301 isp->isp_osinfo.loop_down_time); 2302 } else { 2303 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2304 "kthread: FC got to %d (down count %d)", 2305 lb, isp->isp_osinfo.loop_down_time); 2306 } 2307 2308 2309 /* 2310 * If we've never seen loop up and we've waited longer 2311 * than quickboot time, or we've seen loop up but we've 2312 * waited longer than loop_down_limit, give up and go 2313 * to sleep until loop comes up. 2314 */ 2315 if (FCPARAM(isp)->loop_seen_once == 0) { 2316 lim = isp_quickboot_time; 2317 } else { 2318 lim = isp->isp_osinfo.loop_down_limit; 2319 } 2320 if (isp->isp_osinfo.loop_down_time >= lim) { 2321 isp_freeze_loopdown(isp, "loop limit hit"); 2322 slp = 0; 2323 } else if (isp->isp_osinfo.loop_down_time < 10) { 2324 slp = 1; 2325 } else if (isp->isp_osinfo.loop_down_time < 30) { 2326 slp = 5; 2327 } else if (isp->isp_osinfo.loop_down_time < 60) { 2328 slp = 10; 2329 } else if (isp->isp_osinfo.loop_down_time < 120) { 2330 slp = 20; 2331 } else { 2332 slp = 30; 2333 } 2334 2335 } else { 2336 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2337 "isp_kthread: FC state OK"); 2338 isp->isp_osinfo.loop_down_time = 0; 2339 slp = 0; 2340 } 2341 2342 /* 2343 * If we'd frozen the simq, unfreeze it now so that CAM 2344 * can start sending us commands. If the FC state isn't 2345 * okay yet, they'll hit that in isp_start which will 2346 * freeze the queue again. 
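 * SIMQFRZ_LOOPDOWN is only one of the freeze reasons tracked in
 * simqfrozen; the queue is released only once no other freeze bits
 * remain set.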
2347 */ 2348 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 2349 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 2350 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 2351 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2352 "isp_kthread: releasing simq"); 2353 xpt_release_simq(isp->isp_sim, 1); 2354 } 2355 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2356 "isp_kthread: sleep time %d", slp); 2357 #if __FreeBSD_version < 700037 2358 tsleep(ISP_KT_WCHAN(isp), PRIBIO, "ispf", slp * hz); 2359 #else 2360 msleep(ISP_KT_WCHAN(isp), &isp->isp_osinfo.lock, 2361 PRIBIO, "ispf", slp * hz); 2362 #endif 2363 /* 2364 * If slp is zero, we're waking up for the first time after 2365 * things have been okay. In this case, we set a deferral state 2366 * for all commands and delay hysteresis seconds before starting 2367 * the FC state evaluation. This gives the loop/fabric a chance 2368 * to settle. 2369 */ 2370 if (slp == 0 && isp->isp_osinfo.hysteresis) { 2371 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2372 "isp_kthread: sleep hysteresis tick time %d", 2373 isp->isp_osinfo.hysteresis * hz); 2374 #if __FreeBSD_version < 700037 2375 (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT", 2376 (isp->isp_osinfo.hysteresis * hz)); 2377 #else 2378 (void) msleep(&isp_fabric_hysteresis, 2379 &isp->isp_osinfo.lock, PRIBIO, "ispT", 2380 (isp->isp_osinfo.hysteresis * hz)); 2381 #endif 2382 } 2383 } 2384 #if __FreeBSD_version < 500000 2385 splx(s); 2386 #elif __FreeBSD_version < 700037 2387 mtx_unlock(&Giant); 2388 #else 2389 mtx_unlock(&isp->isp_osinfo.lock); 2390 #endif 2391 } 2392 2393 #if __FreeBSD_version < 500000 2394 static void isp_action_wrk(struct cam_sim *, union ccb *); 2395 static void 2396 isp_action(struct cam_sim *sim, union ccb *ccb) 2397 { 2398 ispsoftc_t *isp = (ispsoftc_t *)cam_sim_softc(sim); 2399 ISP_LOCK(isp); 2400 isp_action_wrk(sim, ccb); 2401 ISP_UNLOCK(isp); 2402 } 2403 #define isp_action isp_action_wrk 2404 #endif 2405 2406 static void 2407 isp_action(struct cam_sim *sim, union ccb *ccb) 2408 { 2409 int bus, tgt, ts, error, lim; 2410 ispsoftc_t *isp; 2411 struct ccb_trans_settings *cts; 2412 2413 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2414 2415 isp = (ispsoftc_t *)cam_sim_softc(sim); 2416 if (isp->isp_state != ISP_RUNSTATE && 2417 ccb->ccb_h.func_code == XPT_SCSI_IO) { 2418 isp_init(isp); 2419 if (isp->isp_state != ISP_INITSTATE) { 2420 /* 2421 * Lie. Say it was a selection timeout. 2422 */ 2423 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 2424 xpt_freeze_devq(ccb->ccb_h.path, 1); 2425 xpt_done(ccb); 2426 return; 2427 } 2428 isp->isp_state = ISP_RUNSTATE; 2429 } 2430 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2431 ISP_PCMD(ccb) = NULL; 2432 2433 switch (ccb->ccb_h.func_code) { 2434 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2435 /* 2436 * Do a couple of preliminary checks... 
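 * A CDB pointer flagged as a physical address is rejected outright,
 * and DIAGNOSTIC kernels also fail obviously out-of-range target and
 * lun numbers before the command is started.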
2437 */ 2438 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2439 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2440 ccb->ccb_h.status = CAM_REQ_INVALID; 2441 xpt_done(ccb); 2442 break; 2443 } 2444 } 2445 #ifdef DIAGNOSTIC 2446 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2447 xpt_print(ccb->ccb_h.path, "invalid target\n"); 2448 ccb->ccb_h.status = CAM_PATH_INVALID; 2449 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2450 xpt_print(ccb->ccb_h.path, "invalid lun\n"); 2451 ccb->ccb_h.status = CAM_PATH_INVALID; 2452 } 2453 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2454 xpt_done(ccb); 2455 break; 2456 } 2457 #endif 2458 ccb->csio.scsi_status = SCSI_STATUS_OK; 2459 if (isp_get_pcmd(isp, ccb)) { 2460 isp_prt(isp, ISP_LOGWARN, "out of PCMDs"); 2461 cam_freeze_devq(ccb->ccb_h.path); 2462 cam_release_devq(ccb->ccb_h.path, 2463 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); 2464 xpt_done(ccb); 2465 break; 2466 } 2467 error = isp_start((XS_T *) ccb); 2468 switch (error) { 2469 case CMD_QUEUED: 2470 XS_CMD_S_CLEAR(ccb); 2471 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2472 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) { 2473 break; 2474 } 2475 ts = ccb->ccb_h.timeout; 2476 if (ts == CAM_TIME_DEFAULT) { 2477 ts = 60*1000; 2478 } 2479 ts = isp_mstohz(ts); 2480 callout_reset(&PISP_PCMD(ccb)->wdog, ts, 2481 isp_watchdog, ccb); 2482 break; 2483 case CMD_RQLATER: 2484 /* 2485 * Handle initial and subsequent loop down cases 2486 */ 2487 if (FCPARAM(isp)->loop_seen_once == 0) { 2488 lim = isp_quickboot_time; 2489 } else { 2490 lim = isp->isp_osinfo.loop_down_limit; 2491 } 2492 if (isp->isp_osinfo.loop_down_time >= lim) { 2493 isp_prt(isp, ISP_LOGDEBUG0, 2494 "%d.%d downtime (%d) > lim (%d)", 2495 XS_TGT(ccb), XS_LUN(ccb), 2496 isp->isp_osinfo.loop_down_time, lim); 2497 ccb->ccb_h.status = 2498 CAM_SEL_TIMEOUT|CAM_DEV_QFRZN; 2499 xpt_freeze_devq(ccb->ccb_h.path, 1); 2500 isp_free_pcmd(isp, ccb); 2501 xpt_done(ccb); 2502 break; 2503 } 2504 isp_prt(isp, ISP_LOGDEBUG0, 2505 "%d.%d retry later", XS_TGT(ccb), XS_LUN(ccb)); 2506 /* 2507 * Otherwise, retry in a while. 2508 */ 2509 cam_freeze_devq(ccb->ccb_h.path); 2510 cam_release_devq(ccb->ccb_h.path, 2511 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); 2512 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2513 isp_free_pcmd(isp, ccb); 2514 xpt_done(ccb); 2515 break; 2516 case CMD_EAGAIN: 2517 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2518 isp_free_pcmd(isp, ccb); 2519 xpt_done(ccb); 2520 break; 2521 case CMD_COMPLETE: 2522 isp_done((struct ccb_scsiio *) ccb); 2523 break; 2524 default: 2525 isp_prt(isp, ISP_LOGERR, 2526 "What's this? 
0x%x at %d in file %s", 2527 error, __LINE__, __FILE__); 2528 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2529 isp_free_pcmd(isp, ccb); 2530 xpt_done(ccb); 2531 } 2532 break; 2533 2534 #ifdef ISP_TARGET_MODE 2535 case XPT_EN_LUN: /* Enable LUN as a target */ 2536 { 2537 int seq, i; 2538 seq = isp_en_lun(isp, ccb); 2539 if (seq < 0) { 2540 xpt_done(ccb); 2541 break; 2542 } 2543 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) { 2544 uint32_t isr; 2545 uint16_t sema, mbox; 2546 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2547 isp_intr(isp, isr, sema, mbox); 2548 } 2549 DELAY(1000); 2550 } 2551 break; 2552 } 2553 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2554 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2555 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2556 { 2557 tstate_t *tptr = 2558 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2559 if (tptr == NULL) { 2560 ccb->ccb_h.status = CAM_LUN_INVALID; 2561 xpt_done(ccb); 2562 break; 2563 } 2564 ccb->ccb_h.sim_priv.entries[0].field = 0; 2565 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2566 ccb->ccb_h.flags = 0; 2567 2568 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2569 /* 2570 * Note that the command itself may not be done- 2571 * it may not even have had the first CTIO sent. 2572 */ 2573 tptr->atio_count++; 2574 isp_prt(isp, ISP_LOGTDEBUG0, 2575 "Put FREE ATIO, lun %d, count now %d", 2576 ccb->ccb_h.target_lun, tptr->atio_count); 2577 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2578 sim_links.sle); 2579 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2580 tptr->inot_count++; 2581 isp_prt(isp, ISP_LOGTDEBUG0, 2582 "Put FREE INOT, lun %d, count now %d", 2583 ccb->ccb_h.target_lun, tptr->inot_count); 2584 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2585 sim_links.sle); 2586 } else { 2587 isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");; 2588 } 2589 rls_lun_statep(isp, tptr); 2590 ccb->ccb_h.status = CAM_REQ_INPROG; 2591 break; 2592 } 2593 case XPT_CONT_TARGET_IO: 2594 { 2595 isp_target_start_ctio(isp, ccb); 2596 break; 2597 } 2598 #endif 2599 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2600 2601 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2602 tgt = ccb->ccb_h.target_id; 2603 tgt |= (bus << 16); 2604 2605 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2606 if (error) { 2607 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2608 } else { 2609 ccb->ccb_h.status = CAM_REQ_CMP; 2610 } 2611 xpt_done(ccb); 2612 break; 2613 case XPT_ABORT: /* Abort the specified CCB */ 2614 { 2615 union ccb *accb = ccb->cab.abort_ccb; 2616 switch (accb->ccb_h.func_code) { 2617 #ifdef ISP_TARGET_MODE 2618 case XPT_ACCEPT_TARGET_IO: 2619 case XPT_IMMED_NOTIFY: 2620 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2621 break; 2622 case XPT_CONT_TARGET_IO: 2623 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2624 ccb->ccb_h.status = CAM_UA_ABORT; 2625 break; 2626 #endif 2627 case XPT_SCSI_IO: 2628 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2629 if (error) { 2630 ccb->ccb_h.status = CAM_UA_ABORT; 2631 } else { 2632 ccb->ccb_h.status = CAM_REQ_CMP; 2633 } 2634 break; 2635 default: 2636 ccb->ccb_h.status = CAM_REQ_INVALID; 2637 break; 2638 } 2639 xpt_done(ccb); 2640 break; 2641 } 2642 #ifdef CAM_NEW_TRAN_CODE 2643 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2644 #else 2645 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2646 #endif 2647 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2648 cts = &ccb->cts; 2649 if (!IS_CURRENT_SETTINGS(cts)) { 2650 
ccb->ccb_h.status = CAM_REQ_INVALID;
2651 xpt_done(ccb);
2652 break;
2653 }
2654 tgt = cts->ccb_h.target_id;
2655 if (IS_SCSI(isp)) {
2656 #ifndef CAM_NEW_TRAN_CODE
2657 sdparam *sdp = isp->isp_param;
2658 uint16_t *dptr;
2659
2660 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2661
2662 sdp += bus;
2663 /*
2664 * We always update (internally) from goal_flags
2665 * so any request to change settings just gets
2666 * vectored to that location.
2667 */
2668 dptr = &sdp->isp_devparam[tgt].goal_flags;
2669
2670 /*
2671 * Note that these operations affect the
2672 * goal flags (goal_flags)- not
2673 * the current state flags. Then we mark
2674 * things so that the next operation to
2675 * this HBA will cause the update to occur.
2676 */
2677 if (cts->valid & CCB_TRANS_DISC_VALID) {
2678 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2679 *dptr |= DPARM_DISC;
2680 } else {
2681 *dptr &= ~DPARM_DISC;
2682 }
2683 }
2684 if (cts->valid & CCB_TRANS_TQ_VALID) {
2685 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2686 *dptr |= DPARM_TQING;
2687 } else {
2688 *dptr &= ~DPARM_TQING;
2689 }
2690 }
2691 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2692 switch (cts->bus_width) {
2693 case MSG_EXT_WDTR_BUS_16_BIT:
2694 *dptr |= DPARM_WIDE;
2695 break;
2696 default:
2697 *dptr &= ~DPARM_WIDE;
2698 }
2699 }
2700 /*
2701 * Any SYNC RATE of nonzero and SYNC_OFFSET
2702 * of nonzero will cause us to go to the
2703 * selected (from NVRAM) maximum value for
2704 * this device. At a later point, we'll
2705 * allow finer control.
2706 */
2707 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2708 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2709 (cts->sync_offset > 0)) {
2710 *dptr |= DPARM_SYNC;
2711 } else {
2712 *dptr &= ~DPARM_SYNC;
2713 }
2714 *dptr |= DPARM_SAFE_DFLT;
2715 #else
2716 struct ccb_trans_settings_scsi *scsi =
2717 &cts->proto_specific.scsi;
2718 struct ccb_trans_settings_spi *spi =
2719 &cts->xport_specific.spi;
2720 sdparam *sdp = isp->isp_param;
2721 uint16_t *dptr;
2722
2723 if (spi->valid == 0 && scsi->valid == 0) {
2724 ccb->ccb_h.status = CAM_REQ_CMP;
2725 xpt_done(ccb);
2726 break;
2727 }
2728
2729 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2730 sdp += bus;
2731 /*
2732 * We always update (internally) from goal_flags
2733 * so any request to change settings just gets
2734 * vectored to that location.
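 * Nothing is written to the hardware here; dev_update and the per-bus
 * bit in isp_update are set further down, and the chip is reprogrammed
 * the next time those update flags are serviced (for example via
 * ISPCTL_UPDATE_PARAMS).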
2735 */ 2736 dptr = &sdp->isp_devparam[tgt].goal_flags; 2737 2738 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2739 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2740 *dptr |= DPARM_DISC; 2741 else 2742 *dptr &= ~DPARM_DISC; 2743 } 2744 2745 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2746 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2747 *dptr |= DPARM_TQING; 2748 else 2749 *dptr &= ~DPARM_TQING; 2750 } 2751 2752 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2753 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2754 *dptr |= DPARM_WIDE; 2755 else 2756 *dptr &= ~DPARM_WIDE; 2757 } 2758 2759 /* 2760 * XXX: FIX ME 2761 */ 2762 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2763 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2764 (spi->sync_period && spi->sync_offset)) { 2765 *dptr |= DPARM_SYNC; 2766 /* 2767 * XXX: CHECK FOR LEGALITY 2768 */ 2769 sdp->isp_devparam[tgt].goal_period = 2770 spi->sync_period; 2771 sdp->isp_devparam[tgt].goal_offset = 2772 spi->sync_offset; 2773 } else { 2774 *dptr &= ~DPARM_SYNC; 2775 } 2776 #endif 2777 isp_prt(isp, ISP_LOGDEBUG0, 2778 "SET (%d.%d.%d) to flags %x off %x per %x", 2779 bus, tgt, cts->ccb_h.target_lun, 2780 sdp->isp_devparam[tgt].goal_flags, 2781 sdp->isp_devparam[tgt].goal_offset, 2782 sdp->isp_devparam[tgt].goal_period); 2783 sdp->isp_devparam[tgt].dev_update = 1; 2784 isp->isp_update |= (1 << bus); 2785 } 2786 ccb->ccb_h.status = CAM_REQ_CMP; 2787 xpt_done(ccb); 2788 break; 2789 case XPT_GET_TRAN_SETTINGS: 2790 cts = &ccb->cts; 2791 tgt = cts->ccb_h.target_id; 2792 if (IS_FC(isp)) { 2793 #ifndef CAM_NEW_TRAN_CODE 2794 /* 2795 * a lot of normal SCSI things don't make sense. 2796 */ 2797 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2798 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2799 /* 2800 * How do you measure the width of a high 2801 * speed serial bus? Well, in bytes. 2802 * 2803 * Offset and period make no sense, though, so we set 2804 * (above) a 'base' transfer speed to be gigabit. 
2805 */ 2806 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2807 #else 2808 fcparam *fcp = isp->isp_param; 2809 struct ccb_trans_settings_scsi *scsi = 2810 &cts->proto_specific.scsi; 2811 struct ccb_trans_settings_fc *fc = 2812 &cts->xport_specific.fc; 2813 2814 cts->protocol = PROTO_SCSI; 2815 cts->protocol_version = SCSI_REV_2; 2816 cts->transport = XPORT_FC; 2817 cts->transport_version = 0; 2818 2819 scsi->valid = CTS_SCSI_VALID_TQ; 2820 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 2821 fc->valid = CTS_FC_VALID_SPEED; 2822 if (fcp->isp_gbspeed == 2) { 2823 fc->bitrate = 200000; 2824 } else { 2825 fc->bitrate = 100000; 2826 } 2827 if (tgt > 0 && tgt < MAX_FC_TARG) { 2828 fcportdb_t *lp = &fcp->portdb[tgt]; 2829 fc->wwnn = lp->node_wwn; 2830 fc->wwpn = lp->port_wwn; 2831 fc->port = lp->portid; 2832 fc->valid |= CTS_FC_VALID_WWNN | 2833 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2834 } 2835 #endif 2836 } else { 2837 #ifdef CAM_NEW_TRAN_CODE 2838 struct ccb_trans_settings_scsi *scsi = 2839 &cts->proto_specific.scsi; 2840 struct ccb_trans_settings_spi *spi = 2841 &cts->xport_specific.spi; 2842 #endif 2843 sdparam *sdp = isp->isp_param; 2844 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2845 uint16_t dval, pval, oval; 2846 2847 sdp += bus; 2848 2849 if (IS_CURRENT_SETTINGS(cts)) { 2850 sdp->isp_devparam[tgt].dev_refresh = 1; 2851 isp->isp_update |= (1 << bus); 2852 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2853 NULL); 2854 dval = sdp->isp_devparam[tgt].actv_flags; 2855 oval = sdp->isp_devparam[tgt].actv_offset; 2856 pval = sdp->isp_devparam[tgt].actv_period; 2857 } else { 2858 dval = sdp->isp_devparam[tgt].nvrm_flags; 2859 oval = sdp->isp_devparam[tgt].nvrm_offset; 2860 pval = sdp->isp_devparam[tgt].nvrm_period; 2861 } 2862 2863 #ifndef CAM_NEW_TRAN_CODE 2864 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2865 2866 if (dval & DPARM_DISC) { 2867 cts->flags |= CCB_TRANS_DISC_ENB; 2868 } 2869 if (dval & DPARM_TQING) { 2870 cts->flags |= CCB_TRANS_TAG_ENB; 2871 } 2872 if (dval & DPARM_WIDE) { 2873 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2874 } else { 2875 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2876 } 2877 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2878 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2879 2880 if ((dval & DPARM_SYNC) && oval != 0) { 2881 cts->sync_period = pval; 2882 cts->sync_offset = oval; 2883 cts->valid |= 2884 CCB_TRANS_SYNC_RATE_VALID | 2885 CCB_TRANS_SYNC_OFFSET_VALID; 2886 } 2887 #else 2888 cts->protocol = PROTO_SCSI; 2889 cts->protocol_version = SCSI_REV_2; 2890 cts->transport = XPORT_SPI; 2891 cts->transport_version = 2; 2892 2893 spi->valid = 0; 2894 scsi->valid = 0; 2895 spi->flags = 0; 2896 scsi->flags = 0; 2897 if (dval & DPARM_DISC) { 2898 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2899 } 2900 if ((dval & DPARM_SYNC) && oval && pval) { 2901 spi->sync_offset = oval; 2902 spi->sync_period = pval; 2903 } else { 2904 spi->sync_offset = 0; 2905 spi->sync_period = 0; 2906 } 2907 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2908 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2909 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2910 if (dval & DPARM_WIDE) { 2911 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2912 } else { 2913 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2914 } 2915 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2916 scsi->valid = CTS_SCSI_VALID_TQ; 2917 if (dval & DPARM_TQING) { 2918 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2919 } 2920 spi->valid |= CTS_SPI_VALID_DISC; 2921 } 2922 #endif 2923 isp_prt(isp, ISP_LOGDEBUG0, 2924 "GET %s (%d.%d.%d) to flags %x off %x per %x", 2925 
IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 2926 bus, tgt, cts->ccb_h.target_lun, dval, oval, pval); 2927 } 2928 ccb->ccb_h.status = CAM_REQ_CMP; 2929 xpt_done(ccb); 2930 break; 2931 2932 case XPT_CALC_GEOMETRY: 2933 #if __FreeBSD_version < 500000 2934 { 2935 struct ccb_calc_geometry *ccg; 2936 u_int32_t secs_per_cylinder; 2937 u_int32_t size_mb; 2938 2939 ccg = &ccb->ccg; 2940 if (ccg->block_size == 0) { 2941 ccb->ccb_h.status = CAM_REQ_INVALID; 2942 xpt_done(ccb); 2943 break; 2944 } 2945 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2946 if (size_mb > 1024) { 2947 ccg->heads = 255; 2948 ccg->secs_per_track = 63; 2949 } else { 2950 ccg->heads = 64; 2951 ccg->secs_per_track = 32; 2952 } 2953 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2954 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2955 ccb->ccb_h.status = CAM_REQ_CMP; 2956 xpt_done(ccb); 2957 break; 2958 } 2959 #else 2960 { 2961 cam_calc_geometry(&ccb->ccg, /*extended*/1); 2962 xpt_done(ccb); 2963 break; 2964 } 2965 #endif 2966 case XPT_RESET_BUS: /* Reset the specified bus */ 2967 bus = cam_sim_bus(sim); 2968 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2969 if (error) 2970 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2971 else { 2972 if (bootverbose) { 2973 xpt_print(ccb->ccb_h.path, "reset bus\n"); 2974 } 2975 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2976 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2977 else if (isp->isp_path != NULL) 2978 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2979 ccb->ccb_h.status = CAM_REQ_CMP; 2980 } 2981 xpt_done(ccb); 2982 break; 2983 2984 case XPT_TERM_IO: /* Terminate the I/O process */ 2985 ccb->ccb_h.status = CAM_REQ_INVALID; 2986 xpt_done(ccb); 2987 break; 2988 2989 case XPT_PATH_INQ: /* Path routing inquiry */ 2990 { 2991 struct ccb_pathinq *cpi = &ccb->cpi; 2992 2993 cpi->version_num = 1; 2994 #ifdef ISP_TARGET_MODE 2995 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2996 #else 2997 cpi->target_sprt = 0; 2998 #endif 2999 cpi->hba_eng_cnt = 0; 3000 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 3001 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 3002 cpi->bus_id = cam_sim_bus(sim); 3003 if (IS_FC(isp)) { 3004 cpi->hba_misc = PIM_NOBUSRESET; 3005 /* 3006 * Because our loop ID can shift from time to time, 3007 * make our initiator ID out of range of our bus. 3008 */ 3009 cpi->initiator_id = cpi->max_target + 1; 3010 3011 /* 3012 * Set base transfer capabilities for Fibre Channel. 3013 * Technically not correct because we don't know 3014 * what media we're running on top of- but we'll 3015 * look good if we always say 100MB/s. 
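 * (base_transfer_speed is expressed in KB/s, so 100000 is roughly
 * 100MB/s for 1Gb Fibre Channel and 200000 roughly 200MB/s for 2Gb.)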
3016 */ 3017 if (FCPARAM(isp)->isp_gbspeed == 2) 3018 cpi->base_transfer_speed = 200000; 3019 else 3020 cpi->base_transfer_speed = 100000; 3021 cpi->hba_inquiry = PI_TAG_ABLE; 3022 #ifdef CAM_NEW_TRAN_CODE 3023 cpi->transport = XPORT_FC; 3024 cpi->transport_version = 0; 3025 #endif 3026 } else { 3027 sdparam *sdp = isp->isp_param; 3028 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 3029 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3030 cpi->hba_misc = 0; 3031 cpi->initiator_id = sdp->isp_initiator_id; 3032 cpi->base_transfer_speed = 3300; 3033 #ifdef CAM_NEW_TRAN_CODE 3034 cpi->transport = XPORT_SPI; 3035 cpi->transport_version = 2; 3036 #endif 3037 } 3038 #ifdef CAM_NEW_TRAN_CODE 3039 cpi->protocol = PROTO_SCSI; 3040 cpi->protocol_version = SCSI_REV_2; 3041 #endif 3042 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3043 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 3044 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3045 cpi->unit_number = cam_sim_unit(sim); 3046 cpi->ccb_h.status = CAM_REQ_CMP; 3047 xpt_done(ccb); 3048 break; 3049 } 3050 default: 3051 ccb->ccb_h.status = CAM_REQ_INVALID; 3052 xpt_done(ccb); 3053 break; 3054 } 3055 } 3056 3057 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 3058 3059 void 3060 isp_done(struct ccb_scsiio *sccb) 3061 { 3062 ispsoftc_t *isp = XS_ISP(sccb); 3063 3064 if (XS_NOERR(sccb)) 3065 XS_SETERR(sccb, CAM_REQ_CMP); 3066 3067 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 3068 (sccb->scsi_status != SCSI_STATUS_OK)) { 3069 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 3070 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 3071 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 3072 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 3073 } else { 3074 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 3075 } 3076 } 3077 3078 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3079 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3080 isp_prt(isp, ISP_LOGDEBUG0, 3081 "target %d lun %d CAM status 0x%x SCSI status 0x%x", 3082 XS_TGT(sccb), XS_LUN(sccb), sccb->ccb_h.status, 3083 sccb->scsi_status); 3084 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 3085 sccb->ccb_h.status |= CAM_DEV_QFRZN; 3086 xpt_freeze_devq(sccb->ccb_h.path, 1); 3087 } 3088 } 3089 3090 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 3091 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3092 xpt_print(sccb->ccb_h.path, 3093 "cam completion status 0x%x\n", sccb->ccb_h.status); 3094 } 3095 3096 XS_CMD_S_DONE(sccb); 3097 if (XS_CMD_WDOG_P(sccb) == 0) { 3098 untimeout(isp_watchdog, sccb, sccb->ccb_h.timeout_ch); 3099 if (XS_CMD_GRACE_P(sccb)) { 3100 isp_prt(isp, ISP_LOGDEBUG2, 3101 "finished command on borrowed time"); 3102 } 3103 XS_CMD_S_CLEAR(sccb); 3104 isp_free_pcmd(isp, (union ccb *) sccb); 3105 xpt_done((union ccb *) sccb); 3106 } 3107 } 3108 3109 int 3110 isp_async(ispsoftc_t *isp, ispasync_t cmd, void *arg) 3111 { 3112 int bus, rv = 0; 3113 static const char prom[] = 3114 "PortID 0x%06x handle 0x%x role %s %s\n" 3115 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3116 static const char prom2[] = 3117 "PortID 0x%06x handle 0x%x role %s %s tgt %u\n" 3118 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3119 char *msg = NULL; 3120 target_id_t tgt; 3121 fcportdb_t *lp; 3122 struct cam_path *tmppath; 3123 3124 switch (cmd) { 3125 case ISPASYNC_NEW_TGT_PARAMS: 3126 { 3127 #ifdef CAM_NEW_TRAN_CODE 3128 struct ccb_trans_settings_scsi *scsi; 3129 struct ccb_trans_settings_spi *spi; 3130 #endif 3131 int flags, tgt; 3132 sdparam *sdp = isp->isp_param; 3133 struct ccb_trans_settings 
cts;
3134
3135 memset(&cts, 0, sizeof (struct ccb_trans_settings));
3136
3137 tgt = *((int *)arg);
3138 bus = (tgt >> 16) & 0xffff;
3139 tgt &= 0xffff;
3140 sdp += bus;
3141 if (xpt_create_path(&tmppath, NULL,
3142 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
3143 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3144 isp_prt(isp, ISP_LOGWARN,
3145 "isp_async cannot make temp path for %d.%d",
3146 tgt, bus);
3147 rv = -1;
3148 break;
3149 }
3150 flags = sdp->isp_devparam[tgt].actv_flags;
3151 #ifdef CAM_NEW_TRAN_CODE
3152 cts.type = CTS_TYPE_CURRENT_SETTINGS;
3153 cts.protocol = PROTO_SCSI;
3154 cts.transport = XPORT_SPI;
3155
3156 scsi = &cts.proto_specific.scsi;
3157 spi = &cts.xport_specific.spi;
3158
3159 if (flags & DPARM_TQING) {
3160 scsi->valid |= CTS_SCSI_VALID_TQ;
3161 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3162 }
3163
3164 if (flags & DPARM_DISC) {
3165 spi->valid |= CTS_SPI_VALID_DISC;
3166 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3167 }
3168 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3169 if (flags & DPARM_WIDE) {
3170 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3171 } else {
3172 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3173 }
3174 if (flags & DPARM_SYNC) {
3175 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3176 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3177 spi->sync_period = sdp->isp_devparam[tgt].actv_period;
3178 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
3179 }
3180 #else
3181 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
3182 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3183 if (flags & DPARM_DISC) {
3184 cts.flags |= CCB_TRANS_DISC_ENB;
3185 }
3186 if (flags & DPARM_TQING) {
3187 cts.flags |= CCB_TRANS_TAG_ENB;
3188 }
3189 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
3190 cts.bus_width = (flags & DPARM_WIDE)?
3191 MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
3192 cts.sync_period = sdp->isp_devparam[tgt].actv_period;
3193 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
3194 if (flags & DPARM_SYNC) {
3195 cts.valid |=
3196 CCB_TRANS_SYNC_RATE_VALID |
3197 CCB_TRANS_SYNC_OFFSET_VALID;
3198 }
3199 #endif
3200 isp_prt(isp, ISP_LOGDEBUG2,
3201 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
3202 bus, tgt, sdp->isp_devparam[tgt].actv_period,
3203 sdp->isp_devparam[tgt].actv_offset, flags);
3204 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
3205 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
3206 xpt_free_path(tmppath);
3207 break;
3208 }
3209 case ISPASYNC_BUS_RESET:
3210 bus = *((int *)arg);
3211 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
3212 bus);
3213 if (bus > 0 && isp->isp_path2) {
3214 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
3215 } else if (isp->isp_path) {
3216 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
3217 }
3218 break;
3219 case ISPASYNC_LIP:
3220 if (msg == NULL) {
3221 msg = "LIP Received";
3222 }
3223 /* FALLTHROUGH */
3224 case ISPASYNC_LOOP_RESET:
3225 if (msg == NULL) {
3226 msg = "LOOP Reset";
3227 }
3228 /* FALLTHROUGH */
3229 case ISPASYNC_LOOP_DOWN:
3230 if (msg == NULL) {
3231 msg = "LOOP Down";
3232 }
3233 if (isp->isp_path) {
3234 isp_freeze_loopdown(isp, msg);
3235 }
3236 if (isp->isp_osinfo.ldt_running == 0) {
3237 isp->isp_osinfo.ldt_running = 1;
3238 callout_reset(&isp->isp_osinfo.ldt,
3239 isp->isp_osinfo.loop_down_limit * hz, isp_ldt, isp);
3240 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
3241 "starting Loop Down Timer");
3242 }
3243 isp_prt(isp, ISP_LOGINFO, msg);
3244 break;
3245 case ISPASYNC_LOOP_UP:
3246 /*
3247 * Now we just note that Loop has come up.
We don't 3248 * actually do anything because we're waiting for a 3249 * Change Notify before activating the FC cleanup 3250 * thread to look at the state of the loop again. 3251 */ 3252 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 3253 break; 3254 case ISPASYNC_DEV_ARRIVED: 3255 lp = arg; 3256 lp->reserved = 0; 3257 if ((isp->isp_role & ISP_ROLE_INITIATOR) && 3258 (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) { 3259 int dbidx = lp - FCPARAM(isp)->portdb; 3260 int i; 3261 3262 for (i = 0; i < MAX_FC_TARG; i++) { 3263 if (i >= FL_ID && i <= SNS_ID) { 3264 continue; 3265 } 3266 if (FCPARAM(isp)->isp_ini_map[i] == 0) { 3267 break; 3268 } 3269 } 3270 if (i < MAX_FC_TARG) { 3271 FCPARAM(isp)->isp_ini_map[i] = dbidx + 1; 3272 lp->ini_map_idx = i + 1; 3273 } else { 3274 isp_prt(isp, ISP_LOGWARN, "out of target ids"); 3275 isp_dump_portdb(isp); 3276 } 3277 } 3278 if (lp->ini_map_idx) { 3279 tgt = lp->ini_map_idx - 1; 3280 isp_prt(isp, ISP_LOGCONFIG, prom2, 3281 lp->portid, lp->handle, 3282 roles[lp->roles], "arrived at", tgt, 3283 (uint32_t) (lp->node_wwn >> 32), 3284 (uint32_t) lp->node_wwn, 3285 (uint32_t) (lp->port_wwn >> 32), 3286 (uint32_t) lp->port_wwn); 3287 isp_make_here(isp, tgt); 3288 } else { 3289 isp_prt(isp, ISP_LOGCONFIG, prom, 3290 lp->portid, lp->handle, 3291 roles[lp->roles], "arrived", 3292 (uint32_t) (lp->node_wwn >> 32), 3293 (uint32_t) lp->node_wwn, 3294 (uint32_t) (lp->port_wwn >> 32), 3295 (uint32_t) lp->port_wwn); 3296 } 3297 break; 3298 case ISPASYNC_DEV_CHANGED: 3299 lp = arg; 3300 if (isp_change_is_bad) { 3301 lp->state = FC_PORTDB_STATE_NIL; 3302 if (lp->ini_map_idx) { 3303 tgt = lp->ini_map_idx - 1; 3304 FCPARAM(isp)->isp_ini_map[tgt] = 0; 3305 lp->ini_map_idx = 0; 3306 isp_prt(isp, ISP_LOGCONFIG, prom3, 3307 lp->portid, tgt, "change is bad"); 3308 isp_make_gone(isp, tgt); 3309 } else { 3310 isp_prt(isp, ISP_LOGCONFIG, prom, 3311 lp->portid, lp->handle, 3312 roles[lp->roles], 3313 "changed and departed", 3314 (uint32_t) (lp->node_wwn >> 32), 3315 (uint32_t) lp->node_wwn, 3316 (uint32_t) (lp->port_wwn >> 32), 3317 (uint32_t) lp->port_wwn); 3318 } 3319 } else { 3320 lp->portid = lp->new_portid; 3321 lp->roles = lp->new_roles; 3322 if (lp->ini_map_idx) { 3323 int t = lp->ini_map_idx - 1; 3324 FCPARAM(isp)->isp_ini_map[t] = 3325 (lp - FCPARAM(isp)->portdb) + 1; 3326 tgt = lp->ini_map_idx - 1; 3327 isp_prt(isp, ISP_LOGCONFIG, prom2, 3328 lp->portid, lp->handle, 3329 roles[lp->roles], "changed at", tgt, 3330 (uint32_t) (lp->node_wwn >> 32), 3331 (uint32_t) lp->node_wwn, 3332 (uint32_t) (lp->port_wwn >> 32), 3333 (uint32_t) lp->port_wwn); 3334 } else { 3335 isp_prt(isp, ISP_LOGCONFIG, prom, 3336 lp->portid, lp->handle, 3337 roles[lp->roles], "changed", 3338 (uint32_t) (lp->node_wwn >> 32), 3339 (uint32_t) lp->node_wwn, 3340 (uint32_t) (lp->port_wwn >> 32), 3341 (uint32_t) lp->port_wwn); 3342 } 3343 } 3344 break; 3345 case ISPASYNC_DEV_STAYED: 3346 lp = arg; 3347 if (lp->ini_map_idx) { 3348 tgt = lp->ini_map_idx - 1; 3349 isp_prt(isp, ISP_LOGCONFIG, prom2, 3350 lp->portid, lp->handle, 3351 roles[lp->roles], "stayed at", tgt, 3352 (uint32_t) (lp->node_wwn >> 32), 3353 (uint32_t) lp->node_wwn, 3354 (uint32_t) (lp->port_wwn >> 32), 3355 (uint32_t) lp->port_wwn); 3356 } else { 3357 isp_prt(isp, ISP_LOGCONFIG, prom, 3358 lp->portid, lp->handle, 3359 roles[lp->roles], "stayed", 3360 (uint32_t) (lp->node_wwn >> 32), 3361 (uint32_t) lp->node_wwn, 3362 (uint32_t) (lp->port_wwn >> 32), 3363 (uint32_t) lp->port_wwn); 3364 } 3365 break; 3366 case ISPASYNC_DEV_GONE: 3367 lp = arg; 3368 /* 3369 
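 * A departed device that the OS still has a target id for is not
 * reported lost right away; it is parked as a zombie and handed to
 * the Gone Device Timer instead.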
* If this has a virtual target and we haven't marked it 3370 * that we're going to have isp_gdt tell the OS it's gone, 3371 * set the isp_gdt timer running on it. 3372 * 3373 * If it isn't marked that isp_gdt is going to get rid of it, 3374 * announce that it's gone. 3375 */ 3376 if (lp->ini_map_idx && lp->reserved == 0) { 3377 lp->reserved = 1; 3378 lp->new_reserved = isp->isp_osinfo.gone_device_time; 3379 lp->state = FC_PORTDB_STATE_ZOMBIE; 3380 if (isp->isp_osinfo.gdt_running == 0) { 3381 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3382 "starting Gone Device Timer"); 3383 isp->isp_osinfo.gdt_running = 1; 3384 callout_reset(&isp->isp_osinfo.gdt, hz, 3385 isp_gdt, isp); 3386 } 3387 tgt = lp->ini_map_idx - 1; 3388 isp_prt(isp, ISP_LOGCONFIG, prom2, 3389 lp->portid, lp->handle, 3390 roles[lp->roles], "gone zombie at", tgt, 3391 (uint32_t) (lp->node_wwn >> 32), 3392 (uint32_t) lp->node_wwn, 3393 (uint32_t) (lp->port_wwn >> 32), 3394 (uint32_t) lp->port_wwn); 3395 } else if (lp->reserved == 0) { 3396 isp_prt(isp, ISP_LOGCONFIG, prom, 3397 lp->portid, lp->handle, 3398 roles[lp->roles], "departed", 3399 (uint32_t) (lp->node_wwn >> 32), 3400 (uint32_t) lp->node_wwn, 3401 (uint32_t) (lp->port_wwn >> 32), 3402 (uint32_t) lp->port_wwn); 3403 } 3404 break; 3405 case ISPASYNC_CHANGE_NOTIFY: 3406 { 3407 char *msg; 3408 if (arg == ISPASYNC_CHANGE_PDB) { 3409 msg = "Port Database Changed"; 3410 } else if (arg == ISPASYNC_CHANGE_SNS) { 3411 msg = "Name Server Database Changed"; 3412 } else { 3413 msg = "Other Change Notify"; 3414 } 3415 /* 3416 * If the loop down timer is running, cancel it. 3417 */ 3418 if (isp->isp_osinfo.ldt_running) { 3419 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3420 "Stopping Loop Down Timer"); 3421 isp->isp_osinfo.ldt_running = 0; 3422 callout_stop(&isp->isp_osinfo.ldt); 3423 } 3424 isp_prt(isp, ISP_LOGINFO, msg); 3425 isp_freeze_loopdown(isp, msg); 3426 wakeup(ISP_KT_WCHAN(isp)); 3427 break; 3428 } 3429 #ifdef ISP_TARGET_MODE 3430 case ISPASYNC_TARGET_NOTIFY: 3431 { 3432 tmd_notify_t *nt = arg; 3433 isp_prt(isp, ISP_LOGALL, 3434 "target notify code 0x%x", nt->nt_ncode); 3435 break; 3436 } 3437 case ISPASYNC_TARGET_ACTION: 3438 switch (((isphdr_t *)arg)->rqs_entry_type) { 3439 default: 3440 isp_prt(isp, ISP_LOGWARN, 3441 "event 0x%x for unhandled target action", 3442 ((isphdr_t *)arg)->rqs_entry_type); 3443 break; 3444 case RQSTYPE_NOTIFY: 3445 if (IS_SCSI(isp)) { 3446 rv = isp_handle_platform_notify_scsi(isp, 3447 (in_entry_t *) arg); 3448 } else { 3449 rv = isp_handle_platform_notify_fc(isp, 3450 (in_fcentry_t *) arg); 3451 } 3452 break; 3453 case RQSTYPE_ATIO: 3454 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 3455 break; 3456 case RQSTYPE_ATIO2: 3457 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3458 break; 3459 case RQSTYPE_CTIO3: 3460 case RQSTYPE_CTIO2: 3461 case RQSTYPE_CTIO: 3462 rv = isp_handle_platform_ctio(isp, arg); 3463 break; 3464 case RQSTYPE_ENABLE_LUN: 3465 case RQSTYPE_MODIFY_LUN: 3466 isp_ledone(isp, (lun_entry_t *) arg); 3467 break; 3468 } 3469 break; 3470 #endif 3471 case ISPASYNC_FW_CRASH: 3472 { 3473 uint16_t mbox1, mbox6; 3474 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3475 if (IS_DUALBUS(isp)) { 3476 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3477 } else { 3478 mbox6 = 0; 3479 } 3480 isp_prt(isp, ISP_LOGERR, 3481 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3482 mbox6, mbox1); 3483 #ifdef ISP_FW_CRASH_DUMP 3484 mbox1 = isp->isp_osinfo.mbox_sleep_ok; 3485 isp->isp_osinfo.mbox_sleep_ok = 0; 3486 if (IS_FC(isp)) { 3487 
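			/*
			 * Knock the FC state machine back to its initial
			 * state and freeze the loop before taking the
			 * firmware dump and reinitializing.
			 */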
FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3488 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3489 isp_freeze_loopdown(isp, "f/w crash"); 3490 isp_fw_dump(isp); 3491 } 3492 isp_reinit(isp); 3493 isp->isp_osinfo.mbox_sleep_ok = mbox1; 3494 #else 3495 mbox1 = isp->isp_osinfo.mbox_sleep_ok; 3496 isp->isp_osinfo.mbox_sleep_ok = 0; 3497 isp_reinit(isp); 3498 isp->isp_osinfo.mbox_sleep_ok = mbox1; 3499 #endif 3500 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3501 break; 3502 } 3503 case ISPASYNC_UNHANDLED_RESPONSE: 3504 break; 3505 default: 3506 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3507 break; 3508 } 3509 return (rv); 3510 } 3511 3512 3513 /* 3514 * Locks are held before coming here. 3515 */ 3516 void 3517 isp_uninit(ispsoftc_t *isp) 3518 { 3519 if (IS_24XX(isp)) { 3520 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); 3521 } else { 3522 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 3523 } 3524 ISP_DISABLE_INTS(isp); 3525 } 3526 3527 void 3528 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 3529 { 3530 va_list ap; 3531 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3532 return; 3533 } 3534 printf("%s: ", device_get_nameunit(isp->isp_dev)); 3535 va_start(ap, fmt); 3536 vprintf(fmt, ap); 3537 va_end(ap); 3538 printf("\n"); 3539 } 3540 3541 uint64_t 3542 isp_nanotime_sub(struct timespec *b, struct timespec *a) 3543 { 3544 uint64_t elapsed; 3545 struct timespec x = *b; 3546 timespecsub(&x, a); 3547 elapsed = GET_NANOSEC(&x); 3548 if (elapsed == 0) 3549 elapsed++; 3550 return (elapsed); 3551 } 3552 3553 int 3554 isp_mbox_acquire(ispsoftc_t *isp) 3555 { 3556 if (isp->isp_osinfo.mboxbsy) { 3557 return (1); 3558 } else { 3559 isp->isp_osinfo.mboxcmd_done = 0; 3560 isp->isp_osinfo.mboxbsy = 1; 3561 return (0); 3562 } 3563 } 3564 3565 void 3566 isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) 3567 { 3568 unsigned int usecs = mbp->timeout; 3569 unsigned int max, olim, ilim; 3570 3571 if (usecs == 0) { 3572 usecs = MBCMD_DEFAULT_TIMEOUT; 3573 } 3574 max = isp->isp_mbxwrk0 + 1; 3575 3576 if (isp->isp_osinfo.mbox_sleep_ok) { 3577 unsigned int ms = (usecs + 999) / 1000; 3578 3579 isp->isp_osinfo.mbox_sleep_ok = 0; 3580 isp->isp_osinfo.mbox_sleeping = 1; 3581 for (olim = 0; olim < max; olim++) { 3582 #if __FreeBSD_version < 700037 3583 tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", 3584 isp_mstohz(ms)); 3585 #else 3586 msleep(&isp->isp_mbxworkp, &isp->isp_osinfo.lock, 3587 PRIBIO, "ispmbx_sleep", isp_mstohz(ms)); 3588 #endif 3589 if (isp->isp_osinfo.mboxcmd_done) { 3590 break; 3591 } 3592 } 3593 isp->isp_osinfo.mbox_sleep_ok = 1; 3594 isp->isp_osinfo.mbox_sleeping = 0; 3595 } else { 3596 for (olim = 0; olim < max; olim++) { 3597 for (ilim = 0; ilim < usecs; ilim += 100) { 3598 uint32_t isr; 3599 uint16_t sema, mbox; 3600 if (isp->isp_osinfo.mboxcmd_done) { 3601 break; 3602 } 3603 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 3604 isp_intr(isp, isr, sema, mbox); 3605 if (isp->isp_osinfo.mboxcmd_done) { 3606 break; 3607 } 3608 } 3609 USEC_DELAY(100); 3610 } 3611 if (isp->isp_osinfo.mboxcmd_done) { 3612 break; 3613 } 3614 } 3615 } 3616 if (isp->isp_osinfo.mboxcmd_done == 0) { 3617 isp_prt(isp, ISP_LOGWARN, 3618 "%s Mailbox Command (0x%x) Timeout (%uus)", 3619 isp->isp_osinfo.mbox_sleep_ok? 
"Interrupting" : "Polled", 3620 isp->isp_lastmbxcmd, usecs); 3621 mbp->param[0] = MBOX_TIMEOUT; 3622 isp->isp_osinfo.mboxcmd_done = 1; 3623 } 3624 } 3625 3626 void 3627 isp_mbox_notify_done(ispsoftc_t *isp) 3628 { 3629 if (isp->isp_osinfo.mbox_sleeping) { 3630 wakeup(&isp->isp_mbxworkp); 3631 } 3632 isp->isp_osinfo.mboxcmd_done = 1; 3633 } 3634 3635 void 3636 isp_mbox_release(ispsoftc_t *isp) 3637 { 3638 isp->isp_osinfo.mboxbsy = 0; 3639 } 3640 3641 int 3642 isp_mstohz(int ms) 3643 { 3644 int hz; 3645 struct timeval t; 3646 t.tv_sec = ms / 1000; 3647 t.tv_usec = (ms % 1000) * 1000; 3648 hz = tvtohz(&t); 3649 if (hz < 0) { 3650 hz = 0x7fffffff; 3651 } 3652 if (hz == 0) { 3653 hz = 1; 3654 } 3655 return (hz); 3656 } 3657 3658 void 3659 isp_platform_intr(void *arg) 3660 { 3661 ispsoftc_t *isp = arg; 3662 uint32_t isr; 3663 uint16_t sema, mbox; 3664 3665 ISP_LOCK(isp); 3666 isp->isp_intcnt++; 3667 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 3668 isp->isp_intbogus++; 3669 } else { 3670 isp_intr(isp, isr, sema, mbox); 3671 } 3672 ISP_UNLOCK(isp); 3673 } 3674 3675 void 3676 isp_common_dmateardown(ispsoftc_t *isp, struct ccb_scsiio *csio, uint32_t hdl) 3677 { 3678 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 3679 bus_dmamap_sync(isp->isp_osinfo.dmat, 3680 PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTREAD); 3681 } else { 3682 bus_dmamap_sync(isp->isp_osinfo.dmat, 3683 PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTWRITE); 3684 } 3685 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); 3686 } 3687