/* $FreeBSD$ */
/*
 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/isp/isp_freebsd.h>
#include <sys/unistd.h>
#include <sys/kthread.h>
#include <machine/stdarg.h>	/* for use by isp_prt below */
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <dev/isp/isp_ioctl.h>


static d_ioctl_t ispioctl;
static void isp_intr_enable(void *);
static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
static void isp_poll(struct cam_sim *);
#if 0
static void isp_relsim(void *);
#endif
static timeout_t isp_watchdog;
static void isp_kthread(void *);
static void isp_action(struct cam_sim *, union ccb *);


#define	ISP_CDEV_MAJOR	248
static struct cdevsw isp_cdevsw = {
	/* open */	nullopen,
	/* close */	nullclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	ispioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"isp",
	/* maj */	ISP_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	D_TAPE,
};

static struct ispsoftc *isplist = NULL;

void
isp_attach(struct ispsoftc *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */

	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	ISPLOCK_2_CAMLOCK(isp);
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}
	CAMLOCK_2_ISPLOCK(isp);

	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
	isp->isp_osinfo.ehook.ich_arg = isp;
	ISPLOCK_2_CAMLOCK(isp);
	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
		cam_sim_free(sim, TRUE);
		CAMLOCK_2_ISPLOCK(isp);
		isp_prt(isp, ISP_LOGERR,
		    "could not establish interrupt enable hook");
		return;
	}

	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	CAMLOCK_2_ISPLOCK(isp);
	isp->isp_sim = sim;
	isp->isp_path = path;
	/*
	 * Create a kernel thread for fibre channel instances. We
	 * don't have dual channel FC cards.
	 */
	if (IS_FC(isp)) {
		ISPLOCK_2_CAMLOCK(isp);
		/* XXX: LOCK VIOLATION */
		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
		    RFHIGHPID, "%s: fc_thrd",
		    device_get_nameunit(isp->isp_dev))) {
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			isp_prt(isp, ISP_LOGERR, "could not create kthread");
			return;
		}
		CAMLOCK_2_ISPLOCK(isp);
	}


	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_DUALBUS(isp)) {
		ISPLOCK_2_CAMLOCK(isp);
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			return;
		}

		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		CAMLOCK_2_ISPLOCK(isp);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}

	/*
	 * Create device nodes
	 */
	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));

	if (isp->isp_role != ISP_ROLE_NONE) {
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	if (isplist == NULL) {
		isplist = isp;
	} else {
		struct ispsoftc *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}

}

static int
ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct ispsoftc *isp;
	int retval = ENOTTY;

	isp = isplist;
	while (isp) {
		if (minor(dev) == device_get_unit(isp->isp_dev)) {
			break;
		}
		isp = isp->isp_osinfo.next;
	}
	if (isp == NULL)
		return (ENXIO);

	switch (cmd) {
	case ISP_SDBLEV:
	{
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_FC_RESCAN:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_LIP:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		struct lportdb *lp;

		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		ISP_LOCK(isp);
		lp = &FCPARAM(isp)->portdb[ifc->loopid];
		if (lp->valid) {
			ifc->loopid = lp->loopid;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		ISP_UNLOCK(isp);
		break;
	}
	default:
		break;
	}
	return (retval);
}

static void
isp_intr_enable(void *arg)
{
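	/*
	 * This is the config_intrhook callback established in isp_attach:
	 * it runs once interrupts can safely be taken during boot, and it
	 * disestablishes itself below so that the boot can continue.
	 */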
	struct ispsoftc *isp = arg;
	if (isp->isp_role != ISP_ROLE_NONE) {
		ENABLE_INTS(isp);
		isp->isp_osinfo.intsok = 1;
	}
	/* Release our hook so that the boot can continue. */
	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
}

/*
 * Put the target mode functions here, because some are inlines
 */

#ifdef ISP_TARGET_MODE

static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
static __inline int are_any_luns_enabled(struct ispsoftc *, int);
static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
static __inline int isp_psema_sig_rqe(struct ispsoftc *);
static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
static __inline void isp_vsema_rqe(struct ispsoftc *);
static cam_status
create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
static void destroy_lun_state(struct ispsoftc *, tstate_t *);
static void isp_en_lun(struct ispsoftc *, union ccb *);
static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
static timeout_t isp_refire_putback_atio;
static void isp_complete_ctio(union ccb *);
static void isp_target_putback_atio(union ccb *);
static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
static int isp_handle_platform_ctio(struct ispsoftc *, void *);

static __inline int
is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr;
	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
	if (tptr == NULL) {
		ISP_UNLOCK(isp);
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
			ISP_UNLOCK(isp);
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	return (0);
}

static __inline int
are_any_luns_enabled(struct ispsoftc *isp, int port)
{
	int lo, hi;
	if (IS_DUALBUS(isp)) {
		lo = (port * (LUN_HASH_SIZE >> 1));
		hi = lo + (LUN_HASH_SIZE >> 1);
	} else {
		lo = 0;
		hi = LUN_HASH_SIZE;
	}
	for (lo = 0; lo < hi; lo++) {
		if (isp->isp_osinfo.lun_hash[lo]) {
			return (1);
		}
	}
	return (0);
}

static __inline tstate_t *
get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr;

	if (lun == CAM_LUN_WILDCARD) {
		tptr = &isp->isp_osinfo.tsdflt[bus];
		tptr->hold++;
		return (tptr);
	} else {
		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
	}
	if (tptr == NULL) {
		return (NULL);
	}

	do {
		if (tptr->lun == lun && tptr->bus == bus) {
			tptr->hold++;
			return (tptr);
		}
	} while ((tptr = tptr->next) != NULL);
	return (tptr);
}

static __inline void
rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
{
	if (tptr->hold)
		tptr->hold--;
}

static __inline int
isp_psema_sig_rqe(struct ispsoftc *isp)
{
	while (isp->isp_osinfo.tmflags & TM_BUSY) {
		isp->isp_osinfo.tmflags |= TM_WANTED;
		if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) {
			return (-1);
		}
		isp->isp_osinfo.tmflags |= TM_BUSY;
	}
	return (0);
}

static __inline int
isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo)
{
	if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) {
		ISP_UNLOCK(isp);
		return (-1);
	}
	return (0);
}

static __inline void
isp_cv_signal_rqe(struct ispsoftc *isp, int status)
{
	isp->isp_osinfo.rstatus = status;
	wakeup(&isp->isp_osinfo.rstatus);
}

static __inline void
isp_vsema_rqe(struct ispsoftc *isp)
{
	if (isp->isp_osinfo.tmflags & TM_WANTED) {
		isp->isp_osinfo.tmflags &= ~TM_WANTED;
		wakeup(&isp->isp_osinfo.tmflags);
	}
	isp->isp_osinfo.tmflags &= ~TM_BUSY;
}

static cam_status
create_lun_state(struct ispsoftc *isp, int bus,
    struct cam_path *path, tstate_t **rslt)
{
	cam_status status;
	lun_id_t lun;
	int hfx;
	tstate_t *tptr, *new;

	lun = xpt_path_lun_id(path);
	if (lun < 0) {
		return (CAM_LUN_INVALID);
	}
	if (is_lun_enabled(isp, bus, lun)) {
		return (CAM_LUN_ALRDY_ENA);
	}
	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (new == NULL) {
		return (CAM_RESRC_UNAVAIL);
	}

	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
	    xpt_path_target_id(path), xpt_path_lun_id(path));
	if (status != CAM_REQ_CMP) {
		free(new, M_DEVBUF);
		return (status);
	}
	new->bus = bus;
	new->lun = lun;
	SLIST_INIT(&new->atios);
	SLIST_INIT(&new->inots);
	new->hold = 1;

	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
	tptr = isp->isp_osinfo.lun_hash[hfx];
	if (tptr == NULL) {
		isp->isp_osinfo.lun_hash[hfx] = new;
	} else {
		while (tptr->next)
			tptr = tptr->next;
		tptr->next = new;
	}
	*rslt = new;
	return (CAM_REQ_CMP);
}

static __inline void
destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
{
	int hfx;
	tstate_t *lw, *pw;

	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
	if (tptr->hold) {
		return;
	}
	pw = isp->isp_osinfo.lun_hash[hfx];
	if (pw == NULL) {
		return;
	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
		isp->isp_osinfo.lun_hash[hfx] = pw->next;
	} else {
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			ISP_UNLOCK(isp);
			return;
		}
	}
	free(tptr, M_DEVBUF);
}

/*
 * we enter with our locks held.
 */
static void
isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
{
	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	u_int16_t rstat;
	int bus, cmd, frozen = 0;
	lun_id_t lun;
	target_id_t tgt;


	bus = XS_CHANNEL(ccb) & 0x1;
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	/*
	 * Do some sanity checking first.
	 */

	if ((lun != CAM_LUN_WILDCARD) &&
	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return;
	}
	if (IS_SCSI(isp)) {
		sdparam *sdp = isp->isp_param;
		sdp += bus;
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != sdp->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	} else {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != FCPARAM(isp)->isp_iid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	}

	if (tgt == CAM_TARGET_WILDCARD) {
		if (lun != CAM_LUN_WILDCARD) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	/*
	 * If Fibre Channel, stop and drain all activity to this bus.
	 */
#if 0
	if (IS_FC(isp)) {
		ISP_LOCK(isp);
		frozen = 1;
		xpt_freeze_simq(isp->isp_sim, 1);
		isp->isp_osinfo.drain = 1;
		while (isp->isp_osinfo.drain) {
			(void) msleep(&isp->isp_osinfo.drain, &isp->isp_lock,
			    PRIBIO, "ispdrain", 10 * hz);
		}
		ISP_UNLOCK(isp);
	}
#endif

	/*
	 * Check to see if we're enabling on fibre channel and
	 * don't yet have a notion of who the heck we are (no
	 * loop yet).
	 */
	if (IS_FC(isp) && cel->enable &&
	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
		fcparam *fcp = isp->isp_param;
		int rv;

		rv = isp_fc_runstate(isp, 2 * 1000000);
		if (fcp->isp_fwstate != FW_READY ||
		    fcp->isp_loopstate != LOOP_READY) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "could not get a good port database read");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			if (frozen) {
				ISPLOCK_2_CAMLOCK(isp);
				xpt_release_simq(isp->isp_sim, 1);
				CAMLOCK_2_ISPLOCK(isp);
			}
			return;
		}
	}


	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we enable/disable target mode but don't do any lun enabling.
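	 *
	 * The channel is carried in the top bit of the word handed to
	 * ISPCTL_TOGGLE_TMODE, with ENABLE_TARGET_FLAG or'd in for enables.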
	 */
	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
		int av = bus << 31;
		tptr = &isp->isp_osinfo.tsdflt[bus];
		if (cel->enable) {
			if (isp->isp_osinfo.tmflags & (1 << bus)) {
				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
				if (frozen) {
					ISPLOCK_2_CAMLOCK(isp);
					xpt_release_simq(isp->isp_sim, 1);
					CAMLOCK_2_ISPLOCK(isp);
				}
				return;
			}
			ccb->ccb_h.status =
			    xpt_create_path(&tptr->owner, NULL,
			    xpt_path_path_id(ccb->ccb_h.path),
			    xpt_path_target_id(ccb->ccb_h.path),
			    xpt_path_lun_id(ccb->ccb_h.path));
			if (ccb->ccb_h.status != CAM_REQ_CMP) {
				if (frozen) {
					ISPLOCK_2_CAMLOCK(isp);
					xpt_release_simq(isp->isp_sim, 1);
					CAMLOCK_2_ISPLOCK(isp);
				}
				return;
			}
			SLIST_INIT(&tptr->atios);
			SLIST_INIT(&tptr->inots);
			av |= ENABLE_TARGET_FLAG;
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				xpt_free_path(tptr->owner);
				if (frozen) {
					ISPLOCK_2_CAMLOCK(isp);
					xpt_release_simq(isp->isp_sim, 1);
					CAMLOCK_2_ISPLOCK(isp);
				}
				return;
			}
			isp->isp_osinfo.tmflags |= (1 << bus);
		} else {
			if ((isp->isp_osinfo.tmflags & (1 << bus)) == 0) {
				ccb->ccb_h.status = CAM_LUN_INVALID;
				if (frozen) {
					ISPLOCK_2_CAMLOCK(isp);
					xpt_release_simq(isp->isp_sim, 1);
					CAMLOCK_2_ISPLOCK(isp);
				}
				return;
			}
			if (are_any_luns_enabled(isp, bus)) {
				ccb->ccb_h.status = CAM_SCSI_BUSY;
				if (frozen) {
					ISPLOCK_2_CAMLOCK(isp);
					xpt_release_simq(isp->isp_sim, 1);
					CAMLOCK_2_ISPLOCK(isp);
				}
				return;
			}
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				if (frozen) {
					ISPLOCK_2_CAMLOCK(isp);
					xpt_release_simq(isp->isp_sim, 1);
					CAMLOCK_2_ISPLOCK(isp);
				}
				return;
			}
			isp->isp_osinfo.tmflags &= ~(1 << bus);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_print_path(ccb->ccb_h.path);
		isp_prt(isp, ISP_LOGINFO, "Target Mode %sabled on channel %d",
		    (cel->enable) ? "en" : "dis", bus);
		if (frozen) {
			ISPLOCK_2_CAMLOCK(isp);
			xpt_release_simq(isp->isp_sim, 1);
			CAMLOCK_2_ISPLOCK(isp);
		}
		return;
	}

	/*
	 * We can move along now...
	 */

	if (frozen) {
		ISPLOCK_2_CAMLOCK(isp);
		xpt_release_simq(isp->isp_sim, 1);
		CAMLOCK_2_ISPLOCK(isp);
	}


	if (cel->enable) {
		ccb->ccb_h.status =
		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			return;
		}
	} else {
		tptr = get_lun_statep(isp, bus, lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	if (isp_psema_sig_rqe(isp)) {
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		return;
	}

	if (cel->enable) {
		u_int32_t seq = isp->isp_osinfo.rollinfo++;
		int c, n, ulun = lun;

		cmd = RQSTYPE_ENABLE_LUN;
		c = DFLT_CMND_CNT;
		n = DFLT_INOT_CNT;
		if (IS_FC(isp) && lun != 0) {
			cmd = RQSTYPE_MODIFY_LUN;
			n = 0;
			/*
			 * For SCC firmware, we only deal with setting
			 * (enabling or modifying) lun 0.
			 */
			ulun = 0;
		}
		rstat = LUN_ERR;
		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "wait for ENABLE/MODIFY LUN timed out");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
			goto out;
		}
	} else {
		int c, n, ulun = lun;
		u_int32_t seq;

		rstat = LUN_ERR;
		seq = isp->isp_osinfo.rollinfo++;
		cmd = -RQSTYPE_MODIFY_LUN;

		c = DFLT_CMND_CNT;
		n = DFLT_INOT_CNT;
		if (IS_FC(isp) && lun != 0) {
			n = 0;
			/*
			 * For SCC firmware, we only deal with setting
			 * (enabling or modifying) lun 0.
			 */
			ulun = 0;
		}
		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "wait for MODIFY LUN timed out");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "MODIFY LUN returned 0x%x", rstat);
			goto out;
		}
		if (IS_FC(isp) && lun) {
			goto out;
		}

		seq = isp->isp_osinfo.rollinfo++;

		rstat = LUN_ERR;
		cmd = -RQSTYPE_ENABLE_LUN;
		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGERR,
			    "wait for DISABLE LUN timed out");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "DISABLE LUN returned 0x%x", rstat);
			goto out;
		}
	}
out:
	isp_vsema_rqe(isp);

	if (rstat != LUN_OK) {
		xpt_print_path(ccb->ccb_h.path);
		isp_prt(isp, ISP_LOGWARN,
		    "lun %sable failed", (cel->enable) ? "en" : "dis");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
	} else {
		xpt_print_path(ccb->ccb_h.path);
		isp_prt(isp, ISP_LOGINFO, lfmt,
		    (cel->enable) ?
"en" : "dis", bus); 869 rls_lun_statep(isp, tptr); 870 if (cel->enable == 0) { 871 destroy_lun_state(isp, tptr); 872 } 873 ccb->ccb_h.status = CAM_REQ_CMP; 874 } 875 } 876 877 static cam_status 878 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb) 879 { 880 tstate_t *tptr; 881 struct ccb_hdr_slist *lp; 882 struct ccb_hdr *curelm; 883 int found; 884 union ccb *accb = ccb->cab.abort_ccb; 885 886 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 887 if (IS_FC(isp) && (accb->ccb_h.target_id != 888 ((fcparam *) isp->isp_param)->isp_loopid)) { 889 return (CAM_PATH_INVALID); 890 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 891 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 892 return (CAM_PATH_INVALID); 893 } 894 } 895 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 896 if (tptr == NULL) { 897 return (CAM_PATH_INVALID); 898 } 899 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 900 lp = &tptr->atios; 901 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 902 lp = &tptr->inots; 903 } else { 904 rls_lun_statep(isp, tptr); 905 return (CAM_UA_ABORT); 906 } 907 curelm = SLIST_FIRST(lp); 908 found = 0; 909 if (curelm == &accb->ccb_h) { 910 found = 1; 911 SLIST_REMOVE_HEAD(lp, sim_links.sle); 912 } else { 913 while(curelm != NULL) { 914 struct ccb_hdr *nextelm; 915 916 nextelm = SLIST_NEXT(curelm, sim_links.sle); 917 if (nextelm == &accb->ccb_h) { 918 found = 1; 919 SLIST_NEXT(curelm, sim_links.sle) = 920 SLIST_NEXT(nextelm, sim_links.sle); 921 break; 922 } 923 curelm = nextelm; 924 } 925 } 926 rls_lun_statep(isp, tptr); 927 if (found) { 928 accb->ccb_h.status = CAM_REQ_ABORTED; 929 return (CAM_REQ_CMP); 930 } 931 return(CAM_PATH_INVALID); 932 } 933 934 static cam_status 935 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb) 936 { 937 void *qe; 938 struct ccb_scsiio *cso = &ccb->csio; 939 u_int16_t *hp, save_handle; 940 u_int16_t iptr, optr; 941 942 943 if (isp_getrqentry(isp, &iptr, &optr, &qe)) { 944 xpt_print_path(ccb->ccb_h.path); 945 printf("Request Queue Overflow in isp_target_start_ctio\n"); 946 return (CAM_RESRC_UNAVAIL); 947 } 948 bzero(qe, QENTRY_LEN); 949 950 /* 951 * We're either moving data or completing a command here. 952 */ 953 954 if (IS_FC(isp)) { 955 struct ccb_accept_tio *atiop; 956 ct2_entry_t *cto = qe; 957 958 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 959 cto->ct_header.rqs_entry_count = 1; 960 cto->ct_iid = cso->init_id; 961 if (isp->isp_maxluns <= 16) { 962 cto->ct_lun = ccb->ccb_h.target_lun; 963 } 964 /* 965 * Start with a residual based on what the original datalength 966 * was supposed to be. Basically, we ignore what CAM has set 967 * for residuals. The data transfer routines will knock off 968 * the residual for each byte actually moved- and also will 969 * be responsible for setting the underrun flag. 970 */ 971 /* HACK! HACK! 
		 */
		if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) {
			cto->ct_resid = atiop->ccb_h.spriv_field0;
		}

		cto->ct_rxid = cso->tag_id;
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
				int m = min(cso->sense_len, MAXRESPLEN);
				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
				cto->rsp.m1.ct_senselen = m;
				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
			}
		} else {
			cto->ct_flags |= CT2_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT2_DATA_IN;
			} else {
				cto->ct_flags |= CT2_DATA_OUT;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
			}
			/*
			 * If we're sending data and status back together,
			 * we can't also send back sense data as well.
			 */
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}
		if (cto->ct_flags & CT2_SENDSTATUS) {
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO2[%x] SCSI STATUS 0x%x datalength %u",
			    cto->ct_rxid, cso->scsi_status, cto->ct_resid);
		}
		if (cto->ct_flags & CT2_SENDSTATUS)
			cto->ct_flags |= CT2_CCINCR;
		cto->ct_timeout = 10;
		hp = &cto->ct_syshandle;
	} else {
		ct_entry_t *cto = qe;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
		cto->ct_tgt = ccb->ccb_h.target_id;
		cto->ct_lun = ccb->ccb_h.target_lun;
		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
		if (AT_HAS_TAG(cso->tag_id)) {
			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
			cto->ct_flags |= CT_TQAE;
		}
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			cto->ct_flags |= CT_NODISC;
		}
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT_NO_DATA;
		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
			cto->ct_flags |= CT_SENDSTATUS;
			cto->ct_scsi_status = cso->scsi_status;
			cto->ct_resid = cso->resid;
		}
		if (cto->ct_flags & CT_SENDSTATUS) {
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
			    cso->tag_id);
		}
		cto->ct_timeout = 10;
		hp = &cto->ct_syshandle;
		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		if (cto->ct_flags & CT_SENDSTATUS)
			cto->ct_flags |= CT_CCINCR;
	}

	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("No XFLIST pointers for isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}


	/*
	 * Call the dma setup routines for this entry (and any subsequent
	 * CTIOs) if there's data to move, and then tell the f/w it's got
	 * new things to play with. As with isp_start's usage of DMA setup,
	 * any swizzling is done in the machine dependent layer. Because
	 * of this, we put the request onto the queue area first in native
	 * format.
	 */

	save_handle = *hp;

	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
	case CMD_QUEUED:
		ISP_ADD_REQUEST(isp, iptr);
		return (CAM_REQ_INPROG);

	case CMD_EAGAIN:
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		isp_destroy_handle(isp, save_handle);
		return (CAM_RESRC_UNAVAIL);

	default:
		isp_destroy_handle(isp, save_handle);
		return (XS_ERR(ccb));
	}
}

static void
isp_refire_putback_atio(void *arg)
{
	int s = splcam();
	isp_target_putback_atio(arg);
	splx(s);
}

static void
isp_target_putback_atio(union ccb *ccb)
{
	struct ispsoftc *isp;
	struct ccb_scsiio *cso;
	u_int16_t iptr, optr;
	void *qe;

	isp = XS_ISP(ccb);

	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
		(void) timeout(isp_refire_putback_atio, ccb, 10);
		isp_prt(isp, ISP_LOGWARN,
		    "isp_target_putback_atio: Request Queue Overflow");
		return;
	}
	bzero(qe, QENTRY_LEN);
	cso = &ccb->csio;
	if (IS_FC(isp)) {
		at2_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		if (isp->isp_maxluns > 16) {
			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = cso->tag_id;
		ISP_SWIZ_ATIO2(isp, qe, qe);
	} else {
		at_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = cso->init_id;
		at->at_iid |= XS_CHANNEL(ccb) << 7;
		at->at_tgt = cso->ccb_h.target_id;
		at->at_lun = cso->ccb_h.target_lun;
		at->at_status = CT_OK;
		at->at_tag_val = AT_GET_TAG(cso->tag_id);
		at->at_handle = AT_GET_HANDLE(cso->tag_id);
		ISP_SWIZ_ATIO(isp, qe, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, iptr);
	isp_complete_ctio(ccb);
}

static void
isp_complete_ctio(union ccb *ccb)
{
	struct ispsoftc *isp = XS_ISP(ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				isp_prt(isp, ISP_LOGWARN, "ctio->devqfrozen");
			}
		} else {
			isp_prt(isp, ISP_LOGWARN,
			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
		}
	}
	xpt_done(ccb);
}

/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status, bus;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error.
		 * We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
		    status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	bus = GET_BUS_VAL(aep->at_iid);
	tptr = get_lun_statep(isp, bus, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d on channel %d",
		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	if (tptr == &isp->isp_osinfo.tsdflt[bus]) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = GET_IID_VAL(aep->at_iid);
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	/*
	 * Construct a tag 'id' based upon tag value (which may be 0..255)
	 * and the handle (which we have to preserve).
	 */
	AT_MAKE_TAGID(atiop->tag_id, aep);
	if (aep->at_flags & AT_TQAE) {
		atiop->tag_action = aep->at_tag_type;
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
	    "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN,
		    "bogus atio (0x%x) leaked to platform", aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	if (isp->isp_maxluns > 16) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, 0, lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * What we'd like to know is whether or not we have a listener
		 * upstream that really hasn't configured yet. If we do, then
		 * we can give a more sensible reply here. If not, then we can
		 * reject this out of hand.
		 *
		 * Choices for what to send were
		 *
		 *	Not Ready, Unit Not Self-Configured Yet
		 *	(0x2,0x3e,0x00)
		 *
		 * for the former and
		 *
		 *	Illegal Request, Logical Unit Not Supported
		 *	(0x5,0x25,0x00)
		 *
		 * for the latter.
		 *
		 * We used to decide whether there was at least one listener
		 * based upon whether the black hole driver was configured.
		 * However, recent config(8) changes have made this hard to do
		 * at this time.
		 *
		 */
		u_int32_t ccode = SCSI_STATUS_BUSY;

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, ccode, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d", lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);

	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
		atiop->ccb_h.target_id =
		    ((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	/*
	 * We don't get 'suggested' sense data as we do with SCSI cards.
	 */
	atiop->sense_len = 0;

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
	case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	if (atiop->tag_action != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}

	/*
	 * Preserve overall command datalength in private field.
	 */
	atiop->ccb_h.spriv_field0 = aep->at_datalen;

	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    sentstatus? "FIN" : "MID");
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			resid = ct->ct_resid;
		}
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] tag %x iid %x tgt %d lun %d sts 0x%x flg %x %s",
		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_tgt,
		    ct->ct_lun, ct->ct_status, ct->ct_flags,
		    sentstatus? "FIN" : "MID");

		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
		 */
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if (ct->ct_status & QLTM_SVALID) {
			char *sp = (char *)ct;
			sp += CTIO_SENSE_OFFSET;
			ccb->csio.sense_len =
			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
			resid = ct->ct_resid;
		}
	}
	ccb->csio.resid += resid;

	/*
	 * We're here either because intermediate data transfers are done
	 * and/or the final status CTIO (which may have joined with a
	 * Data Transfer) is done.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. Any DMA handles have already been
	 * freed.
	 */
	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO done");
		return (0);
	}

	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO done (resid %d)",
	    (sentstatus)? " FINAL " : "MIDTERM ", ccb->csio.resid);

	if (!ok) {
		isp_target_putback_atio(ccb);
	} else {
		isp_complete_ctio(ccb);

	}
	return (0);
}
#endif

static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int tgt;

			tgt = xpt_path_target_id(path);
			ISP_LOCK(isp);
			sdp += cam_sim_bus(sim);
#ifndef ISP_TARGET_MODE
			if (tgt == sdp->isp_initiator_id) {
				nflags = DPARM_DEFAULT;
			} else {
				nflags = DPARM_SAFE_DFLT;
				if (isp->isp_loaded_fw) {
					nflags |= DPARM_NARROW | DPARM_ASYNC;
				}
			}
#else
			nflags = DPARM_DEFAULT;
#endif
			oflags = sdp->isp_devparam[tgt].dev_flags;
			sdp->isp_devparam[tgt].dev_flags = nflags;
			sdp->isp_devparam[tgt].dev_update = 1;
			isp->isp_update |= (1 << cam_sim_bus(sim));
			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
			sdp->isp_devparam[tgt].dev_flags = oflags;
			ISP_UNLOCK(isp);
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}

static void
isp_poll(struct cam_sim *sim)
{
	struct ispsoftc *isp = cam_sim_softc(sim);
	ISP_LOCK(isp);
	(void) isp_intr(isp);
	ISP_UNLOCK(isp);
}

#if 0
static void
isp_relsim(void *arg)
{
	struct ispsoftc *isp = arg;
	ISP_LOCK(isp);
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			xpt_release_simq(isp->isp_sim, 1);
			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
		}
	}
	ISP_UNLOCK(isp);
}
#endif

static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

		r = ISP_READ(isp, BIU_ISR);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup (%x, %x)", handle, r);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "watchdog timeout (%x, %x)", handle, r);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;

			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	ISP_UNLOCK(isp);
}

#ifdef ISP_SMPLOCK
static void
isp_kthread(void *arg)
{
	int wasfrozen;
	struct ispsoftc *isp = arg;

	mtx_lock(&isp->isp_lock);
	for (;;) {
		isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state");
		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
#if 0
			msleep(&lbolt, &isp->isp_lock,
			    PRIBIO, "isp_fcthrd", 0);
#else
			msleep(isp_kthread, &isp->isp_lock,
			    PRIBIO, "isp_fcthrd", hz);
#endif
		}
		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq");
			ISPLOCK_2_CAMLOCK(isp);
			xpt_release_simq(isp->isp_sim, 1);
			CAMLOCK_2_ISPLOCK(isp);
		}
		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
	}
}
#else
static void
isp_kthread(void *arg)
{
	int wasfrozen;
	struct ispsoftc *isp = arg;

	mtx_lock(&Giant);
	for (;;) {
		isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state");
		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
			tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
		}
		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq");
			ISPLOCK_2_CAMLOCK(isp);
			xpt_release_simq(isp->isp_sim, 1);
			CAMLOCK_2_ISPLOCK(isp);
		}
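		/*
		 * Sleep until isp_action (CMD_RQLATER) wakes us up to go
		 * re-evaluate the Fibre Channel loop/fabric state.
		 */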
		tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "isp_fc_worker", 0);
	}
}
#endif
static void
isp_action(struct cam_sim *sim, union ccb *ccb)
{
	int bus, tgt, error;
	struct ispsoftc *isp;
	struct ccb_trans_settings *cts;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));

	isp = (struct ispsoftc *)cam_sim_softc(sim);
	ccb->ccb_h.sim_priv.entries[0].field = 0;
	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
	if (isp->isp_state != ISP_RUNSTATE &&
	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
		CAMLOCK_2_ISPLOCK(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_UNLOCK(isp);
			/*
			 * Lie. Say it was a selection timeout.
			 */
			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISPLOCK_2_CAMLOCK(isp);
	}
	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);


	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
		}
#ifdef DIAGNOSTIC
		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		}
		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
			isp_prt(isp, ISP_LOGERR,
			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
			xpt_done(ccb);
			break;
		}
#endif
		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
		CAMLOCK_2_ISPLOCK(isp);
		error = isp_start((XS_T *) ccb);
		switch (error) {
		case CMD_QUEUED:
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
				u_int64_t ticks = (u_int64_t) hz;
				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
					ticks = 60 * 1000 * ticks;
				else
					ticks = ccb->ccb_h.timeout * hz;
				ticks = ((ticks + 999) / 1000) + hz + hz;
				if (ticks >= 0x80000000) {
					isp_prt(isp, ISP_LOGERR,
					    "timeout overflow");
					ticks = 0x80000000;
				}
				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
				    (caddr_t)ccb, (int)ticks);
			} else {
				callout_handle_init(&ccb->ccb_h.timeout_ch);
			}
			ISPLOCK_2_CAMLOCK(isp);
			break;
		case CMD_RQLATER:
#ifdef ISP_SMPLOCK
			cv_signal(&isp->isp_osinfo.kthread_cv);
#else
			wakeup(&isp->isp_osinfo.kthread_cv);
#endif
			if (isp->isp_osinfo.simqfrozen == 0) {
				isp_prt(isp, ISP_LOGDEBUG2,
				    "RQLATER freeze simq");
#if 0
				isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED;
				timeout(isp_relsim, isp, 500);
#else
				isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
#endif
				ISPLOCK_2_CAMLOCK(isp);
				xpt_freeze_simq(sim, 1);
			} else {
				ISPLOCK_2_CAMLOCK(isp);
			}
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
			break;
		case CMD_EAGAIN:
			if (isp->isp_osinfo.simqfrozen == 0) {
				xpt_freeze_simq(sim, 1);
				isp_prt(isp, ISP_LOGDEBUG2,
				    "EAGAIN freeze simq");
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			ISPLOCK_2_CAMLOCK(isp);
			xpt_done(ccb);
			break;
		case CMD_COMPLETE:
			isp_done((struct ccb_scsiio *) ccb);
			ISPLOCK_2_CAMLOCK(isp);
			break;
		default:
			isp_prt(isp, ISP_LOGERR,
			    "What's this? 0x%x at %d in file %s",
			    error, __LINE__, __FILE__);
			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
			xpt_done(ccb);
			ISPLOCK_2_CAMLOCK(isp);
		}
		break;

#ifdef ISP_TARGET_MODE
	case XPT_EN_LUN:		/* Enable LUN as a target */
		CAMLOCK_2_ISPLOCK(isp);
		isp_en_lun(isp, ccb);
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done(ccb);
		break;

	case XPT_NOTIFY_ACK:		/* recycle notify ack */
	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
	{
		tstate_t *tptr =
		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			xpt_done(ccb);
			break;
		}
		ccb->ccb_h.sim_priv.entries[0].field = 0;
		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
		CAMLOCK_2_ISPLOCK(isp);
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
			SLIST_INSERT_HEAD(&tptr->atios,
			    &ccb->ccb_h, sim_links.sle);
		} else {
			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
			    sim_links.sle);
		}
		rls_lun_statep(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ISPLOCK_2_CAMLOCK(isp);
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		CAMLOCK_2_ISPLOCK(isp);
		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
			if (isp->isp_osinfo.simqfrozen == 0) {
				xpt_freeze_simq(sim, 1);
				xpt_print_path(ccb->ccb_h.path);
				isp_prt(isp, ISP_LOGINFO,
				    "XPT_CONT_TARGET_IO freeze simq");
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			ISPLOCK_2_CAMLOCK(isp);
			xpt_done(ccb);
		} else {
			ISPLOCK_2_CAMLOCK(isp);
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
		}
		break;
	}
#endif
	case XPT_RESET_DEV:		/* BDR the specified SCSI device */

		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tgt = ccb->ccb_h.target_id;
		tgt |= (bus << 16);

		CAMLOCK_2_ISPLOCK(isp);
		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
		ISPLOCK_2_CAMLOCK(isp);
		if (error) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		union ccb *accb = ccb->cab.abort_ccb;
		CAMLOCK_2_ISPLOCK(isp);
		switch (accb->ccb_h.func_code) {
#ifdef ISP_TARGET_MODE
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
#endif
		case XPT_SCSI_IO:
			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
			if (error) {
				ccb->ccb_h.status = CAM_UA_ABORT;
			} else {
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		default:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done(ccb);
		break;
	}
#ifdef CAM_NEW_TRAN_CODE
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#else
#define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
#endif
	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
		cts = &ccb->cts;
		tgt = cts->ccb_h.target_id;
		CAMLOCK_2_ISPLOCK(isp);
		if (IS_SCSI(isp)) {
#ifndef CAM_NEW_TRAN_CODE
			sdparam *sdp = isp->isp_param;
			u_int16_t *dptr;

			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

			sdp += bus;
			/*
			 * We always update (internally) from dev_flags
			 * so any request to change settings just gets
			 * vectored to that location.
			 */
			dptr = &sdp->isp_devparam[tgt].dev_flags;

			/*
			 * Note that these operations affect the
			 * goal flags (dev_flags)- not
			 * the current state flags. Then we mark
			 * things so that the next operation to
			 * this HBA will cause the update to occur.
			 */
			if (cts->valid & CCB_TRANS_DISC_VALID) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					*dptr |= DPARM_DISC;
				} else {
					*dptr &= ~DPARM_DISC;
				}
			}
			if (cts->valid & CCB_TRANS_TQ_VALID) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					*dptr |= DPARM_TQING;
				} else {
					*dptr &= ~DPARM_TQING;
				}
			}
			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_16_BIT:
					*dptr |= DPARM_WIDE;
					break;
				default:
					*dptr &= ~DPARM_WIDE;
				}
			}
			/*
			 * Any SYNC RATE of nonzero and SYNC_OFFSET
			 * of nonzero will cause us to go to the
			 * selected (from NVRAM) maximum value for
			 * this device. At a later point, we'll
			 * allow finer control.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
			    (cts->sync_offset > 0)) {
				*dptr |= DPARM_SYNC;
			} else {
				*dptr &= ~DPARM_SYNC;
			}
			*dptr |= DPARM_SAFE_DFLT;
#else
			struct ccb_trans_settings_scsi *scsi =
			    &cts->proto_specific.scsi;
			struct ccb_trans_settings_spi *spi =
			    &cts->xport_specific.spi;
			sdparam *sdp = isp->isp_param;
			u_int16_t *dptr;

			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
			sdp += bus;
			/*
			 * We always update (internally) from dev_flags
			 * so any request to change settings just gets
			 * vectored to that location.
2056 */ 2057 dptr = &sdp->isp_devparam[tgt].dev_flags; 2058 2059 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2060 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2061 *dptr |= DPARM_DISC; 2062 else 2063 *dptr &= ~DPARM_DISC; 2064 } 2065 2066 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2067 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2068 *dptr |= DPARM_TQING; 2069 else 2070 *dptr &= ~DPARM_TQING; 2071 } 2072 2073 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2074 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2075 *dptr |= DPARM_WIDE; 2076 else 2077 *dptr &= ~DPARM_WIDE; 2078 } 2079 2080 /* 2081 * XXX: FIX ME 2082 */ 2083 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2084 (spi->valid & CTS_SPI_VALID_SYNC_RATE)) { 2085 *dptr |= DPARM_SYNC; 2086 isp_prt(isp, ISP_LOGDEBUG0, 2087 "enabling synchronous mode, but ignoring " 2088 "setting to period 0x%x offset 0x%x", 2089 spi->sync_period, spi->sync_offset); 2090 } else if (spi->sync_period && spi->sync_offset) { 2091 *dptr |= DPARM_SYNC; 2092 isp_prt(isp, ISP_LOGDEBUG0, 2093 "enabling synchronous mode (1), but ignoring" 2094 " setting to period 0x%x offset 0x%x", 2095 spi->sync_period, spi->sync_offset); 2096 } else { 2097 *dptr &= ~DPARM_SYNC; 2098 } 2099 #endif 2100 isp_prt(isp, ISP_LOGDEBUG0, 2101 "%d.%d set %s period 0x%x offset 0x%x flags 0x%x", 2102 bus, tgt, IS_CURRENT_SETTINGS(cts)? "current" : 2103 "user", sdp->isp_devparam[tgt].sync_period, 2104 sdp->isp_devparam[tgt].sync_offset, 2105 sdp->isp_devparam[tgt].dev_flags); 2106 sdp->isp_devparam[tgt].dev_update = 1; 2107 isp->isp_update |= (1 << bus); 2108 } else { 2109 /* 2110 * What, if anything, are we supposed to do? 2111 */ 2112 } 2113 ISPLOCK_2_CAMLOCK(isp); 2114 ccb->ccb_h.status = CAM_REQ_CMP; 2115 xpt_done(ccb); 2116 break; 2117 case XPT_GET_TRAN_SETTINGS: 2118 cts = &ccb->cts; 2119 tgt = cts->ccb_h.target_id; 2120 CAMLOCK_2_ISPLOCK(isp); 2121 if (IS_FC(isp)) { 2122 #ifndef CAM_NEW_TRAN_CODE 2123 /* 2124 * a lot of normal SCSI things don't make sense. 2125 */ 2126 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2127 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2128 /* 2129 * How do you measure the width of a high 2130 * speed serial bus? Well, in bytes. 2131 * 2132 * Offset and period make no sense, though, so we set 2133 * (above) a 'base' transfer speed to be gigabit. 
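			 * (XPT_PATH_INQ reports this as a
			 * base_transfer_speed of 100000, i.e. 100MB/s.)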
2134 */ 2135 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2136 #else 2137 fcparam *fcp = isp->isp_param; 2138 struct ccb_trans_settings_fc *fc = 2139 &cts->xport_specific.fc; 2140 2141 cts->protocol = PROTO_SCSI; 2142 cts->protocol_version = SCSI_REV_2; 2143 cts->transport = XPORT_FC; 2144 cts->transport_version = 0; 2145 2146 fc->valid = CTS_FC_VALID_SPEED; 2147 fc->bitrate = 100000; 2148 if (tgt > 0 && tgt < MAX_FC_TARG) { 2149 struct lportdb *lp = &fcp->portdb[tgt]; 2150 fc->wwnn = lp->node_wwn; 2151 fc->wwpn = lp->port_wwn; 2152 fc->port = lp->portid; 2153 fc->valid |= CTS_FC_VALID_WWNN | 2154 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2155 } 2156 #endif 2157 } else { 2158 #ifdef CAM_NEW_TRAN_CODE 2159 struct ccb_trans_settings_scsi *scsi = 2160 &cts->proto_specific.scsi; 2161 struct ccb_trans_settings_spi *spi = 2162 &cts->xport_specific.spi; 2163 #endif 2164 sdparam *sdp = isp->isp_param; 2165 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2166 u_int16_t dval, pval, oval; 2167 2168 sdp += bus; 2169 2170 if (IS_CURRENT_SETTINGS(cts)) { 2171 sdp->isp_devparam[tgt].dev_refresh = 1; 2172 isp->isp_update |= (1 << bus); 2173 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2174 NULL); 2175 dval = sdp->isp_devparam[tgt].cur_dflags; 2176 oval = sdp->isp_devparam[tgt].cur_offset; 2177 pval = sdp->isp_devparam[tgt].cur_period; 2178 } else { 2179 dval = sdp->isp_devparam[tgt].dev_flags; 2180 oval = sdp->isp_devparam[tgt].sync_offset; 2181 pval = sdp->isp_devparam[tgt].sync_period; 2182 } 2183 2184 #ifndef CAM_NEW_TRAN_CODE 2185 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2186 2187 if (dval & DPARM_DISC) { 2188 cts->flags |= CCB_TRANS_DISC_ENB; 2189 } 2190 if (dval & DPARM_TQING) { 2191 cts->flags |= CCB_TRANS_TAG_ENB; 2192 } 2193 if (dval & DPARM_WIDE) { 2194 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2195 } else { 2196 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2197 } 2198 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2199 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2200 2201 if ((dval & DPARM_SYNC) && oval != 0) { 2202 cts->sync_period = pval; 2203 cts->sync_offset = oval; 2204 cts->valid |= 2205 CCB_TRANS_SYNC_RATE_VALID | 2206 CCB_TRANS_SYNC_OFFSET_VALID; 2207 } 2208 #else 2209 cts->protocol = PROTO_SCSI; 2210 cts->protocol_version = SCSI_REV_2; 2211 cts->transport = XPORT_SPI; 2212 cts->transport_version = 2; 2213 2214 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 2215 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 2216 if (dval & DPARM_DISC) { 2217 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2218 } 2219 if (dval & DPARM_TQING) { 2220 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2221 } 2222 if ((dval & DPARM_SYNC) && oval != 0) { 2223 spi->sync_offset = oval; 2224 spi->sync_period = pval; 2225 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2226 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2227 } 2228 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2229 if (dval & DPARM_WIDE) { 2230 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2231 } else { 2232 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2233 } 2234 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2235 scsi->valid = CTS_SCSI_VALID_TQ; 2236 spi->valid |= CTS_SPI_VALID_DISC; 2237 } else { 2238 scsi->valid = 0; 2239 } 2240 #endif 2241 isp_prt(isp, ISP_LOGDEBUG0, 2242 "%d.%d get %s period 0x%x offset 0x%x flags 0x%x", 2243 bus, tgt, IS_CURRENT_SETTINGS(cts)? 
"current" : 2244 "user", pval, oval, dval); 2245 } 2246 ISPLOCK_2_CAMLOCK(isp); 2247 ccb->ccb_h.status = CAM_REQ_CMP; 2248 xpt_done(ccb); 2249 break; 2250 2251 case XPT_CALC_GEOMETRY: 2252 { 2253 struct ccb_calc_geometry *ccg; 2254 u_int32_t secs_per_cylinder; 2255 u_int32_t size_mb; 2256 2257 ccg = &ccb->ccg; 2258 if (ccg->block_size == 0) { 2259 isp_prt(isp, ISP_LOGERR, 2260 "%d.%d XPT_CALC_GEOMETRY block size 0?", 2261 ccg->ccb_h.target_id, ccg->ccb_h.target_lun); 2262 ccb->ccb_h.status = CAM_REQ_INVALID; 2263 xpt_done(ccb); 2264 break; 2265 } 2266 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2267 if (size_mb > 1024) { 2268 ccg->heads = 255; 2269 ccg->secs_per_track = 63; 2270 } else { 2271 ccg->heads = 64; 2272 ccg->secs_per_track = 32; 2273 } 2274 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2275 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2276 ccb->ccb_h.status = CAM_REQ_CMP; 2277 xpt_done(ccb); 2278 break; 2279 } 2280 case XPT_RESET_BUS: /* Reset the specified bus */ 2281 bus = cam_sim_bus(sim); 2282 CAMLOCK_2_ISPLOCK(isp); 2283 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2284 ISPLOCK_2_CAMLOCK(isp); 2285 if (error) 2286 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2287 else { 2288 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2289 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2290 else if (isp->isp_path != NULL) 2291 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2292 ccb->ccb_h.status = CAM_REQ_CMP; 2293 } 2294 xpt_done(ccb); 2295 break; 2296 2297 case XPT_TERM_IO: /* Terminate the I/O process */ 2298 ccb->ccb_h.status = CAM_REQ_INVALID; 2299 xpt_done(ccb); 2300 break; 2301 2302 case XPT_PATH_INQ: /* Path routing inquiry */ 2303 { 2304 struct ccb_pathinq *cpi = &ccb->cpi; 2305 2306 cpi->version_num = 1; 2307 #ifdef ISP_TARGET_MODE 2308 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2309 #else 2310 cpi->target_sprt = 0; 2311 #endif 2312 cpi->hba_eng_cnt = 0; 2313 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2314 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2315 cpi->bus_id = cam_sim_bus(sim); 2316 if (IS_FC(isp)) { 2317 cpi->hba_misc = PIM_NOBUSRESET; 2318 /* 2319 * Because our loop ID can shift from time to time, 2320 * make our initiator ID out of range of our bus. 2321 */ 2322 cpi->initiator_id = cpi->max_target + 1; 2323 2324 /* 2325 * Set base transfer capabilities for Fibre Channel. 2326 * Technically not correct because we don't know 2327 * what media we're running on top of- but we'll 2328 * look good if we always say 100MB/s. 2329 */ 2330 cpi->base_transfer_speed = 100000; 2331 cpi->hba_inquiry = PI_TAG_ABLE; 2332 #ifdef CAM_NEW_TRAN_CODE 2333 cpi->transport = XPORT_FC; 2334 cpi->transport_version = 0; /* WHAT'S THIS FOR? */ 2335 #endif 2336 } else { 2337 sdparam *sdp = isp->isp_param; 2338 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2339 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2340 cpi->hba_misc = 0; 2341 cpi->initiator_id = sdp->isp_initiator_id; 2342 cpi->base_transfer_speed = 3300; 2343 #ifdef CAM_NEW_TRAN_CODE 2344 cpi->transport = XPORT_SPI; 2345 cpi->transport_version = 2; /* WHAT'S THIS FOR? 
*/ 2346 #endif 2347 } 2348 #ifdef CAM_NEW_TRAN_CODE 2349 cpi->protocol = PROTO_SCSI; 2350 cpi->protocol_version = SCSI_REV_2; 2351 #endif 2352 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2353 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2354 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2355 cpi->unit_number = cam_sim_unit(sim); 2356 cpi->ccb_h.status = CAM_REQ_CMP; 2357 xpt_done(ccb); 2358 break; 2359 } 2360 default: 2361 ccb->ccb_h.status = CAM_REQ_INVALID; 2362 xpt_done(ccb); 2363 break; 2364 } 2365 } 2366 2367 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 2368 void 2369 isp_done(struct ccb_scsiio *sccb) 2370 { 2371 struct ispsoftc *isp = XS_ISP(sccb); 2372 2373 if (XS_NOERR(sccb)) 2374 XS_SETERR(sccb, CAM_REQ_CMP); 2375 2376 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2377 (sccb->scsi_status != SCSI_STATUS_OK)) { 2378 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2379 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 2380 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2381 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2382 } else { 2383 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2384 } 2385 } 2386 2387 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2388 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2389 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2390 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2391 xpt_freeze_devq(sccb->ccb_h.path, 1); 2392 if (sccb->scsi_status != SCSI_STATUS_OK) 2393 isp_prt(isp, ISP_LOGDEBUG2, 2394 "freeze devq %d.%d %x %x", 2395 sccb->ccb_h.target_id, 2396 sccb->ccb_h.target_lun, sccb->ccb_h.status, 2397 sccb->scsi_status); 2398 } 2399 } 2400 2401 /* 2402 * If we were frozen waiting resources, clear that we were frozen 2403 * waiting for resources. If we are no longer frozen, and the devq 2404 * isn't frozen, mark the completing CCB to have the XPT layer 2405 * release the simq. 
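	 * (SIMQFRZ_RESOURCE is the freeze reason set when a request had
	 * to be requeued because it could not be started, so command
	 * completion is the natural point to let the queue run again.)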
2406 */ 2407 if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) { 2408 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE; 2409 if (isp->isp_osinfo.simqfrozen == 0) { 2410 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2411 isp_prt(isp, ISP_LOGDEBUG2, 2412 "isp_done->relsimq"); 2413 sccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2414 } else { 2415 isp_prt(isp, ISP_LOGDEBUG2, 2416 "isp_done->devq frozen"); 2417 } 2418 } else { 2419 isp_prt(isp, ISP_LOGDEBUG2, 2420 "isp_done -> simqfrozen = %x", 2421 isp->isp_osinfo.simqfrozen); 2422 } 2423 } 2424 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 2425 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2426 xpt_print_path(sccb->ccb_h.path); 2427 isp_prt(isp, ISP_LOGINFO, 2428 "cam completion status 0x%x", sccb->ccb_h.status); 2429 } 2430 2431 XS_CMD_S_DONE(sccb); 2432 if (XS_CMD_WDOG_P(sccb) == 0) { 2433 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 2434 if (XS_CMD_GRACE_P(sccb)) { 2435 isp_prt(isp, ISP_LOGDEBUG2, 2436 "finished command on borrowed time"); 2437 } 2438 XS_CMD_S_CLEAR(sccb); 2439 ISPLOCK_2_CAMLOCK(isp); 2440 xpt_done((union ccb *) sccb); 2441 CAMLOCK_2_ISPLOCK(isp); 2442 } 2443 } 2444 2445 int 2446 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg) 2447 { 2448 int bus, rv = 0; 2449 switch (cmd) { 2450 case ISPASYNC_NEW_TGT_PARAMS: 2451 { 2452 #ifdef CAM_NEW_TRAN_CODE 2453 struct ccb_trans_settings_scsi *scsi; 2454 struct ccb_trans_settings_spi *spi; 2455 #endif 2456 int flags, tgt; 2457 sdparam *sdp = isp->isp_param; 2458 struct ccb_trans_settings cts; 2459 struct cam_path *tmppath; 2460 2461 bzero(&cts, sizeof (struct ccb_trans_settings)); 2462 2463 tgt = *((int *)arg); 2464 bus = (tgt >> 16) & 0xffff; 2465 tgt &= 0xffff; 2466 sdp += bus; 2467 ISPLOCK_2_CAMLOCK(isp); 2468 if (xpt_create_path(&tmppath, NULL, 2469 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), 2470 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2471 CAMLOCK_2_ISPLOCK(isp); 2472 isp_prt(isp, ISP_LOGWARN, 2473 "isp_async cannot make temp path for %d.%d", 2474 tgt, bus); 2475 rv = -1; 2476 break; 2477 } 2478 CAMLOCK_2_ISPLOCK(isp); 2479 flags = sdp->isp_devparam[tgt].cur_dflags; 2480 #ifdef CAM_NEW_TRAN_CODE 2481 cts.type = CTS_TYPE_CURRENT_SETTINGS; 2482 cts.protocol = PROTO_SCSI; 2483 cts.transport = XPORT_SPI; 2484 2485 scsi = &cts.proto_specific.scsi; 2486 spi = &cts.xport_specific.spi; 2487 2488 if (flags & DPARM_TQING) { 2489 scsi->valid |= CTS_SCSI_VALID_TQ; 2490 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2491 spi->flags |= CTS_SPI_FLAGS_TAG_ENB; 2492 } 2493 2494 if (flags & DPARM_DISC) { 2495 spi->valid |= CTS_SPI_VALID_DISC; 2496 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2497 } 2498 spi->flags |= CTS_SPI_VALID_BUS_WIDTH; 2499 if (flags & DPARM_WIDE) { 2500 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2501 } else { 2502 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2503 } 2504 if (flags & DPARM_SYNC) { 2505 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2506 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2507 spi->sync_period = sdp->isp_devparam[tgt].cur_period; 2508 spi->sync_offset = sdp->isp_devparam[tgt].cur_offset; 2509 } 2510 #else 2511 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 2512 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2513 if (flags & DPARM_DISC) { 2514 cts.flags |= CCB_TRANS_DISC_ENB; 2515 } 2516 if (flags & DPARM_TQING) { 2517 cts.flags |= CCB_TRANS_TAG_ENB; 2518 } 2519 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 2520 cts.bus_width = (flags & DPARM_WIDE)? 
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
		cts.sync_period = sdp->isp_devparam[tgt].cur_period;
		cts.sync_offset = sdp->isp_devparam[tgt].cur_offset;
		if (flags & DPARM_SYNC) {
			cts.valid |=
			    CCB_TRANS_SYNC_RATE_VALID |
			    CCB_TRANS_SYNC_OFFSET_VALID;
		}
#endif
		isp_prt(isp, ISP_LOGDEBUG2,
		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
		    bus, tgt, sdp->isp_devparam[tgt].cur_period,
		    sdp->isp_devparam[tgt].cur_offset, flags);
		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
		ISPLOCK_2_CAMLOCK(isp);
		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
		CAMLOCK_2_ISPLOCK(isp);
		xpt_free_path(tmppath);
		break;
	}
	case ISPASYNC_BUS_RESET:
		bus = *((int *)arg);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
		    bus);
		if (bus > 0 && isp->isp_path2) {
			ISPLOCK_2_CAMLOCK(isp);
			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
			CAMLOCK_2_ISPLOCK(isp);
		} else if (isp->isp_path) {
			ISPLOCK_2_CAMLOCK(isp);
			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
			CAMLOCK_2_ISPLOCK(isp);
		}
		break;
	case ISPASYNC_LIP:
		if (isp->isp_path) {
			if (isp->isp_osinfo.simqfrozen == 0) {
				isp_prt(isp, ISP_LOGDEBUG0, "LIP freeze simq");
				ISPLOCK_2_CAMLOCK(isp);
				xpt_freeze_simq(isp->isp_sim, 1);
				CAMLOCK_2_ISPLOCK(isp);
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
		}
		isp_prt(isp, ISP_LOGINFO, "LIP Received");
		break;
	case ISPASYNC_LOOP_RESET:
		if (isp->isp_path) {
			if (isp->isp_osinfo.simqfrozen == 0) {
				isp_prt(isp, ISP_LOGDEBUG0,
				    "Loop Reset freeze simq");
				ISPLOCK_2_CAMLOCK(isp);
				xpt_freeze_simq(isp->isp_sim, 1);
				CAMLOCK_2_ISPLOCK(isp);
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
		}
		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
		break;
	case ISPASYNC_LOOP_DOWN:
		if (isp->isp_path) {
			if (isp->isp_osinfo.simqfrozen == 0) {
				isp_prt(isp, ISP_LOGDEBUG0,
				    "loop down freeze simq");
				ISPLOCK_2_CAMLOCK(isp);
				xpt_freeze_simq(isp->isp_sim, 1);
				CAMLOCK_2_ISPLOCK(isp);
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
		}
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		/*
		 * Now we just note that Loop has come up. We don't
		 * actually do anything because we're waiting for a
		 * Change Notify before activating the FC cleanup
		 * thread to look at the state of the loop again.
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
	{
		struct cam_path *tmppath;
		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
		static const char *roles[4] = {
		    "(none)", "Target", "Initiator", "Target/Initiator"
		};
		fcparam *fcp = isp->isp_param;
		int tgt = *((int *) arg);
		struct lportdb *lp = &fcp->portdb[tgt];

		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
		    roles[lp->roles & 0x3],
		    (lp->valid)?
"Arrived" : "Departed", 2617 (u_int32_t) (lp->port_wwn >> 32), 2618 (u_int32_t) (lp->port_wwn & 0xffffffffLL), 2619 (u_int32_t) (lp->node_wwn >> 32), 2620 (u_int32_t) (lp->node_wwn & 0xffffffffLL)); 2621 2622 ISPLOCK_2_CAMLOCK(isp); 2623 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim), 2624 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2625 CAMLOCK_2_ISPLOCK(isp); 2626 break; 2627 } 2628 if (lp->valid && (lp->roles & 2629 (SVC3_INI_ROLE >> SVC3_ROLE_SHIFT))) { 2630 xpt_async(AC_FOUND_DEVICE, tmppath, NULL); 2631 } else { 2632 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2633 } 2634 CAMLOCK_2_ISPLOCK(isp); 2635 xpt_free_path(tmppath); 2636 break; 2637 } 2638 case ISPASYNC_CHANGE_NOTIFY: 2639 if (arg == (void *) 1) { 2640 isp_prt(isp, ISP_LOGINFO, 2641 "Name Server Database Changed"); 2642 } else { 2643 isp_prt(isp, ISP_LOGINFO, 2644 "Name Server Database Changed"); 2645 } 2646 #ifdef ISP_SMPLOCK 2647 cv_signal(&isp->isp_osinfo.kthread_cv); 2648 #else 2649 wakeup(&isp->isp_osinfo.kthread_cv); 2650 #endif 2651 break; 2652 case ISPASYNC_FABRIC_DEV: 2653 { 2654 int target, lrange; 2655 struct lportdb *lp = NULL; 2656 char *pt; 2657 sns_ganrsp_t *resp = (sns_ganrsp_t *) arg; 2658 u_int32_t portid; 2659 u_int64_t wwpn, wwnn; 2660 fcparam *fcp = isp->isp_param; 2661 2662 portid = 2663 (((u_int32_t) resp->snscb_port_id[0]) << 16) | 2664 (((u_int32_t) resp->snscb_port_id[1]) << 8) | 2665 (((u_int32_t) resp->snscb_port_id[2])); 2666 2667 wwpn = 2668 (((u_int64_t)resp->snscb_portname[0]) << 56) | 2669 (((u_int64_t)resp->snscb_portname[1]) << 48) | 2670 (((u_int64_t)resp->snscb_portname[2]) << 40) | 2671 (((u_int64_t)resp->snscb_portname[3]) << 32) | 2672 (((u_int64_t)resp->snscb_portname[4]) << 24) | 2673 (((u_int64_t)resp->snscb_portname[5]) << 16) | 2674 (((u_int64_t)resp->snscb_portname[6]) << 8) | 2675 (((u_int64_t)resp->snscb_portname[7])); 2676 2677 wwnn = 2678 (((u_int64_t)resp->snscb_nodename[0]) << 56) | 2679 (((u_int64_t)resp->snscb_nodename[1]) << 48) | 2680 (((u_int64_t)resp->snscb_nodename[2]) << 40) | 2681 (((u_int64_t)resp->snscb_nodename[3]) << 32) | 2682 (((u_int64_t)resp->snscb_nodename[4]) << 24) | 2683 (((u_int64_t)resp->snscb_nodename[5]) << 16) | 2684 (((u_int64_t)resp->snscb_nodename[6]) << 8) | 2685 (((u_int64_t)resp->snscb_nodename[7])); 2686 if (portid == 0 || wwpn == 0) { 2687 break; 2688 } 2689 2690 switch (resp->snscb_port_type) { 2691 case 1: 2692 pt = " N_Port"; 2693 break; 2694 case 2: 2695 pt = " NL_Port"; 2696 break; 2697 case 3: 2698 pt = "F/NL_Port"; 2699 break; 2700 case 0x7f: 2701 pt = " Nx_Port"; 2702 break; 2703 case 0x81: 2704 pt = " F_port"; 2705 break; 2706 case 0x82: 2707 pt = " FL_Port"; 2708 break; 2709 case 0x84: 2710 pt = " E_port"; 2711 break; 2712 default: 2713 pt = "?"; 2714 break; 2715 } 2716 isp_prt(isp, ISP_LOGINFO, 2717 "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x", 2718 pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn), 2719 ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn)); 2720 /* 2721 * We're only interested in SCSI_FCP types (for now) 2722 */ 2723 if ((resp->snscb_fc4_types[2] & 1) == 0) { 2724 break; 2725 } 2726 if (fcp->isp_topo != TOPO_F_PORT) 2727 lrange = FC_SNS_ID+1; 2728 else 2729 lrange = 0; 2730 /* 2731 * Is it already in our list? 
2732 */ 2733 for (target = lrange; target < MAX_FC_TARG; target++) { 2734 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2735 continue; 2736 } 2737 lp = &fcp->portdb[target]; 2738 if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) { 2739 lp->fabric_dev = 1; 2740 break; 2741 } 2742 } 2743 if (target < MAX_FC_TARG) { 2744 break; 2745 } 2746 for (target = lrange; target < MAX_FC_TARG; target++) { 2747 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2748 continue; 2749 } 2750 lp = &fcp->portdb[target]; 2751 if (lp->port_wwn == 0) { 2752 break; 2753 } 2754 } 2755 if (target == MAX_FC_TARG) { 2756 isp_prt(isp, ISP_LOGWARN, 2757 "no more space for fabric devices"); 2758 break; 2759 } 2760 lp->node_wwn = wwnn; 2761 lp->port_wwn = wwpn; 2762 lp->portid = portid; 2763 lp->fabric_dev = 1; 2764 break; 2765 } 2766 #ifdef ISP_TARGET_MODE 2767 case ISPASYNC_TARGET_MESSAGE: 2768 { 2769 tmd_msg_t *mp = arg; 2770 isp_prt(isp, ISP_LOGDEBUG2, 2771 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x", 2772 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt, 2773 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval, 2774 mp->nt_msg[0]); 2775 break; 2776 } 2777 case ISPASYNC_TARGET_EVENT: 2778 { 2779 tmd_event_t *ep = arg; 2780 isp_prt(isp, ISP_LOGDEBUG2, 2781 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event); 2782 break; 2783 } 2784 case ISPASYNC_TARGET_ACTION: 2785 switch (((isphdr_t *)arg)->rqs_entry_type) { 2786 default: 2787 isp_prt(isp, ISP_LOGWARN, 2788 "event 0x%x for unhandled target action", 2789 ((isphdr_t *)arg)->rqs_entry_type); 2790 break; 2791 case RQSTYPE_ATIO: 2792 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 2793 break; 2794 case RQSTYPE_ATIO2: 2795 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 2796 break; 2797 case RQSTYPE_CTIO2: 2798 case RQSTYPE_CTIO: 2799 rv = isp_handle_platform_ctio(isp, arg); 2800 break; 2801 case RQSTYPE_ENABLE_LUN: 2802 case RQSTYPE_MODIFY_LUN: 2803 isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status); 2804 break; 2805 } 2806 break; 2807 #endif 2808 case ISPASYNC_FW_CRASH: 2809 { 2810 u_int16_t mbox1, mbox6; 2811 mbox1 = ISP_READ(isp, OUTMAILBOX1); 2812 if (IS_DUALBUS(isp)) { 2813 mbox6 = ISP_READ(isp, OUTMAILBOX6); 2814 } else { 2815 mbox6 = 0; 2816 } 2817 isp_prt(isp, ISP_LOGERR, 2818 "Internal Firmware on bus %d Error @ RISC Address 0x%x", 2819 mbox6, mbox1); 2820 isp_reinit(isp); 2821 break; 2822 } 2823 default: 2824 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 2825 rv = -1; 2826 break; 2827 } 2828 return (rv); 2829 } 2830 2831 2832 /* 2833 * Locks are held before coming here. 2834 */ 2835 void 2836 isp_uninit(struct ispsoftc *isp) 2837 { 2838 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 2839 DISABLE_INTS(isp); 2840 } 2841 2842 void 2843 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...) 2844 { 2845 va_list ap; 2846 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 2847 return; 2848 } 2849 printf("%s: ", device_get_nameunit(isp->isp_dev)); 2850 va_start(ap, fmt); 2851 vprintf(fmt, ap); 2852 va_end(ap); 2853 printf("\n"); 2854 } 2855