1 /* $FreeBSD$ */ 2 /* 3 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 4 * 5 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
27 */ 28 #include <dev/isp/isp_freebsd.h> 29 #include <sys/unistd.h> 30 #include <sys/kthread.h> 31 #include <machine/stdarg.h> /* for use by isp_prt below */ 32 #include <sys/conf.h> 33 #include <sys/ioccom.h> 34 #include <dev/isp/isp_ioctl.h> 35 36 37 static d_ioctl_t ispioctl; 38 static void isp_intr_enable(void *); 39 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *); 40 static void isp_poll(struct cam_sim *); 41 #if 0 42 static void isp_relsim(void *); 43 #endif 44 static timeout_t isp_watchdog; 45 static void isp_kthread(void *); 46 static void isp_action(struct cam_sim *, union ccb *); 47 48 49 #define ISP_CDEV_MAJOR 248 50 static struct cdevsw isp_cdevsw = { 51 /* open */ nullopen, 52 /* close */ nullclose, 53 /* read */ noread, 54 /* write */ nowrite, 55 /* ioctl */ ispioctl, 56 /* poll */ nopoll, 57 /* mmap */ nommap, 58 /* strategy */ nostrategy, 59 /* name */ "isp", 60 /* maj */ ISP_CDEV_MAJOR, 61 /* dump */ nodump, 62 /* psize */ nopsize, 63 /* flags */ D_TAPE, 64 }; 65 66 static struct ispsoftc *isplist = NULL; 67 68 void 69 isp_attach(struct ispsoftc *isp) 70 { 71 int primary, secondary; 72 struct ccb_setasync csa; 73 struct cam_devq *devq; 74 struct cam_sim *sim; 75 struct cam_path *path; 76 77 /* 78 * Establish (in case of 12X0) which bus is the primary. 79 */ 80 81 primary = 0; 82 secondary = 1; 83 84 /* 85 * Create the device queue for our SIM(s). 86 */ 87 devq = cam_simq_alloc(isp->isp_maxcmds); 88 if (devq == NULL) { 89 return; 90 } 91 92 /* 93 * Construct our SIM entry. 
94 */ 95 ISPLOCK_2_CAMLOCK(isp); 96 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 97 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 98 if (sim == NULL) { 99 cam_simq_free(devq); 100 CAMLOCK_2_ISPLOCK(isp); 101 return; 102 } 103 CAMLOCK_2_ISPLOCK(isp); 104 105 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 106 isp->isp_osinfo.ehook.ich_arg = isp; 107 ISPLOCK_2_CAMLOCK(isp); 108 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 109 cam_sim_free(sim, TRUE); 110 CAMLOCK_2_ISPLOCK(isp); 111 isp_prt(isp, ISP_LOGERR, 112 "could not establish interrupt enable hook"); 113 return; 114 } 115 116 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 117 cam_sim_free(sim, TRUE); 118 CAMLOCK_2_ISPLOCK(isp); 119 return; 120 } 121 122 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 123 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 124 xpt_bus_deregister(cam_sim_path(sim)); 125 cam_sim_free(sim, TRUE); 126 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 127 CAMLOCK_2_ISPLOCK(isp); 128 return; 129 } 130 131 xpt_setup_ccb(&csa.ccb_h, path, 5); 132 csa.ccb_h.func_code = XPT_SASYNC_CB; 133 csa.event_enable = AC_LOST_DEVICE; 134 csa.callback = isp_cam_async; 135 csa.callback_arg = sim; 136 xpt_action((union ccb *)&csa); 137 CAMLOCK_2_ISPLOCK(isp); 138 isp->isp_sim = sim; 139 isp->isp_path = path; 140 /* 141 * Create a kernel thread for fibre channel instances. We 142 * don't have dual channel FC cards. 
143 */ 144 if (IS_FC(isp)) { 145 ISPLOCK_2_CAMLOCK(isp); 146 /* XXX: LOCK VIOLATION */ 147 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); 148 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 149 RFHIGHPID, "%s: fc_thrd", 150 device_get_nameunit(isp->isp_dev))) { 151 xpt_bus_deregister(cam_sim_path(sim)); 152 cam_sim_free(sim, TRUE); 153 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 154 CAMLOCK_2_ISPLOCK(isp); 155 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 156 return; 157 } 158 CAMLOCK_2_ISPLOCK(isp); 159 } 160 161 162 /* 163 * If we have a second channel, construct SIM entry for that. 164 */ 165 if (IS_DUALBUS(isp)) { 166 ISPLOCK_2_CAMLOCK(isp); 167 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 168 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 169 if (sim == NULL) { 170 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 171 xpt_free_path(isp->isp_path); 172 cam_simq_free(devq); 173 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 174 return; 175 } 176 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 177 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 178 xpt_free_path(isp->isp_path); 179 cam_sim_free(sim, TRUE); 180 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 181 CAMLOCK_2_ISPLOCK(isp); 182 return; 183 } 184 185 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 186 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 187 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 188 xpt_free_path(isp->isp_path); 189 xpt_bus_deregister(cam_sim_path(sim)); 190 cam_sim_free(sim, TRUE); 191 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 192 CAMLOCK_2_ISPLOCK(isp); 193 return; 194 } 195 196 xpt_setup_ccb(&csa.ccb_h, path, 5); 197 csa.ccb_h.func_code = XPT_SASYNC_CB; 198 csa.event_enable = AC_LOST_DEVICE; 199 csa.callback = isp_cam_async; 200 csa.callback_arg = sim; 201 xpt_action((union ccb *)&csa); 202 CAMLOCK_2_ISPLOCK(isp); 203 isp->isp_sim2 = sim; 204 isp->isp_path2 = path; 205 } 206 
207 #ifdef ISP_TARGET_MODE 208 cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a"); 209 cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b"); 210 cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a"); 211 cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b"); 212 #endif 213 /* 214 * Create device nodes 215 */ 216 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 217 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 218 219 if (isp->isp_role != ISP_ROLE_NONE) { 220 isp->isp_state = ISP_RUNSTATE; 221 ENABLE_INTS(isp); 222 } 223 if (isplist == NULL) { 224 isplist = isp; 225 } else { 226 struct ispsoftc *tmp = isplist; 227 while (tmp->isp_osinfo.next) { 228 tmp = tmp->isp_osinfo.next; 229 } 230 tmp->isp_osinfo.next = isp; 231 } 232 233 } 234 235 static int 236 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 237 { 238 struct ispsoftc *isp; 239 int retval = ENOTTY; 240 241 isp = isplist; 242 while (isp) { 243 if (minor(dev) == device_get_unit(isp->isp_dev)) { 244 break; 245 } 246 isp = isp->isp_osinfo.next; 247 } 248 if (isp == NULL) 249 return (ENXIO); 250 251 switch (cmd) { 252 case ISP_SDBLEV: 253 { 254 int olddblev = isp->isp_dblev; 255 isp->isp_dblev = *(int *)addr; 256 *(int *)addr = olddblev; 257 retval = 0; 258 break; 259 } 260 case ISP_RESETHBA: 261 ISP_LOCK(isp); 262 isp_reinit(isp); 263 ISP_UNLOCK(isp); 264 retval = 0; 265 break; 266 case ISP_FC_RESCAN: 267 if (IS_FC(isp)) { 268 ISP_LOCK(isp); 269 if (isp_fc_runstate(isp, 5 * 1000000)) { 270 retval = EIO; 271 } else { 272 retval = 0; 273 } 274 ISP_UNLOCK(isp); 275 } 276 break; 277 case ISP_FC_LIP: 278 if (IS_FC(isp)) { 279 ISP_LOCK(isp); 280 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 281 retval = EIO; 282 } else { 283 retval = 0; 284 } 285 ISP_UNLOCK(isp); 286 } 287 break; 288 case ISP_FC_GETDINFO: 289 { 290 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 291 struct lportdb *lp; 292 293 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 294 retval = 
EINVAL; 295 break; 296 } 297 ISP_LOCK(isp); 298 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 299 if (lp->valid) { 300 ifc->loopid = lp->loopid; 301 ifc->portid = lp->portid; 302 ifc->node_wwn = lp->node_wwn; 303 ifc->port_wwn = lp->port_wwn; 304 retval = 0; 305 } else { 306 retval = ENODEV; 307 } 308 ISP_UNLOCK(isp); 309 break; 310 } 311 case ISP_GET_STATS: 312 { 313 isp_stats_t *sp = (isp_stats_t *) addr; 314 315 MEMZERO(sp, sizeof (*sp)); 316 sp->isp_stat_version = ISP_STATS_VERSION; 317 sp->isp_type = isp->isp_type; 318 sp->isp_revision = isp->isp_revision; 319 ISP_LOCK(isp); 320 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 321 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 322 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 323 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 324 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 325 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 326 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 327 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 328 ISP_UNLOCK(isp); 329 retval = 0; 330 break; 331 } 332 case ISP_CLR_STATS: 333 ISP_LOCK(isp); 334 isp->isp_intcnt = 0; 335 isp->isp_intbogus = 0; 336 isp->isp_intmboxc = 0; 337 isp->isp_intoasync = 0; 338 isp->isp_rsltccmplt = 0; 339 isp->isp_fphccmplt = 0; 340 isp->isp_rscchiwater = 0; 341 isp->isp_fpcchiwater = 0; 342 ISP_UNLOCK(isp); 343 retval = 0; 344 break; 345 346 default: 347 break; 348 } 349 return (retval); 350 } 351 352 static void 353 isp_intr_enable(void *arg) 354 { 355 struct ispsoftc *isp = arg; 356 if (isp->isp_role != ISP_ROLE_NONE) { 357 ENABLE_INTS(isp); 358 isp->isp_osinfo.intsok = 1; 359 } 360 /* Release our hook so that the boot can continue. 
*/ 361 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 362 } 363 364 /* 365 * Put the target mode functions here, because some are inlines 366 */ 367 368 #ifdef ISP_TARGET_MODE 369 370 static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t); 371 static __inline int are_any_luns_enabled(struct ispsoftc *, int); 372 static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t); 373 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *); 374 static __inline int isp_psema_sig_rqe(struct ispsoftc *, int); 375 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int); 376 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int, int); 377 static __inline void isp_vsema_rqe(struct ispsoftc *, int); 378 static __inline atio_private_data_t *isp_get_atpd(struct ispsoftc *, int); 379 static cam_status 380 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **); 381 static void destroy_lun_state(struct ispsoftc *, tstate_t *); 382 static void isp_en_lun(struct ispsoftc *, union ccb *); 383 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *); 384 static timeout_t isp_refire_putback_atio; 385 static void isp_complete_ctio(union ccb *); 386 static void isp_target_putback_atio(union ccb *); 387 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *); 388 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *); 389 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *); 390 static int isp_handle_platform_ctio(struct ispsoftc *, void *); 391 392 static __inline int 393 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun) 394 { 395 tstate_t *tptr; 396 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 397 if (tptr == NULL) { 398 return (0); 399 } 400 do { 401 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { 402 return (1); 403 } 404 } while ((tptr = tptr->next) != NULL); 405 return (0); 406 } 407 408 static __inline int 
409 are_any_luns_enabled(struct ispsoftc *isp, int port) 410 { 411 int lo, hi; 412 if (IS_DUALBUS(isp)) { 413 lo = (port * (LUN_HASH_SIZE >> 1)); 414 hi = lo + (LUN_HASH_SIZE >> 1); 415 } else { 416 lo = 0; 417 hi = LUN_HASH_SIZE; 418 } 419 for (lo = 0; lo < hi; lo++) { 420 if (isp->isp_osinfo.lun_hash[lo]) { 421 return (1); 422 } 423 } 424 return (0); 425 } 426 427 static __inline tstate_t * 428 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun) 429 { 430 tstate_t *tptr = NULL; 431 432 if (lun == CAM_LUN_WILDCARD) { 433 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 434 tptr = &isp->isp_osinfo.tsdflt[bus]; 435 tptr->hold++; 436 return (tptr); 437 } 438 } else { 439 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 440 if (tptr == NULL) { 441 return (NULL); 442 } 443 } 444 445 do { 446 if (tptr->lun == lun && tptr->bus == bus) { 447 tptr->hold++; 448 return (tptr); 449 } 450 } while ((tptr = tptr->next) != NULL); 451 return (tptr); 452 } 453 454 static __inline void 455 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr) 456 { 457 if (tptr->hold) 458 tptr->hold--; 459 } 460 461 static __inline int 462 isp_psema_sig_rqe(struct ispsoftc *isp, int bus) 463 { 464 while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) { 465 isp->isp_osinfo.tmflags[bus] |= TM_WANTED; 466 if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) { 467 return (-1); 468 } 469 isp->isp_osinfo.tmflags[bus] |= TM_BUSY; 470 } 471 return (0); 472 } 473 474 static __inline int 475 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo) 476 { 477 if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) { 478 return (-1); 479 } 480 return (0); 481 } 482 483 static __inline void 484 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status) 485 { 486 isp->isp_osinfo.rstatus[bus] = status; 487 cv_signal(&isp->isp_osinfo.tgtcv1[bus]); 488 } 489 490 static __inline void 491 isp_vsema_rqe(struct ispsoftc *isp, int bus) 492 { 493 if 
(isp->isp_osinfo.tmflags[bus] & TM_WANTED) { 494 isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED; 495 cv_signal(&isp->isp_osinfo.tgtcv0[bus]); 496 } 497 isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY; 498 } 499 500 static __inline atio_private_data_t * 501 isp_get_atpd(struct ispsoftc *isp, int tag) 502 { 503 atio_private_data_t *atp; 504 for (atp = isp->isp_osinfo.atpdp; 505 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 506 if (atp->tag == tag) 507 return (atp); 508 } 509 return (NULL); 510 } 511 512 static cam_status 513 create_lun_state(struct ispsoftc *isp, int bus, 514 struct cam_path *path, tstate_t **rslt) 515 { 516 cam_status status; 517 lun_id_t lun; 518 int hfx; 519 tstate_t *tptr, *new; 520 521 lun = xpt_path_lun_id(path); 522 if (lun < 0) { 523 return (CAM_LUN_INVALID); 524 } 525 if (is_lun_enabled(isp, bus, lun)) { 526 return (CAM_LUN_ALRDY_ENA); 527 } 528 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 529 if (new == NULL) { 530 return (CAM_RESRC_UNAVAIL); 531 } 532 533 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 534 xpt_path_target_id(path), xpt_path_lun_id(path)); 535 if (status != CAM_REQ_CMP) { 536 free(new, M_DEVBUF); 537 return (status); 538 } 539 new->bus = bus; 540 new->lun = lun; 541 SLIST_INIT(&new->atios); 542 SLIST_INIT(&new->inots); 543 new->hold = 1; 544 545 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 546 tptr = isp->isp_osinfo.lun_hash[hfx]; 547 if (tptr == NULL) { 548 isp->isp_osinfo.lun_hash[hfx] = new; 549 } else { 550 while (tptr->next) 551 tptr = tptr->next; 552 tptr->next = new; 553 } 554 *rslt = new; 555 return (CAM_REQ_CMP); 556 } 557 558 static __inline void 559 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr) 560 { 561 int hfx; 562 tstate_t *lw, *pw; 563 564 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 565 if (tptr->hold) { 566 return; 567 } 568 pw = isp->isp_osinfo.lun_hash[hfx]; 569 if (pw == NULL) { 570 return; 571 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) 
{ 572 isp->isp_osinfo.lun_hash[hfx] = pw->next; 573 } else { 574 lw = pw; 575 pw = lw->next; 576 while (pw) { 577 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 578 lw->next = pw->next; 579 break; 580 } 581 lw = pw; 582 pw = pw->next; 583 } 584 if (pw == NULL) { 585 return; 586 } 587 } 588 free(tptr, M_DEVBUF); 589 } 590 591 /* 592 * we enter with our locks held. 593 */ 594 static void 595 isp_en_lun(struct ispsoftc *isp, union ccb *ccb) 596 { 597 const char lfmt[] = "Lun now %sabled for target mode on channel %d"; 598 struct ccb_en_lun *cel = &ccb->cel; 599 tstate_t *tptr; 600 u_int16_t rstat; 601 int bus, cmd, av, wildcard; 602 lun_id_t lun; 603 target_id_t tgt; 604 605 606 bus = XS_CHANNEL(ccb) & 0x1; 607 tgt = ccb->ccb_h.target_id; 608 lun = ccb->ccb_h.target_lun; 609 610 /* 611 * Do some sanity checking first. 612 */ 613 614 if ((lun != CAM_LUN_WILDCARD) && 615 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 616 ccb->ccb_h.status = CAM_LUN_INVALID; 617 return; 618 } 619 620 if (IS_SCSI(isp)) { 621 sdparam *sdp = isp->isp_param; 622 sdp += bus; 623 if (tgt != CAM_TARGET_WILDCARD && 624 tgt != sdp->isp_initiator_id) { 625 ccb->ccb_h.status = CAM_TID_INVALID; 626 return; 627 } 628 } else { 629 if (tgt != CAM_TARGET_WILDCARD && 630 tgt != FCPARAM(isp)->isp_iid) { 631 ccb->ccb_h.status = CAM_TID_INVALID; 632 return; 633 } 634 /* 635 * This is as a good a place as any to check f/w capabilities. 636 */ 637 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) { 638 isp_prt(isp, ISP_LOGERR, 639 "firmware does not support target mode"); 640 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 641 return; 642 } 643 /* 644 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 645 * XXX: dorks with our already fragile enable/disable code. 
646 */ 647 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 648 isp_prt(isp, ISP_LOGERR, 649 "firmware not SCCLUN capable"); 650 } 651 } 652 653 if (tgt == CAM_TARGET_WILDCARD) { 654 if (lun == CAM_LUN_WILDCARD) { 655 wildcard = 1; 656 } else { 657 ccb->ccb_h.status = CAM_LUN_INVALID; 658 return; 659 } 660 } else { 661 wildcard = 0; 662 } 663 664 /* 665 * Next check to see whether this is a target/lun wildcard action. 666 * 667 * If so, we know that we can accept commands for luns that haven't 668 * been enabled yet and send them upstream. Otherwise, we have to 669 * handle them locally (if we see them at all). 670 */ 671 672 if (wildcard) { 673 tptr = &isp->isp_osinfo.tsdflt[bus]; 674 if (cel->enable) { 675 if (isp->isp_osinfo.tmflags[bus] & 676 TM_WILDCARD_ENABLED) { 677 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 678 return; 679 } 680 ccb->ccb_h.status = 681 xpt_create_path(&tptr->owner, NULL, 682 xpt_path_path_id(ccb->ccb_h.path), 683 xpt_path_target_id(ccb->ccb_h.path), 684 xpt_path_lun_id(ccb->ccb_h.path)); 685 if (ccb->ccb_h.status != CAM_REQ_CMP) { 686 return; 687 } 688 SLIST_INIT(&tptr->atios); 689 SLIST_INIT(&tptr->inots); 690 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 691 } else { 692 if ((isp->isp_osinfo.tmflags[bus] & 693 TM_WILDCARD_ENABLED) == 0) { 694 ccb->ccb_h.status = CAM_REQ_CMP; 695 return; 696 } 697 if (tptr->hold) { 698 ccb->ccb_h.status = CAM_SCSI_BUSY; 699 return; 700 } 701 xpt_free_path(tptr->owner); 702 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 703 } 704 } 705 706 /* 707 * Now check to see whether this bus needs to be 708 * enabled/disabled with respect to target mode. 
709 */ 710 av = bus << 31; 711 if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) { 712 av |= ENABLE_TARGET_FLAG; 713 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 714 if (av) { 715 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 716 if (wildcard) { 717 isp->isp_osinfo.tmflags[bus] &= 718 ~TM_WILDCARD_ENABLED; 719 xpt_free_path(tptr->owner); 720 } 721 return; 722 } 723 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 724 isp_prt(isp, ISP_LOGINFO, 725 "Target Mode enabled on channel %d", bus); 726 } else if (cel->enable == 0 && 727 (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) { 728 if (are_any_luns_enabled(isp, bus)) { 729 ccb->ccb_h.status = CAM_SCSI_BUSY; 730 return; 731 } 732 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 733 if (av) { 734 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 735 return; 736 } 737 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 738 isp_prt(isp, ISP_LOGINFO, 739 "Target Mode disabled on channel %d", bus); 740 } 741 742 if (wildcard) { 743 ccb->ccb_h.status = CAM_REQ_CMP; 744 return; 745 } 746 747 if (cel->enable) { 748 ccb->ccb_h.status = 749 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 750 if (ccb->ccb_h.status != CAM_REQ_CMP) { 751 return; 752 } 753 } else { 754 tptr = get_lun_statep(isp, bus, lun); 755 if (tptr == NULL) { 756 ccb->ccb_h.status = CAM_LUN_INVALID; 757 return; 758 } 759 } 760 761 if (isp_psema_sig_rqe(isp, bus)) { 762 rls_lun_statep(isp, tptr); 763 if (cel->enable) 764 destroy_lun_state(isp, tptr); 765 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 766 return; 767 } 768 769 if (cel->enable) { 770 u_int32_t seq = isp->isp_osinfo.rollinfo++; 771 int c, n, ulun = lun; 772 773 cmd = RQSTYPE_ENABLE_LUN; 774 c = DFLT_CMND_CNT; 775 n = DFLT_INOT_CNT; 776 if (IS_FC(isp) && lun != 0) { 777 cmd = RQSTYPE_MODIFY_LUN; 778 n = 0; 779 /* 780 * For SCC firmware, we only deal with setting 781 * (enabling or modifying) lun 0. 
782 */ 783 ulun = 0; 784 } 785 rstat = LUN_ERR; 786 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { 787 xpt_print_path(ccb->ccb_h.path); 788 isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed"); 789 goto out; 790 } 791 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 792 xpt_print_path(ccb->ccb_h.path); 793 isp_prt(isp, ISP_LOGERR, 794 "wait for ENABLE/MODIFY LUN timed out"); 795 goto out; 796 } 797 rstat = isp->isp_osinfo.rstatus[bus]; 798 if (rstat != LUN_OK) { 799 xpt_print_path(ccb->ccb_h.path); 800 isp_prt(isp, ISP_LOGERR, 801 "ENABLE/MODIFY LUN returned 0x%x", rstat); 802 goto out; 803 } 804 } else { 805 int c, n, ulun = lun; 806 u_int32_t seq; 807 808 rstat = LUN_ERR; 809 seq = isp->isp_osinfo.rollinfo++; 810 cmd = -RQSTYPE_MODIFY_LUN; 811 812 c = DFLT_CMND_CNT; 813 n = DFLT_INOT_CNT; 814 if (IS_FC(isp) && lun != 0) { 815 n = 0; 816 /* 817 * For SCC firmware, we only deal with setting 818 * (enabling or modifying) lun 0. 819 */ 820 ulun = 0; 821 } 822 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { 823 xpt_print_path(ccb->ccb_h.path); 824 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); 825 goto out; 826 } 827 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 828 xpt_print_path(ccb->ccb_h.path); 829 isp_prt(isp, ISP_LOGERR, 830 "wait for MODIFY LUN timed out"); 831 goto out; 832 } 833 rstat = isp->isp_osinfo.rstatus[bus]; 834 if (rstat != LUN_OK) { 835 xpt_print_path(ccb->ccb_h.path); 836 isp_prt(isp, ISP_LOGERR, 837 "MODIFY LUN returned 0x%x", rstat); 838 goto out; 839 } 840 if (IS_FC(isp) && lun) { 841 goto out; 842 } 843 844 seq = isp->isp_osinfo.rollinfo++; 845 846 rstat = LUN_ERR; 847 cmd = -RQSTYPE_ENABLE_LUN; 848 if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) { 849 xpt_print_path(ccb->ccb_h.path); 850 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); 851 goto out; 852 } 853 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 854 xpt_print_path(ccb->ccb_h.path); 855 isp_prt(isp, ISP_LOGERR, 856 "wait for DISABLE LUN timed out"); 857 goto out; 
858 } 859 rstat = isp->isp_osinfo.rstatus[bus]; 860 if (rstat != LUN_OK) { 861 xpt_print_path(ccb->ccb_h.path); 862 isp_prt(isp, ISP_LOGWARN, 863 "DISABLE LUN returned 0x%x", rstat); 864 goto out; 865 } 866 if (are_any_luns_enabled(isp, bus) == 0) { 867 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 868 if (av) { 869 isp_prt(isp, ISP_LOGWARN, 870 "disable target mode on channel %d failed", 871 bus); 872 goto out; 873 } 874 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 875 xpt_print_path(ccb->ccb_h.path); 876 isp_prt(isp, ISP_LOGINFO, 877 "Target Mode disabled on channel %d", bus); 878 } 879 } 880 881 out: 882 isp_vsema_rqe(isp, bus); 883 884 if (rstat != LUN_OK) { 885 xpt_print_path(ccb->ccb_h.path); 886 isp_prt(isp, ISP_LOGWARN, 887 "lun %sable failed", (cel->enable) ? "en" : "dis"); 888 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 889 rls_lun_statep(isp, tptr); 890 if (cel->enable) 891 destroy_lun_state(isp, tptr); 892 } else { 893 xpt_print_path(ccb->ccb_h.path); 894 isp_prt(isp, ISP_LOGINFO, lfmt, 895 (cel->enable) ? 
"en" : "dis", bus); 896 rls_lun_statep(isp, tptr); 897 if (cel->enable == 0) { 898 destroy_lun_state(isp, tptr); 899 } 900 ccb->ccb_h.status = CAM_REQ_CMP; 901 } 902 } 903 904 static cam_status 905 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb) 906 { 907 tstate_t *tptr; 908 struct ccb_hdr_slist *lp; 909 struct ccb_hdr *curelm; 910 int found; 911 union ccb *accb = ccb->cab.abort_ccb; 912 913 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 914 if (IS_FC(isp) && (accb->ccb_h.target_id != 915 ((fcparam *) isp->isp_param)->isp_loopid)) { 916 return (CAM_PATH_INVALID); 917 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 918 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 919 return (CAM_PATH_INVALID); 920 } 921 } 922 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 923 if (tptr == NULL) { 924 return (CAM_PATH_INVALID); 925 } 926 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 927 lp = &tptr->atios; 928 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 929 lp = &tptr->inots; 930 } else { 931 rls_lun_statep(isp, tptr); 932 return (CAM_UA_ABORT); 933 } 934 curelm = SLIST_FIRST(lp); 935 found = 0; 936 if (curelm == &accb->ccb_h) { 937 found = 1; 938 SLIST_REMOVE_HEAD(lp, sim_links.sle); 939 } else { 940 while(curelm != NULL) { 941 struct ccb_hdr *nextelm; 942 943 nextelm = SLIST_NEXT(curelm, sim_links.sle); 944 if (nextelm == &accb->ccb_h) { 945 found = 1; 946 SLIST_NEXT(curelm, sim_links.sle) = 947 SLIST_NEXT(nextelm, sim_links.sle); 948 break; 949 } 950 curelm = nextelm; 951 } 952 } 953 rls_lun_statep(isp, tptr); 954 if (found) { 955 accb->ccb_h.status = CAM_REQ_ABORTED; 956 return (CAM_REQ_CMP); 957 } 958 return(CAM_PATH_INVALID); 959 } 960 961 static cam_status 962 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb) 963 { 964 void *qe; 965 struct ccb_scsiio *cso = &ccb->csio; 966 u_int16_t *hp, save_handle; 967 u_int16_t nxti, optr; 968 u_int8_t local[QENTRY_LEN]; 969 970 971 if (isp_getrqentry(isp, &nxti, 
&optr, &qe)) { 972 xpt_print_path(ccb->ccb_h.path); 973 printf("Request Queue Overflow in isp_target_start_ctio\n"); 974 return (CAM_RESRC_UNAVAIL); 975 } 976 bzero(local, QENTRY_LEN); 977 978 /* 979 * We're either moving data or completing a command here. 980 */ 981 982 if (IS_FC(isp)) { 983 atio_private_data_t *atp; 984 ct2_entry_t *cto = (ct2_entry_t *) local; 985 986 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 987 cto->ct_header.rqs_entry_count = 1; 988 cto->ct_iid = cso->init_id; 989 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 990 cto->ct_lun = ccb->ccb_h.target_lun; 991 } 992 993 atp = isp_get_atpd(isp, cso->tag_id); 994 if (atp == NULL) { 995 panic("cannot find private data adjunct for tag %x", 996 cso->tag_id); 997 } 998 999 cto->ct_rxid = cso->tag_id; 1000 if (cso->dxfer_len == 0) { 1001 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1002 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1003 cto->ct_flags |= CT2_SENDSTATUS; 1004 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1005 cto->ct_resid = 1006 atp->orig_datalen - atp->bytes_xfered; 1007 } 1008 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1009 int m = min(cso->sense_len, MAXRESPLEN); 1010 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m); 1011 cto->rsp.m1.ct_senselen = m; 1012 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1013 } 1014 } else { 1015 cto->ct_flags |= CT2_FLAG_MODE0; 1016 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1017 cto->ct_flags |= CT2_DATA_IN; 1018 } else { 1019 cto->ct_flags |= CT2_DATA_OUT; 1020 } 1021 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1022 cto->ct_flags |= CT2_SENDSTATUS; 1023 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1024 cto->ct_resid = 1025 atp->orig_datalen - 1026 (atp->bytes_xfered + cso->dxfer_len); 1027 } else { 1028 atp->last_xframt = cso->dxfer_len; 1029 } 1030 /* 1031 * If we're sending data and status back together, 1032 * we can't also send back sense data as well. 
1033 */ 1034 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1035 } 1036 1037 if (cto->ct_flags & CT2_SENDSTATUS) { 1038 isp_prt(isp, ISP_LOGTDEBUG0, 1039 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1040 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1041 cso->dxfer_len, cto->ct_resid); 1042 cto->ct_flags |= CT2_CCINCR; 1043 } 1044 cto->ct_timeout = 10; 1045 hp = &cto->ct_syshandle; 1046 } else { 1047 ct_entry_t *cto = (ct_entry_t *) local; 1048 1049 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1050 cto->ct_header.rqs_entry_count = 1; 1051 cto->ct_iid = cso->init_id; 1052 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1053 cto->ct_tgt = ccb->ccb_h.target_id; 1054 cto->ct_lun = ccb->ccb_h.target_lun; 1055 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1056 if (AT_HAS_TAG(cso->tag_id)) { 1057 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id); 1058 cto->ct_flags |= CT_TQAE; 1059 } 1060 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1061 cto->ct_flags |= CT_NODISC; 1062 } 1063 if (cso->dxfer_len == 0) { 1064 cto->ct_flags |= CT_NO_DATA; 1065 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1066 cto->ct_flags |= CT_DATA_IN; 1067 } else { 1068 cto->ct_flags |= CT_DATA_OUT; 1069 } 1070 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1071 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1072 cto->ct_scsi_status = cso->scsi_status; 1073 cto->ct_resid = cso->resid; 1074 isp_prt(isp, ISP_LOGTDEBUG0, 1075 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1076 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1077 cso->tag_id); 1078 } 1079 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1080 cto->ct_timeout = 10; 1081 hp = &cto->ct_syshandle; 1082 } 1083 1084 if (isp_save_xs(isp, (XS_T *)ccb, hp)) { 1085 xpt_print_path(ccb->ccb_h.path); 1086 printf("No XFLIST pointers for isp_target_start_ctio\n"); 1087 return (CAM_RESRC_UNAVAIL); 1088 } 1089 1090 1091 /* 1092 * Call the dma setup routines for this entry (and any subsequent 1093 * CTIOs) if there's data to move, and then tell the f/w it's got 1094 * 
new things to play with. As with isp_start's usage of DMA setup, 1095 * any swizzling is done in the machine dependent layer. Because 1096 * of this, we put the request onto the queue area first in native 1097 * format. 1098 */ 1099 1100 save_handle = *hp; 1101 1102 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1103 case CMD_QUEUED: 1104 ISP_ADD_REQUEST(isp, nxti); 1105 return (CAM_REQ_INPROG); 1106 1107 case CMD_EAGAIN: 1108 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1109 isp_destroy_handle(isp, save_handle); 1110 return (CAM_RESRC_UNAVAIL); 1111 1112 default: 1113 isp_destroy_handle(isp, save_handle); 1114 return (XS_ERR(ccb)); 1115 } 1116 } 1117 1118 static void 1119 isp_refire_putback_atio(void *arg) 1120 { 1121 int s = splcam(); 1122 isp_target_putback_atio(arg); 1123 splx(s); 1124 } 1125 1126 static void 1127 isp_target_putback_atio(union ccb *ccb) 1128 { 1129 struct ispsoftc *isp; 1130 struct ccb_scsiio *cso; 1131 u_int16_t nxti, optr; 1132 void *qe; 1133 1134 isp = XS_ISP(ccb); 1135 1136 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1137 (void) timeout(isp_refire_putback_atio, ccb, 10); 1138 isp_prt(isp, ISP_LOGWARN, 1139 "isp_target_putback_atio: Request Queue Overflow"); 1140 return; 1141 } 1142 bzero(qe, QENTRY_LEN); 1143 cso = &ccb->csio; 1144 if (IS_FC(isp)) { 1145 at2_entry_t local, *at = &local; 1146 MEMZERO(at, sizeof (at2_entry_t)); 1147 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1148 at->at_header.rqs_entry_count = 1; 1149 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1150 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1151 } else { 1152 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1153 } 1154 at->at_status = CT_OK; 1155 at->at_rxid = cso->tag_id; 1156 isp_put_atio2(isp, at, qe); 1157 } else { 1158 at_entry_t local, *at = &local; 1159 MEMZERO(at, sizeof (at_entry_t)); 1160 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1161 at->at_header.rqs_entry_count = 1; 1162 at->at_iid = cso->init_id; 1163 at->at_iid |= 
		    XS_CHANNEL(ccb) << 7;
		at->at_tgt = cso->ccb_h.target_id;
		at->at_lun = cso->ccb_h.target_lun;
		at->at_status = CT_OK;
		/* Split the preserved tag_id back into tag value + handle */
		at->at_tag_val = AT_GET_TAG(cso->tag_id);
		at->at_handle = AT_GET_HANDLE(cso->tag_id);
		isp_put_atio(isp, at, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, nxti);
	isp_complete_ctio(ccb);
}

/*
 * Final completion of a target-mode CTIO CCB: normalize the status,
 * clear the SIM-queued flag, and, if this CCB's resource shortage was
 * what froze the SIM queue, arrange for the XPT layer to release it.
 */
static void
isp_complete_ctio(union ccb *ccb)
{
	struct ispsoftc *isp = XS_ISP(ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		/* Only release the simq if no other freeze reason remains */
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				isp_prt(isp, ISP_LOGWARN, "ctio->devqfrozen");
			}
		} else {
			isp_prt(isp, ISP_LOGWARN,
			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
		}
	}
	xpt_done(ccb);
}

/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status, bus, iswildcard;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
		    status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/*
	 * Find per-LUN target state; fall back to the wildcard LUN
	 * if no specific listener is registered.
	 */
	bus = GET_BUS_VAL(aep->at_iid);
	tptr = get_lun_statep(isp, bus, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
		iswildcard = 1;
	} else {
		iswildcard = 0;
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d on channel %d",
		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		rls_lun_statep(isp, tptr);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	if (iswildcard) {
		/* Wildcard listener: fill in the actual nexus */
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	if (status & QLTM_SVALID) {
		/* Copy firmware-suggested sense data, bounded by both sizes */
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = GET_IID_VAL(aep->at_iid);
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	/*
	 * Construct a tag 'id' based upon tag value (which may be 0..255)
	 * and the handle (which we have to preserve).
	 */
	AT_MAKE_TAGID(atiop->tag_id, aep);
	if (aep->at_flags & AT_TQAE) {
		atiop->tag_action = aep->at_tag_type;
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
	    "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}

/*
 * Handle an inbound ATIO2 (Fibre Channel accept-target-I/O) that the
 * generic code can't deal with: locate the listening LUN state, bind an
 * upper-layer ATIO CCB and an atio_private_data_t adjunct to it, and
 * complete the CCB so CAM sees the received CDB.  Returns 0 always.
 */
static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;
	atio_private_data_t *atp;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN,
		    "bogus atio (0x%x) leaked to platform", aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/* SCC-LUN firmware reports the LUN in a different field */
	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, 0, lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * What we'd like to know is whether or not we have a listener
		 * upstream that really hasn't configured yet. If we do, then
		 * we can give a more sensible reply here. If not, then we can
		 * reject this out of hand.
		 *
		 * Choices for what to send were
		 *
		 *	Not Ready, Unit Not Self-Configured Yet
		 *	(0x2,0x3e,0x00)
		 *
		 * for the former and
		 *
		 *	Illegal Request, Logical Unit Not Supported
		 *	(0x5,0x25,0x00)
		 *
		 * for the latter.
		 *
		 * We used to decide whether there was at least one listener
		 * based upon whether the black hole driver was configured.
		 * However, recent config(8) changes have made this hard to do
		 * at this time.
		 *
		 */
		u_int32_t ccode = SCSI_STATUS_BUSY;

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, ccode, 0);
		return (0);
	}

	atp = isp_get_atpd(isp, 0);
	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL || atp == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d", lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);

	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
		/* Default (wildcard) listener: fill in the actual nexus */
		atiop->ccb_h.target_id =
		    ((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	/*
	 * We don't get 'suggested' sense data as we do with SCSI cards.
	 */
	atiop->sense_len = 0;

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	/* Map the firmware task attribute onto a CAM tag message */
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
	case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ACAQ:	/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	if (atiop->tag_action != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}

	/* Initialize the per-command adjunct used to track data movement */
	atp->tag = atiop->tag_id;
	atp->orig_datalen = aep->at_datalen;
	atp->last_xframt = 0;
	atp->bytes_xfered = 0;

	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}

/*
 * Handle a completed CTIO or CTIO2 returned by the firmware: recover the
 * CCB from its handle, collect status/residual, and either complete the
 * CCB or put the ATIO back to the firmware on error.  Returns 0 always.
 */
static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;
	u_int16_t tval;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			/* Account the data moved by this CTIO in the adjunct */
			atio_private_data_t *atp =
			    isp_get_atpd(isp, ct->ct_rxid);
			if (atp == NULL) {
				panic("cannot find adjunct after I/O");
			}
			resid = ct->ct_resid;
			atp->bytes_xfered += (atp->last_xframt - resid);
			atp->last_xframt = 0;
			if (sentstatus) {
				atp->tag = 0;
			}
		}
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    resid, sentstatus?
		    "FIN" : "MID");
		tval = ct->ct_rxid;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
		 */
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if (ct->ct_status & QLTM_SVALID) {
			/* Firmware appended sense data after the CTIO */
			char *sp = (char *)ct;
			sp += CTIO_SENSE_OFFSET;
			ccb->csio.sense_len =
			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
			resid = ct->ct_resid;
		}
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
		    ct->ct_status, ct->ct_flags, resid,
		    sentstatus? "FIN" : "MID");
		tval = ct->ct_fwhandle;
	}
	ccb->csio.resid += resid;

	/*
	 * We're here either because intermediate data transfers are done
	 * and/or the final status CTIO (which may have joined with a
	 * Data Transfer) is done.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. Any DMA handles have already been
	 * freed.
	 */
	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
		return (0);
	}

	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
	    (sentstatus)? " FINAL " : "MIDTERM ", tval);

	if (!ok) {
		/* Error: replenish the firmware's ATIO instead of completing */
		isp_target_putback_atio(ccb);
	} else {
		isp_complete_ctio(ccb);

	}
	return (0);
}
#endif

/*
 * CAM asynchronous event callback.  On AC_LOST_DEVICE for parallel SCSI,
 * reset the target's negotiation goals back to (NVRAM-derived) defaults
 * and kick off a parameter update via isp_control().
 */
static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int tgt;

			tgt = xpt_path_target_id(path);
			ISP_LOCK(isp);
			sdp += cam_sim_bus(sim);
			nflags = sdp->isp_devparam[tgt].nvrm_flags;
#ifndef	ISP_TARGET_MODE
			nflags &= DPARM_SAFE_DFLT;
			if (isp->isp_loaded_fw) {
				nflags |= DPARM_NARROW | DPARM_ASYNC;
			}
#else
			nflags = DPARM_DEFAULT;
#endif
			/*
			 * Temporarily substitute the default goal flags,
			 * push them to the chip, then restore the old goals.
			 */
			oflags = sdp->isp_devparam[tgt].goal_flags;
			sdp->isp_devparam[tgt].goal_flags = nflags;
			sdp->isp_devparam[tgt].dev_update = 1;
			isp->isp_update |= (1 << cam_sim_bus(sim));
			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
			sdp->isp_devparam[tgt].goal_flags = oflags;
			ISP_UNLOCK(isp);
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}

/*
 * CAM poll entry point: check for and service a pending interrupt
 * with the ISP lock held.
 */
static void
isp_poll(struct cam_sim *sim)
{
	struct ispsoftc *isp = cam_sim_softc(sim);
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}

#if 0
/*
 * (Disabled) timed SIM-queue release: drop the SIMQFRZ_TIMED freeze
 * reason and release the simq if no other freeze reason remains.
 */
static void
isp_relsim(void *arg)
{
	struct ispsoftc *isp = arg;
	ISP_LOCK(isp);
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
		if (wasfrozen && isp->isp_osinfo.simqfrozen
		    == 0) {
			xpt_release_simq(isp->isp_sim, 1);
			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
		}
	}
	ISP_UNLOCK(isp);
}
#endif

/*
 * Per-command watchdog, scheduled via timeout() when a command is queued.
 * If the command is still outstanding, give it one grace period (marked
 * with XS_CMD_S_GRACE and a SYNC_ALL marker queued to the firmware); if
 * the grace period has already expired, abort the command, free its DMA
 * resources and handle, and complete it with CAM_CMD_TIMEOUT.
 */
static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t isr, sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);
		/* Service any pending interrupt - completion may be lurking */
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup for handle 0x%x", handle);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "watchdog timeout for handle 0x%x", handle);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			/*
			 * First expiry: grant a one-second grace period,
			 * re-arm the watchdog, and queue a SYNC_ALL marker
			 * to flush the firmware's request pipeline.
			 */
			u_int16_t nxti, optr;
			ispreq_t local, *mp= &local, *qe;

			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			isp_put_request(isp, mp, qe);
			ISP_ADD_REQUEST(isp, nxti);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	ISP_UNLOCK(isp);
}

/* Set once the FC kernel thread has completed its first loop scan */
static int isp_ktmature = 0;

/*
 * Fibre Channel state-watching kernel thread: waits for the FC loop to
 * come up (isp_fc_runstate), then unfreezes the SIM queue if it was
 * frozen for loop-down, and sleeps on kthread_cv until signalled again.
 */
static void
isp_kthread(void *arg)
{
	int wasfrozen;
	struct ispsoftc *isp = arg;

	mtx_lock(&isp->isp_lock);
	for (;;) {
		isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state");
		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
				/*
				 * Give up waiting if we have never seen
				 * loop up (first pass) - commands will be
				 * killed off below instead.
				 */
				if (FCPARAM(isp)->loop_seen_once == 0 ||
				    isp_ktmature == 0) {
					break;
				}
			}
			msleep(isp_kthread, &isp->isp_lock,
			    PRIBIO, "isp_fcthrd", hz);
		}
		/*
		 * Even if we didn't get good loop state we may be
		 * unfreezing the SIMQ so that we can kill off
		 * commands (if we've never seen loop before, e.g.)
		 */
		isp_ktmature = 1;
		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq");
			ISPLOCK_2_CAMLOCK(isp);
			xpt_release_simq(isp->isp_sim, 1);
			CAMLOCK_2_ISPLOCK(isp);
		}
		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
	}
}

/*
 * Main CAM action entry point: dispatch on the CCB function code.
 * Entered holding the CAM lock; the CAMLOCK_2_ISPLOCK/ISPLOCK_2_CAMLOCK
 * pairs below swap between the CAM lock and the driver's own lock, so
 * the exact ordering of those calls is significant.
 */
static void
isp_action(struct cam_sim *sim, union ccb *ccb)
{
	int bus, tgt, error;
	struct ispsoftc *isp;
	struct ccb_trans_settings *cts;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));

	isp = (struct ispsoftc *)cam_sim_softc(sim);
	ccb->ccb_h.sim_priv.entries[0].field = 0;
	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
	/* Lazily (re)initialize the chip on the first I/O after a reset */
	if (isp->isp_state != ISP_RUNSTATE &&
	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
		CAMLOCK_2_ISPLOCK(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_UNLOCK(isp);
			/*
			 * Lie. Say it was a selection timeout.
			 */
			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISPLOCK_2_CAMLOCK(isp);
	}
	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);


	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				/* Physical CDB pointers are not supported */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
		}
#ifdef	DIAGNOSTIC
		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		}
		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
			isp_prt(isp, ISP_LOGERR,
			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
			xpt_done(ccb);
			break;
		}
#endif
		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
		CAMLOCK_2_ISPLOCK(isp);
		error = isp_start((XS_T *) ccb);
		switch (error) {
		case CMD_QUEUED:
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
				/*
				 * Convert the CCB timeout (milliseconds,
				 * or CAM_TIME_DEFAULT meaning 60s) into
				 * clock ticks, padding by two seconds and
				 * clamping against 32-bit overflow.
				 */
				u_int64_t ticks = (u_int64_t) hz;
				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
					ticks = 60 * 1000 * ticks;
				else
					ticks = ccb->ccb_h.timeout * hz;
				ticks = ((ticks + 999) / 1000) + hz + hz;
				if (ticks >= 0x80000000) {
					isp_prt(isp, ISP_LOGERR,
					    "timeout overflow");
					ticks = 0x80000000;
				}
				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
				    (caddr_t)ccb, (int)ticks);
			} else {
				callout_handle_init(&ccb->ccb_h.timeout_ch);
			}
			ISPLOCK_2_CAMLOCK(isp);
			break;
		case CMD_RQLATER:
			/*
			 * This can only happen for Fibre Channel
			 */
			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
			if (FCPARAM(isp)->loop_seen_once == 0 && isp_ktmature) {
				/* Loop never came up - fail as sel timeout */
				ISPLOCK_2_CAMLOCK(isp);
				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
				xpt_done(ccb);
				break;
			}
			/* Wake the FC state thread, freeze for loop-down */
			cv_signal(&isp->isp_osinfo.kthread_cv);
			if (isp->isp_osinfo.simqfrozen == 0) {
				isp_prt(isp, ISP_LOGDEBUG2,
				    "RQLATER freeze simq");
				isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
				ISPLOCK_2_CAMLOCK(isp);
				xpt_freeze_simq(sim, 1);
			} else {
				ISPLOCK_2_CAMLOCK(isp);
			}
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
			break;
		case CMD_EAGAIN:
			/* Out of chip resources: freeze and requeue */
			if (isp->isp_osinfo.simqfrozen == 0) {
				xpt_freeze_simq(sim, 1);
				isp_prt(isp, ISP_LOGDEBUG2,
				    "EAGAIN freeze simq");
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			ISPLOCK_2_CAMLOCK(isp);
			xpt_done(ccb);
			break;
		case CMD_COMPLETE:
			isp_done((struct ccb_scsiio *) ccb);
			ISPLOCK_2_CAMLOCK(isp);
			break;
		default:
			isp_prt(isp, ISP_LOGERR,
			    "What's this? 0x%x at %d in file %s",
			    error, __LINE__, __FILE__);
			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
			xpt_done(ccb);
			ISPLOCK_2_CAMLOCK(isp);
		}
		break;

#ifdef	ISP_TARGET_MODE
	case XPT_EN_LUN:		/* Enable LUN as a target */
	{
		int iok;
		CAMLOCK_2_ISPLOCK(isp);
		/*
		 * isp_en_lun must run with interrupts-ok disabled;
		 * save and restore the flag around the call.
		 */
		iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_en_lun(isp, ccb);
		isp->isp_osinfo.intsok = iok;
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done(ccb);
		break;
	}
	case XPT_NOTIFY_ACK:		/* recycle notify ack */
	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
	{
		tstate_t *tptr =
		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			xpt_done(ccb);
			break;
		}
		ccb->ccb_h.sim_priv.entries[0].field = 0;
		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
		CAMLOCK_2_ISPLOCK(isp);
		/* Park the CCB on the appropriate resource list */
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
			SLIST_INSERT_HEAD(&tptr->atios,
			    &ccb->ccb_h, sim_links.sle);
		} else {
			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
			    sim_links.sle);
		}
		rls_lun_statep(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ISPLOCK_2_CAMLOCK(isp);
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		CAMLOCK_2_ISPLOCK(isp);
		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
			/* CTIO could not be queued: freeze and requeue */
			if (isp->isp_osinfo.simqfrozen == 0) {
				xpt_freeze_simq(sim, 1);
				xpt_print_path(ccb->ccb_h.path);
				isp_prt(isp, ISP_LOGINFO,
				    "XPT_CONT_TARGET_IO freeze simq");
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			ISPLOCK_2_CAMLOCK(isp);
			xpt_done(ccb);
		} else {
			ISPLOCK_2_CAMLOCK(isp);
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
		}
		break;
	}
#endif
	case XPT_RESET_DEV:		/* BDR the specified SCSI device */

		/* Encode bus number into the upper half of the target arg */
		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tgt = ccb->ccb_h.target_id;
		tgt |= (bus << 16);

		CAMLOCK_2_ISPLOCK(isp);
		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
		ISPLOCK_2_CAMLOCK(isp);
		if (error) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		union ccb *accb = ccb->cab.abort_ccb;
		CAMLOCK_2_ISPLOCK(isp);
		switch (accb->ccb_h.func_code) {
#ifdef	ISP_TARGET_MODE
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
#endif
		case XPT_SCSI_IO:
			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
			if (error) {
				ccb->ccb_h.status = CAM_UA_ABORT;
			} else {
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		default:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done(ccb);
		break;
	}
#ifdef	CAM_NEW_TRAN_CODE
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#else
#define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
#endif
	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
		cts = &ccb->cts;
		if (!IS_CURRENT_SETTINGS(cts)) {
			/* Only current (not user/NVRAM) settings can be set */
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
		tgt = cts->ccb_h.target_id;
		CAMLOCK_2_ISPLOCK(isp);
		if (IS_SCSI(isp)) {
#ifndef	CAM_NEW_TRAN_CODE
			sdparam *sdp = isp->isp_param;
			u_int16_t *dptr;

			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

			sdp += bus;
			/*
			 * We always update (internally) from goal_flags
			 * so any request to change settings just gets
			 * vectored to that location.
			 */
			dptr = &sdp->isp_devparam[tgt].goal_flags;

			/*
			 * Note that these operations affect the
			 * the goal flags (goal_flags)- not
			 * the current state flags. Then we mark
			 * things so that the next operation to
			 * this HBA will cause the update to occur.
			 */
			if (cts->valid & CCB_TRANS_DISC_VALID) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					*dptr |= DPARM_DISC;
				} else {
					*dptr &= ~DPARM_DISC;
				}
			}
			if (cts->valid & CCB_TRANS_TQ_VALID) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					*dptr |= DPARM_TQING;
				} else {
					*dptr &= ~DPARM_TQING;
				}
			}
			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_16_BIT:
					*dptr |= DPARM_WIDE;
					break;
				default:
					*dptr &= ~DPARM_WIDE;
				}
			}
			/*
			 * Any SYNC RATE of nonzero and SYNC_OFFSET
			 * of nonzero will cause us to go to the
			 * selected (from NVRAM) maximum value for
			 * this device. At a later point, we'll
			 * allow finer control.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
			    (cts->sync_offset > 0)) {
				*dptr |= DPARM_SYNC;
			} else {
				*dptr &= ~DPARM_SYNC;
			}
			*dptr |= DPARM_SAFE_DFLT;
#else
			struct ccb_trans_settings_scsi *scsi =
			    &cts->proto_specific.scsi;
			struct ccb_trans_settings_spi *spi =
			    &cts->xport_specific.spi;
			sdparam *sdp = isp->isp_param;
			u_int16_t *dptr;

			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
			sdp += bus;
			/*
			 * We always update (internally) from goal_flags
			 * so any request to change settings just gets
			 * vectored to that location.
			 */
			dptr = &sdp->isp_devparam[tgt].goal_flags;

			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					*dptr |= DPARM_DISC;
				else
					*dptr &= ~DPARM_DISC;
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					*dptr |= DPARM_TQING;
				else
					*dptr &= ~DPARM_TQING;
			}

			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
					*dptr |= DPARM_WIDE;
				else
					*dptr &= ~DPARM_WIDE;
			}

			/*
			 * XXX: FIX ME
			 */
			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
			    (spi->sync_period && spi->sync_offset)) {
				*dptr |= DPARM_SYNC;
				/*
				 * XXX: CHECK FOR LEGALITY
				 */
				sdp->isp_devparam[tgt].goal_period =
				    spi->sync_period;
				sdp->isp_devparam[tgt].goal_offset =
				    spi->sync_offset;
			} else {
				*dptr &= ~DPARM_SYNC;
			}
#endif
			isp_prt(isp, ISP_LOGDEBUG0,
			    "SET bus %d targ %d to flags %x off %x per %x",
			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
			    sdp->isp_devparam[tgt].goal_offset,
			    sdp->isp_devparam[tgt].goal_period);
			/* Mark for a deferred update on the next HBA op */
			sdp->isp_devparam[tgt].dev_update =
			    1;
			isp->isp_update |= (1 << bus);
		}
		ISPLOCK_2_CAMLOCK(isp);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
		cts = &ccb->cts;
		tgt = cts->ccb_h.target_id;
		CAMLOCK_2_ISPLOCK(isp);
		if (IS_FC(isp)) {
#ifndef	CAM_NEW_TRAN_CODE
			/*
			 * a lot of normal SCSI things don't make sense.
			 */
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			/*
			 * How do you measure the width of a high
			 * speed serial bus? Well, in bytes.
			 *
			 * Offset and period make no sense, though, so we set
			 * (above) a 'base' transfer speed to be gigabit.
			 */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
#else
			fcparam *fcp = isp->isp_param;
			struct ccb_trans_settings_fc *fc =
			    &cts->xport_specific.fc;

			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_FC;
			cts->transport_version = 0;

			/* Report link speed from the negotiated gigabit rate */
			fc->valid = CTS_FC_VALID_SPEED;
			if (fcp->isp_gbspeed == 2)
				fc->bitrate = 200000;
			else
				fc->bitrate = 100000;
			if (tgt > 0 && tgt < MAX_FC_TARG) {
				/* Report WWNs/port id from the port database */
				struct lportdb *lp = &fcp->portdb[tgt];
				fc->wwnn = lp->node_wwn;
				fc->wwpn = lp->port_wwn;
				fc->port = lp->portid;
				fc->valid |= CTS_FC_VALID_WWNN |
				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
			}
#endif
		} else {
#ifdef	CAM_NEW_TRAN_CODE
			struct ccb_trans_settings_scsi *scsi =
			    &cts->proto_specific.scsi;
			struct ccb_trans_settings_spi *spi =
			    &cts->xport_specific.spi;
#endif
			sdparam *sdp = isp->isp_param;
			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
			u_int16_t dval, pval, oval;

			sdp += bus;

			if (IS_CURRENT_SETTINGS(cts)) {
				/* Refresh active parameters from the chip */
				sdp->isp_devparam[tgt].dev_refresh = 1;
				isp->isp_update |= (1 << bus);
				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
				    NULL);
				dval = sdp->isp_devparam[tgt].actv_flags;
				oval = sdp->isp_devparam[tgt].actv_offset;
				pval = sdp->isp_devparam[tgt].actv_period;
			} else {
				/* User settings come from NVRAM values */
				dval = sdp->isp_devparam[tgt].nvrm_flags;
				oval = sdp->isp_devparam[tgt].nvrm_offset;
				pval = sdp->isp_devparam[tgt].nvrm_period;
			}

#ifndef	CAM_NEW_TRAN_CODE
			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

			if (dval & DPARM_DISC) {
				cts->flags |= CCB_TRANS_DISC_ENB;
			}
			if (dval & DPARM_TQING) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			if (dval & DPARM_WIDE) {
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			} else {
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			}
			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;

			if ((dval & DPARM_SYNC) && oval != 0) {
				cts->sync_period = pval;
				cts->sync_offset = oval;
				cts->valid |=
				    CCB_TRANS_SYNC_RATE_VALID |
				    CCB_TRANS_SYNC_OFFSET_VALID;
			}
#else
			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;

			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
			if (dval & DPARM_DISC) {
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			}
			if (dval & DPARM_TQING) {
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
			}
			if ((dval & DPARM_SYNC) && oval && pval) {
				spi->sync_offset = oval;
				spi->sync_period = pval;
				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
			}
			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
			if (dval & DPARM_WIDE) {
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			} else {
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			}
			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				scsi->valid = CTS_SCSI_VALID_TQ;
				spi->valid |= CTS_SPI_VALID_DISC;
			} else {
				scsi->valid = 0;
			}
#endif
			isp_prt(isp, ISP_LOGDEBUG0,
			    "GET %s bus %d targ %d to flags %x off %x per %x",
			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
			    bus, tgt, dval, oval, pval);
		}
		ISPLOCK_2_CAMLOCK(isp);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t secs_per_cylinder;
		u_int32_t size_mb;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
		/* Conventional fake geometry: bigger disks get more heads */
		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
		if (size_mb > 1024) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified bus */
		bus = cam_sim_bus(sim);
		CAMLOCK_2_ISPLOCK(isp);
		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
		ISPLOCK_2_CAMLOCK(isp);
		if (error)
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		else {
			/* Notify CAM on the path matching the reset bus */
			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
			else if (isp->isp_path != NULL)
				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
#ifdef	ISP_TARGET_MODE
		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
#else 2349 cpi->target_sprt = 0; 2350 #endif 2351 cpi->hba_eng_cnt = 0; 2352 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2353 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2354 cpi->bus_id = cam_sim_bus(sim); 2355 if (IS_FC(isp)) { 2356 cpi->hba_misc = PIM_NOBUSRESET; 2357 /* 2358 * Because our loop ID can shift from time to time, 2359 * make our initiator ID out of range of our bus. 2360 */ 2361 cpi->initiator_id = cpi->max_target + 1; 2362 2363 /* 2364 * Set base transfer capabilities for Fibre Channel. 2365 * Technically not correct because we don't know 2366 * what media we're running on top of- but we'll 2367 * look good if we always say 100MB/s. 2368 */ 2369 if (FCPARAM(isp)->isp_gbspeed == 2) 2370 cpi->base_transfer_speed = 200000; 2371 else 2372 cpi->base_transfer_speed = 100000; 2373 cpi->hba_inquiry = PI_TAG_ABLE; 2374 #ifdef CAM_NEW_TRAN_CODE 2375 cpi->transport = XPORT_FC; 2376 cpi->transport_version = 0; /* WHAT'S THIS FOR? */ 2377 #endif 2378 } else { 2379 sdparam *sdp = isp->isp_param; 2380 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2381 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2382 cpi->hba_misc = 0; 2383 cpi->initiator_id = sdp->isp_initiator_id; 2384 cpi->base_transfer_speed = 3300; 2385 #ifdef CAM_NEW_TRAN_CODE 2386 cpi->transport = XPORT_SPI; 2387 cpi->transport_version = 2; /* WHAT'S THIS FOR? 
*/ 2388 #endif 2389 } 2390 #ifdef CAM_NEW_TRAN_CODE 2391 cpi->protocol = PROTO_SCSI; 2392 cpi->protocol_version = SCSI_REV_2; 2393 #endif 2394 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2395 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2396 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2397 cpi->unit_number = cam_sim_unit(sim); 2398 cpi->ccb_h.status = CAM_REQ_CMP; 2399 xpt_done(ccb); 2400 break; 2401 } 2402 default: 2403 ccb->ccb_h.status = CAM_REQ_INVALID; 2404 xpt_done(ccb); 2405 break; 2406 } 2407 } 2408 2409 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 2410 void 2411 isp_done(struct ccb_scsiio *sccb) 2412 { 2413 struct ispsoftc *isp = XS_ISP(sccb); 2414 2415 if (XS_NOERR(sccb)) 2416 XS_SETERR(sccb, CAM_REQ_CMP); 2417 2418 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2419 (sccb->scsi_status != SCSI_STATUS_OK)) { 2420 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2421 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 2422 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2423 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2424 } else { 2425 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2426 } 2427 } 2428 2429 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2430 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2431 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2432 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2433 xpt_freeze_devq(sccb->ccb_h.path, 1); 2434 if (sccb->scsi_status != SCSI_STATUS_OK) 2435 isp_prt(isp, ISP_LOGDEBUG2, 2436 "freeze devq %d.%d %x %x", 2437 sccb->ccb_h.target_id, 2438 sccb->ccb_h.target_lun, sccb->ccb_h.status, 2439 sccb->scsi_status); 2440 } 2441 } 2442 2443 /* 2444 * If we were frozen waiting resources, clear that we were frozen 2445 * waiting for resources. If we are no longer frozen, and the devq 2446 * isn't frozen, mark the completing CCB to have the XPT layer 2447 * release the simq. 
2448 */ 2449 if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) { 2450 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE; 2451 if (isp->isp_osinfo.simqfrozen == 0) { 2452 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2453 isp_prt(isp, ISP_LOGDEBUG2, 2454 "isp_done->relsimq"); 2455 sccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2456 } else { 2457 isp_prt(isp, ISP_LOGDEBUG2, 2458 "isp_done->devq frozen"); 2459 } 2460 } else { 2461 isp_prt(isp, ISP_LOGDEBUG2, 2462 "isp_done -> simqfrozen = %x", 2463 isp->isp_osinfo.simqfrozen); 2464 } 2465 } 2466 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 2467 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2468 xpt_print_path(sccb->ccb_h.path); 2469 isp_prt(isp, ISP_LOGINFO, 2470 "cam completion status 0x%x", sccb->ccb_h.status); 2471 } 2472 2473 XS_CMD_S_DONE(sccb); 2474 if (XS_CMD_WDOG_P(sccb) == 0) { 2475 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 2476 if (XS_CMD_GRACE_P(sccb)) { 2477 isp_prt(isp, ISP_LOGDEBUG2, 2478 "finished command on borrowed time"); 2479 } 2480 XS_CMD_S_CLEAR(sccb); 2481 ISPLOCK_2_CAMLOCK(isp); 2482 xpt_done((union ccb *) sccb); 2483 CAMLOCK_2_ISPLOCK(isp); 2484 } 2485 } 2486 2487 int 2488 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg) 2489 { 2490 int bus, rv = 0; 2491 switch (cmd) { 2492 case ISPASYNC_NEW_TGT_PARAMS: 2493 { 2494 #ifdef CAM_NEW_TRAN_CODE 2495 struct ccb_trans_settings_scsi *scsi; 2496 struct ccb_trans_settings_spi *spi; 2497 #endif 2498 int flags, tgt; 2499 sdparam *sdp = isp->isp_param; 2500 struct ccb_trans_settings cts; 2501 struct cam_path *tmppath; 2502 2503 bzero(&cts, sizeof (struct ccb_trans_settings)); 2504 2505 tgt = *((int *)arg); 2506 bus = (tgt >> 16) & 0xffff; 2507 tgt &= 0xffff; 2508 sdp += bus; 2509 ISPLOCK_2_CAMLOCK(isp); 2510 if (xpt_create_path(&tmppath, NULL, 2511 cam_sim_path(bus? 
isp->isp_sim2 : isp->isp_sim), 2512 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2513 CAMLOCK_2_ISPLOCK(isp); 2514 isp_prt(isp, ISP_LOGWARN, 2515 "isp_async cannot make temp path for %d.%d", 2516 tgt, bus); 2517 rv = -1; 2518 break; 2519 } 2520 CAMLOCK_2_ISPLOCK(isp); 2521 flags = sdp->isp_devparam[tgt].actv_flags; 2522 #ifdef CAM_NEW_TRAN_CODE 2523 cts.type = CTS_TYPE_CURRENT_SETTINGS; 2524 cts.protocol = PROTO_SCSI; 2525 cts.transport = XPORT_SPI; 2526 2527 scsi = &cts.proto_specific.scsi; 2528 spi = &cts.xport_specific.spi; 2529 2530 if (flags & DPARM_TQING) { 2531 scsi->valid |= CTS_SCSI_VALID_TQ; 2532 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2533 spi->flags |= CTS_SPI_FLAGS_TAG_ENB; 2534 } 2535 2536 if (flags & DPARM_DISC) { 2537 spi->valid |= CTS_SPI_VALID_DISC; 2538 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2539 } 2540 spi->flags |= CTS_SPI_VALID_BUS_WIDTH; 2541 if (flags & DPARM_WIDE) { 2542 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2543 } else { 2544 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2545 } 2546 if (flags & DPARM_SYNC) { 2547 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2548 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2549 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 2550 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 2551 } 2552 #else 2553 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 2554 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2555 if (flags & DPARM_DISC) { 2556 cts.flags |= CCB_TRANS_DISC_ENB; 2557 } 2558 if (flags & DPARM_TQING) { 2559 cts.flags |= CCB_TRANS_TAG_ENB; 2560 } 2561 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 2562 cts.bus_width = (flags & DPARM_WIDE)? 
2563 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; 2564 cts.sync_period = sdp->isp_devparam[tgt].actv_period; 2565 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; 2566 if (flags & DPARM_SYNC) { 2567 cts.valid |= 2568 CCB_TRANS_SYNC_RATE_VALID | 2569 CCB_TRANS_SYNC_OFFSET_VALID; 2570 } 2571 #endif 2572 isp_prt(isp, ISP_LOGDEBUG2, 2573 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 2574 bus, tgt, sdp->isp_devparam[tgt].actv_period, 2575 sdp->isp_devparam[tgt].actv_offset, flags); 2576 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 2577 ISPLOCK_2_CAMLOCK(isp); 2578 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 2579 xpt_free_path(tmppath); 2580 CAMLOCK_2_ISPLOCK(isp); 2581 break; 2582 } 2583 case ISPASYNC_BUS_RESET: 2584 bus = *((int *)arg); 2585 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 2586 bus); 2587 if (bus > 0 && isp->isp_path2) { 2588 ISPLOCK_2_CAMLOCK(isp); 2589 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2590 CAMLOCK_2_ISPLOCK(isp); 2591 } else if (isp->isp_path) { 2592 ISPLOCK_2_CAMLOCK(isp); 2593 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2594 CAMLOCK_2_ISPLOCK(isp); 2595 } 2596 break; 2597 case ISPASYNC_LIP: 2598 if (isp->isp_path) { 2599 if (isp->isp_osinfo.simqfrozen == 0) { 2600 isp_prt(isp, ISP_LOGDEBUG0, "LIP freeze simq"); 2601 ISPLOCK_2_CAMLOCK(isp); 2602 xpt_freeze_simq(isp->isp_sim, 1); 2603 CAMLOCK_2_ISPLOCK(isp); 2604 } 2605 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 2606 } 2607 isp_prt(isp, ISP_LOGINFO, "LIP Received"); 2608 break; 2609 case ISPASYNC_LOOP_RESET: 2610 if (isp->isp_path) { 2611 if (isp->isp_osinfo.simqfrozen == 0) { 2612 isp_prt(isp, ISP_LOGDEBUG0, 2613 "Loop Reset freeze simq"); 2614 ISPLOCK_2_CAMLOCK(isp); 2615 xpt_freeze_simq(isp->isp_sim, 1); 2616 CAMLOCK_2_ISPLOCK(isp); 2617 } 2618 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 2619 } 2620 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received"); 2621 break; 2622 case ISPASYNC_LOOP_DOWN: 2623 if (isp->isp_path) { 2624 if 
 (isp->isp_osinfo.simqfrozen == 0) {
				isp_prt(isp, ISP_LOGDEBUG0,
				    "loop down freeze simq");
				ISPLOCK_2_CAMLOCK(isp);
				xpt_freeze_simq(isp->isp_sim, 1);
				CAMLOCK_2_ISPLOCK(isp);
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
		}
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		/*
		 * Now we just note that Loop has come up. We don't
		 * actually do anything because we're waiting for a
		 * Change Notify before activating the FC cleanup
		 * thread to look at the state of the loop again.
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
	{
		/* A device arrived on or departed from the loop. */
		struct cam_path *tmppath;
		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
		static const char *roles[4] = {
		    "(none)", "Target", "Initiator", "Target/Initiator"
		};
		fcparam *fcp = isp->isp_param;
		int tgt = *((int *) arg);
		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
		struct lportdb *lp = &fcp->portdb[tgt];

		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
		    roles[lp->roles & 0x3],
		    (lp->valid)? "Arrived" : "Departed",
		    (u_int32_t) (lp->port_wwn >> 32),
		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
		    (u_int32_t) (lp->node_wwn >> 32),
		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));

		ISPLOCK_2_CAMLOCK(isp);
		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			CAMLOCK_2_ISPLOCK(isp);
			break;
		}
		/*
		 * Policy: only announce targets.
		 */
		if (lp->roles & is_tgt_mask) {
			if (lp->valid) {
				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
			} else {
				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
			}
		}
		xpt_free_path(tmppath);
		CAMLOCK_2_ISPLOCK(isp);
		break;
	}
	case ISPASYNC_CHANGE_NOTIFY:
		if (arg == ISPASYNC_CHANGE_PDB) {
			isp_prt(isp, ISP_LOGINFO,
			    "Port Database Changed");
		} else if (arg == ISPASYNC_CHANGE_SNS) {
			isp_prt(isp, ISP_LOGINFO,
			    "Name Server Database Changed");
		}
		/* Wake the kthread so it re-evaluates loop state. */
		cv_signal(&isp->isp_osinfo.kthread_cv);
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		/* A fabric (name server) device response to fold into portdb. */
		int target, lrange;
		struct lportdb *lp = NULL;
		char *pt;
		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwpn, wwnn;
		fcparam *fcp = isp->isp_param;

		/* Assemble 24-bit port id and 64-bit WWNs from the byte arrays. */
		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));

		wwpn =
		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
		    (((u_int64_t)resp->snscb_portname[6]) << 8) |
		    (((u_int64_t)resp->snscb_portname[7]));

		wwnn =
		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
		    (((u_int64_t)resp->snscb_nodename[6]) << 8) |
		    (((u_int64_t)resp->snscb_nodename[7]));
		if (portid == 0 || wwpn == 0) {
			break;
		}

		switch (resp->snscb_port_type) {
		case 1:
			pt = " N_Port";
			break;
		case 2:
			pt = " NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = " Nx_Port";
			break;
		case 0x81:
			pt = " F_port";
			break;
		case 0x82:
			pt = " FL_Port";
			break;
		case 0x84:
			pt = " E_port";
			break;
		default:
			pt = "?";
			break;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
		/*
		 * We're only interested in SCSI_FCP types (for now)
		 */
		if ((resp->snscb_fc4_types[2] & 1) == 0) {
			break;
		}
		if (fcp->isp_topo != TOPO_F_PORT)
			lrange = FC_SNS_ID+1;
		else
			lrange = 0;
		/*
		 * Is it already in our list?
		 */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < MAX_FC_TARG) {
			break;
		}
		/* Not known yet: find a free portdb slot for it. */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == MAX_FC_TARG) {
			isp_prt(isp, ISP_LOGWARN,
			    "no more space for fabric devices");
			break;
		}
		lp->node_wwn = wwnn;
		lp->port_wwn = wwpn;
		lp->portid = portid;
		lp->fabric_dev = 1;
		break;
	}
#ifdef	ISP_TARGET_MODE
	case ISPASYNC_TARGET_MESSAGE:
	{
		tmd_msg_t *mp = arg;
		isp_prt(isp, ISP_LOGALL,
		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
		    mp->nt_msg[0]);
		break;
	}
	case ISPASYNC_TARGET_EVENT:
	{
		tmd_event_t *ep = arg;
		isp_prt(isp, ISP_LOGALL,
		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
		break;
	}
	case ISPASYNC_TARGET_ACTION:
		/* Dispatch on the response queue entry type. */
		switch (((isphdr_t *)arg)->rqs_entry_type) {
		default:
			isp_prt(isp, ISP_LOGWARN,
			    "event 0x%x for unhandled target action",
			    ((isphdr_t *)arg)->rqs_entry_type);
			break;
		case RQSTYPE_ATIO:
			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
			break;
		case RQSTYPE_ATIO2:
			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
			break;
		case RQSTYPE_CTIO2:
		case RQSTYPE_CTIO:
			rv = isp_handle_platform_ctio(isp, arg);
			break;
		case RQSTYPE_ENABLE_LUN:
		case RQSTYPE_MODIFY_LUN:
			if (IS_DUALBUS(isp)) {
				bus =
				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
			} else {
				bus = 0;
			}
			isp_cv_signal_rqe(isp, bus,
			    ((lun_entry_t *)arg)->le_status);
			break;
		}
		break;
#endif
	case ISPASYNC_FW_CRASH:
	{
		u_int16_t mbox1, mbox6;
		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			mbox6 = ISP_READ(isp, OUTMAILBOX6);
		} else {
			mbox6 = 0;
		}
		isp_prt(isp, ISP_LOGERR,
		    "Internal Firmware on bus %d Error @ RISC Address 0x%x",
		    mbox6, mbox1);
		/* Restart the adapter after a firmware crash. */
		isp_reinit(isp);
		break;
	}
	case ISPASYNC_UNHANDLED_RESPONSE:
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
		break;
	}
	return (rv);
}


/*
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	/* Reset the RISC processor and mask off interrupts. */
	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
	DISABLE_INTS(isp);
}

/*
 * Driver printf: emits "<unit>: <message>\n" to the console, filtered
 * by the per-instance debug level unless level is ISP_LOGALL.
 */
void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", device_get_nameunit(isp->isp_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}