/* $FreeBSD$ */
/*
 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
 *
 * Copyright (c) 1997, 1998, 1999, 2000 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/isp/isp_freebsd.h>
#include <machine/stdarg.h>	/* for use by isp_prt below */

static void isp_intr_enable(void *);
static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
static void isp_poll(struct cam_sim *);
static void isp_relsim(void *);
static timeout_t isp_watchdog;
static void isp_action(struct cam_sim *, union ccb *);


/* Singly linked list of all attached isp softcs (linked via isp_osinfo.next). */
static struct ispsoftc *isplist = NULL;

/*
 * isp_attach:
 *	Attach an adapter to CAM: allocate a device queue and a SIM,
 *	register the bus, wire up an async (lost-device) callback, and
 *	do it all a second time for the secondary bus of dual-bus cards.
 *	Finally enable interrupts (if we have a role) and append the
 *	softc to the global isplist.
 *
 *	On any failure the partially constructed CAM state is torn back
 *	down and we simply return; no error is propagated to the caller.
 */
void
isp_attach(struct ispsoftc *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */

	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    isp->isp_unit, 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return;
	}

	/*
	 * Defer turning on interrupts until the boot-time interrupt
	 * configuration hook fires (see isp_intr_enable below).
	 */
	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
	isp->isp_osinfo.ehook.ich_arg = isp;
	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "could not establish interrupt enable hook");
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		return;
	}

	/* Ask CAM to tell us (via isp_cam_async) about lost devices. */
	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	isp->isp_sim = sim;
	isp->isp_path = path;

	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_DUALBUS(isp)) {
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    isp->isp_unit, 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			return;
		}

		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}
	if (isp->isp_role != ISP_ROLE_NONE) {
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	/* Append ourselves to the tail of the global softc list. */
	if (isplist == NULL) {
		isplist = isp;
	} else {
		struct ispsoftc *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}
}

/*
 * isp_intr_enable:
 *	config_intrhook callback, run once interrupts may be delivered:
 *	enable chip interrupts (if this adapter has a role) and remove
 *	the hook so the boot can proceed.
 */
static void
isp_intr_enable(void *arg)
{
	struct ispsoftc *isp = arg;
	if (isp->isp_role != ISP_ROLE_NONE) {
		ENABLE_INTS(isp);
		isp->isp_osinfo.intsok = 1;
	}
	/* Release our hook so that the boot can continue. */
	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
}

/*
 * Put the target mode functions here, because some are inlines
 */

#ifdef ISP_TARGET_MODE

static __inline int is_lun_enabled(struct ispsoftc *, lun_id_t);
static __inline int are_any_luns_enabled(struct ispsoftc *);
static __inline tstate_t *get_lun_statep(struct ispsoftc *, lun_id_t);
static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
static __inline int isp_psema_sig_rqe(struct ispsoftc *);
static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
static __inline void isp_vsema_rqe(struct ispsoftc *);
static cam_status
create_lun_state(struct ispsoftc *, struct cam_path *, tstate_t **);
static void destroy_lun_state(struct ispsoftc *, tstate_t *);
static void isp_en_lun(struct ispsoftc *, union ccb *);
static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
static cam_status isp_target_putback_atio(struct ispsoftc *, union ccb *);
static timeout_t isp_refire_putback_atio;

static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
static int isp_handle_platform_ctio(struct ispsoftc *, void *);
static void isp_handle_platform_ctio_part2(struct ispsoftc *, union ccb *);

/*
 * is_lun_enabled:
 *	Return 1 if a tstate for 'lun' is present on its lun_hash chain,
 *	else 0.  Takes and drops ISP_LOCK around the hash walk.
 */
static __inline int
is_lun_enabled(struct ispsoftc *isp, lun_id_t lun)
{
	tstate_t *tptr;
	ISP_LOCK(isp);
	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
		ISP_UNLOCK(isp);
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun) {
			ISP_UNLOCK(isp);
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	ISP_UNLOCK(isp);
	return (0);
}

/*
 * are_any_luns_enabled:
 *	Return 1 if any lun_hash bucket is non-empty.  Note: walks the
 *	hash without taking ISP_LOCK.
 */
static __inline int
are_any_luns_enabled(struct ispsoftc *isp)
{
	int i;
	for (i = 0; i < LUN_HASH_SIZE; i++) {
		if (isp->isp_osinfo.lun_hash[i]) {
			return (1);
		}
	}
	return (0);
}

/*
 * get_lun_statep:
 *	Look up the tstate for 'lun' and bump its hold count.  The
 *	wildcard lun maps to the default tstate (tsdflt).  Returns NULL
 *	if no matching tstate exists.  Callers pair this with
 *	rls_lun_statep to drop the hold.
 */
static __inline tstate_t *
get_lun_statep(struct ispsoftc *isp, lun_id_t lun)
{
	tstate_t *tptr;

	ISP_LOCK(isp);
	if (lun == CAM_LUN_WILDCARD) {
		tptr = &isp->isp_osinfo.tsdflt;
		tptr->hold++;
		ISP_UNLOCK(isp);
		return (tptr);
	} else {
		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)];
	}
	if (tptr == NULL) {
		ISP_UNLOCK(isp);
		return (NULL);
	}

	do {
		if (tptr->lun == lun) {
			tptr->hold++;
			ISP_UNLOCK(isp);
			return (tptr);
		}
	} while ((tptr = tptr->next) != NULL);
	/* tptr is NULL here if the chain was exhausted without a match. */
	ISP_UNLOCK(isp);
	return (tptr);
}

/*
 * rls_lun_statep:
 *	Drop one hold reference on a tstate (no lock taken here).
 */
static __inline void
rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
{
	if (tptr->hold)
		tptr->hold--;
}

/*
 * isp_psema_sig_rqe:
 *	"P" (acquire) the target-mode request semaphore, sleeping
 *	interruptibly while another thread holds TM_BUSY.  Returns 0 on
 *	acquisition, -1 if the sleep was interrupted by a signal.
 *
 *	NOTE(review): TM_BUSY is only set inside the while loop, so if
 *	the flag is clear on entry the loop body never runs and the
 *	semaphore is never actually marked busy — this looks like the
 *	"|= TM_BUSY" belongs after the loop; verify against other
 *	versions of this driver.
 */
static __inline int
isp_psema_sig_rqe(struct ispsoftc *isp)
{
	ISP_LOCK(isp);
	while (isp->isp_osinfo.tmflags & TM_BUSY) {
		isp->isp_osinfo.tmflags |= TM_WANTED;
		if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) {
			ISP_UNLOCK(isp);
			return (-1);
		}
		isp->isp_osinfo.tmflags |= TM_BUSY;
	}
	ISP_UNLOCK(isp);
	return (0);
}

/*
 * isp_cv_wait_timed_rqe:
 *	Sleep waiting for isp_cv_signal_rqe to post a firmware response
 *	status, for at most 'timo' ticks.  Returns 0 if woken, -1 on
 *	timeout.
 */
static __inline int
isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo)
{
	ISP_LOCK(isp);
	if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) {
		ISP_UNLOCK(isp);
		return (-1);
	}
	ISP_UNLOCK(isp);
	return (0);
}

/*
 * isp_cv_signal_rqe:
 *	Record a firmware response status and wake anyone sleeping in
 *	isp_cv_wait_timed_rqe.
 */
static __inline void
isp_cv_signal_rqe(struct ispsoftc *isp, int status)
{
	isp->isp_osinfo.rstatus = status;
	wakeup(&isp->isp_osinfo.rstatus);
}

/*
 * isp_vsema_rqe:
 *	"V" (release) the target-mode request semaphore, waking any
 *	thread that set TM_WANTED while waiting.
 */
static __inline void
isp_vsema_rqe(struct ispsoftc *isp)
{
	ISP_LOCK(isp);
	if (isp->isp_osinfo.tmflags & TM_WANTED) {
		isp->isp_osinfo.tmflags &= ~TM_WANTED;
		wakeup(&isp->isp_osinfo.tmflags);
	}
	isp->isp_osinfo.tmflags &= ~TM_BUSY;
	ISP_UNLOCK(isp);
}

/*
 * create_lun_state:
 *	Allocate a tstate for the lun named by 'path', create its owner
 *	path, and insert it at the tail of its lun_hash chain.  The new
 *	tstate starts with hold == 1.  On success *rslt points at it and
 *	CAM_REQ_CMP is returned; otherwise a CAM error status.
 */
static cam_status
create_lun_state(struct ispsoftc *isp, struct cam_path *path, tstate_t **rslt)
{
	cam_status status;
	lun_id_t lun;
	tstate_t *tptr, *new;

	lun = xpt_path_lun_id(path);
	/*
	 * NOTE(review): if lun_id_t is an unsigned type this test can
	 * never be true — verify the typedef.
	 */
	if (lun < 0) {
		return (CAM_LUN_INVALID);
	}
	if (is_lun_enabled(isp, lun)) {
		return (CAM_LUN_ALRDY_ENA);
	}
	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (new == NULL) {
		return (CAM_RESRC_UNAVAIL);
	}

	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
	    xpt_path_target_id(path), xpt_path_lun_id(path));
	if (status != CAM_REQ_CMP) {
		free(new, M_DEVBUF);
		return (status);
	}
	new->lun = lun;
	SLIST_INIT(&new->atios);
	SLIST_INIT(&new->inots);
	new->hold = 1;

	ISP_LOCK(isp);
	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)] = new;
	} else {
		while (tptr->next)
			tptr = tptr->next;
		tptr->next = new;
	}
	ISP_UNLOCK(isp);
	*rslt = new;
	return (CAM_REQ_CMP);
}

/*
 * destroy_lun_state:
 *	Unlink a tstate from its lun_hash chain and free it, but only
 *	if nobody still holds a reference (tptr->hold != 0 makes this a
 *	no-op).  Silently returns if the tstate is not found on a chain.
 */
static __inline void
destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
{
	tstate_t *lw, *pw;

	ISP_LOCK(isp);
	if (tptr->hold) {
		ISP_UNLOCK(isp);
		return;
	}
	pw = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)];
	if (pw == NULL) {
		ISP_UNLOCK(isp);
		return;
	} else if (pw->lun == tptr->lun) {
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)] = pw->next;
	} else {
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			ISP_UNLOCK(isp);
			return;
		}
	}
	free(tptr, M_DEVBUF);
	ISP_UNLOCK(isp);
}

/*
 * isp_en_lun:
 *	Handle an XPT_EN_LUN ccb: enable or disable target-mode for a
 *	lun.  The target/lun wildcard pair toggles target mode as a
 *	whole (via ISPCTL_TOGGLE_TMODE) without enabling a specific lun.
 *	For a specific lun, a tstate is created (or looked up), the
 *	request semaphore is taken, and ENABLE LUN / MODIFY LUN commands
 *	are issued to the firmware, waiting for completion status.
 *	The result is reported via ccb->ccb_h.status.
 *
 *	NOTE(review): 'frozen' can only be set inside the "#if 0"
 *	drain block below, so the xpt_release_simq() calls guarded by
 *	it are currently dead code.
 */
static void
isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
{
	const char *lfmt = "Lun now %sabled for target mode\n";
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	u_int16_t rstat;
	int bus, frozen = 0;
	lun_id_t lun;
	target_id_t tgt;


	bus = XS_CHANNEL(ccb);
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	/*
	 * Do some sanity checking first.
	 */

	if ((lun != CAM_LUN_WILDCARD) &&
	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return;
	}
	/* The target id must be ourselves (or the wildcard). */
	if (IS_SCSI(isp)) {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != SDPARAM(isp)->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	} else {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != FCPARAM(isp)->isp_iid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	}

	/* A wildcard target requires a wildcard lun as well. */
	if (tgt == CAM_TARGET_WILDCARD) {
		if (lun != CAM_LUN_WILDCARD) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	/*
	 * If Fibre Channel, stop and drain all activity to this bus.
	 */
#if 0
	if (IS_FC(isp)) {
		ISP_LOCK(isp);
		frozen = 1;
		xpt_freeze_simq(isp->isp_sim, 1);
		isp->isp_osinfo.drain = 1;
		while (isp->isp_osinfo.drain) {
			(void) msleep(&isp->isp_osinfo.drain,
			    &isp->isp_osinfo.lock, PRIBIO,
			    "ispdrain", 10 * hz);
		}
		ISP_UNLOCK(isp);
	}
#endif

	/*
	 * Check to see if we're enabling on fibre channel and
	 * don't yet have a notion of who the heck we are (no
	 * loop yet).
	 */
	if (IS_FC(isp) && cel->enable &&
	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
		fcparam *fcp = isp->isp_param;
		int rv;

		/* Kick the f/w toward loop-ready; rv itself is not examined. */
		ISP_LOCK(isp);
		rv = isp_fc_runstate(isp, 2 * 1000000);
		ISP_UNLOCK(isp);
		if (fcp->isp_fwstate != FW_READY ||
		    fcp->isp_loopstate != LOOP_READY) {
			xpt_print_path(ccb->ccb_h.path);
			printf("could not get a good port database read\n");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			if (frozen)
				xpt_release_simq(isp->isp_sim, 1);
			return;
		}
	}


	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we enable/disable target mode but don't do any lun enabling.
	 */
	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
		int av;
		tptr = &isp->isp_osinfo.tsdflt;
		if (cel->enable) {
			if (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) {
				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			ccb->ccb_h.status =
			    xpt_create_path(&tptr->owner, NULL,
			    xpt_path_path_id(ccb->ccb_h.path),
			    xpt_path_target_id(ccb->ccb_h.path),
			    xpt_path_lun_id(ccb->ccb_h.path));
			if (ccb->ccb_h.status != CAM_REQ_CMP) {
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			SLIST_INIT(&tptr->atios);
			SLIST_INIT(&tptr->inots);
			/* av == 1 requests "enable"; isp_control rewrites it. */
			av = 1;
			ISP_LOCK(isp);
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				xpt_free_path(tptr->owner);
				ISP_UNLOCK(isp);
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			isp->isp_osinfo.tmflags |= TM_TMODE_ENABLED;
			ISP_UNLOCK(isp);
		} else {
			if ((isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
				ccb->ccb_h.status = CAM_LUN_INVALID;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			/* Can't turn target mode off with luns still enabled. */
			if (are_any_luns_enabled(isp)) {
				ccb->ccb_h.status = CAM_SCSI_BUSY;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			av = 0;
			ISP_LOCK(isp);
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				ISP_UNLOCK(isp);
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			isp->isp_osinfo.tmflags &= ~TM_TMODE_ENABLED;
			ISP_UNLOCK(isp);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf(lfmt, (cel->enable) ? "en" : "dis");
		if (frozen)
			xpt_release_simq(isp->isp_sim, 1);
		return;
	}

	/*
	 * We can move along now...
	 */

	if (frozen)
		xpt_release_simq(isp->isp_sim, 1);


	if (cel->enable) {
		ccb->ccb_h.status =
		    create_lun_state(isp, ccb->ccb_h.path, &tptr);
		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			return;
		}
	} else {
		tptr = get_lun_statep(isp, lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	/* Serialize firmware lun commands behind the request semaphore. */
	if (isp_psema_sig_rqe(isp)) {
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		return;
	}

	ISP_LOCK(isp);
	if (cel->enable) {
		u_int32_t seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;
		if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for ENABLE LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("ENABLE LUN returned 0x%x\n", rstat);
			goto out;
		}
	} else {
		u_int32_t seq;

		seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;

		/*
		 * The negated request type presumably tells isp_lun_cmd
		 * to build the "disable" form of the command — verify
		 * against isp_lun_cmd's implementation.
		 */
		if (isp_lun_cmd(isp, -RQSTYPE_MODIFY_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for MODIFY LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("MODIFY LUN returned 0x%x\n", rstat);
			goto out;
		}
		rstat = LUN_ERR;
		seq = isp->isp_osinfo.rollinfo++;

		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for ENABLE LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("ENABLE LUN returned 0x%x\n", rstat);
			goto out;
		}
	}
out:
	isp_vsema_rqe(isp);
	ISP_UNLOCK(isp);

	if (rstat != LUN_OK) {
		xpt_print_path(ccb->ccb_h.path);
		printf("lun %sable failed\n", (cel->enable) ? "en" : "dis");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
	} else {
		xpt_print_path(ccb->ccb_h.path);
		printf(lfmt, (cel->enable) ? "en" : "dis");
		rls_lun_statep(isp, tptr);
		if (cel->enable == 0) {
			destroy_lun_state(isp, tptr);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
	}
}

/*
 * isp_abort_tgt_ccb:
 *	Handle an abort request for a queued target-mode ccb (an ATIO
 *	on the tstate's atios list or an immediate-notify on inots).
 *	If found, the ccb is removed from its list and marked
 *	CAM_REQ_ABORTED; CAM_REQ_CMP is returned.  Otherwise an
 *	appropriate CAM error status is returned.
 */
static cam_status
isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
{
	tstate_t *tptr;
	struct ccb_hdr_slist *lp;
	struct ccb_hdr *curelm;
	int found;
	union ccb *accb = ccb->cab.abort_ccb;

	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		if (IS_FC(isp) && (accb->ccb_h.target_id !=
		    ((fcparam *) isp->isp_param)->isp_loopid)) {
			return (CAM_PATH_INVALID);
		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
			return (CAM_PATH_INVALID);
		}
	}
	tptr = get_lun_statep(isp, accb->ccb_h.target_lun);
	if (tptr == NULL) {
		return (CAM_PATH_INVALID);
	}
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &tptr->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &tptr->inots;
	} else {
		rls_lun_statep(isp, tptr);
		return (CAM_UA_ABORT);
	}
	/* Hand-rolled singly-linked-list removal of accb, if present. */
	curelm = SLIST_FIRST(lp);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(lp, sim_links.sle);
	} else {
		while(curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
					SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	rls_lun_statep(isp, tptr);
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		return (CAM_REQ_CMP);
	}
	return(CAM_PATH_INVALID);
}

/*
 * isp_target_start_ctio:
 *	Build a CTIO (SCSI) or CTIO2 (Fibre Channel) request queue entry
 *	from a CAM XPT_CONT_TARGET_IO ccb, save the ccb handle into the
 *	entry's reserved field, run DMA setup, and hand the entry to the
 *	firmware.  Returns CAM_REQ_INPROG on success or a CAM error.
 */
static cam_status
isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_scsiio *cso = &ccb->csio;
	u_int32_t *hp, save_handle;
	u_int16_t iptr, optr;


	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(qe, QENTRY_LEN);

	/*
	 * We're either moving data or completing a command here.
	 */

	if (IS_FC(isp)) {
		struct ccb_accept_tio *atiop;
		ct2_entry_t *cto = qe;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		if (isp->isp_maxluns <= 16) {
			cto->ct_lun = ccb->ccb_h.target_lun;
		}
		/*
		 * Start with a residual based on what the original datalength
		 * was supposed to be. Basically, we ignore what CAM has set
		 * for residuals. The data transfer routines will knock off
		 * the residual for each byte actually moved- and also will
		 * be responsible for setting the underrun flag.
		 */
		/* HACK! HACK! */
		if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) {
			cto->ct_resid = atiop->ccb_h.spriv_field0;
		}

		/*
		 * We always have to use the tag_id- it has the RX_ID
		 * for this exchage.
		 */
		cto->ct_rxid = cso->tag_id;
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
				int m = min(cso->sense_len, MAXRESPLEN);
				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
				cto->rsp.m1.ct_senselen = m;
				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
			}
		} else {
			cto->ct_flags |= CT2_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT2_DATA_IN;
			} else {
				cto->ct_flags |= CT2_DATA_OUT;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
			}
			/*
			 * If we're sending data and status back together,
			 * we can't also send back sense data as well.
			 */
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}
		/*
		 * NOTE(review): this tests the CAM flag CAM_SEND_STATUS
		 * against ct_flags, whose bits are CT2_* values —
		 * CT2_SENDSTATUS looks like what was meant; verify.
		 */
		if (cto->ct_flags & CAM_SEND_STATUS) {
			isp_prt(isp, ISP_LOGTDEBUG2,
			    "CTIO2 RX_ID 0x%x SCSI STATUS 0x%x datalength %u",
			    cto->ct_rxid, cso->scsi_status, cto->ct_resid);
		}
		hp = &cto->ct_reserved;
	} else {
		ct_entry_t *cto = qe;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		cto->ct_tgt = ccb->ccb_h.target_id;
		cto->ct_lun = ccb->ccb_h.target_lun;
		if (cso->tag_id && cso->tag_action) {
			/*
			 * We don't specify a tag type for regular SCSI.
			 * Just the tag value and set the flag.
			 */
			cto->ct_tag_val = cso->tag_id;
			cto->ct_flags |= CT_TQAE;
		}
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			cto->ct_flags |= CT_NODISC;
		}
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT_NO_DATA;
		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
			cto->ct_flags |= CT_SENDSTATUS;
			cto->ct_scsi_status = cso->scsi_status;
			cto->ct_resid = cso->resid;
		}
		/*
		 * NOTE(review): same CAM_SEND_STATUS vs CT_SENDSTATUS
		 * namespace mixup as the FC branch above — verify.
		 */
		if (cto->ct_flags & CAM_SEND_STATUS) {
			isp_prt(isp, ISP_LOGTDEBUG2,
			    "CTIO SCSI STATUS 0x%x resid %d",
			    cso->scsi_status, cso->resid);
		}
		hp = &cto->ct_reserved;
		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
	}

	/* Stash the ccb so the completion path can find it by handle. */
	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("No XFLIST pointers for isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}


	/*
	 * Call the dma setup routines for this entry (and any subsequent
	 * CTIOs) if there's data to move, and then tell the f/w it's got
	 * new things to play with. As with isp_start's usage of DMA setup,
	 * any swizzling is done in the machine dependent layer. Because
	 * of this, we put the request onto the queue area first in native
	 * format.
	 */

	save_handle = *hp;
	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
	case CMD_QUEUED:
		ISP_ADD_REQUEST(isp, iptr);
		return (CAM_REQ_INPROG);

	case CMD_EAGAIN:
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		isp_destroy_handle(isp, save_handle);
		return (CAM_RESRC_UNAVAIL);

	default:
		isp_destroy_handle(isp, save_handle);
		return (XS_ERR(ccb));
	}
}

/*
 * isp_target_putback_atio:
 *	Return a consumed ATIO (or ATIO2 on Fibre Channel) resource to
 *	the firmware by rebuilding a queue entry from the ccb and
 *	pushing it onto the request queue.  Returns CAM_REQ_CMP, or
 *	CAM_RESRC_UNAVAIL if the request queue is full.
 */
static cam_status
isp_target_putback_atio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_accept_tio *atiop;
	u_int16_t iptr, optr;

	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_putback_atio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(qe, QENTRY_LEN);
	atiop = (struct ccb_accept_tio *) ccb;
	if (IS_FC(isp)) {
		at2_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		/* SCC firmware (>16 luns) uses the wide lun field. */
		if (isp->isp_maxluns > 16) {
			at->at_scclun = (uint16_t) atiop->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) atiop->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = atiop->tag_id;
		ISP_SWIZ_ATIO2(isp, qe, qe);
	} else {
		at_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = atiop->init_id;
		at->at_tgt = atiop->ccb_h.target_id;
		at->at_lun = atiop->ccb_h.target_lun;
		at->at_status = CT_OK;
		if (atiop->ccb_h.status & CAM_TAG_ACTION_VALID) {
			at->at_tag_type = atiop->tag_action;
		}
		at->at_tag_val = atiop->tag_id;
		ISP_SWIZ_ATIO(isp, qe, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, iptr);
	return (CAM_REQ_CMP);
}

/*
 * isp_refire_putback_atio:
 *	timeout(9) callback: retry isp_target_putback_atio until the
 *	request queue has room (rescheduling itself every 10 ticks),
 *	then finish the ctio completion via
 *	isp_handle_platform_ctio_part2.
 */
static void
isp_refire_putback_atio(void *arg)
{
	union ccb *ccb = arg;
	int s = splcam();
	if (isp_target_putback_atio(XS_ISP(ccb), ccb) != CAM_REQ_CMP) {
		(void) timeout(isp_refire_putback_atio, ccb, 10);
	} else {
		isp_handle_platform_ctio_part2(XS_ISP(ccb), ccb);
	}
	splx(s);
}

/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

/*
 * isp_handle_platform_atio:
 *	Process a parallel-SCSI ATIO from the firmware: validate the
 *	firmware status, find the lun's tstate (or the wildcard
 *	default), dequeue a pending XPT_ACCEPT_TARGET_IO ccb, fill it
 *	in (CDB, sense, tag, initiator id), and complete it to CAM.
 *	If anything is missing, the command is bounced back to the
 *	initiator with BUSY or QUEUE FULL status.  Always returns 0.
 */
static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus - i.e. the initiator
	 * did not set DiscPriv in the identify message. We don't care
	 * about this so it's ignored.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		printf("%s: PHASE ERROR\n", isp->isp_name);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		printf("%s: bogus atio (0x%x) leaked to platform\n",
		    isp->isp_name, status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	tptr = get_lun_statep(isp, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		printf("no ATIOS for lun %d from initiator %d\n",
		    aep->at_lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	/* The wildcard (default) tstate needs the real target/lun filled in. */
	if (tptr == &isp->isp_osinfo.tsdflt) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_tag_val;
	if ((atiop->tag_action = aep->at_tag_type) != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG2,
	    "ATIO CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_cdb[0] & 0xff, aep->at_iid, aep->at_lun,
	    aep->at_tag_val & 0xff, aep->at_tag_type,
	    (aep->at_flags & AT_NODISC)? "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}

/*
 * isp_handle_platform_atio2:
 *	Fibre Channel counterpart of isp_handle_platform_atio: process
 *	an ATIO2, mapping the (possibly SCC wide) lun, dequeueing a
 *	pending accept-tio ccb, translating the task attribute into a
 *	CAM tag action, preserving the command datalength in
 *	spriv_field0, and completing the ccb to CAM.  Always returns 0.
 */
static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		printf("%s: bogus atio (0x%x) leaked to platform\n",
		    isp->isp_name, aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	if (isp->isp_maxluns > 16) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * What we'd like to know is whether or not we have a listener
		 * upstream that really hasn't configured yet. If we do, then
		 * we can give a more sensible reply here. If not, then we can
		 * reject this out of hand.
		 *
		 * Choices for what to send were
		 *
		 *	Not Ready, Unit Not Self-Configured Yet
		 *	(0x2,0x3e,0x00)
		 *
		 * for the former and
		 *
		 *	Illegal Request, Logical Unit Not Supported
		 *	(0x5,0x25,0x00)
		 *
		 * for the latter.
		 *
		 * We used to decide whether there was at least one listener
		 * based upon whether the black hole driver was configured.
		 * However, recent config(8) changes have made this hard to do
		 * at this time.
		 *
		 */
		u_int32_t ccode = SCSI_STATUS_BUSY;

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, ccode, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		printf("no ATIOS for lun %d from initiator %d\n",
		    lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);

	if (tptr == &isp->isp_osinfo.tsdflt) {
		atiop->ccb_h.target_id =
			((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	if (aep->at_status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	/* Map the FCP task attribute onto a CAM/SCSI queue tag message. */
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
	case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ACAQ:	/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	if (atiop->tag_action != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}

	/*
	 * Preserve overall command datalength in private field.
	 */
	atiop->ccb_h.spriv_field0 = aep->at_datalen;

	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG2,
	    "ATIO2 RX_ID 0x%x CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid & 0xffff, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}

/*
 * isp_handle_platform_ctio:
 *	Handle completion of a CTIO/CTIO2 from the firmware: recover
 *	the originating ccb from the handle in ct_reserved, collect
 *	status/residual, and — for the final CTIO of a command — put
 *	the ATIO resource back to the firmware and complete the ccb.
 *	Always returns 0.
 */
static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_reserved);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_reserved);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		isp_prt(isp, ISP_LOGTDEBUG2,
		    "CTIO2 RX_ID 0x%x sts 0x%x flg 0x%x sns %d FIN",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0);
		notify_cam = ct->ct_header.rqs_seqno;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		isp_prt(isp, ISP_LOGTDEBUG2,
		    "CTIO tag 0x%x sts 0x%x flg 0x%x FIN",
		    ct->ct_tag_val, ct->ct_status, ct->ct_flags);
		notify_cam = ct->ct_header.rqs_seqno;
	}

	/*
	 * We're here either because data transfers are done (and
	 * it's time to send a final status CTIO) or because the final
	 * status CTIO is done. We don't get called for all intermediate
	 * CTIOs that happen for a large data transfer.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. The exception is that we clear
	 * the notion of handling a non-disconnecting command here.
	 */

	if (sentstatus) {
		/*
		 * Data transfer done. See if all went okay.
		 */
		if (ok) {
			ccb->csio.resid = 0;
		} else {
			ccb->csio.resid = ccb->csio.dxfer_len;
		}
	}

	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG1, "Intermediate CTIO done");
		return (0);
	}
	isp_prt(isp, ISP_LOGTDEBUG1, "Final CTIO done");
	if (isp_target_putback_atio(isp, ccb) != CAM_REQ_CMP) {
		/* Queue full; retry the putback from a timeout. */
		(void) timeout(isp_refire_putback_atio, ccb, 10);
	} else {
		isp_handle_platform_ctio_part2(isp, ccb);
	}
	return (0);
}

/*
 * isp_handle_platform_ctio_part2:
 *	Final ctio completion: mark the ccb complete (if still in
 *	progress), clear SIM-queued state, release a resource-based
 *	simq freeze if we were the last freezer, and hand the ccb back
 *	to CAM via xpt_done.
 */
static void
isp_handle_platform_ctio_part2(struct ispsoftc *isp, union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				isp_prt(isp, ISP_LOGDEBUG2, "ctio->devqfrozen");
			}
		} else {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
		}
	}
	xpt_done(ccb);
}
#endif

static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;
1301 1302 sim = (struct cam_sim *)cbarg; 1303 isp = (struct ispsoftc *) cam_sim_softc(sim); 1304 switch (code) { 1305 case AC_LOST_DEVICE: 1306 if (IS_SCSI(isp)) { 1307 u_int16_t oflags, nflags; 1308 sdparam *sdp = isp->isp_param; 1309 int rvf, tgt; 1310 1311 tgt = xpt_path_target_id(path); 1312 rvf = ISP_FW_REVX(isp->isp_fwrev); 1313 ISP_LOCK(isp); 1314 sdp += cam_sim_bus(sim); 1315 isp->isp_update |= (1 << cam_sim_bus(sim)); 1316 nflags = DPARM_SAFE_DFLT; 1317 if (rvf >= ISP_FW_REV(7, 55, 0) || 1318 (ISP_FW_REV(4, 55, 0) <= rvf && 1319 (rvf < ISP_FW_REV(5, 0, 0)))) { 1320 nflags |= DPARM_NARROW | DPARM_ASYNC; 1321 } 1322 oflags = sdp->isp_devparam[tgt].dev_flags; 1323 sdp->isp_devparam[tgt].dev_flags = nflags; 1324 sdp->isp_devparam[tgt].dev_update = 1; 1325 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL); 1326 sdp->isp_devparam[tgt].dev_flags = oflags; 1327 ISP_UNLOCK(isp); 1328 } 1329 break; 1330 default: 1331 printf("%s: isp_attach Async Code 0x%x\n", isp->isp_name, code); 1332 break; 1333 } 1334 } 1335 1336 static void 1337 isp_poll(struct cam_sim *sim) 1338 { 1339 struct ispsoftc *isp = cam_sim_softc(sim); 1340 ISP_LOCK(isp); 1341 (void) isp_intr(isp); 1342 ISP_UNLOCK(isp); 1343 } 1344 1345 static void 1346 isp_relsim(void *arg) 1347 { 1348 struct ispsoftc *isp = arg; 1349 ISP_LOCK(isp); 1350 if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) { 1351 int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED; 1352 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED; 1353 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 1354 xpt_release_simq(isp->isp_sim, 1); 1355 isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq"); 1356 } 1357 } 1358 ISP_UNLOCK(isp); 1359 } 1360 1361 static void 1362 isp_watchdog(void *arg) 1363 { 1364 XS_T *xs = arg; 1365 struct ispsoftc *isp = XS_ISP(xs); 1366 u_int32_t handle; 1367 1368 /* 1369 * We've decided this command is dead. 
Make sure we're not trying 1370 * to kill a command that's already dead by getting it's handle and 1371 * and seeing whether it's still alive. 1372 */ 1373 ISP_LOCK(isp); 1374 handle = isp_find_handle(isp, xs); 1375 if (handle) { 1376 u_int16_t r; 1377 1378 if (XS_CMD_DONE_P(xs)) { 1379 isp_prt(isp, ISP_LOGDEBUG1, 1380 "watchdog found done cmd (handle 0x%x)", handle); 1381 ISP_UNLOCK(isp); 1382 return; 1383 } 1384 1385 if (XS_CMD_WDOG_P(xs)) { 1386 isp_prt(isp, ISP_LOGDEBUG2, 1387 "recursive watchdog (handle 0x%x)", handle); 1388 ISP_UNLOCK(isp); 1389 return; 1390 } 1391 1392 XS_CMD_S_WDOG(xs); 1393 1394 r = ISP_READ(isp, BIU_ISR); 1395 1396 if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) { 1397 isp_prt(isp, ISP_LOGDEBUG2, 1398 "watchdog cleanup (%x, %x)", handle, r); 1399 xpt_done((union ccb *) xs); 1400 } else if (XS_CMD_GRACE_P(xs)) { 1401 /* 1402 * Make sure the command is *really* dead before we 1403 * release the handle (and DMA resources) for reuse. 1404 */ 1405 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg); 1406 1407 /* 1408 * After this point, the comamnd is really dead. 
1409 */ 1410 if (XS_XFRLEN(xs)) { 1411 ISP_DMAFREE(isp, xs, handle); 1412 } 1413 isp_destroy_handle(isp, handle); 1414 xpt_print_path(xs->ccb_h.path); 1415 printf("%s: watchdog timeout (%x, %x)\n", 1416 isp->isp_name, handle, r); 1417 XS_SETERR(xs, CAM_CMD_TIMEOUT); 1418 XS_CMD_C_WDOG(xs); 1419 isp_done(xs); 1420 } else { 1421 u_int16_t iptr, optr; 1422 ispreq_t *mp; 1423 1424 XS_CMD_C_WDOG(xs); 1425 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 1426 if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) { 1427 ISP_UNLOCK(isp); 1428 return; 1429 } 1430 XS_CMD_S_GRACE(xs); 1431 MEMZERO((void *) mp, sizeof (*mp)); 1432 mp->req_header.rqs_entry_count = 1; 1433 mp->req_header.rqs_entry_type = RQSTYPE_MARKER; 1434 mp->req_modifier = SYNC_ALL; 1435 mp->req_target = XS_CHANNEL(xs) << 7; 1436 ISP_SWIZZLE_REQUEST(isp, mp); 1437 ISP_ADD_REQUEST(isp, iptr); 1438 } 1439 } else { 1440 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command"); 1441 } 1442 ISP_UNLOCK(isp); 1443 } 1444 1445 static void 1446 isp_action(struct cam_sim *sim, union ccb *ccb) 1447 { 1448 int bus, tgt, error; 1449 struct ispsoftc *isp; 1450 struct ccb_trans_settings *cts; 1451 1452 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 1453 1454 isp = (struct ispsoftc *)cam_sim_softc(sim); 1455 ccb->ccb_h.sim_priv.entries[0].field = 0; 1456 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 1457 if (isp->isp_state != ISP_RUNSTATE && 1458 ccb->ccb_h.func_code == XPT_SCSI_IO) { 1459 ISP_LOCK(isp); 1460 isp_init(isp); 1461 if (isp->isp_state != ISP_INITSTATE) { 1462 ISP_UNLOCK(isp); 1463 /* 1464 * Lie. Say it was a selection timeout. 
1465 */ 1466 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 1467 xpt_freeze_devq(ccb->ccb_h.path, 1); 1468 xpt_done(ccb); 1469 return; 1470 } 1471 isp->isp_state = ISP_RUNSTATE; 1472 ISP_UNLOCK(isp); 1473 } 1474 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 1475 1476 switch (ccb->ccb_h.func_code) { 1477 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 1478 /* 1479 * Do a couple of preliminary checks... 1480 */ 1481 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 1482 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 1483 ccb->ccb_h.status = CAM_REQ_INVALID; 1484 xpt_done(ccb); 1485 break; 1486 } 1487 } 1488 #ifdef DIAGNOSTIC 1489 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 1490 ccb->ccb_h.status = CAM_PATH_INVALID; 1491 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 1492 ccb->ccb_h.status = CAM_PATH_INVALID; 1493 } 1494 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 1495 isp_prt(isp, ISP_LOGERR, 1496 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO", 1497 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 1498 xpt_done(ccb); 1499 break; 1500 } 1501 #endif 1502 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 1503 ISP_LOCK(isp); 1504 error = isp_start((XS_T *) ccb); 1505 ISP_UNLOCK(isp); 1506 switch (error) { 1507 case CMD_QUEUED: 1508 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1509 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1510 u_int64_t ticks = (u_int64_t) hz; 1511 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) 1512 ticks = 60 * 1000 * ticks; 1513 else 1514 ticks = ccb->ccb_h.timeout * hz; 1515 ticks = ((ticks + 999) / 1000) + hz + hz; 1516 if (ticks >= 0x80000000) { 1517 isp_prt(isp, ISP_LOGERR, 1518 "timeout overflow"); 1519 ticks = 0x80000000; 1520 } 1521 ccb->ccb_h.timeout_ch = timeout(isp_watchdog, 1522 (caddr_t)ccb, (int)ticks); 1523 } else { 1524 callout_handle_init(&ccb->ccb_h.timeout_ch); 1525 } 1526 break; 1527 case CMD_RQLATER: 1528 if (isp->isp_osinfo.simqfrozen == 0) { 1529 isp_prt(isp, 
ISP_LOGDEBUG2, 1530 "RQLATER freeze simq"); 1531 isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED; 1532 timeout(isp_relsim, isp, 500); 1533 xpt_freeze_simq(sim, 1); 1534 } 1535 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1536 xpt_done(ccb); 1537 break; 1538 case CMD_EAGAIN: 1539 if (isp->isp_osinfo.simqfrozen == 0) { 1540 xpt_freeze_simq(sim, 1); 1541 isp_prt(isp, ISP_LOGDEBUG2, 1542 "EAGAIN freeze simq"); 1543 } 1544 isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE; 1545 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1546 xpt_done(ccb); 1547 break; 1548 case CMD_COMPLETE: 1549 ISP_LOCK(isp); 1550 isp_done((struct ccb_scsiio *) ccb); 1551 ISP_UNLOCK(isp); 1552 break; 1553 default: 1554 isp_prt(isp, ISP_LOGERR, 1555 "What's this? 0x%x at %d in file %s", 1556 error, __LINE__, __FILE__); 1557 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 1558 xpt_done(ccb); 1559 } 1560 break; 1561 1562 #ifdef ISP_TARGET_MODE 1563 case XPT_EN_LUN: /* Enable LUN as a target */ 1564 isp_en_lun(isp, ccb); 1565 xpt_done(ccb); 1566 break; 1567 1568 case XPT_NOTIFY_ACK: /* recycle notify ack */ 1569 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 1570 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 1571 { 1572 tstate_t *tptr = get_lun_statep(isp, ccb->ccb_h.target_lun); 1573 if (tptr == NULL) { 1574 ccb->ccb_h.status = CAM_LUN_INVALID; 1575 xpt_done(ccb); 1576 break; 1577 } 1578 ccb->ccb_h.sim_priv.entries[0].field = 0; 1579 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 1580 ISP_LOCK(isp); 1581 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1582 #if 0 1583 (void) isp_target_putback_atio(isp, ccb); 1584 #endif 1585 SLIST_INSERT_HEAD(&tptr->atios, 1586 &ccb->ccb_h, sim_links.sle); 1587 } else { 1588 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 1589 sim_links.sle); 1590 } 1591 ISP_UNLOCK(isp); 1592 rls_lun_statep(isp, tptr); 1593 ccb->ccb_h.status = CAM_REQ_INPROG; 1594 break; 1595 } 1596 case XPT_CONT_TARGET_IO: 1597 { 1598 ISP_LOCK(isp); 1599 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb); 1600 if 
(ccb->ccb_h.status != CAM_REQ_INPROG) { 1601 if (isp->isp_osinfo.simqfrozen == 0) { 1602 xpt_freeze_simq(sim, 1); 1603 xpt_print_path(ccb->ccb_h.path); 1604 printf("XPT_CONT_TARGET_IO freeze simq\n"); 1605 } 1606 isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE; 1607 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1608 xpt_done(ccb); 1609 } else { 1610 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1611 } 1612 ISP_UNLOCK(isp); 1613 break; 1614 } 1615 #endif 1616 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 1617 1618 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 1619 tgt = ccb->ccb_h.target_id; 1620 tgt |= (bus << 16); 1621 1622 ISP_LOCK(isp); 1623 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 1624 ISP_UNLOCK(isp); 1625 if (error) { 1626 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1627 } else { 1628 ccb->ccb_h.status = CAM_REQ_CMP; 1629 } 1630 xpt_done(ccb); 1631 break; 1632 case XPT_ABORT: /* Abort the specified CCB */ 1633 { 1634 union ccb *accb = ccb->cab.abort_ccb; 1635 switch (accb->ccb_h.func_code) { 1636 #ifdef ISP_TARGET_MODE 1637 case XPT_ACCEPT_TARGET_IO: 1638 case XPT_IMMED_NOTIFY: 1639 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 1640 break; 1641 case XPT_CONT_TARGET_IO: 1642 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 1643 ccb->ccb_h.status = CAM_UA_ABORT; 1644 break; 1645 #endif 1646 case XPT_SCSI_IO: 1647 ISP_LOCK(isp); 1648 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 1649 ISP_UNLOCK(isp); 1650 if (error) { 1651 ccb->ccb_h.status = CAM_UA_ABORT; 1652 } else { 1653 ccb->ccb_h.status = CAM_REQ_CMP; 1654 } 1655 break; 1656 default: 1657 ccb->ccb_h.status = CAM_REQ_INVALID; 1658 break; 1659 } 1660 xpt_done(ccb); 1661 break; 1662 } 1663 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 1664 1665 cts = &ccb->cts; 1666 tgt = cts->ccb_h.target_id; 1667 ISP_LOCK(isp); 1668 if (IS_SCSI(isp)) { 1669 sdparam *sdp = isp->isp_param; 1670 u_int16_t *dptr; 1671 1672 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 1673 1674 sdp += bus; 1675 #if 0 1676 if 
(cts->flags & CCB_TRANS_CURRENT_SETTINGS) 1677 dptr = &sdp->isp_devparam[tgt].cur_dflags; 1678 else 1679 dptr = &sdp->isp_devparam[tgt].dev_flags; 1680 #else 1681 /* 1682 * We always update (internally) from dev_flags 1683 * so any request to change settings just gets 1684 * vectored to that location. 1685 */ 1686 dptr = &sdp->isp_devparam[tgt].dev_flags; 1687 #endif 1688 1689 /* 1690 * Note that these operations affect the 1691 * the goal flags (dev_flags)- not 1692 * the current state flags. Then we mark 1693 * things so that the next operation to 1694 * this HBA will cause the update to occur. 1695 */ 1696 if (cts->valid & CCB_TRANS_DISC_VALID) { 1697 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 1698 *dptr |= DPARM_DISC; 1699 } else { 1700 *dptr &= ~DPARM_DISC; 1701 } 1702 } 1703 if (cts->valid & CCB_TRANS_TQ_VALID) { 1704 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 1705 *dptr |= DPARM_TQING; 1706 } else { 1707 *dptr &= ~DPARM_TQING; 1708 } 1709 } 1710 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 1711 switch (cts->bus_width) { 1712 case MSG_EXT_WDTR_BUS_16_BIT: 1713 *dptr |= DPARM_WIDE; 1714 break; 1715 default: 1716 *dptr &= ~DPARM_WIDE; 1717 } 1718 } 1719 /* 1720 * Any SYNC RATE of nonzero and SYNC_OFFSET 1721 * of nonzero will cause us to go to the 1722 * selected (from NVRAM) maximum value for 1723 * this device. At a later point, we'll 1724 * allow finer control. 1725 */ 1726 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 1727 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 1728 (cts->sync_offset > 0)) { 1729 *dptr |= DPARM_SYNC; 1730 } else { 1731 *dptr &= ~DPARM_SYNC; 1732 } 1733 *dptr |= DPARM_SAFE_DFLT; 1734 isp_prt(isp, ISP_LOGDEBUG0, 1735 "%d.%d set %s period 0x%x offset 0x%x flags 0x%x", 1736 bus, tgt, (cts->flags & CCB_TRANS_CURRENT_SETTINGS)? 
1737 "current" : "user", 1738 sdp->isp_devparam[tgt].sync_period, 1739 sdp->isp_devparam[tgt].sync_offset, 1740 sdp->isp_devparam[tgt].dev_flags); 1741 sdp->isp_devparam[tgt].dev_update = 1; 1742 isp->isp_update |= (1 << bus); 1743 } 1744 ISP_UNLOCK(isp); 1745 ccb->ccb_h.status = CAM_REQ_CMP; 1746 xpt_done(ccb); 1747 break; 1748 1749 case XPT_GET_TRAN_SETTINGS: 1750 1751 cts = &ccb->cts; 1752 tgt = cts->ccb_h.target_id; 1753 if (IS_FC(isp)) { 1754 /* 1755 * a lot of normal SCSI things don't make sense. 1756 */ 1757 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 1758 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 1759 /* 1760 * How do you measure the width of a high 1761 * speed serial bus? Well, in bytes. 1762 * 1763 * Offset and period make no sense, though, so we set 1764 * (above) a 'base' transfer speed to be gigabit. 1765 */ 1766 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1767 } else { 1768 sdparam *sdp = isp->isp_param; 1769 u_int16_t dval, pval, oval; 1770 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 1771 1772 sdp += bus; 1773 if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) { 1774 ISP_LOCK(isp); 1775 sdp->isp_devparam[tgt].dev_refresh = 1; 1776 isp->isp_update |= (1 << bus); 1777 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 1778 NULL); 1779 ISP_UNLOCK(isp); 1780 dval = sdp->isp_devparam[tgt].cur_dflags; 1781 oval = sdp->isp_devparam[tgt].cur_offset; 1782 pval = sdp->isp_devparam[tgt].cur_period; 1783 } else { 1784 dval = sdp->isp_devparam[tgt].dev_flags; 1785 oval = sdp->isp_devparam[tgt].sync_offset; 1786 pval = sdp->isp_devparam[tgt].sync_period; 1787 } 1788 1789 ISP_LOCK(isp); 1790 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 1791 1792 if (dval & DPARM_DISC) { 1793 cts->flags |= CCB_TRANS_DISC_ENB; 1794 } 1795 if (dval & DPARM_TQING) { 1796 cts->flags |= CCB_TRANS_TAG_ENB; 1797 } 1798 if (dval & DPARM_WIDE) { 1799 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1800 } else { 1801 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1802 } 1803 
cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 1804 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 1805 1806 if ((dval & DPARM_SYNC) && oval != 0) { 1807 cts->sync_period = pval; 1808 cts->sync_offset = oval; 1809 cts->valid |= 1810 CCB_TRANS_SYNC_RATE_VALID | 1811 CCB_TRANS_SYNC_OFFSET_VALID; 1812 } 1813 ISP_UNLOCK(isp); 1814 isp_prt(isp, ISP_LOGDEBUG0, 1815 "%d.%d get %s period 0x%x offset 0x%x flags 0x%x", 1816 bus, tgt, (cts->flags & CCB_TRANS_CURRENT_SETTINGS)? 1817 "current" : "user", pval, oval, dval); 1818 } 1819 ccb->ccb_h.status = CAM_REQ_CMP; 1820 xpt_done(ccb); 1821 break; 1822 1823 case XPT_CALC_GEOMETRY: 1824 { 1825 struct ccb_calc_geometry *ccg; 1826 u_int32_t secs_per_cylinder; 1827 u_int32_t size_mb; 1828 1829 ccg = &ccb->ccg; 1830 if (ccg->block_size == 0) { 1831 isp_prt(isp, ISP_LOGERR, 1832 "%d.%d XPT_CALC_GEOMETRY block size 0?", 1833 ccg->ccb_h.target_id, ccg->ccb_h.target_lun); 1834 ccb->ccb_h.status = CAM_REQ_INVALID; 1835 xpt_done(ccb); 1836 break; 1837 } 1838 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 1839 if (size_mb > 1024) { 1840 ccg->heads = 255; 1841 ccg->secs_per_track = 63; 1842 } else { 1843 ccg->heads = 64; 1844 ccg->secs_per_track = 32; 1845 } 1846 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 1847 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 1848 ccb->ccb_h.status = CAM_REQ_CMP; 1849 xpt_done(ccb); 1850 break; 1851 } 1852 case XPT_RESET_BUS: /* Reset the specified bus */ 1853 bus = cam_sim_bus(sim); 1854 ISP_LOCK(isp); 1855 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 1856 ISP_UNLOCK(isp); 1857 if (error) 1858 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1859 else { 1860 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 1861 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 1862 else if (isp->isp_path != NULL) 1863 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 1864 ccb->ccb_h.status = CAM_REQ_CMP; 1865 } 1866 xpt_done(ccb); 1867 break; 1868 1869 case XPT_TERM_IO: /* Terminate the I/O process */ 1870 
ccb->ccb_h.status = CAM_REQ_INVALID; 1871 xpt_done(ccb); 1872 break; 1873 1874 case XPT_PATH_INQ: /* Path routing inquiry */ 1875 { 1876 struct ccb_pathinq *cpi = &ccb->cpi; 1877 1878 cpi->version_num = 1; 1879 #ifdef ISP_TARGET_MODE 1880 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 1881 #else 1882 cpi->target_sprt = 0; 1883 #endif 1884 cpi->hba_eng_cnt = 0; 1885 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 1886 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 1887 cpi->bus_id = cam_sim_bus(sim); 1888 if (IS_FC(isp)) { 1889 cpi->hba_misc = PIM_NOBUSRESET; 1890 /* 1891 * Because our loop ID can shift from time to time, 1892 * make our initiator ID out of range of our bus. 1893 */ 1894 cpi->initiator_id = cpi->max_target + 1; 1895 1896 /* 1897 * Set base transfer capabilities for Fibre Channel. 1898 * Technically not correct because we don't know 1899 * what media we're running on top of- but we'll 1900 * look good if we always say 100MB/s. 1901 */ 1902 cpi->base_transfer_speed = 100000; 1903 cpi->hba_inquiry = PI_TAG_ABLE; 1904 } else { 1905 sdparam *sdp = isp->isp_param; 1906 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 1907 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 1908 cpi->hba_misc = 0; 1909 cpi->initiator_id = sdp->isp_initiator_id; 1910 cpi->base_transfer_speed = 3300; 1911 } 1912 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 1913 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 1914 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 1915 cpi->unit_number = cam_sim_unit(sim); 1916 cpi->ccb_h.status = CAM_REQ_CMP; 1917 xpt_done(ccb); 1918 break; 1919 } 1920 default: 1921 ccb->ccb_h.status = CAM_REQ_INVALID; 1922 xpt_done(ccb); 1923 break; 1924 } 1925 } 1926 1927 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 1928 void 1929 isp_done(struct ccb_scsiio *sccb) 1930 { 1931 struct ispsoftc *isp = XS_ISP(sccb); 1932 1933 if (XS_NOERR(sccb)) 1934 XS_SETERR(sccb, CAM_REQ_CMP); 1935 1936 if ((sccb->ccb_h.status & CAM_STATUS_MASK) 
== CAM_REQ_CMP && 1937 (sccb->scsi_status != SCSI_STATUS_OK)) { 1938 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 1939 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 1940 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 1941 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 1942 } else { 1943 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1944 } 1945 } 1946 1947 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1948 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1949 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 1950 sccb->ccb_h.status |= CAM_DEV_QFRZN; 1951 xpt_freeze_devq(sccb->ccb_h.path, 1); 1952 if (sccb->scsi_status != SCSI_STATUS_OK) 1953 isp_prt(isp, ISP_LOGDEBUG2, 1954 "freeze devq %d.%d %x %x", 1955 sccb->ccb_h.target_id, 1956 sccb->ccb_h.target_lun, sccb->ccb_h.status, 1957 sccb->scsi_status); 1958 } 1959 } 1960 1961 /* 1962 * If we were frozen waiting resources, clear that we were frozen 1963 * waiting for resources. If we are no longer frozen, and the devq 1964 * isn't frozen, mark the completing CCB to have the XPT layer 1965 * release the simq. 
1966 */ 1967 if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) { 1968 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE; 1969 if (isp->isp_osinfo.simqfrozen == 0) { 1970 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 1971 isp_prt(isp, ISP_LOGDEBUG2, 1972 "isp_done->relsimq"); 1973 sccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1974 } else { 1975 isp_prt(isp, ISP_LOGDEBUG2, 1976 "isp_done->devq frozen"); 1977 } 1978 } else { 1979 isp_prt(isp, ISP_LOGDEBUG2, 1980 "isp_done -> simqfrozen = %x", 1981 isp->isp_osinfo.simqfrozen); 1982 } 1983 } 1984 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 1985 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1986 xpt_print_path(sccb->ccb_h.path); 1987 printf("cam completion status 0x%x\n", sccb->ccb_h.status); 1988 } 1989 1990 XS_CMD_S_DONE(sccb); 1991 if (XS_CMD_WDOG_P(sccb) == 0) { 1992 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 1993 if (XS_CMD_GRACE_P(sccb)) { 1994 isp_prt(isp, ISP_LOGDEBUG2, 1995 "finished command on borrowed time"); 1996 } 1997 XS_CMD_S_CLEAR(sccb); 1998 ISP_UNLOCK(isp); 1999 #ifdef ISP_SMPLOCK 2000 mtx_lock(&Giant); 2001 xpt_done((union ccb *) sccb); 2002 mtx_unlock(&Giant); 2003 #else 2004 xpt_done((union ccb *) sccb); 2005 #endif 2006 ISP_LOCK(isp); 2007 } 2008 } 2009 2010 int 2011 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg) 2012 { 2013 int bus, rv = 0; 2014 switch (cmd) { 2015 case ISPASYNC_NEW_TGT_PARAMS: 2016 { 2017 int flags, tgt; 2018 sdparam *sdp = isp->isp_param; 2019 struct ccb_trans_settings neg; 2020 struct cam_path *tmppath; 2021 2022 tgt = *((int *)arg); 2023 bus = (tgt >> 16) & 0xffff; 2024 tgt &= 0xffff; 2025 sdp += bus; 2026 if (xpt_create_path(&tmppath, NULL, 2027 cam_sim_path(bus? 
isp->isp_sim2 : isp->isp_sim), 2028 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2029 isp_prt(isp, ISP_LOGWARN, 2030 "isp_async cannot make temp path for %d.%d", 2031 tgt, bus); 2032 rv = -1; 2033 break; 2034 } 2035 flags = sdp->isp_devparam[tgt].cur_dflags; 2036 neg.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2037 if (flags & DPARM_DISC) { 2038 neg.flags |= CCB_TRANS_DISC_ENB; 2039 } 2040 if (flags & DPARM_TQING) { 2041 neg.flags |= CCB_TRANS_TAG_ENB; 2042 } 2043 neg.valid |= CCB_TRANS_BUS_WIDTH_VALID; 2044 neg.bus_width = (flags & DPARM_WIDE)? 2045 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; 2046 neg.sync_period = sdp->isp_devparam[tgt].cur_period; 2047 neg.sync_offset = sdp->isp_devparam[tgt].cur_offset; 2048 if (flags & DPARM_SYNC) { 2049 neg.valid |= 2050 CCB_TRANS_SYNC_RATE_VALID | 2051 CCB_TRANS_SYNC_OFFSET_VALID; 2052 } 2053 isp_prt(isp, ISP_LOGDEBUG2, 2054 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 2055 bus, tgt, neg.sync_period, neg.sync_offset, flags); 2056 xpt_setup_ccb(&neg.ccb_h, tmppath, 1); 2057 xpt_async(AC_TRANSFER_NEG, tmppath, &neg); 2058 xpt_free_path(tmppath); 2059 break; 2060 } 2061 case ISPASYNC_BUS_RESET: 2062 bus = *((int *)arg); 2063 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 2064 bus); 2065 if (bus > 0 && isp->isp_path2) { 2066 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2067 } else if (isp->isp_path) { 2068 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2069 } 2070 break; 2071 case ISPASYNC_LOOP_DOWN: 2072 if (isp->isp_path) { 2073 if (isp->isp_osinfo.simqfrozen == 0) { 2074 isp_prt(isp, ISP_LOGDEBUG2, 2075 "loop down freeze simq"); 2076 xpt_freeze_simq(isp->isp_sim, 1); 2077 } 2078 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 2079 } 2080 isp_prt(isp, ISP_LOGINFO, "Loop DOWN"); 2081 break; 2082 case ISPASYNC_LOOP_UP: 2083 if (isp->isp_path) { 2084 int wasfrozen = 2085 isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 2086 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 2087 if (wasfrozen 
&& isp->isp_osinfo.simqfrozen == 0) { 2088 xpt_release_simq(isp->isp_sim, 1); 2089 isp_prt(isp, ISP_LOGDEBUG2, 2090 "loop up release simq"); 2091 } 2092 } 2093 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 2094 break; 2095 case ISPASYNC_PROMENADE: 2096 { 2097 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x " 2098 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x"; 2099 static const char *roles[4] = { 2100 "(none)", "Target", "Initiator", "Target/Initiator" 2101 }; 2102 fcparam *fcp = isp->isp_param; 2103 int tgt = *((int *) arg); 2104 struct lportdb *lp = &fcp->portdb[tgt]; 2105 2106 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid, 2107 roles[lp->roles & 0x3], 2108 (lp->valid)? "Arrived" : "Departed", 2109 (u_int32_t) (lp->port_wwn >> 32), 2110 (u_int32_t) (lp->port_wwn & 0xffffffffLL), 2111 (u_int32_t) (lp->node_wwn >> 32), 2112 (u_int32_t) (lp->node_wwn & 0xffffffffLL)); 2113 break; 2114 } 2115 case ISPASYNC_CHANGE_NOTIFY: 2116 if (arg == (void *) 1) { 2117 isp_prt(isp, ISP_LOGINFO, 2118 "Name Server Database Changed"); 2119 } else { 2120 isp_prt(isp, ISP_LOGINFO, 2121 "Name Server Database Changed"); 2122 } 2123 break; 2124 case ISPASYNC_FABRIC_DEV: 2125 { 2126 int target, lrange; 2127 struct lportdb *lp = NULL; 2128 char *pt; 2129 sns_ganrsp_t *resp = (sns_ganrsp_t *) arg; 2130 u_int32_t portid; 2131 u_int64_t wwpn, wwnn; 2132 fcparam *fcp = isp->isp_param; 2133 2134 portid = 2135 (((u_int32_t) resp->snscb_port_id[0]) << 16) | 2136 (((u_int32_t) resp->snscb_port_id[1]) << 8) | 2137 (((u_int32_t) resp->snscb_port_id[2])); 2138 2139 wwpn = 2140 (((u_int64_t)resp->snscb_portname[0]) << 56) | 2141 (((u_int64_t)resp->snscb_portname[1]) << 48) | 2142 (((u_int64_t)resp->snscb_portname[2]) << 40) | 2143 (((u_int64_t)resp->snscb_portname[3]) << 32) | 2144 (((u_int64_t)resp->snscb_portname[4]) << 24) | 2145 (((u_int64_t)resp->snscb_portname[5]) << 16) | 2146 (((u_int64_t)resp->snscb_portname[6]) << 8) | 2147 (((u_int64_t)resp->snscb_portname[7])); 2148 
2149 wwnn = 2150 (((u_int64_t)resp->snscb_nodename[0]) << 56) | 2151 (((u_int64_t)resp->snscb_nodename[1]) << 48) | 2152 (((u_int64_t)resp->snscb_nodename[2]) << 40) | 2153 (((u_int64_t)resp->snscb_nodename[3]) << 32) | 2154 (((u_int64_t)resp->snscb_nodename[4]) << 24) | 2155 (((u_int64_t)resp->snscb_nodename[5]) << 16) | 2156 (((u_int64_t)resp->snscb_nodename[6]) << 8) | 2157 (((u_int64_t)resp->snscb_nodename[7])); 2158 if (portid == 0 || wwpn == 0) { 2159 break; 2160 } 2161 2162 switch (resp->snscb_port_type) { 2163 case 1: 2164 pt = " N_Port"; 2165 break; 2166 case 2: 2167 pt = " NL_Port"; 2168 break; 2169 case 3: 2170 pt = "F/NL_Port"; 2171 break; 2172 case 0x7f: 2173 pt = " Nx_Port"; 2174 break; 2175 case 0x81: 2176 pt = " F_port"; 2177 break; 2178 case 0x82: 2179 pt = " FL_Port"; 2180 break; 2181 case 0x84: 2182 pt = " E_port"; 2183 break; 2184 default: 2185 pt = "?"; 2186 break; 2187 } 2188 isp_prt(isp, ISP_LOGINFO, 2189 "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x", 2190 pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn), 2191 ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn)); 2192 /* 2193 * We're only interested in SCSI_FCP types (for now) 2194 */ 2195 if ((resp->snscb_fc4_types[2] & 1) == 0) { 2196 break; 2197 } 2198 if (fcp->isp_topo != TOPO_F_PORT) 2199 lrange = FC_SNS_ID+1; 2200 else 2201 lrange = 0; 2202 /* 2203 * Is it already in our list? 
2204 */ 2205 for (target = lrange; target < MAX_FC_TARG; target++) { 2206 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2207 continue; 2208 } 2209 lp = &fcp->portdb[target]; 2210 if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) { 2211 lp->fabric_dev = 1; 2212 break; 2213 } 2214 } 2215 if (target < MAX_FC_TARG) { 2216 break; 2217 } 2218 for (target = lrange; target < MAX_FC_TARG; target++) { 2219 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2220 continue; 2221 } 2222 lp = &fcp->portdb[target]; 2223 if (lp->port_wwn == 0) { 2224 break; 2225 } 2226 } 2227 if (target == MAX_FC_TARG) { 2228 isp_prt(isp, ISP_LOGWARN, 2229 "no more space for fabric devices"); 2230 break; 2231 } 2232 lp->node_wwn = wwnn; 2233 lp->port_wwn = wwpn; 2234 lp->portid = portid; 2235 lp->fabric_dev = 1; 2236 break; 2237 } 2238 #ifdef ISP_TARGET_MODE 2239 case ISPASYNC_TARGET_MESSAGE: 2240 { 2241 tmd_msg_t *mp = arg; 2242 isp_prt(isp, ISP_LOGDEBUG2, 2243 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x", 2244 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt, 2245 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval, 2246 mp->nt_msg[0]); 2247 break; 2248 } 2249 case ISPASYNC_TARGET_EVENT: 2250 { 2251 tmd_event_t *ep = arg; 2252 isp_prt(isp, ISP_LOGDEBUG2, 2253 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event); 2254 break; 2255 } 2256 case ISPASYNC_TARGET_ACTION: 2257 switch (((isphdr_t *)arg)->rqs_entry_type) { 2258 default: 2259 isp_prt(isp, ISP_LOGWARN, 2260 "event 0x%x for unhandled target action", 2261 ((isphdr_t *)arg)->rqs_entry_type); 2262 break; 2263 case RQSTYPE_ATIO: 2264 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 2265 break; 2266 case RQSTYPE_ATIO2: 2267 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 2268 break; 2269 case RQSTYPE_CTIO2: 2270 case RQSTYPE_CTIO: 2271 rv = isp_handle_platform_ctio(isp, arg); 2272 break; 2273 case RQSTYPE_ENABLE_LUN: 2274 case RQSTYPE_MODIFY_LUN: 2275 isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status); 2276 break; 
2277 } 2278 break; 2279 #endif 2280 default: 2281 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 2282 rv = -1; 2283 break; 2284 } 2285 return (rv); 2286 } 2287 2288 2289 /* 2290 * Locks are held before coming here. 2291 */ 2292 void 2293 isp_uninit(struct ispsoftc *isp) 2294 { 2295 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 2296 DISABLE_INTS(isp); 2297 } 2298 2299 void 2300 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...) 2301 { 2302 va_list ap; 2303 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 2304 return; 2305 } 2306 printf("%s: ", isp->isp_name); 2307 va_start(ap, fmt); 2308 vprintf(fmt, ap); 2309 va_end(ap); 2310 printf("\n"); 2311 } 2312