/* $FreeBSD$ */
/*
 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
 *
 * Copyright (c) 1997, 1998, 1999, 2000 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/isp/isp_freebsd.h>
#include <machine/stdarg.h>	/* for use by isp_prt below */

static void isp_intr_enable(void *);
static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
static void isp_poll(struct cam_sim *);
static void isp_relsim(void *);
static timeout_t isp_watchdog;
static void isp_action(struct cam_sim *, union ccb *);

static struct ispsoftc *isplist = NULL;

void
isp_attach(struct ispsoftc *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */
	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    isp->isp_unit, 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return;
	}

	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
	isp->isp_osinfo.ehook.ich_arg = isp;
	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
		printf("%s: could not establish interrupt enable hook\n",
		    isp->isp_name);
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		return;
	}

	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	isp->isp_sim = sim;
	isp->isp_path = path;

	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_DUALBUS(isp)) {
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    isp->isp_unit, 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			return;
		}

		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}
	isp->isp_state = ISP_RUNSTATE;
	ENABLE_INTS(isp);
	if (isplist == NULL) {
		isplist = isp;
	} else {
		struct ispsoftc *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}
}

static void
isp_intr_enable(void *arg)
{
	struct ispsoftc *isp = arg;
	ENABLE_INTS(isp);
	isp->isp_osinfo.intsok = 1;
	/* Release our hook so that the boot can continue. */
	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
}

/*
 * Put the target mode functions here, because some are inlines.
 */

#ifdef ISP_TARGET_MODE

static __inline int is_lun_enabled(struct ispsoftc *, lun_id_t);
static __inline int are_any_luns_enabled(struct ispsoftc *);
static __inline tstate_t *get_lun_statep(struct ispsoftc *, lun_id_t);
static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
static __inline int isp_psema_sig_rqe(struct ispsoftc *);
static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
static __inline void isp_vsema_rqe(struct ispsoftc *);
static cam_status
create_lun_state(struct ispsoftc *, struct cam_path *, tstate_t **);
static void destroy_lun_state(struct ispsoftc *, tstate_t *);
static void isp_en_lun(struct ispsoftc *, union ccb *);
static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
static cam_status isp_target_putback_atio(struct ispsoftc *, union ccb *);
static timeout_t isp_refire_putback_atio;

static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
static int isp_handle_platform_ctio(struct ispsoftc *, void *);
static void isp_handle_platform_ctio_part2(struct ispsoftc *, union ccb *);

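/*
 * A descriptive note on the LUN state helpers that follow: per-LUN target
 * mode state (tstate_t) lives in a small hash table keyed by LUN (lun_hash,
 * LUN_HASH_FUNC), with the wildcard LUN mapping to the default tsdflt state.
 * get_lun_statep() looks a LUN up and takes a hold reference on the state;
 * rls_lun_statep() drops that reference. destroy_lun_state() only unhooks
 * and frees an entry whose hold count is zero, so state still referenced by
 * an in-progress enable/disable cannot be freed out from under its user.
 */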
static __inline int
is_lun_enabled(struct ispsoftc *isp, lun_id_t lun)
{
	tstate_t *tptr;
	ISP_LOCK(isp);
	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
		ISP_UNLOCK(isp);
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun) {
			ISP_UNLOCK(isp);
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	ISP_UNLOCK(isp);
	return (0);
}

static __inline int
are_any_luns_enabled(struct ispsoftc *isp)
{
	int i;
	for (i = 0; i < LUN_HASH_SIZE; i++) {
		if (isp->isp_osinfo.lun_hash[i]) {
			return (1);
		}
	}
	return (0);
}

static __inline tstate_t *
get_lun_statep(struct ispsoftc *isp, lun_id_t lun)
{
	tstate_t *tptr;

	ISP_LOCK(isp);
	if (lun == CAM_LUN_WILDCARD) {
		tptr = &isp->isp_osinfo.tsdflt;
		tptr->hold++;
		ISP_UNLOCK(isp);
		return (tptr);
	} else {
		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)];
	}
	if (tptr == NULL) {
		ISP_UNLOCK(isp);
		return (NULL);
	}

	do {
		if (tptr->lun == lun) {
			tptr->hold++;
			ISP_UNLOCK(isp);
			return (tptr);
		}
	} while ((tptr = tptr->next) != NULL);
	ISP_UNLOCK(isp);
	return (tptr);
}

static __inline void
rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
{
	if (tptr->hold)
		tptr->hold--;
}

static __inline int
isp_psema_sig_rqe(struct ispsoftc *isp)
{
	ISP_LOCK(isp);
	while (isp->isp_osinfo.tmflags & TM_BUSY) {
		isp->isp_osinfo.tmflags |= TM_WANTED;
		if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) {
			ISP_UNLOCK(isp);
			return (-1);
		}
		isp->isp_osinfo.tmflags |= TM_BUSY;
	}
	ISP_UNLOCK(isp);
	return (0);
}

static __inline int
isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo)
{
	ISP_LOCK(isp);
	if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) {
		ISP_UNLOCK(isp);
		return (-1);
	}
	ISP_UNLOCK(isp);
	return (0);
}

static __inline void
isp_cv_signal_rqe(struct ispsoftc *isp, int status)
{
	isp->isp_osinfo.rstatus = status;
	wakeup(&isp->isp_osinfo.rstatus);
}

static __inline void
isp_vsema_rqe(struct ispsoftc *isp)
{
	ISP_LOCK(isp);
	if (isp->isp_osinfo.tmflags & TM_WANTED) {
		isp->isp_osinfo.tmflags &= ~TM_WANTED;
		wakeup(&isp->isp_osinfo.tmflags);
	}
	isp->isp_osinfo.tmflags &= ~TM_BUSY;
	ISP_UNLOCK(isp);
}

static cam_status
create_lun_state(struct ispsoftc *isp, struct cam_path *path, tstate_t **rslt)
{
	cam_status status;
	lun_id_t lun;
	tstate_t *tptr, *new;

	lun = xpt_path_lun_id(path);
	if (lun < 0) {
		return (CAM_LUN_INVALID);
	}
	if (is_lun_enabled(isp, lun)) {
		return (CAM_LUN_ALRDY_ENA);
	}
	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT);
	if (new == NULL) {
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(new, sizeof (tstate_t));

	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
	    xpt_path_target_id(path), xpt_path_lun_id(path));
	if (status != CAM_REQ_CMP) {
		free(new, M_DEVBUF);
		return (status);
	}
	new->lun = lun;
	SLIST_INIT(&new->atios);
	SLIST_INIT(&new->inots);
	new->hold = 1;

	ISP_LOCK(isp);
	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)] = new;
	} else {
		while (tptr->next)
			tptr = tptr->next;
		tptr->next = new;
	}
	ISP_UNLOCK(isp);
	*rslt = new;
	return (CAM_REQ_CMP);
}

static __inline void
destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
{
	tstate_t *lw, *pw;

	ISP_LOCK(isp);
	if (tptr->hold) {
		ISP_UNLOCK(isp);
		return;
	}
	pw = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)];
	if (pw == NULL) {
		ISP_UNLOCK(isp);
		return;
	} else if (pw->lun == tptr->lun) {
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)] =
		    pw->next;
	} else {
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			ISP_UNLOCK(isp);
			return;
		}
	}
	free(tptr, M_DEVBUF);
	ISP_UNLOCK(isp);
}

static void
isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
{
	const char *lfmt = "Lun now %sabled for target mode\n";
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	u_int16_t rstat;
	int bus, frozen = 0;
	lun_id_t lun;
	target_id_t tgt;

	bus = XS_CHANNEL(ccb);
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	/*
	 * Do some sanity checking first.
	 */
	if (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return;
	}
	if (IS_SCSI(isp)) {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != ((sdparam *) isp->isp_param)->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	} else {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != ((fcparam *) isp->isp_param)->isp_loopid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	}

	/*
	 * If Fibre Channel, stop and drain all activity to this bus.
	 */
	if (IS_FC(isp)) {
		ISP_LOCK(isp);
		frozen = 1;
		xpt_freeze_simq(isp->isp_sim, 1);
		isp->isp_osinfo.drain = 1;
		/* ISP_UNLOCK(isp); XXX NEED CV_WAIT HERE XXX */
		while (isp->isp_osinfo.drain) {
			tsleep(&isp->isp_osinfo.drain, PRIBIO, "ispdrain", 0);
		}
		ISP_UNLOCK(isp);
	}

	/*
	 * Check to see if we're enabling on fibre channel and
	 * don't yet have a notion of who the heck we are (no
	 * loop yet).
	 */
	if (IS_FC(isp) && cel->enable &&
	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
		int rv = 2 * 1000000;
		fcparam *fcp = isp->isp_param;

		ISP_LOCK(isp);
		rv = isp_control(isp, ISPCTL_FCLINK_TEST, &rv);
		ISP_UNLOCK(isp);
		if (rv || fcp->isp_fwstate != FW_READY) {
			xpt_print_path(ccb->ccb_h.path);
			printf("link status not good yet\n");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			if (frozen)
				xpt_release_simq(isp->isp_sim, 1);
			return;
		}
		ISP_LOCK(isp);
		rv = isp_control(isp, ISPCTL_PDB_SYNC, NULL);
		ISP_UNLOCK(isp);
		if (rv || fcp->isp_fwstate != FW_READY) {
			xpt_print_path(ccb->ccb_h.path);
			printf("could not get a good port database read\n");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			if (frozen)
				xpt_release_simq(isp->isp_sim, 1);
			return;
		}
	}

	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we enable/disable target mode but don't do any lun enabling.
	 */
	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
		int av;
		tptr = &isp->isp_osinfo.tsdflt;
		if (cel->enable) {
			if (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) {
				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			ccb->ccb_h.status =
			    xpt_create_path(&tptr->owner, NULL,
			    xpt_path_path_id(ccb->ccb_h.path),
			    xpt_path_target_id(ccb->ccb_h.path),
			    xpt_path_lun_id(ccb->ccb_h.path));
			if (ccb->ccb_h.status != CAM_REQ_CMP) {
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			SLIST_INIT(&tptr->atios);
			SLIST_INIT(&tptr->inots);
			av = 1;
			ISP_LOCK(isp);
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				xpt_free_path(tptr->owner);
				ISP_UNLOCK(isp);
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			isp->isp_osinfo.tmflags |= TM_TMODE_ENABLED;
			ISP_UNLOCK(isp);
		} else {
			if ((isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
				ccb->ccb_h.status = CAM_LUN_INVALID;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			if (are_any_luns_enabled(isp)) {
				ccb->ccb_h.status = CAM_SCSI_BUSY;
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			av = 0;
			ISP_LOCK(isp);
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				ISP_UNLOCK(isp);
				if (frozen)
					xpt_release_simq(isp->isp_sim, 1);
				return;
			}
			isp->isp_osinfo.tmflags &= ~TM_TMODE_ENABLED;
			ISP_UNLOCK(isp);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf(lfmt, (cel->enable) ? "en" : "dis");
		if (frozen)
			xpt_release_simq(isp->isp_sim, 1);
		return;
	}

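	/*
	 * From here on we are on the non-wildcard (single LUN) path: allocate
	 * or look up the per-LUN state, take the request-queue-entry semaphore
	 * so only one ENABLE/MODIFY LUN exchange is outstanding at a time,
	 * issue the firmware command(s), and wait (with a timeout) for the
	 * status that the response side posts via isp_cv_signal_rqe().
	 */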
	/*
	 * We can move along now...
	 */
	if (frozen)
		xpt_release_simq(isp->isp_sim, 1);

	if (cel->enable) {
		ccb->ccb_h.status =
		    create_lun_state(isp, ccb->ccb_h.path, &tptr);
		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			return;
		}
	} else {
		tptr = get_lun_statep(isp, lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	if (isp_psema_sig_rqe(isp)) {
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		return;
	}

	ISP_LOCK(isp);
	if (cel->enable) {
		u_int32_t seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;
		if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for ENABLE LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("ENABLE LUN returned 0x%x\n", rstat);
			goto out;
		}
	} else {
		u_int32_t seq;

		seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;

		if (isp_lun_cmd(isp, -RQSTYPE_MODIFY_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for MODIFY LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("MODIFY LUN returned 0x%x\n", rstat);
			goto out;
		}
		rstat = LUN_ERR;
		seq = isp->isp_osinfo.rollinfo++;

		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for ENABLE LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("ENABLE LUN returned 0x%x\n", rstat);
			goto out;
		}
	}
out:
	isp_vsema_rqe(isp);
	ISP_UNLOCK(isp);

	if (rstat != LUN_OK) {
		xpt_print_path(ccb->ccb_h.path);
		printf("lun %sable failed\n", (cel->enable) ? "en" : "dis");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
	} else {
		xpt_print_path(ccb->ccb_h.path);
		printf(lfmt, (cel->enable) ?
"en" : "dis"); 656 rls_lun_statep(isp, tptr); 657 if (cel->enable == 0) { 658 destroy_lun_state(isp, tptr); 659 } 660 ccb->ccb_h.status = CAM_REQ_CMP; 661 } 662 } 663 664 static cam_status 665 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb) 666 { 667 tstate_t *tptr; 668 struct ccb_hdr_slist *lp; 669 struct ccb_hdr *curelm; 670 int found; 671 union ccb *accb = ccb->cab.abort_ccb; 672 673 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 674 if (IS_FC(isp) && (accb->ccb_h.target_id != 675 ((fcparam *) isp->isp_param)->isp_loopid)) { 676 return (CAM_PATH_INVALID); 677 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 678 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 679 return (CAM_PATH_INVALID); 680 } 681 } 682 tptr = get_lun_statep(isp, accb->ccb_h.target_lun); 683 if (tptr == NULL) { 684 return (CAM_PATH_INVALID); 685 } 686 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 687 lp = &tptr->atios; 688 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 689 lp = &tptr->inots; 690 } else { 691 rls_lun_statep(isp, tptr); 692 return (CAM_UA_ABORT); 693 } 694 curelm = SLIST_FIRST(lp); 695 found = 0; 696 if (curelm == &accb->ccb_h) { 697 found = 1; 698 SLIST_REMOVE_HEAD(lp, sim_links.sle); 699 } else { 700 while(curelm != NULL) { 701 struct ccb_hdr *nextelm; 702 703 nextelm = SLIST_NEXT(curelm, sim_links.sle); 704 if (nextelm == &accb->ccb_h) { 705 found = 1; 706 SLIST_NEXT(curelm, sim_links.sle) = 707 SLIST_NEXT(nextelm, sim_links.sle); 708 break; 709 } 710 curelm = nextelm; 711 } 712 } 713 rls_lun_statep(isp, tptr); 714 if (found) { 715 accb->ccb_h.status = CAM_REQ_ABORTED; 716 return (CAM_REQ_CMP); 717 } 718 return(CAM_PATH_INVALID); 719 } 720 721 static cam_status 722 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb) 723 { 724 void *qe; 725 struct ccb_scsiio *cso = &ccb->csio; 726 u_int32_t *hp, save_handle; 727 u_int16_t iptr, optr; 728 729 730 if (isp_getrqentry(isp, &iptr, &optr, &qe)) { 731 xpt_print_path(ccb->ccb_h.path); 732 printf("Request Queue Overflow in isp_target_start_ctio\n"); 733 return (CAM_RESRC_UNAVAIL); 734 } 735 bzero(qe, QENTRY_LEN); 736 737 /* 738 * We're either moving data or completing a command here. 739 */ 740 741 if (IS_FC(isp)) { 742 struct ccb_accept_tio *atiop; 743 ct2_entry_t *cto = qe; 744 745 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 746 cto->ct_header.rqs_entry_count = 1; 747 cto->ct_iid = cso->init_id; 748 if (isp->isp_maxluns <= 16) { 749 cto->ct_lun = ccb->ccb_h.target_lun; 750 } 751 /* 752 * Start with a residual based on what the original datalength 753 * was supposed to be. Basically, we ignore what CAM has set 754 * for residuals. The data transfer routines will knock off 755 * the residual for each byte actually moved- and also will 756 * be responsible for setting the underrun flag. 757 */ 758 /* HACK! HACK! */ 759 if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) { 760 cto->ct_resid = atiop->ccb_h.spriv_field0; 761 } 762 763 /* 764 * We always have to use the tag_id- it has the RX_ID 765 * for this exchage. 
		 */
		cto->ct_rxid = cso->tag_id;
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
				int m = min(cso->sense_len, MAXRESPLEN);
				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
				cto->rsp.m1.ct_senselen = m;
				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
			}
		} else {
			cto->ct_flags |= CT2_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT2_DATA_IN;
			} else {
				cto->ct_flags |= CT2_DATA_OUT;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
			}
			/*
			 * If we're sending data and status back together,
			 * we can't also send back sense data as well.
			 */
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}
		if (cto->ct_flags & CAM_SEND_STATUS) {
			isp_prt(isp, ISP_LOGTDEBUG2,
			    "CTIO2 RX_ID 0x%x SCSI STATUS 0x%x datalength %u",
			    cto->ct_rxid, cso->scsi_status, cto->ct_resid);
		}
		hp = &cto->ct_reserved;
	} else {
		ct_entry_t *cto = qe;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		cto->ct_tgt = ccb->ccb_h.target_id;
		cto->ct_lun = ccb->ccb_h.target_lun;
		if (cso->tag_id && cso->tag_action) {
			/*
			 * We don't specify a tag type for regular SCSI.
			 * Just the tag value and set the flag.
			 */
			cto->ct_tag_val = cso->tag_id;
			cto->ct_flags |= CT_TQAE;
		}
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			cto->ct_flags |= CT_NODISC;
		}
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT_NO_DATA;
		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
			cto->ct_flags |= CT_SENDSTATUS;
			cto->ct_scsi_status = cso->scsi_status;
			cto->ct_resid = cso->resid;
		}
		if (cto->ct_flags & CAM_SEND_STATUS) {
			isp_prt(isp, ISP_LOGTDEBUG2,
			    "CTIO SCSI STATUS 0x%x resid %d",
			    cso->scsi_status, cso->resid);
		}
		hp = &cto->ct_reserved;
		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
	}

	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("No XFLIST pointers for isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}

	/*
	 * Call the dma setup routines for this entry (and any subsequent
	 * CTIOs) if there's data to move, and then tell the f/w it's got
	 * new things to play with. As with isp_start's usage of DMA setup,
	 * any swizzling is done in the machine dependent layer. Because
	 * of this, we put the request onto the queue area first in native
	 * format.
	 */
	save_handle = *hp;
	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
	case CMD_QUEUED:
		ISP_ADD_REQUEST(isp, iptr);
		return (CAM_REQ_INPROG);

	case CMD_EAGAIN:
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		isp_destroy_handle(isp, save_handle);
		return (CAM_RESRC_UNAVAIL);

	default:
		isp_destroy_handle(isp, save_handle);
		return (XS_ERR(ccb));
	}
}

static cam_status
isp_target_putback_atio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_accept_tio *atiop;
	u_int16_t iptr, optr;

	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_putback_atio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(qe, QENTRY_LEN);
	atiop = (struct ccb_accept_tio *) ccb;
	if (IS_FC(isp)) {
		at2_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		if (isp->isp_maxluns > 16) {
			at->at_scclun = (uint16_t) atiop->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) atiop->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = atiop->tag_id;
		ISP_SWIZ_ATIO2(isp, qe, qe);
	} else {
		at_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = atiop->init_id;
		at->at_tgt = atiop->ccb_h.target_id;
		at->at_lun = atiop->ccb_h.target_lun;
		at->at_status = CT_OK;
		if (atiop->ccb_h.status & CAM_TAG_ACTION_VALID) {
			at->at_tag_type = atiop->tag_action;
		}
		at->at_tag_val = atiop->tag_id;
		ISP_SWIZ_ATIO(isp, qe, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, iptr);
	return (CAM_REQ_CMP);
}

static void
isp_refire_putback_atio(void *arg)
{
	union ccb *ccb = arg;
	int s = splcam();
	if (isp_target_putback_atio(XS_ISP(ccb), ccb) != CAM_REQ_CMP) {
		(void) timeout(isp_refire_putback_atio, ccb, 10);
	} else {
		isp_handle_platform_ctio_part2(XS_ISP(ccb), ccb);
	}
	splx(s);
}

/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus - i.e. the initiator
	 * did not set DiscPriv in the identify message. We don't care
	 * about this so it's ignored.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		printf("%s: PHASE ERROR\n", isp->isp_name);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		printf("%s: bogus atio (0x%x) leaked to platform\n",
		    isp->isp_name, status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	tptr = get_lun_statep(isp, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		printf("no ATIOS for lun %d from initiator %d\n",
		    aep->at_lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	if (tptr == &isp->isp_osinfo.tsdflt) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_tag_val;
	if ((atiop->tag_action = aep->at_tag_type) != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG2,
	    "ATIO CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_cdb[0] & 0xff, aep->at_iid, aep->at_lun,
	    aep->at_tag_val & 0xff, aep->at_tag_type,
	    (aep->at_flags & AT_NODISC)? "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}

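/*
 * Fibre Channel flavor of the above. The notable differences, as the code
 * below shows: the LUN comes from at_scclun or at_lun depending on how many
 * LUNs the firmware supports, the exchange is identified by its RX_ID
 * (carried back in the CCB's tag_id), the firmware task attribute is mapped
 * onto a CAM tag action, and the original data length is preserved in the
 * CCB's spriv_field0 so that the CTIO path can compute residuals.
 */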
static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		printf("%s: bogus atio (0x%x) leaked to platform\n",
		    isp->isp_name, aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	if (isp->isp_maxluns > 16) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * What we'd like to know is whether or not we have a listener
		 * upstream that really hasn't configured yet. If we do, then
		 * we can give a more sensible reply here. If not, then we can
		 * reject this out of hand.
		 *
		 * Choices for what to send were
		 *
		 *	Not Ready, Unit Not Self-Configured Yet
		 *	(0x2,0x3e,0x00)
		 *
		 * for the former and
		 *
		 *	Illegal Request, Logical Unit Not Supported
		 *	(0x5,0x25,0x00)
		 *
		 * for the latter.
		 *
		 * We used to decide whether there was at least one listener
		 * based upon whether the black hole driver was configured.
		 * However, recent config(8) changes have made this hard to do
		 * at this time.
		 */
		u_int32_t ccode = SCSI_STATUS_BUSY;

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, ccode, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		printf("no ATIOS for lun %d from initiator %d\n",
		    lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);

	if (tptr == &isp->isp_osinfo.tsdflt) {
		atiop->ccb_h.target_id =
		    ((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	if (aep->at_status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
	case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ACAQ:	/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	if (atiop->tag_action != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}

	/*
	 * Preserve overall command datalength in private field.
	 */
	atiop->ccb_h.spriv_field0 = aep->at_datalen;

	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG2,
	    "ATIO2 RX_ID 0x%x CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid & 0xffff, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam;

	/*
	 * CTIO and CTIO2 are close enough....
	 */
	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_reserved);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_reserved);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		isp_prt(isp, ISP_LOGTDEBUG2,
		    "CTIO2 RX_ID 0x%x sts 0x%x flg 0x%x sns %d FIN",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0);
		notify_cam = ct->ct_header.rqs_seqno;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		isp_prt(isp, ISP_LOGTDEBUG2,
		    "CTIO tag 0x%x sts 0x%x flg 0x%x FIN",
		    ct->ct_tag_val, ct->ct_status, ct->ct_flags);
		notify_cam = ct->ct_header.rqs_seqno;
	}

	/*
	 * We're here either because data transfers are done (and
	 * it's time to send a final status CTIO) or because the final
	 * status CTIO is done. We don't get called for all intermediate
	 * CTIOs that happen for a large data transfer.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. The exception is that we clear
	 * the notion of handling a non-disconnecting command here.
	 */
	if (sentstatus) {
		/*
		 * Data transfer done. See if all went okay.
		 */
		if (ok) {
			ccb->csio.resid = 0;
		} else {
			ccb->csio.resid = ccb->csio.dxfer_len;
		}
	}

	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG1, "Intermediate CTIO done");
		return (0);
	}
	isp_prt(isp, ISP_LOGTDEBUG1, "Final CTIO done");
	if (isp_target_putback_atio(isp, ccb) != CAM_REQ_CMP) {
		(void) timeout(isp_refire_putback_atio, ccb, 10);
	} else {
		isp_handle_platform_ctio_part2(isp, ccb);
	}
	return (0);
}

static void
isp_handle_platform_ctio_part2(struct ispsoftc *isp, union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				isp_prt(isp, ISP_LOGDEBUG2, "ctio->devqfrozen");
			}
		} else {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
		}
	}
	xpt_done(ccb);
}
#endif

static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int rvf, tgt;

			tgt = xpt_path_target_id(path);
			rvf = ISP_FW_REVX(isp->isp_fwrev);
			ISP_LOCK(isp);
			sdp += cam_sim_bus(sim);
			isp->isp_update |= (1 << cam_sim_bus(sim));
			nflags = DPARM_SAFE_DFLT;
			if (rvf >= ISP_FW_REV(7, 55, 0) ||
			    (ISP_FW_REV(4, 55, 0) <= rvf &&
			    (rvf < ISP_FW_REV(5, 0, 0)))) {
				nflags |= DPARM_NARROW | DPARM_ASYNC;
			}
			oflags = sdp->isp_devparam[tgt].dev_flags;
			sdp->isp_devparam[tgt].dev_flags = nflags;
			sdp->isp_devparam[tgt].dev_update = 1;
			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
			sdp->isp_devparam[tgt].dev_flags = oflags;
			ISP_UNLOCK(isp);
		}
		break;
	default:
		printf("%s: isp_attach Async Code 0x%x\n", isp->isp_name, code);
		break;
	}
}

static void
isp_poll(struct cam_sim *sim)
{
	isp_intr((struct ispsoftc *) cam_sim_softc(sim));
}

static void
isp_relsim(void *arg)
{
	struct ispsoftc *isp = arg;
	ISP_LOCK(isp);
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			xpt_release_simq(isp->isp_sim, 1);
			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
		}
	}
	ISP_UNLOCK(isp);
}

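/*
 * Per-command watchdog. Each XPT_SCSI_IO that isp_action() successfully
 * queues arms a timeout that lands here. We first make sure the command
 * hasn't already completed (and that we aren't recursing), then see whether
 * a pending interrupt would complete it. If the command has already had its
 * grace period, we abort it, release its DMA resources and handle, and fail
 * it with CAM_CMD_TIMEOUT; otherwise we push a SYNC_ALL marker onto the
 * request queue, mark the command as being on borrowed time, and re-arm the
 * timeout for one more pass.
 */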
static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

		r = ISP_READ(isp, BIU_ISR);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup (%x, %x)", handle, r);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			printf("%s: watchdog timeout (%x, %x)\n",
			    isp->isp_name, handle, r);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;

			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	ISP_UNLOCK(isp);
}

static void
isp_action(struct cam_sim *sim, union ccb *ccb)
{
	int bus, tgt, error;
	struct ispsoftc *isp;
	struct ccb_trans_settings *cts;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));

	isp = (struct ispsoftc *)cam_sim_softc(sim);
	ccb->ccb_h.sim_priv.entries[0].field = 0;
	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
	if (isp->isp_state != ISP_RUNSTATE &&
	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
		ISP_LOCK(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_UNLOCK(isp);
			/*
			 * Lie. Say it was a selection timeout.
			 */
			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISP_UNLOCK(isp);
	}
	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
		}
#ifdef DIAGNOSTIC
		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		}
		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
			printf("%s: invalid tgt/lun (%d.%d) in XPT_SCSI_IO\n",
			    isp->isp_name, ccb->ccb_h.target_id,
			    ccb->ccb_h.target_lun);
			xpt_done(ccb);
			break;
		}
#endif
		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
		ISP_LOCK(isp);
		error = isp_start((XS_T *) ccb);
		ISP_UNLOCK(isp);
		switch (error) {
		case CMD_QUEUED:
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
				int ticks;
				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
					ticks = 60 * 1000 * hz;
				else
					ticks = ccb->ccb_h.timeout * hz;
				ticks = ((ticks + 999) / 1000) + hz + hz;
				ccb->ccb_h.timeout_ch =
				    timeout(isp_watchdog, (caddr_t)ccb, ticks);
			} else {
				callout_handle_init(&ccb->ccb_h.timeout_ch);
			}
			break;
		case CMD_RQLATER:
			if (isp->isp_osinfo.simqfrozen == 0) {
				isp_prt(isp, ISP_LOGDEBUG2,
				    "RQLATER freeze simq");
				isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED;
				timeout(isp_relsim, isp, 500);
				xpt_freeze_simq(sim, 1);
			}
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
			break;
		case CMD_EAGAIN:
			if (isp->isp_osinfo.simqfrozen == 0) {
				xpt_freeze_simq(sim, 1);
				isp_prt(isp, ISP_LOGDEBUG2,
				    "EAGAIN freeze simq");
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
			break;
		case CMD_COMPLETE:
			isp_done((struct ccb_scsiio *) ccb);
			break;
		default:
			printf("%s: What's this? 0x%x at %d in file %s\n",
			    isp->isp_name, error, __LINE__, __FILE__);
			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
			xpt_done(ccb);
		}
		break;

#ifdef ISP_TARGET_MODE
	case XPT_EN_LUN:		/* Enable LUN as a target */
		isp_en_lun(isp, ccb);
		xpt_done(ccb);
		break;

	case XPT_NOTIFY_ACK:		/* recycle notify ack */
	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
	{
		tstate_t *tptr = get_lun_statep(isp, ccb->ccb_h.target_lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			xpt_done(ccb);
			break;
		}
		ccb->ccb_h.sim_priv.entries[0].field = 0;
		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
		ISP_LOCK(isp);
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
#if 0
			(void) isp_target_putback_atio(isp, ccb);
#endif
			SLIST_INSERT_HEAD(&tptr->atios,
			    &ccb->ccb_h, sim_links.sle);
		} else {
			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
			    sim_links.sle);
		}
		ISP_UNLOCK(isp);
		rls_lun_statep(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		ISP_LOCK(isp);
		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
			if (isp->isp_osinfo.simqfrozen == 0) {
				xpt_freeze_simq(sim, 1);
				xpt_print_path(ccb->ccb_h.path);
				printf("XPT_CONT_TARGET_IO freeze simq\n");
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
		} else {
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
		}
		ISP_UNLOCK(isp);
		break;
	}
#endif
	case XPT_RESET_DEV:		/* BDR the specified SCSI device */

		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tgt = ccb->ccb_h.target_id;
		tgt |= (bus << 16);

		ISP_LOCK(isp);
		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
		ISP_UNLOCK(isp);
		if (error) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		union ccb *accb = ccb->cab.abort_ccb;
		switch (accb->ccb_h.func_code) {
#ifdef ISP_TARGET_MODE
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
#endif
		case XPT_SCSI_IO:
			ISP_LOCK(isp);
			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
			ISP_UNLOCK(isp);
			if (error) {
				ccb->ccb_h.status = CAM_UA_ABORT;
			} else {
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		default:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */

		cts = &ccb->cts;
		tgt = cts->ccb_h.target_id;
		ISP_LOCK(isp);
		if (IS_SCSI(isp)) {
			sdparam *sdp = isp->isp_param;
			u_int16_t *dptr;

			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

			sdp += bus;
#if 0
			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS)
				dptr = &sdp->isp_devparam[tgt].cur_dflags;
			else
				dptr = &sdp->isp_devparam[tgt].dev_flags;
#else
			/*
			 * We always update (internally) from dev_flags
			 * so any request to change settings just
			 * gets vectored to that location.
			 */
			dptr = &sdp->isp_devparam[tgt].dev_flags;
#endif

			/*
			 * Note that these operations affect the
			 * goal flags (dev_flags)- not the current
			 * state flags. Then we mark things so that
			 * the next operation to this HBA will cause
			 * the update to occur.
			 */
			if (cts->valid & CCB_TRANS_DISC_VALID) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					*dptr |= DPARM_DISC;
				} else {
					*dptr &= ~DPARM_DISC;
				}
			}
			if (cts->valid & CCB_TRANS_TQ_VALID) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					*dptr |= DPARM_TQING;
				} else {
					*dptr &= ~DPARM_TQING;
				}
			}
			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_16_BIT:
					*dptr |= DPARM_WIDE;
					break;
				default:
					*dptr &= ~DPARM_WIDE;
				}
			}
			/*
			 * Any SYNC RATE of nonzero and SYNC_OFFSET
			 * of nonzero will cause us to go to the
			 * selected (from NVRAM) maximum value for
			 * this device. At a later point, we'll
			 * allow finer control.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
			    (cts->sync_offset > 0)) {
				*dptr |= DPARM_SYNC;
			} else {
				*dptr &= ~DPARM_SYNC;
			}
			*dptr |= DPARM_SAFE_DFLT;
			if (bootverbose || isp->isp_dblev >= 3)
				printf("%s: %d.%d set %s period 0x%x offset "
				    "0x%x flags 0x%x\n", isp->isp_name, bus,
				    tgt,
				    (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
				    "current" : "user",
				    sdp->isp_devparam[tgt].sync_period,
				    sdp->isp_devparam[tgt].sync_offset,
				    sdp->isp_devparam[tgt].dev_flags);
			sdp->isp_devparam[tgt].dev_update = 1;
			isp->isp_update |= (1 << bus);
		}
		ISP_UNLOCK(isp);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:

		cts = &ccb->cts;
		tgt = cts->ccb_h.target_id;
		if (IS_FC(isp)) {
			/*
			 * A lot of normal SCSI things don't make sense.
			 */
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			/*
			 * How do you measure the width of a high
			 * speed serial bus? Well, in bytes.
			 *
			 * Offset and period make no sense, though, so we set
			 * (above) a 'base' transfer speed to be gigabit.
			 */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		} else {
			sdparam *sdp = isp->isp_param;
			u_int16_t dval, pval, oval;
			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

			sdp += bus;
			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) {
				ISP_LOCK(isp);
				sdp->isp_devparam[tgt].dev_refresh = 1;
				isp->isp_update |= (1 << bus);
				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
				    NULL);
				ISP_UNLOCK(isp);
				dval = sdp->isp_devparam[tgt].cur_dflags;
				oval = sdp->isp_devparam[tgt].cur_offset;
				pval = sdp->isp_devparam[tgt].cur_period;
			} else {
				dval = sdp->isp_devparam[tgt].dev_flags;
				oval = sdp->isp_devparam[tgt].sync_offset;
				pval = sdp->isp_devparam[tgt].sync_period;
			}

			ISP_LOCK(isp);
			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

			if (dval & DPARM_DISC) {
				cts->flags |= CCB_TRANS_DISC_ENB;
			}
			if (dval & DPARM_TQING) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			if (dval & DPARM_WIDE) {
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			} else {
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			}
			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;

			if ((dval & DPARM_SYNC) && oval != 0) {
				cts->sync_period = pval;
				cts->sync_offset = oval;
				cts->valid |=
				    CCB_TRANS_SYNC_RATE_VALID |
				    CCB_TRANS_SYNC_OFFSET_VALID;
			}
			ISP_UNLOCK(isp);
			if (bootverbose || isp->isp_dblev >= 3)
				printf("%s: %d.%d get %s period 0x%x offset "
				    "0x%x flags 0x%x\n", isp->isp_name, bus,
				    tgt,
				    (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
				    "current" : "user", pval, oval, dval);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t secs_per_cylinder;
		u_int32_t size_mb;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			printf("%s: %d.%d XPT_CALC_GEOMETRY block size 0?\n",
			    isp->isp_name, ccg->ccb_h.target_id,
			    ccg->ccb_h.target_lun);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
		size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
		if (size_mb > 1024) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified bus */
		bus = cam_sim_bus(sim);
		ISP_LOCK(isp);
		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
		ISP_UNLOCK(isp);
		if (error)
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		else {
			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
			else if (isp->isp_path != NULL)
				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
#ifdef ISP_TARGET_MODE
		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
#else
		cpi->target_sprt = 0;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
		cpi->bus_id = cam_sim_bus(sim);
		if (IS_FC(isp)) {
			cpi->hba_misc = PIM_NOBUSRESET;
			/*
			 * Because our loop ID can shift from time to time,
			 * make our initiator ID out of range of our bus.
			 */
			cpi->initiator_id = cpi->max_target + 1;

			/*
			 * Set base transfer capabilities for Fibre Channel.
			 * Technically not correct because we don't know
			 * what media we're running on top of- but we'll
			 * look good if we always say 100MB/s.
			 */
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else {
			sdparam *sdp = isp->isp_param;
			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->hba_misc = 0;
			cpi->initiator_id = sdp->isp_initiator_id;
			cpi->base_transfer_speed = 3300;
		}
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

#define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
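/*
 * Completion path for initiator mode commands; the watchdog above also
 * routes timed-out commands here. This maps a non-OK SCSI status onto the
 * corresponding CAM status, freezes the device queue on errors (setting
 * CAM_DEV_QFRZN), unwinds any resource-based simq freeze by marking the
 * CCB with CAM_RELEASE_SIMQ, and finally hands the CCB back to CAM via
 * xpt_done() unless the watchdog still holds the command.
 */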
void
isp_done(struct ccb_scsiio *sccb)
{
	struct ispsoftc *isp = XS_ISP(sccb);

	if (XS_NOERR(sccb))
		XS_SETERR(sccb, CAM_REQ_CMP);

	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
	    (sccb->scsi_status != SCSI_STATUS_OK)) {
		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		} else {
			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		}
	}

	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
			sccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(sccb->ccb_h.path, 1);
			if (sccb->scsi_status != SCSI_STATUS_OK)
				isp_prt(isp, ISP_LOGDEBUG2,
				    "freeze devq %d.%d %x %x",
				    sccb->ccb_h.target_id,
				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
				    sccb->scsi_status);
		}
	}

	/*
	 * If we were frozen waiting on resources, clear that condition.
	 * If we are no longer frozen, and the devq isn't frozen, mark
	 * the completing CCB to have the XPT layer release the simq.
    /*
     * If we were frozen waiting for resources, clear the resource-wait
     * freeze. If nothing else is keeping the simq frozen, and the device
     * queue is not frozen either, mark this completing CCB so that the
     * XPT layer releases the simq on our behalf.
     */
    if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
        isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
        if (isp->isp_osinfo.simqfrozen == 0) {
            if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
                isp_prt(isp, ISP_LOGDEBUG2,
                    "isp_done->relsimq");
                sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
            } else {
                isp_prt(isp, ISP_LOGDEBUG2,
                    "isp_done->devq frozen");
            }
        } else {
            isp_prt(isp, ISP_LOGDEBUG2,
                "isp_done -> simqfrozen = %x",
                isp->isp_osinfo.simqfrozen);
        }
    }
    if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
        (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
        xpt_print_path(sccb->ccb_h.path);
        printf("cam completion status 0x%x\n", sccb->ccb_h.status);
    }

    XS_CMD_S_DONE(sccb);
    if (XS_CMD_WDOG_P(sccb) == 0) {
        untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
        if (XS_CMD_GRACE_P(sccb)) {
            isp_prt(isp, ISP_LOGDEBUG2,
                "finished command on borrowed time");
        }
        XS_CMD_S_CLEAR(sccb);
        xpt_done((union ccb *) sccb);
    }
}

int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
    int bus, rv = 0;
    switch (cmd) {
    case ISPASYNC_NEW_TGT_PARAMS:
    {
        int flags, tgt;
        sdparam *sdp = isp->isp_param;
        struct ccb_trans_settings neg;
        struct cam_path *tmppath;

        tgt = *((int *)arg);
        bus = (tgt >> 16) & 0xffff;
        tgt &= 0xffff;
        sdp += bus;
        if (xpt_create_path(&tmppath, NULL,
            cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
            tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
            xpt_print_path(isp->isp_path);
            printf("isp_async cannot make temp path for "
                "target %d bus %d\n", tgt, bus);
            rv = -1;
            break;
        }
        flags = sdp->isp_devparam[tgt].cur_dflags;
        neg.flags = 0;    /* start from a clean flags field before OR-ing bits in */
        neg.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
        if (flags & DPARM_DISC) {
            neg.flags |= CCB_TRANS_DISC_ENB;
        }
        if (flags & DPARM_TQING) {
            neg.flags |= CCB_TRANS_TAG_ENB;
        }
        neg.valid |= CCB_TRANS_BUS_WIDTH_VALID;
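        /*
         * Fold in the remaining negotiation state: DPARM_WIDE selects
         * the bus width, and the current sync period/offset are marked
         * valid only when DPARM_SYNC is set. The result is announced to
         * interested peripherals via the AC_TRANSFER_NEG async below.
         */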
        neg.bus_width = (flags & DPARM_WIDE) ?
            MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
        neg.sync_period = sdp->isp_devparam[tgt].cur_period;
        neg.sync_offset = sdp->isp_devparam[tgt].cur_offset;
        if (flags & DPARM_SYNC) {
            neg.valid |=
                CCB_TRANS_SYNC_RATE_VALID |
                CCB_TRANS_SYNC_OFFSET_VALID;
        }
        isp_prt(isp, ISP_LOGDEBUG2,
            "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
            bus, tgt, neg.sync_period, neg.sync_offset, flags);
        xpt_setup_ccb(&neg.ccb_h, tmppath, 1);
        xpt_async(AC_TRANSFER_NEG, tmppath, &neg);
        xpt_free_path(tmppath);
        break;
    }
    case ISPASYNC_BUS_RESET:
        bus = *((int *)arg);
        isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
            bus);
        if (bus > 0 && isp->isp_path2) {
            xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
        } else if (isp->isp_path) {
            xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
        }
        break;
    case ISPASYNC_LOOP_DOWN:
        if (isp->isp_path) {
            if (isp->isp_osinfo.simqfrozen == 0) {
                isp_prt(isp, ISP_LOGDEBUG2,
                    "loop down freeze simq");
                xpt_freeze_simq(isp->isp_sim, 1);
            }
            isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
        }
        isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
        break;
    case ISPASYNC_LOOP_UP:
        if (isp->isp_path) {
            int wasfrozen =
                isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
            isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
            if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
                xpt_release_simq(isp->isp_sim, 1);
                isp_prt(isp, ISP_LOGDEBUG2,
                    "loop up release simq");
            }
        }
        isp_prt(isp, ISP_LOGINFO, "Loop UP");
        break;
    case ISPASYNC_PDB_CHANGED:
    {
        const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
            "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
        static const char *roles[4] = {
            "(none)", "Target", "Initiator", "Target/Initiator"
        };
        char *ptr;
        fcparam *fcp = isp->isp_param;
        int tgt = *((int *) arg);
        struct lportdb *lp = &fcp->portdb[tgt];

        if (lp->valid) {
            ptr = "arrived";
        } else {
            ptr = "disappeared";
        }
        isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
            roles[lp->roles & 0x3], ptr,
            (u_int32_t) (lp->port_wwn >> 32),
            (u_int32_t) (lp->port_wwn & 0xffffffffLL),
            (u_int32_t) (lp->node_wwn >> 32),
            (u_int32_t) (lp->node_wwn & 0xffffffffLL));
        break;
    }
    case ISPASYNC_CHANGE_NOTIFY:
        isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
        break;
#ifdef ISP2100_FABRIC
    case ISPASYNC_FABRIC_DEV:
    {
        int target;
        struct lportdb *lp;
        char *pt;
        sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
        u_int32_t portid;
        u_int64_t wwpn, wwnn;
        fcparam *fcp = isp->isp_param;

        rv = -1;

        portid =
            (((u_int32_t) resp->snscb_port_id[0]) << 16) |
            (((u_int32_t) resp->snscb_port_id[1]) << 8) |
            (((u_int32_t) resp->snscb_port_id[2]));

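        /*
         * The port and node names in the SNS response are 8-byte arrays;
         * assemble them into 64-bit worldwide names with byte 0 as the
         * most significant byte, the same way the 24-bit port ID was
         * assembled above.
         */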
        wwpn =
            (((u_int64_t)resp->snscb_portname[0]) << 56) |
            (((u_int64_t)resp->snscb_portname[1]) << 48) |
            (((u_int64_t)resp->snscb_portname[2]) << 40) |
            (((u_int64_t)resp->snscb_portname[3]) << 32) |
            (((u_int64_t)resp->snscb_portname[4]) << 24) |
            (((u_int64_t)resp->snscb_portname[5]) << 16) |
            (((u_int64_t)resp->snscb_portname[6]) << 8) |
            (((u_int64_t)resp->snscb_portname[7]));

        wwnn =
            (((u_int64_t)resp->snscb_nodename[0]) << 56) |
            (((u_int64_t)resp->snscb_nodename[1]) << 48) |
            (((u_int64_t)resp->snscb_nodename[2]) << 40) |
            (((u_int64_t)resp->snscb_nodename[3]) << 32) |
            (((u_int64_t)resp->snscb_nodename[4]) << 24) |
            (((u_int64_t)resp->snscb_nodename[5]) << 16) |
            (((u_int64_t)resp->snscb_nodename[6]) << 8) |
            (((u_int64_t)resp->snscb_nodename[7]));
        if (portid == 0 || wwpn == 0) {
            rv = 0;
            break;
        }

        switch (resp->snscb_port_type) {
        case 1:
            pt = " N_Port";
            break;
        case 2:
            pt = " NL_Port";
            break;
        case 3:
            pt = "F/NL_Port";
            break;
        case 0x7f:
            pt = " Nx_Port";
            break;
        case 0x81:
            pt = " F_port";
            break;
        case 0x82:
            pt = " FL_Port";
            break;
        case 0x84:
            pt = " E_port";
            break;
        default:
            pt = "?";
            break;
        }
        isp_prt(isp, ISP_LOGINFO,
            "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
            pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
            ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
        for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
            lp = &fcp->portdb[target];
            if (lp->port_wwn == wwpn && lp->node_wwn == wwnn)
                break;
        }
        if (target < MAX_FC_TARG) {
            rv = 0;
            break;
        }
        for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
            lp = &fcp->portdb[target];
            if (lp->port_wwn == 0)
                break;
        }
        if (target == MAX_FC_TARG) {
            printf("%s: no more space for fabric devices\n",
                isp->isp_name);
            break;
        }
        lp->node_wwn = wwnn;
        lp->port_wwn = wwpn;
        lp->portid = portid;
        rv = 0;
        break;
    }
#endif
#ifdef ISP_TARGET_MODE
    case ISPASYNC_TARGET_MESSAGE:
    {
        tmd_msg_t *mp = arg;
        isp_prt(isp, ISP_LOGDEBUG2,
            "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
            mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
            (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
            mp->nt_msg[0]);
        break;
    }
    case ISPASYNC_TARGET_EVENT:
    {
        tmd_event_t *ep = arg;
        isp_prt(isp, ISP_LOGDEBUG2,
            "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
        break;
    }
    case ISPASYNC_TARGET_ACTION:
        switch (((isphdr_t *)arg)->rqs_entry_type) {
        default:
            printf("%s: event 0x%x for unhandled target action\n",
                isp->isp_name, ((isphdr_t *)arg)->rqs_entry_type);
            break;
        case RQSTYPE_ATIO:
            rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
            break;
        case RQSTYPE_ATIO2:
            rv = isp_handle_platform_atio2(isp, (at2_entry_t *) arg);
            break;
        case RQSTYPE_CTIO2:
        case RQSTYPE_CTIO:
            rv = isp_handle_platform_ctio(isp, arg);
            break;
        case RQSTYPE_ENABLE_LUN:
        case RQSTYPE_MODIFY_LUN:
            isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status);
            break;
        }
        break;
#endif
    default:
        isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
        rv = -1;
        break;
    }
    return (rv);
}


/*
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
    ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
    DISABLE_INTS(isp);
}

void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
    va_list ap;
    if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
        return;
    }
    printf("%s: ", isp->isp_name);
    va_start(ap, fmt);
    vprintf(fmt, ap);
    va_end(ap);
    printf("\n");
}
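/*
 * A minimal usage sketch for isp_prt(), consistent with the calls above:
 * the message is emitted only when one of the bits in 'level' is set in
 * isp->isp_dblev (or unconditionally when level is ISP_LOGALL), is
 * prefixed with the unit name, and has a newline appended, e.g.
 *
 *    isp_prt(isp, ISP_LOGDEBUG2, "tgt %d flags %x", tgt, flags);
 *
 * ('tgt' and 'flags' here are just illustrative locals, not part of
 * this file.)
 */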