1 /* $FreeBSD$ */ 2 /* 3 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 4 * 5 *--------------------------------------- 6 * Copyright (c) 1997, 1998, 1999 by Matthew Jacob 7 * NASA/Ames Research Center 8 * All rights reserved. 9 *--------------------------------------- 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice immediately at the beginning of the file, without modification, 16 * this list of conditions, and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 27 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 */
#include <dev/isp/isp_freebsd.h>

static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
static void isp_poll(struct cam_sim *);
static void isp_relsim(void *);
static timeout_t isp_timeout;
static void isp_action(struct cam_sim *, union ccb *);


/* Singly-linked list of all attached instances, chained via isp_osinfo.next. */
static struct ispsoftc *isplist = NULL;
/* #define ISP_LUN0_ONLY 1 */
#ifdef DEBUG
int isp_debug = 2;
#elif defined(CAMDEBUG) || defined(DIAGNOSTIC)
int isp_debug = 1;
#else
int isp_debug = 0;
#endif

/*
 * Attach an initialized HBA to CAM: allocate one shared device queue,
 * register a SIM (and a second SIM for dual-bus 12X0 cards), hook an
 * AC_LOST_DEVICE async callback on each bus, and append the softc to
 * the global isplist.  Errors unwind whatever was registered so far
 * and return silently (no status is reported to the caller).
 */
void
isp_attach(struct ispsoftc *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */

	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 * Note: both SIMs of a dual-bus card share this one devq.
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    isp->isp_unit, 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return;
	}
	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		return;
	}

	/* Ask CAM to call us back when a device on this bus goes away. */
	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	isp->isp_sim = sim;
	isp->isp_path = path;

	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_DUALBUS(isp)) {
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    isp->isp_unit, 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			return;
		}

		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}
	isp->isp_state = ISP_RUNSTATE;
	/* Append to the tail of the global instance list. */
	if (isplist == NULL) {
		isplist = isp;
	} else {
		struct ispsoftc *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}
}


/*
 * Put the target mode functions here, because some are inlines
 */

#ifdef ISP_TARGET_MODE

static __inline int is_lun_enabled(struct ispsoftc *, lun_id_t);
static __inline int are_any_luns_enabled(struct ispsoftc *);
static __inline tstate_t *get_lun_statep(struct ispsoftc *, lun_id_t);
static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
static __inline int isp_psema_sig_rqe(struct ispsoftc *);
static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
static __inline void isp_vsema_rqe(struct ispsoftc *);
static cam_status
create_lun_state(struct ispsoftc *, struct cam_path *, tstate_t **);
static void destroy_lun_state(struct ispsoftc *, tstate_t *);
static void isp_en_lun(struct ispsoftc *, union ccb *);
static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);


static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
static int isp_handle_platform_ctio(struct ispsoftc *, void *);

/*
 * Return nonzero if a tstate for this lun is already on the lun hash.
 * The hash walk is done at splsoftcam.
 */
static __inline int
is_lun_enabled(struct ispsoftc *isp, lun_id_t lun)
{
	tstate_t *tptr;
	int s = splsoftcam();
	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
		splx(s);
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun) {
			splx(s);
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	splx(s);
	return (0);
}

/*
 * Return nonzero if any lun at all has target mode state hashed.
 * NOTE(review): unlocked scan - callers appear to tolerate the race.
 */
static __inline int
are_any_luns_enabled(struct ispsoftc *isp)
{
	int i;
	for (i = 0; i < LUN_HASH_SIZE; i++) {
		if (isp->isp_osinfo.lun_hash[i]) {
			return (1);
		}
	}
	return (0);
}

/*
 * Find the tstate for a lun and take a hold reference on it.
 * CAM_LUN_WILDCARD maps to the default (wildcard) tstate.  Returns
 * NULL if no state exists; release with rls_lun_statep().
 */
static __inline tstate_t *
get_lun_statep(struct ispsoftc *isp, lun_id_t lun)
{
	tstate_t *tptr;
	int s;

	s = splsoftcam();
	if (lun == CAM_LUN_WILDCARD) {
		tptr = &isp->isp_osinfo.tsdflt;
		tptr->hold++;
		splx(s);
		return (tptr);
	} else {
		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)];
	}
	if (tptr == NULL) {
		splx(s);
		return (NULL);
	}

	do {
		if (tptr->lun == lun) {
			tptr->hold++;
			splx(s);
			return (tptr);
		}
	} while ((tptr = tptr->next) != NULL);
	splx(s);
	return (tptr);	/* tptr is NULL when the loop falls through */
}

/*
 * Drop a hold reference taken by get_lun_statep().
 */
static __inline void
rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
{
	if (tptr->hold)
		tptr->hold--;
}

/*
 * 'P' (acquire) operation on the single target mode request
 * semaphore.  Returns -1 if the sleep was interrupted by a signal,
 * else 0.
 */
static __inline int
isp_psema_sig_rqe(struct ispsoftc *isp)
{ 256 int s = splcam(); 257 while (isp->isp_osinfo.tmflags & TM_BUSY) { 258 isp->isp_osinfo.tmflags |= TM_WANTED; 259 if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) { 260 splx(s); 261 return (-1); 262 } 263 isp->isp_osinfo.tmflags |= TM_BUSY; 264 } 265 splx(s); 266 return (0); 267 } 268 269 static __inline int 270 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo) 271 { 272 int s = splcam(); 273 if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) { 274 splx(s); 275 return (-1); 276 } 277 splx(s); 278 return (0); 279 } 280 281 static __inline void 282 isp_cv_signal_rqe(struct ispsoftc *isp, int status) 283 { 284 isp->isp_osinfo.rstatus = status; 285 wakeup(&isp->isp_osinfo.rstatus); 286 } 287 288 static __inline void 289 isp_vsema_rqe(struct ispsoftc *isp) 290 { 291 int s = splcam(); 292 if (isp->isp_osinfo.tmflags & TM_WANTED) { 293 isp->isp_osinfo.tmflags &= ~TM_WANTED; 294 wakeup(&isp->isp_osinfo.tmflags); 295 } 296 isp->isp_osinfo.tmflags &= ~TM_BUSY; 297 splx(s); 298 } 299 300 static cam_status 301 create_lun_state(struct ispsoftc *isp, struct cam_path *path, tstate_t **rslt) 302 { 303 int s; 304 cam_status status; 305 lun_id_t lun; 306 tstate_t *tptr, *new; 307 308 lun = xpt_path_lun_id(path); 309 if (lun < 0) { 310 return (CAM_LUN_INVALID); 311 } 312 if (is_lun_enabled(isp, lun)) { 313 return (CAM_LUN_ALRDY_ENA); 314 } 315 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT); 316 if (new == NULL) { 317 return (CAM_RESRC_UNAVAIL); 318 } 319 bzero(new, sizeof (tstate_t)); 320 321 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 322 xpt_path_target_id(path), xpt_path_lun_id(path)); 323 if (status != CAM_REQ_CMP) { 324 free(new, M_DEVBUF); 325 return (status); 326 } 327 new->lun = lun; 328 SLIST_INIT(&new->atios); 329 SLIST_INIT(&new->inots); 330 new->hold = 1; 331 332 s = splsoftcam(); 333 if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) { 334 isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)] = 
		    new;
	} else {
		while (tptr->next)
			tptr = tptr->next;
		tptr->next = new;
	}
	splx(s);
	*rslt = new;
	return (CAM_REQ_CMP);
}

/*
 * Unhash and free a tstate, but only if nobody holds a reference on
 * it (tptr->hold must be zero).  Quietly does nothing if the state
 * is held or is not on the hash.
 */
static __inline void
destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
{
	tstate_t *lw, *pw;
	int s;

	s = splsoftcam();
	if (tptr->hold) {
		splx(s);
		return;
	}
	pw = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)];
	if (pw == NULL) {
		splx(s);
		return;
	} else if (pw->lun == tptr->lun) {
		/* Head of the chain - unlink directly. */
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)] = pw->next;
	} else {
		/* Walk the chain looking for the matching lun. */
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			splx(s);
			return;
		}
	}
	free(tptr, M_DEVBUF);
	splx(s);
}

/*
 * XPT_EN_LUN handler: enable or disable a lun for target mode,
 * setting ccb->ccb_h.status to reflect the outcome.  A wildcard
 * target/lun toggles target mode for the whole adapter; a specific
 * lun is enabled or disabled in the firmware via ENABLE/MODIFY LUN
 * request-queue commands.
 */
static void
isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
{
	const char *lfmt = "Lun now %sabled for target mode\n";
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	u_int16_t rstat;
	int bus, s;
	lun_id_t lun;
	target_id_t tgt;


	bus = XS_CHANNEL(ccb);
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	/*
	 * First, check to see if we're enabling on fibre channel
	 * and don't yet have a notion of who the heck we are (no
	 * loop yet).
	 * We do this by testing the link state and syncing the port
	 * database before letting the enable proceed.
	 */
	if (IS_FC(isp) && cel->enable &&
	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
		int rv;
		fcparam *fcp = isp->isp_param;

		s = splcam();
		rv = isp_control(isp, ISPCTL_FCLINK_TEST, NULL);
		(void) splx(s);
		if (rv || fcp->isp_fwstate != FW_READY) {
			xpt_print_path(ccb->ccb_h.path);
			printf("link status not good yet\n");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			return;
		}
		s = splcam();
		rv = isp_control(isp, ISPCTL_PDB_SYNC, NULL);
		(void) splx(s);
		if (rv || fcp->isp_loopstate != LOOP_READY) {
			xpt_print_path(ccb->ccb_h.path);
			printf("could not get a good port database\n");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			return;
		}
	}


	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we enable/disable target mode but don't do any lun enabling.
	 */
	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
		int av;
		tptr = &isp->isp_osinfo.tsdflt;
		if (cel->enable) {
			if (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) {
				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
				return;
			}
			ccb->ccb_h.status =
			    xpt_create_path(&tptr->owner, NULL,
			    xpt_path_path_id(ccb->ccb_h.path),
			    xpt_path_target_id(ccb->ccb_h.path),
			    xpt_path_lun_id(ccb->ccb_h.path));
			if (ccb->ccb_h.status != CAM_REQ_CMP) {
				return;
			}
			SLIST_INIT(&tptr->atios);
			SLIST_INIT(&tptr->inots);
			av = 1;
			s = splcam();
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				xpt_free_path(tptr->owner);
				splx(s);
				return;
			}
			isp->isp_osinfo.tmflags |= TM_TMODE_ENABLED;
			splx(s);
		} else {
			if ((isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
				ccb->ccb_h.status = CAM_LUN_INVALID;
				return;
			}
			/* Refuse to drop target mode while luns are active. */
			if (are_any_luns_enabled(isp)) {
				ccb->ccb_h.status = CAM_SCSI_BUSY;
				return;
			}
			av = 0;
			s = splcam();
			av =
			    isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				splx(s);
				return;
			}
			isp->isp_osinfo.tmflags &= ~TM_TMODE_ENABLED;
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf(lfmt, (cel->enable) ? "en" : "dis");
		return;
	}

	/*
	 * Do some sanity checking first.
	 */

	if (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return;
	}
	/* In target mode the only valid non-wildcard target id is our own. */
	if (IS_SCSI(isp)) {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != ((sdparam *) isp->isp_param)->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	} else {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != ((fcparam *) isp->isp_param)->isp_loopid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	}


	/*
	 * Create lun state (enable) or look up the existing, held
	 * state (disable).  Either way tptr is held on success.
	 */
	if (cel->enable) {
		ccb->ccb_h.status =
		    create_lun_state(isp, ccb->ccb_h.path, &tptr);
		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			return;
		}
	} else {
		tptr = get_lun_statep(isp, lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	/* Serialize firmware lun commands; bail if interrupted by a signal. */
	if (isp_psema_sig_rqe(isp)) {
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		return;
	}

	s = splcam();
	if (cel->enable) {
		u_int32_t seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;
		if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for ENABLE LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("ENABLE LUN returned 0x%x\n", rstat);
			goto out;
		}
	} else {
		u_int32_t seq;

		seq
		    = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;

		/*
		 * Disable sequence: MODIFY LUN followed by ENABLE LUN,
		 * each with a fresh sequence number and a 30 second
		 * response timeout.  NOTE(review): the negated request
		 * types appear to select the "disable" form of the
		 * command inside isp_lun_cmd - confirm against its
		 * implementation.
		 */
		if (isp_lun_cmd(isp, -RQSTYPE_MODIFY_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for MODIFY LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("MODIFY LUN returned 0x%x\n", rstat);
			goto out;
		}
		rstat = LUN_ERR;
		seq = isp->isp_osinfo.rollinfo++;

		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for ENABLE LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("ENABLE LUN returned 0x%x\n", rstat);
			goto out;
		}
	}
out:
	isp_vsema_rqe(isp);
	splx(s);

	if (rstat != LUN_OK) {
		xpt_print_path(ccb->ccb_h.path);
		printf("lun %sable failed\n", (cel->enable) ? "en" : "dis");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
	} else {
		xpt_print_path(ccb->ccb_h.path);
		printf(lfmt, (cel->enable) ?
		    "en" : "dis");
		rls_lun_statep(isp, tptr);
		if (cel->enable == 0) {
			/* Successful disable: drop the lun state. */
			destroy_lun_state(isp, tptr);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
	}
}

/*
 * Abort a queued XPT_ACCEPT_TARGET_IO or XPT_IMMED_NOTIFY ccb by
 * unlinking it from the owning lun's pending list.  Returns
 * CAM_REQ_CMP if it was found (the ccb is marked CAM_REQ_ABORTED),
 * CAM_UA_ABORT for ccb types we do not queue, or CAM_PATH_INVALID.
 */
static cam_status
isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
{
	tstate_t *tptr;
	struct ccb_hdr_slist *lp;
	struct ccb_hdr *curelm;
	int found;
	union ccb *accb = ccb->cab.abort_ccb;

	/* A non-wildcard target must name our own (target mode) id. */
	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		if (IS_FC(isp) && (accb->ccb_h.target_id !=
		    ((fcparam *) isp->isp_param)->isp_loopid)) {
			return (CAM_PATH_INVALID);
		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
			return (CAM_PATH_INVALID);
		}
	}
	tptr = get_lun_statep(isp, accb->ccb_h.target_lun);
	if (tptr == NULL) {
		return (CAM_PATH_INVALID);
	}
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &tptr->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &tptr->inots;
	} else {
		rls_lun_statep(isp, tptr);
		return (CAM_UA_ABORT);
	}
	curelm = SLIST_FIRST(lp);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(lp, sim_links.sle);
	} else {
		/* Singly linked list: walk to the predecessor and unlink. */
		while(curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
				    SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	rls_lun_statep(isp, tptr);
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		return (CAM_REQ_CMP);
	}
	return(CAM_PATH_INVALID);
}

/*
 * Build and queue a CTIO (CTIO2 for fibre channel) from an
 * XPT_CONT_TARGET_IO ccb: move data and/or send final status back
 * to the initiator.  Returns CAM_REQ_INPROG once the request has
 * been handed to the firmware, or an error status on failure.
 */
static cam_status
isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_scsiio *cso = &ccb->csio;
	u_int32_t *hp, save_handle;
	u_int16_t iptr, optr;

	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(qe, QENTRY_LEN);

	/*
	 * We're either moving data or completing a command here.
	 */

	if (IS_FC(isp)) {
		ct2_entry_t *cto = qe;
		u_int16_t *ssptr = NULL;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		if (isp->isp_maxluns <= 16) {
			/*
			 * NOTE(review): only non-SCC (<= 16 lun) f/w
			 * appears to carry the lun in the CTIO2 - confirm.
			 */
			cto->ct_lun = ccb->ccb_h.target_lun;
		}
		cto->ct_rxid = cso->tag_id;
		cto->ct_flags = CT2_CCINCR;
		if (cso->dxfer_len == 0) {
			/* No data: mode 1 - status (and sense) only. */
			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
			KASSERT(ccb->ccb_h.flags & CAM_SEND_STATUS,
			    ("a CTIO with no data and no status?"));
			cto->ct_flags |= CT2_SENDSTATUS;
			ssptr = &cto->rsp.m1.ct_scsi_status;
			*ssptr = cso->scsi_status;
			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
				int m = min(cso->sense_len, MAXRESPLEN);
				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
				cto->rsp.m1.ct_senselen = m;
				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
			}
		} else {
			/* Data movement: mode 0. */
			cto->ct_flags |= CT2_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT2_DATA_IN;
			} else {
				cto->ct_flags |= CT2_DATA_OUT;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				ssptr = &cto->rsp.m0.ct_scsi_status;
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
			}
			/* No sense is sent with a data (mode 0) CTIO. */
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}
		/* Fold any residual into the returned SCSI status. */
		if (ssptr && cso->resid) {
			cto->ct_resid = cso->resid;
			if (cso->resid < 0)
				*ssptr |= CT2_DATA_OVER;
			else
				*ssptr |= CT2_DATA_UNDER;
		}
		if (isp_tdebug > 1 && ssptr &&
		    (cso->scsi_status != SCSI_STATUS_OK || cso->resid)) {
			printf("%s:CTIO2 RX_ID 0x%x SCSI STATUS 0x%x "
			    "resid %d\n", isp->isp_name, cto->ct_rxid,
			    cso->scsi_status, cso->resid);
		}
		hp = &cto->ct_reserved;
	} else {
		ct_entry_t *cto = qe;

cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 751 cto->ct_header.rqs_entry_count = 1; 752 cto->ct_iid = cso->init_id; 753 cto->ct_tgt = ccb->ccb_h.target_id; 754 cto->ct_lun = ccb->ccb_h.target_lun; 755 cto->ct_tag_type = cso->tag_action; 756 cto->ct_tag_val = cso->tag_id; 757 cto->ct_flags = CT_CCINCR; 758 if (cso->dxfer_len) { 759 cto->ct_flags |= CT_NO_DATA; 760 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 761 cto->ct_flags |= CT_DATA_IN; 762 } else { 763 cto->ct_flags |= CT_DATA_OUT; 764 } 765 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 766 cto->ct_flags |= CT_SENDSTATUS; 767 cto->ct_scsi_status = cso->scsi_status; 768 cto->ct_resid = cso->resid; 769 } 770 if (isp_tdebug > 1 && 771 (cso->scsi_status != SCSI_STATUS_OK || cso->resid)) { 772 printf("%s:CTIO SCSI STATUS 0x%x resid %d\n", 773 isp->isp_name, cso->scsi_status, cso->resid); 774 } 775 hp = &cto->ct_reserved; 776 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 777 } 778 779 if (isp_save_xs(isp, (ISP_SCSI_XFER_T *)ccb, hp)) { 780 xpt_print_path(ccb->ccb_h.path); 781 printf("No XFLIST pointers for isp_target_start_ctio\n"); 782 return (CAM_RESRC_UNAVAIL); 783 } 784 785 786 /* 787 * Call the dma setup routines for this entry (and any subsequent 788 * CTIOs) if there's data to move, and then tell the f/w it's got 789 * new things to play with. As with ispscsicmd's usage of DMA setup, 790 * any swizzling is done in the machine dependent layer. Because 791 * of this, we put the request onto the queue area first in native 792 * format. 
	 */

	save_handle = *hp;
	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
	case CMD_QUEUED:
		MemoryBarrier();
		ISP_ADD_REQUEST(isp, iptr);
		return (CAM_REQ_INPROG);

	case CMD_EAGAIN:
		/* Out of DMA resources: undo the handle; caller may retry. */
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		isp_destroy_handle(isp, save_handle);
		return (CAM_RESRC_UNAVAIL);

	default:
		isp_destroy_handle(isp, save_handle);
		return (ccb->ccb_h.spriv_field0);
	}
}

/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus - i.e. the initiator
	 * did not set DiscPriv in the identify message. We don't care
	 * about this so it's ignored.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		printf("%s: PHASE ERROR\n", isp->isp_name);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		printf("%s: bogus atio (0x%x) leaked to platform\n",
		    isp->isp_name, status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	/* Fall back to the wildcard lun state if this lun has none. */
	tptr = get_lun_statep(isp, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION.
		 * We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		printf("no ATIOS for lun %d from initiator %d\n",
		    aep->at_lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	if (tptr == &isp->isp_osinfo.tsdflt) {
		/* Wildcard state: fill in the actual nexus for upstream. */
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		xpt_print_path(tptr->owner);
		printf("incoming command that cannot disconnect\n");
	}


	/* Hand the CDB (and tag, if any) up via the ATIO ccb. */
	atiop->init_id = aep->at_iid;
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	if ((atiop->tag_action = aep->at_tag_type) != 0) {
		atiop->tag_id = aep->at_tag_val;
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	if (isp_tdebug > 1) {
		printf("%s:ATIO CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x\n",
		    isp->isp_name, aep->at_cdb[0] & 0xff, aep->at_iid,
		    aep->at_lun, aep->at_tag_val & 0xff, aep->at_tag_type);
	}
	rls_lun_statep(isp, tptr);
	return (0);
}
921 922 static int 923 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep) 924 { 925 lun_id_t lun; 926 tstate_t *tptr; 927 struct ccb_accept_tio *atiop; 928 929 /* 930 * The firmware status (except for the QLTM_SVALID bit) 931 * indicates why this ATIO was sent to us. 932 * 933 * If QLTM_SVALID is set, the firware has recommended Sense Data. 934 */ 935 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 936 printf("%s: bogus atio (0x%x) leaked to platform\n", 937 isp->isp_name, aep->at_status); 938 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 939 return (0); 940 } 941 942 if (isp->isp_maxluns > 16) { 943 lun = aep->at_scclun; 944 } else { 945 lun = aep->at_lun; 946 } 947 tptr = get_lun_statep(isp, lun); 948 if (tptr == NULL) { 949 tptr = get_lun_statep(isp, CAM_LUN_WILDCARD); 950 } 951 952 if (tptr == NULL) { 953 /* 954 * What we'd like to know is whether or not we have a listener 955 * upstream that really hasn't configured yet. If we do, then 956 * we can give a more sensible reply here. If not, then we can 957 * reject this out of hand. 958 * 959 * Choices for what to send were 960 * 961 * Not Ready, Unit Not Self-Configured Yet 962 * (0x2,0x3e,0x00) 963 * 964 * for the former and 965 * 966 * Illegal Request, Logical Unit Not Supported 967 * (0x5,0x25,0x00) 968 * 969 * for the latter. 970 * 971 * We used to decide whether there was at least one listener 972 * based upon whether the black hole driver was configured. 973 * However, recent config(8) changes have made this hard to do 974 * at this time. 975 * 976 */ 977 u_int32_t ccode = SCSI_STATUS_BUSY; 978 979 /* 980 * Because we can't autofeed sense data back with 981 * a command for parallel SCSI, we can't give back 982 * a CHECK CONDITION. We'll give back a BUSY status 983 * instead. This works out okay because the only 984 * time we should, in fact, get this, is in the 985 * case that somebody configured us without the 986 * blackhole driver, so they get what they deserve. 
987 */ 988 isp_endcmd(isp, aep, ccode, 0); 989 return (0); 990 } 991 992 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 993 if (atiop == NULL) { 994 /* 995 * Because we can't autofeed sense data back with 996 * a command for parallel SCSI, we can't give back 997 * a CHECK CONDITION. We'll give back a QUEUE FULL status 998 * instead. This works out okay because the only time we 999 * should, in fact, get this, is in the case that we've 1000 * run out of ATIOS. 1001 */ 1002 xpt_print_path(tptr->owner); 1003 printf("no ATIOS for lun %d from initiator %d\n", 1004 lun, aep->at_iid); 1005 rls_lun_statep(isp, tptr); 1006 if (aep->at_flags & AT_TQAE) 1007 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1008 else 1009 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1010 return (0); 1011 } 1012 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1013 if (tptr == &isp->isp_osinfo.tsdflt) { 1014 atiop->ccb_h.target_id = 1015 ((fcparam *)isp->isp_param)->isp_loopid; 1016 atiop->ccb_h.target_lun = lun; 1017 } 1018 atiop->init_id = aep->at_iid; 1019 atiop->cdb_len = ATIO2_CDBLEN; 1020 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1021 atiop->ccb_h.status = CAM_CDB_RECVD; 1022 atiop->tag_id = aep->at_rxid; 1023 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1024 case ATIO2_TC_ATTR_SIMPLEQ: 1025 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1026 break; 1027 case ATIO2_TC_ATTR_HEADOFQ: 1028 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1029 break; 1030 case ATIO2_TC_ATTR_ORDERED: 1031 atiop->tag_action = MSG_ORDERED_Q_TAG; 1032 break; 1033 case ATIO2_TC_ATTR_ACAQ: /* ?? 
	    */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	if (atiop->tag_action != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	if (isp_tdebug > 1) {
		printf("%s:ATIO2 RX_ID 0x%x CDB=0x%x iid%d->lun%d tattr 0x%x\n",
		    isp->isp_name, aep->at_rxid & 0xffff, aep->at_cdb[0] & 0xff,
		    aep->at_iid, lun, aep->at_taskflags);
	}
	rls_lun_statep(isp, tptr);
	return (0);
}

/*
 * Complete a CTIO/CTIO2 the firmware has finished with: look up the
 * owning ccb by handle, record transfer/status results, and pass
 * the ccb back to CAM with xpt_done().
 */
static int
isp_handle_platform_ctio(struct ispsoftc *isp, void * arg)
{
	union ccb *ccb;
	int sentstatus, ok;

	/*
	 * CTIO and CTIO2 are close enough....
	 * (the handle is read through a ct_entry_t view of either form)
	 */

	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_reserved);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_reserved);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && ccb->ccb_h.flags & CAM_SEND_SENSE) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		if (isp_tdebug > 1) {
			printf("%s:CTIO2 RX_ID 0x%x sts 0x%x flg 0x%x sns "
			    "%d FIN\n", isp->isp_name, ct->ct_rxid,
			    ct->ct_status, ct->ct_flags,
			    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0);
		}
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (isp_tdebug > 1) {
			printf("%s:CTIO tag 0x%x sts 0x%x flg 0x%x FIN\n",
			    isp->isp_name, ct->ct_tag_val, ct->ct_status,
			    ct->ct_flags);
		}
	}

	/*
	 * We're here either because data transfers are done (and
	 * it's time to send a final status CTIO) or because the final
	 * status CTIO is done. We don't get called for all intermediate
	 * CTIOs that happen for a large data transfer.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along.
	 */

	if (sentstatus) {
		/*
		 * Data transfer done. See if all went okay.
		 */
		if (ok) {
			ccb->csio.resid = 0;
		} else {
			/* Report the whole transfer as not having happened. */
			ccb->csio.resid = ccb->csio.dxfer_len;
		}
	}


	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	/*
	 * If the simq was frozen for a resource shortage, release it
	 * now that an entry has been returned - unless another freeze
	 * reason remains or the device queue itself is frozen.
	 */
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				IDPRINTF(3, ("%s: isp_done -> relsimq\n",
				    isp->isp_name));
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				IDPRINTF(3, ("%s: isp_done -> devq frozen\n",
				    isp->isp_name));
			}
		} else {
			IDPRINTF(3, ("%s: isp_done -> simqfrozen = %x\n",
			    isp->isp_name, isp->isp_osinfo.simqfrozen));
		}
	}
	xpt_done(ccb);
	return (0);
}
#endif

/*
 * CAM async event callback.  On AC_LOST_DEVICE for parallel SCSI,
 * push the target's negotiation parameters back to safe defaults.
 */
static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int s, rvf, tgt;

			tgt = xpt_path_target_id(path);
			rvf = ISP_FW_REVX(isp->isp_fwrev);
			s = splcam();
			sdp += cam_sim_bus(sim);
			isp->isp_update |= (1 << cam_sim_bus(sim));
			nflags = DPARM_SAFE_DFLT;
			/*
			 * NOTE(review): these revision windows look like
			 * they select firmware able to force narrow/async
			 * - confirm against the firmware release notes.
			 */
			if (rvf >= ISP_FW_REV(7, 55, 0) ||
			    (ISP_FW_REV(4, 55, 0) <= rvf &&
			    (rvf < ISP_FW_REV(5, 0, 0)))) {
				nflags |= DPARM_NARROW | DPARM_ASYNC;
			}
			oflags = sdp->isp_devparam[tgt].dev_flags;
sdp->isp_devparam[tgt].dev_flags = nflags; 1166 sdp->isp_devparam[tgt].dev_update = 1; 1167 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL); 1168 sdp->isp_devparam[tgt].dev_flags = oflags; 1169 (void) splx(s); 1170 } 1171 break; 1172 default: 1173 printf("%s: isp_attach Async Code 0x%x\n", isp->isp_name, code); 1174 break; 1175 } 1176 } 1177 1178 static void 1179 isp_poll(struct cam_sim *sim) 1180 { 1181 isp_intr((struct ispsoftc *) cam_sim_softc(sim)); 1182 } 1183 1184 static void 1185 isp_relsim(void *arg) 1186 { 1187 struct ispsoftc *isp = arg; 1188 int s = splcam(); 1189 if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) { 1190 int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED; 1191 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED; 1192 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 1193 xpt_release_simq(isp->isp_sim, 1); 1194 IDPRINTF(3, ("%s: timed relsimq\n", isp->isp_name)); 1195 } 1196 } 1197 splx(s); 1198 } 1199 1200 static void 1201 isp_timeout(void *arg) 1202 { 1203 ISP_SCSI_XFER_T *xs = arg; 1204 struct ispsoftc *isp = XS_ISP(xs); 1205 u_int32_t handle; 1206 int s = splcam(); 1207 /* 1208 * We've decide this command is dead. Make sure we're not trying 1209 * to kill a command that's already dead by getting it's handle. 
1210 */ 1211 handle = isp_find_handle(isp, xs); 1212 if (handle) { 1213 isp_destroy_handle(isp, handle); 1214 xpt_print_path(xs->ccb_h.path); 1215 printf("watchdog timeout (handle 0x%x)\n", handle); 1216 XS_SETERR(xs, CAM_CMD_TIMEOUT); 1217 isp_done(xs); 1218 } 1219 (void) splx(s); 1220 } 1221 1222 static void 1223 isp_action(struct cam_sim *sim, union ccb *ccb) 1224 { 1225 int s, bus, tgt, error; 1226 struct ispsoftc *isp; 1227 struct ccb_trans_settings *cts; 1228 1229 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 1230 1231 isp = (struct ispsoftc *)cam_sim_softc(sim); 1232 ccb->ccb_h.sim_priv.entries[0].field = 0; 1233 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 1234 if (isp->isp_state != ISP_RUNSTATE && 1235 ccb->ccb_h.func_code == XPT_SCSI_IO) { 1236 s = splcam(); 1237 DISABLE_INTS(isp); 1238 isp_init(isp); 1239 if (isp->isp_state != ISP_INITSTATE) { 1240 (void) splx(s); 1241 /* 1242 * Lie. Say it was a selection timeout. 1243 */ 1244 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1245 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1246 xpt_freeze_devq(ccb->ccb_h.path, 1); 1247 xpt_done(ccb); 1248 return; 1249 } 1250 isp->isp_state = ISP_RUNSTATE; 1251 ENABLE_INTS(isp); 1252 (void) splx(s); 1253 } 1254 IDPRINTF(4, ("%s: isp_action code %x\n", isp->isp_name, 1255 ccb->ccb_h.func_code)); 1256 1257 switch (ccb->ccb_h.func_code) { 1258 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 1259 /* 1260 * Do a couple of preliminary checks... 
1261 */ 1262 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 1263 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 1264 ccb->ccb_h.status = CAM_REQ_INVALID; 1265 xpt_done(ccb); 1266 break; 1267 } 1268 } 1269 #ifdef DIAGNOSTIC 1270 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 1271 ccb->ccb_h.status = CAM_PATH_INVALID; 1272 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 1273 ccb->ccb_h.status = CAM_PATH_INVALID; 1274 } 1275 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 1276 printf("%s: invalid tgt/lun (%d.%d) in XPT_SCSI_IO\n", 1277 isp->isp_name, ccb->ccb_h.target_id, 1278 ccb->ccb_h.target_lun); 1279 xpt_done(ccb); 1280 break; 1281 } 1282 #endif 1283 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 1284 s = splcam(); 1285 DISABLE_INTS(isp); 1286 error = ispscsicmd((ISP_SCSI_XFER_T *) ccb); 1287 ENABLE_INTS(isp); 1288 splx(s); 1289 switch (error) { 1290 case CMD_QUEUED: 1291 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1292 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1293 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) 1294 ccb->ccb_h.timeout = 5 * 1000; 1295 ccb->ccb_h.timeout_ch = 1296 timeout(isp_timeout, (caddr_t)ccb, 1297 (ccb->ccb_h.timeout * hz) / 1000); 1298 } 1299 break; 1300 case CMD_RQLATER: 1301 if (isp->isp_osinfo.simqfrozen == 0) { 1302 IDPRINTF(3, ("%s: RQLATER freeze simq\n", 1303 isp->isp_name)); 1304 isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED; 1305 timeout(isp_relsim, isp, 500); 1306 xpt_freeze_simq(sim, 1); 1307 } 1308 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1309 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1310 xpt_done(ccb); 1311 break; 1312 case CMD_EAGAIN: 1313 if (isp->isp_osinfo.simqfrozen == 0) { 1314 xpt_freeze_simq(sim, 1); 1315 IDPRINTF(3, ("%s: EAGAIN freeze simq\n", 1316 isp->isp_name)); 1317 } 1318 isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE; 1319 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1320 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1321 xpt_done(ccb); 1322 break; 1323 case CMD_COMPLETE: 1324 isp_done((struct 
ccb_scsiio *) ccb); 1325 break; 1326 default: 1327 printf("%s: What's this? 0x%x at %d in file %s\n", 1328 isp->isp_name, error, __LINE__, __FILE__); 1329 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1330 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 1331 xpt_done(ccb); 1332 } 1333 break; 1334 1335 #ifdef ISP_TARGET_MODE 1336 case XPT_EN_LUN: /* Enable LUN as a target */ 1337 isp_en_lun(isp, ccb); 1338 xpt_done(ccb); 1339 break; 1340 1341 case XPT_NOTIFY_ACK: /* recycle notify ack */ 1342 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 1343 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 1344 { 1345 tstate_t *tptr = get_lun_statep(isp, ccb->ccb_h.target_lun); 1346 if (tptr == NULL) { 1347 ccb->ccb_h.status = CAM_LUN_INVALID; 1348 xpt_done(ccb); 1349 break; 1350 } 1351 s = splsoftcam(); 1352 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1353 SLIST_INSERT_HEAD(&tptr->atios, 1354 &ccb->ccb_h, sim_links.sle); 1355 } else { 1356 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 1357 sim_links.sle); 1358 } 1359 splx(s); 1360 rls_lun_statep(isp, tptr); 1361 ccb->ccb_h.status = CAM_REQ_INPROG; 1362 break; 1363 } 1364 case XPT_CONT_TARGET_IO: 1365 { 1366 s = splcam(); 1367 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb); 1368 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 1369 if (isp->isp_osinfo.simqfrozen == 0) { 1370 xpt_freeze_simq(sim, 1); 1371 xpt_print_path(ccb->ccb_h.path); 1372 printf("XPT_CONT_TARGET_IO freeze simq\n"); 1373 } 1374 isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE; 1375 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1376 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1377 xpt_done(ccb); 1378 } else { 1379 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1380 } 1381 splx(s); 1382 break; 1383 } 1384 #endif 1385 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 1386 1387 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 1388 tgt = ccb->ccb_h.target_id; 1389 tgt |= (bus << 16); 1390 1391 s = splcam(); 1392 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 1393 
(void) splx(s); 1394 if (error) { 1395 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1396 } else { 1397 ccb->ccb_h.status = CAM_REQ_CMP; 1398 } 1399 xpt_done(ccb); 1400 break; 1401 case XPT_ABORT: /* Abort the specified CCB */ 1402 { 1403 union ccb *accb = ccb->cab.abort_ccb; 1404 switch (accb->ccb_h.func_code) { 1405 #ifdef ISP_TARGET_MODE 1406 case XPT_ACCEPT_TARGET_IO: 1407 case XPT_IMMED_NOTIFY: 1408 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 1409 break; 1410 case XPT_CONT_TARGET_IO: 1411 PRINTF("%s: cannot abort CTIOs yet\n", isp->isp_name); 1412 ccb->ccb_h.status = CAM_UA_ABORT; 1413 break; 1414 #endif 1415 case XPT_SCSI_IO: 1416 s = splcam(); 1417 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 1418 (void) splx(s); 1419 if (error) { 1420 ccb->ccb_h.status = CAM_UA_ABORT; 1421 } else { 1422 ccb->ccb_h.status = CAM_REQ_CMP; 1423 } 1424 break; 1425 default: 1426 ccb->ccb_h.status = CAM_REQ_INVALID; 1427 break; 1428 } 1429 xpt_done(ccb); 1430 break; 1431 } 1432 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 1433 1434 cts = &ccb->cts; 1435 tgt = cts->ccb_h.target_id; 1436 s = splcam(); 1437 if (IS_SCSI(isp)) { 1438 sdparam *sdp = isp->isp_param; 1439 u_int16_t *dptr; 1440 1441 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 1442 1443 sdp += bus; 1444 #if 0 1445 if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) 1446 dptr = &sdp->isp_devparam[tgt].cur_dflags; 1447 else 1448 dptr = &sdp->isp_devparam[tgt].dev_flags; 1449 #else 1450 /* 1451 * We always update (internally) from dev_flags 1452 * so any request to change settings just gets 1453 * vectored to that location. 1454 */ 1455 dptr = &sdp->isp_devparam[tgt].dev_flags; 1456 #endif 1457 1458 /* 1459 * Note that these operations affect the 1460 * the goal flags (dev_flags)- not 1461 * the current state flags. Then we mark 1462 * things so that the next operation to 1463 * this HBA will cause the update to occur. 
1464 */ 1465 if (cts->valid & CCB_TRANS_DISC_VALID) { 1466 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 1467 *dptr |= DPARM_DISC; 1468 } else { 1469 *dptr &= ~DPARM_DISC; 1470 } 1471 } 1472 if (cts->valid & CCB_TRANS_TQ_VALID) { 1473 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 1474 *dptr |= DPARM_TQING; 1475 } else { 1476 *dptr &= ~DPARM_TQING; 1477 } 1478 } 1479 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 1480 switch (cts->bus_width) { 1481 case MSG_EXT_WDTR_BUS_16_BIT: 1482 *dptr |= DPARM_WIDE; 1483 break; 1484 default: 1485 *dptr &= ~DPARM_WIDE; 1486 } 1487 } 1488 /* 1489 * Any SYNC RATE of nonzero and SYNC_OFFSET 1490 * of nonzero will cause us to go to the 1491 * selected (from NVRAM) maximum value for 1492 * this device. At a later point, we'll 1493 * allow finer control. 1494 */ 1495 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 1496 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 1497 (cts->sync_offset > 0)) { 1498 *dptr |= DPARM_SYNC; 1499 } else { 1500 *dptr &= ~DPARM_SYNC; 1501 } 1502 *dptr |= DPARM_SAFE_DFLT; 1503 if (bootverbose || isp->isp_dblev >= 3) 1504 printf("%s: %d.%d set %s period 0x%x offset " 1505 "0x%x flags 0x%x\n", isp->isp_name, bus, 1506 tgt, 1507 (cts->flags & CCB_TRANS_CURRENT_SETTINGS)? 1508 "current" : "user", 1509 sdp->isp_devparam[tgt].sync_period, 1510 sdp->isp_devparam[tgt].sync_offset, 1511 sdp->isp_devparam[tgt].dev_flags); 1512 sdp->isp_devparam[tgt].dev_update = 1; 1513 isp->isp_update |= (1 << bus); 1514 } 1515 (void) splx(s); 1516 ccb->ccb_h.status = CAM_REQ_CMP; 1517 xpt_done(ccb); 1518 break; 1519 1520 case XPT_GET_TRAN_SETTINGS: 1521 1522 cts = &ccb->cts; 1523 tgt = cts->ccb_h.target_id; 1524 if (IS_FC(isp)) { 1525 /* 1526 * a lot of normal SCSI things don't make sense. 1527 */ 1528 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 1529 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 1530 /* 1531 * How do you measure the width of a high 1532 * speed serial bus? Well, in bytes. 
1533 * 1534 * Offset and period make no sense, though, so we set 1535 * (above) a 'base' transfer speed to be gigabit. 1536 */ 1537 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1538 } else { 1539 sdparam *sdp = isp->isp_param; 1540 u_int16_t dval, pval, oval; 1541 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 1542 1543 sdp += bus; 1544 if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) { 1545 s = splcam(); 1546 sdp->isp_devparam[tgt].dev_refresh = 1; 1547 isp->isp_update |= (1 << bus); 1548 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 1549 NULL); 1550 (void) splx(s); 1551 dval = sdp->isp_devparam[tgt].cur_dflags; 1552 oval = sdp->isp_devparam[tgt].cur_offset; 1553 pval = sdp->isp_devparam[tgt].cur_period; 1554 } else { 1555 dval = sdp->isp_devparam[tgt].dev_flags; 1556 oval = sdp->isp_devparam[tgt].sync_offset; 1557 pval = sdp->isp_devparam[tgt].sync_period; 1558 } 1559 1560 s = splcam(); 1561 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 1562 1563 if (dval & DPARM_DISC) { 1564 cts->flags |= CCB_TRANS_DISC_ENB; 1565 } 1566 if (dval & DPARM_TQING) { 1567 cts->flags |= CCB_TRANS_TAG_ENB; 1568 } 1569 if (dval & DPARM_WIDE) { 1570 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1571 } else { 1572 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1573 } 1574 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 1575 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 1576 1577 if ((dval & DPARM_SYNC) && oval != 0) { 1578 cts->sync_period = pval; 1579 cts->sync_offset = oval; 1580 cts->valid |= 1581 CCB_TRANS_SYNC_RATE_VALID | 1582 CCB_TRANS_SYNC_OFFSET_VALID; 1583 } 1584 splx(s); 1585 if (bootverbose || isp->isp_dblev >= 3) 1586 printf("%s: %d.%d get %s period 0x%x offset " 1587 "0x%x flags 0x%x\n", isp->isp_name, bus, 1588 tgt, 1589 (cts->flags & CCB_TRANS_CURRENT_SETTINGS)? 
1590 "current" : "user", pval, oval, dval); 1591 } 1592 ccb->ccb_h.status = CAM_REQ_CMP; 1593 xpt_done(ccb); 1594 break; 1595 1596 case XPT_CALC_GEOMETRY: 1597 { 1598 struct ccb_calc_geometry *ccg; 1599 u_int32_t secs_per_cylinder; 1600 u_int32_t size_mb; 1601 1602 ccg = &ccb->ccg; 1603 if (ccg->block_size == 0) { 1604 printf("%s: %d.%d XPT_CALC_GEOMETRY block size 0?\n", 1605 isp->isp_name, ccg->ccb_h.target_id, 1606 ccg->ccb_h.target_lun); 1607 ccb->ccb_h.status = CAM_REQ_INVALID; 1608 xpt_done(ccb); 1609 break; 1610 } 1611 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 1612 if (size_mb > 1024) { 1613 ccg->heads = 255; 1614 ccg->secs_per_track = 63; 1615 } else { 1616 ccg->heads = 64; 1617 ccg->secs_per_track = 32; 1618 } 1619 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 1620 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 1621 ccb->ccb_h.status = CAM_REQ_CMP; 1622 xpt_done(ccb); 1623 break; 1624 } 1625 case XPT_RESET_BUS: /* Reset the specified bus */ 1626 bus = cam_sim_bus(sim); 1627 s = splcam(); 1628 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 1629 (void) splx(s); 1630 if (error) 1631 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1632 else { 1633 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 1634 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 1635 else if (isp->isp_path != NULL) 1636 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 1637 ccb->ccb_h.status = CAM_REQ_CMP; 1638 } 1639 xpt_done(ccb); 1640 break; 1641 1642 case XPT_TERM_IO: /* Terminate the I/O process */ 1643 /* Does this need to be implemented? 
*/ 1644 ccb->ccb_h.status = CAM_REQ_INVALID; 1645 xpt_done(ccb); 1646 break; 1647 1648 case XPT_PATH_INQ: /* Path routing inquiry */ 1649 { 1650 struct ccb_pathinq *cpi = &ccb->cpi; 1651 1652 cpi->version_num = 1; 1653 #ifdef ISP_TARGET_MODE 1654 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 1655 #else 1656 cpi->target_sprt = 0; 1657 #endif 1658 cpi->hba_eng_cnt = 0; 1659 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 1660 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 1661 cpi->bus_id = cam_sim_bus(sim); 1662 if (IS_FC(isp)) { 1663 cpi->hba_misc = PIM_NOBUSRESET; 1664 /* 1665 * Because our loop ID can shift from time to time, 1666 * make our initiator ID out of range of our bus. 1667 */ 1668 cpi->initiator_id = cpi->max_target + 1; 1669 1670 /* 1671 * Set base transfer capabilities for Fibre Channel. 1672 * Technically not correct because we don't know 1673 * what media we're running on top of- but we'll 1674 * look good if we always say 100MB/s. 1675 */ 1676 cpi->base_transfer_speed = 100000; 1677 cpi->hba_inquiry = PI_TAG_ABLE; 1678 } else { 1679 sdparam *sdp = isp->isp_param; 1680 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 1681 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 1682 cpi->hba_misc = 0; 1683 cpi->initiator_id = sdp->isp_initiator_id; 1684 cpi->base_transfer_speed = 3300; 1685 } 1686 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 1687 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 1688 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 1689 cpi->unit_number = cam_sim_unit(sim); 1690 cpi->ccb_h.status = CAM_REQ_CMP; 1691 xpt_done(ccb); 1692 break; 1693 } 1694 default: 1695 ccb->ccb_h.status = CAM_REQ_INVALID; 1696 xpt_done(ccb); 1697 break; 1698 } 1699 } 1700 1701 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 1702 void 1703 isp_done(struct ccb_scsiio *sccb) 1704 { 1705 struct ispsoftc *isp = XS_ISP(sccb); 1706 1707 if (XS_NOERR(sccb)) 1708 XS_SETERR(sccb, CAM_REQ_CMP); 1709 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 
1710 sccb->ccb_h.status |= sccb->ccb_h.spriv_field0; 1711 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 1712 (sccb->scsi_status != SCSI_STATUS_OK)) { 1713 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 1714 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 1715 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 1716 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 1717 } else { 1718 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1719 } 1720 } 1721 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1722 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1723 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 1724 sccb->ccb_h.status |= CAM_DEV_QFRZN; 1725 xpt_freeze_devq(sccb->ccb_h.path, 1); 1726 if (sccb->scsi_status != SCSI_STATUS_OK) 1727 IDPRINTF(3, ("%s: fdevq %d.%d %x %x\n", 1728 isp->isp_name, sccb->ccb_h.target_id, 1729 sccb->ccb_h.target_lun, sccb->ccb_h.status, 1730 sccb->scsi_status)); 1731 } 1732 } 1733 /* 1734 * If we were frozen waiting resources, clear that we were frozen 1735 * waiting for resources. If we are no longer frozen, and the devq 1736 * isn't frozen, mark the completing CCB to have the XPT layer 1737 * release the simq. 
1738 */ 1739 if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) { 1740 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE; 1741 if (isp->isp_osinfo.simqfrozen == 0) { 1742 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 1743 IDPRINTF(3, ("%s: isp_done -> relsimq\n", 1744 isp->isp_name)); 1745 sccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1746 } else { 1747 IDPRINTF(3, ("%s: isp_done -> devq frozen\n", 1748 isp->isp_name)); 1749 } 1750 } else { 1751 IDPRINTF(3, ("%s: isp_done -> simqfrozen = %x\n", 1752 isp->isp_name, isp->isp_osinfo.simqfrozen)); 1753 } 1754 } 1755 if (CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB) && 1756 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1757 xpt_print_path(sccb->ccb_h.path); 1758 printf("cam completion status 0x%x\n", sccb->ccb_h.status); 1759 } 1760 untimeout(isp_timeout, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 1761 xpt_done((union ccb *) sccb); 1762 } 1763 1764 int 1765 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg) 1766 { 1767 int bus, rv = 0; 1768 switch (cmd) { 1769 case ISPASYNC_NEW_TGT_PARAMS: 1770 { 1771 int flags, tgt; 1772 sdparam *sdp = isp->isp_param; 1773 struct ccb_trans_settings neg; 1774 struct cam_path *tmppath; 1775 1776 tgt = *((int *)arg); 1777 bus = (tgt >> 16) & 0xffff; 1778 tgt &= 0xffff; 1779 sdp += bus; 1780 if (xpt_create_path(&tmppath, NULL, 1781 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), 1782 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1783 xpt_print_path(isp->isp_path); 1784 printf("isp_async cannot make temp path for " 1785 "target %d bus %d\n", tgt, bus); 1786 rv = -1; 1787 break; 1788 } 1789 flags = sdp->isp_devparam[tgt].cur_dflags; 1790 neg.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 1791 if (flags & DPARM_DISC) { 1792 neg.flags |= CCB_TRANS_DISC_ENB; 1793 } 1794 if (flags & DPARM_TQING) { 1795 neg.flags |= CCB_TRANS_TAG_ENB; 1796 } 1797 neg.valid |= CCB_TRANS_BUS_WIDTH_VALID; 1798 neg.bus_width = (flags & DPARM_WIDE)? 
1799 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; 1800 neg.sync_period = sdp->isp_devparam[tgt].cur_period; 1801 neg.sync_offset = sdp->isp_devparam[tgt].cur_offset; 1802 if (flags & DPARM_SYNC) { 1803 neg.valid |= 1804 CCB_TRANS_SYNC_RATE_VALID | 1805 CCB_TRANS_SYNC_OFFSET_VALID; 1806 } 1807 IDPRINTF(3, ("%s: NEW_TGT_PARAMS bus %d tgt %d period " 1808 "0x%x offset 0x%x flags 0x%x\n", isp->isp_name, 1809 bus, tgt, neg.sync_period, neg.sync_offset, flags)); 1810 xpt_setup_ccb(&neg.ccb_h, tmppath, 1); 1811 xpt_async(AC_TRANSFER_NEG, tmppath, &neg); 1812 xpt_free_path(tmppath); 1813 break; 1814 } 1815 case ISPASYNC_BUS_RESET: 1816 bus = *((int *)arg); 1817 printf("%s: SCSI bus reset on bus %d detected\n", 1818 isp->isp_name, bus); 1819 if (bus > 0 && isp->isp_path2) { 1820 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 1821 } else if (isp->isp_path) { 1822 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 1823 } 1824 break; 1825 case ISPASYNC_LOOP_DOWN: 1826 if (isp->isp_path) { 1827 if (isp->isp_osinfo.simqfrozen == 0) { 1828 IDPRINTF(3, ("%s: loop down freeze simq\n", 1829 isp->isp_name)); 1830 xpt_freeze_simq(isp->isp_sim, 1); 1831 } 1832 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 1833 } 1834 printf("%s: Loop DOWN\n", isp->isp_name); 1835 break; 1836 case ISPASYNC_LOOP_UP: 1837 if (isp->isp_path) { 1838 int wasfrozen = 1839 isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 1840 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 1841 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 1842 xpt_release_simq(isp->isp_sim, 1); 1843 IDPRINTF(3, ("%s: loop up release simq\n", 1844 isp->isp_name)); 1845 } 1846 } 1847 printf("%s: Loop UP\n", isp->isp_name); 1848 break; 1849 case ISPASYNC_PDB_CHANGED: 1850 { 1851 const char *fmt = "%s: Target %d (Loop 0x%x) Port ID 0x%x " 1852 "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x\n"; 1853 const static char *roles[4] = { 1854 "(none)", "Target", "Initiator", "Target/Initiator" 1855 }; 1856 char *ptr; 1857 fcparam *fcp = 
isp->isp_param; 1858 int tgt = *((int *) arg); 1859 struct lportdb *lp = &fcp->portdb[tgt]; 1860 1861 if (lp->valid) { 1862 ptr = "arrived"; 1863 } else { 1864 ptr = "disappeared"; 1865 } 1866 printf(fmt, isp->isp_name, tgt, lp->loopid, lp->portid, 1867 roles[lp->roles & 0x3], ptr, 1868 (u_int32_t) (lp->port_wwn >> 32), 1869 (u_int32_t) (lp->port_wwn & 0xffffffffLL), 1870 (u_int32_t) (lp->node_wwn >> 32), 1871 (u_int32_t) (lp->node_wwn & 0xffffffffLL)); 1872 break; 1873 } 1874 case ISPASYNC_CHANGE_NOTIFY: 1875 printf("%s: Name Server Database Changed\n", isp->isp_name); 1876 break; 1877 #ifdef ISP2100_FABRIC 1878 case ISPASYNC_FABRIC_DEV: 1879 { 1880 int target; 1881 struct lportdb *lp; 1882 char *pt; 1883 sns_ganrsp_t *resp = (sns_ganrsp_t *) arg; 1884 u_int32_t portid; 1885 u_int64_t wwpn, wwnn; 1886 fcparam *fcp = isp->isp_param; 1887 1888 rv = -1; 1889 1890 portid = 1891 (((u_int32_t) resp->snscb_port_id[0]) << 16) | 1892 (((u_int32_t) resp->snscb_port_id[1]) << 8) | 1893 (((u_int32_t) resp->snscb_port_id[2])); 1894 1895 wwpn = 1896 (((u_int64_t)resp->snscb_portname[0]) << 56) | 1897 (((u_int64_t)resp->snscb_portname[1]) << 48) | 1898 (((u_int64_t)resp->snscb_portname[2]) << 40) | 1899 (((u_int64_t)resp->snscb_portname[3]) << 32) | 1900 (((u_int64_t)resp->snscb_portname[4]) << 24) | 1901 (((u_int64_t)resp->snscb_portname[5]) << 16) | 1902 (((u_int64_t)resp->snscb_portname[6]) << 8) | 1903 (((u_int64_t)resp->snscb_portname[7])); 1904 1905 wwnn = 1906 (((u_int64_t)resp->snscb_nodename[0]) << 56) | 1907 (((u_int64_t)resp->snscb_nodename[1]) << 48) | 1908 (((u_int64_t)resp->snscb_nodename[2]) << 40) | 1909 (((u_int64_t)resp->snscb_nodename[3]) << 32) | 1910 (((u_int64_t)resp->snscb_nodename[4]) << 24) | 1911 (((u_int64_t)resp->snscb_nodename[5]) << 16) | 1912 (((u_int64_t)resp->snscb_nodename[6]) << 8) | 1913 (((u_int64_t)resp->snscb_nodename[7])); 1914 if (portid == 0 || wwpn == 0) { 1915 rv = 0; 1916 break; 1917 } 1918 1919 switch (resp->snscb_port_type) { 1920 
case 1: 1921 pt = " N_Port"; 1922 break; 1923 case 2: 1924 pt = " NL_Port"; 1925 break; 1926 case 3: 1927 pt = "F/NL_Port"; 1928 break; 1929 case 0x7f: 1930 pt = " Nx_Port"; 1931 break; 1932 case 0x81: 1933 pt = " F_port"; 1934 break; 1935 case 0x82: 1936 pt = " FL_Port"; 1937 break; 1938 case 0x84: 1939 pt = " E_port"; 1940 break; 1941 default: 1942 pt = "?"; 1943 break; 1944 } 1945 CFGPRINTF("%s: %s @ 0x%x, Node 0x%08x%08x Port %08x%08x\n", 1946 isp->isp_name, pt, portid, 1947 ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn), 1948 ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn)); 1949 #if 0 1950 if ((resp->snscb_fc4_types[1] & 0x1) == 0) { 1951 rv = 0; 1952 printf("Types 0..3: 0x%x 0x%x 0x%x 0x%x\n", 1953 resp->snscb_fc4_types[0], resp->snscb_fc4_types[1], 1954 resp->snscb_fc4_types[3], resp->snscb_fc4_types[3]); 1955 break; 1956 } 1957 #endif 1958 for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) { 1959 lp = &fcp->portdb[target]; 1960 if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) 1961 break; 1962 } 1963 if (target < MAX_FC_TARG) { 1964 rv = 0; 1965 break; 1966 } 1967 for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) { 1968 lp = &fcp->portdb[target]; 1969 if (lp->port_wwn == 0) 1970 break; 1971 } 1972 if (target == MAX_FC_TARG) { 1973 printf("%s: no more space for fabric devices\n", 1974 isp->isp_name); 1975 break; 1976 } 1977 lp->node_wwn = wwnn; 1978 lp->port_wwn = wwpn; 1979 lp->portid = portid; 1980 rv = 0; 1981 break; 1982 } 1983 #endif 1984 #ifdef ISP_TARGET_MODE 1985 case ISPASYNC_TARGET_MESSAGE: 1986 { 1987 tmd_msg_t *mp = arg; 1988 ITDEBUG(2, ("%s: bus %d iid %d tgt %d lun %d ttype %x tval %x" 1989 " msg[0]=0x%x\n", isp->isp_name, mp->nt_bus, 1990 (int) mp->nt_iid, (int) mp->nt_tgt, (int) mp->nt_lun, 1991 mp->nt_tagtype, mp->nt_tagval, mp->nt_msg[0])); 1992 break; 1993 } 1994 case ISPASYNC_TARGET_EVENT: 1995 { 1996 tmd_event_t *ep = arg; 1997 ITDEBUG(2, ("%s: bus %d event code 0x%x\n", isp->isp_name, 1998 ep->ev_bus, ep->ev_event)); 
1999 break; 2000 } 2001 case ISPASYNC_TARGET_ACTION: 2002 switch (((isphdr_t *)arg)->rqs_entry_type) { 2003 default: 2004 printf("%s: event 0x%x for unhandled target action\n", 2005 isp->isp_name, ((isphdr_t *)arg)->rqs_entry_type); 2006 break; 2007 case RQSTYPE_ATIO: 2008 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 2009 break; 2010 case RQSTYPE_ATIO2: 2011 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 2012 break; 2013 case RQSTYPE_CTIO2: 2014 case RQSTYPE_CTIO: 2015 rv = isp_handle_platform_ctio(isp, arg); 2016 break; 2017 case RQSTYPE_ENABLE_LUN: 2018 case RQSTYPE_MODIFY_LUN: 2019 isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status); 2020 break; 2021 } 2022 break; 2023 #endif 2024 default: 2025 PRINTF("%s: unknown isp_async event %d\n", isp->isp_name, cmd); 2026 rv = -1; 2027 break; 2028 } 2029 return (rv); 2030 } 2031 2032 2033 /* 2034 * Locks are held before coming here. 2035 */ 2036 void 2037 isp_uninit(struct ispsoftc *isp) 2038 { 2039 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 2040 DISABLE_INTS(isp); 2041 } 2042