/* $FreeBSD$ */
/*
 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
 *
 *---------------------------------------
 * Copyright (c) 1997, 1998, 1999 by Matthew Jacob
 *	NASA/Ames Research Center
 * All rights reserved.
 *---------------------------------------
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/isp/isp_freebsd.h>

static void isp_intr_enable(void *);
static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
static void isp_poll(struct cam_sim *);
static void isp_relsim(void *);
static timeout_t isp_watchdog;
static void isp_action(struct cam_sim *, union ccb *);

static struct ispsoftc *isplist = NULL;
#ifdef DEBUG
int isp_debug = 2;
#elif defined(CAMDEBUG) || defined(DIAGNOSTIC)
int isp_debug = 1;
#else
int isp_debug = 0;
#endif

void
isp_attach(struct ispsoftc *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */

	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
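	 * The devq allocated above is shared with the SIM for the
	 * second channel (if any) that is constructed further down.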
	 */
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    isp->isp_unit, 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return;
	}

	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
	isp->isp_osinfo.ehook.ich_arg = isp;
	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
		printf("%s: could not establish interrupt enable hook\n",
		    isp->isp_name);
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		return;
	}

	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	isp->isp_sim = sim;
	isp->isp_path = path;

	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_DUALBUS(isp)) {
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    isp->isp_unit, 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			return;
		}

		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}
	isp->isp_state = ISP_RUNSTATE;
	ENABLE_INTS(isp);
	if (isplist == NULL) {
		isplist = isp;
	} else {
		struct ispsoftc *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}
}

static void
isp_intr_enable(void *arg)
{
	struct ispsoftc *isp = arg;
	ENABLE_INTS(isp);
	isp->isp_osinfo.intsok = 1;
	/* Release our hook so that the boot can continue. */
	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
}

/*
 * Put the target mode functions here, because some are inlines
 */

#ifdef ISP_TARGET_MODE

static __inline int is_lun_enabled(struct ispsoftc *, lun_id_t);
static __inline int are_any_luns_enabled(struct ispsoftc *);
static __inline tstate_t *get_lun_statep(struct ispsoftc *, lun_id_t);
static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
static __inline int isp_psema_sig_rqe(struct ispsoftc *);
static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
static __inline void isp_vsema_rqe(struct ispsoftc *);
static cam_status
create_lun_state(struct ispsoftc *, struct cam_path *, tstate_t **);
static void destroy_lun_state(struct ispsoftc *, tstate_t *);
static void isp_en_lun(struct ispsoftc *, union ccb *);
static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
static cam_status isp_target_putback_atio(struct ispsoftc *, union ccb *);
static timeout_t isp_refire_putback_atio;

static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
static int isp_handle_platform_ctio(struct ispsoftc *, void *);
static void isp_handle_platform_ctio_part2(struct ispsoftc *, union ccb *);

static __inline int
is_lun_enabled(struct ispsoftc *isp, lun_id_t lun)
{
	tstate_t *tptr;
	ISP_LOCK(isp);
	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
		ISP_UNLOCK(isp);
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun) {
			ISP_UNLOCK(isp);
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	ISP_UNLOCK(isp);
	return (0);
}

static __inline int
are_any_luns_enabled(struct ispsoftc *isp)
{
	int i;
	for (i = 0; i < LUN_HASH_SIZE; i++) {
		if (isp->isp_osinfo.lun_hash[i]) {
			return (1);
		}
	}
	return (0);
}

static __inline tstate_t *
get_lun_statep(struct ispsoftc *isp, lun_id_t lun)
{
	tstate_t *tptr;

	ISP_LOCK(isp);
	if (lun == CAM_LUN_WILDCARD) {
		tptr = &isp->isp_osinfo.tsdflt;
		tptr->hold++;
		ISP_UNLOCK(isp);
		return (tptr);
	} else {
		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)];
	}
	if (tptr == NULL) {
		ISP_UNLOCK(isp);
		return (NULL);
	}

	do {
		if (tptr->lun == lun) {
			tptr->hold++;
			ISP_UNLOCK(isp);
			return (tptr);
		}
	} while ((tptr = tptr->next) != NULL);
	ISP_UNLOCK(isp);
	return (tptr);
}

static __inline void
rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
{
	if (tptr->hold)
		tptr->hold--;
}

static __inline int
isp_psema_sig_rqe(struct ispsoftc *isp)
{
	ISP_LOCK(isp);
	while (isp->isp_osinfo.tmflags & TM_BUSY) {
		isp->isp_osinfo.tmflags |= TM_WANTED;
		if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) {
			ISP_UNLOCK(isp);
			return (-1);
		}
		isp->isp_osinfo.tmflags |= TM_BUSY;
	}
	ISP_UNLOCK(isp);
	return (0);
}

static __inline int
isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo)
{
	ISP_LOCK(isp);
	if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) {
		ISP_UNLOCK(isp);
		return (-1);
	}
	ISP_UNLOCK(isp);
	return (0);
}

static __inline void
isp_cv_signal_rqe(struct ispsoftc *isp, int status)
{
	isp->isp_osinfo.rstatus = status;
	wakeup(&isp->isp_osinfo.rstatus);
}

static __inline void
isp_vsema_rqe(struct ispsoftc *isp)
{
	ISP_LOCK(isp);
	if (isp->isp_osinfo.tmflags & TM_WANTED) {
		isp->isp_osinfo.tmflags &= ~TM_WANTED;
		wakeup(&isp->isp_osinfo.tmflags);
	}
	isp->isp_osinfo.tmflags &= ~TM_BUSY;
	ISP_UNLOCK(isp);
}

static cam_status
create_lun_state(struct ispsoftc *isp, struct cam_path *path, tstate_t **rslt)
{
	cam_status status;
	lun_id_t lun;
	tstate_t *tptr, *new;

	lun = xpt_path_lun_id(path);
	if (lun < 0) {
		return (CAM_LUN_INVALID);
	}
	if (is_lun_enabled(isp, lun)) {
		return (CAM_LUN_ALRDY_ENA);
	}
	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT);
	if (new == NULL) {
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(new, sizeof (tstate_t));

	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
	    xpt_path_target_id(path), xpt_path_lun_id(path));
	if (status != CAM_REQ_CMP) {
		free(new, M_DEVBUF);
		return (status);
	}
	new->lun = lun;
	SLIST_INIT(&new->atios);
	SLIST_INIT(&new->inots);
	new->hold = 1;

	ISP_LOCK(isp);
	if ((tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)]) == NULL) {
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(lun)] = new;
	} else {
		while (tptr->next)
			tptr = tptr->next;
		tptr->next = new;
	}
	ISP_UNLOCK(isp);
	*rslt = new;
	return (CAM_REQ_CMP);
}

static __inline void
destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
{
	tstate_t *lw, *pw;

	ISP_LOCK(isp);
	if (tptr->hold) {
		ISP_UNLOCK(isp);
		return;
	}
	pw = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)];
	if (pw == NULL) {
		ISP_UNLOCK(isp);
		return;
	} else if (pw->lun == tptr->lun) {
		isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(tptr->lun)] = pw->next;
	} else {
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			ISP_UNLOCK(isp);
			return;
		}
	}
	free(tptr, M_DEVBUF);
	ISP_UNLOCK(isp);
}

static void
isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
{
	const char *lfmt = "Lun now %sabled for target mode\n";
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	u_int16_t rstat;
	int bus;
	lun_id_t lun;
	target_id_t tgt;

	bus = XS_CHANNEL(ccb);
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	/*
	 * First, check to see if we're enabling on fibre channel
	 * and don't yet have a notion of who the heck we are (no
	 * loop yet).
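	 * In that case the loop must be up and the port database
	 * synchronized before the enable can proceed; otherwise the
	 * request is failed below.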
	 */
	if (IS_FC(isp) && cel->enable &&
	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
		int rv;
		fcparam *fcp = isp->isp_param;

		ISP_LOCK(isp);
		rv = isp_control(isp, ISPCTL_FCLINK_TEST, NULL);
		ISP_UNLOCK(isp);
		if (rv || fcp->isp_fwstate != FW_READY) {
			xpt_print_path(ccb->ccb_h.path);
			printf("link status not good yet\n");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			return;
		}
		ISP_LOCK(isp);
		rv = isp_control(isp, ISPCTL_PDB_SYNC, NULL);
		ISP_UNLOCK(isp);
		if (rv || fcp->isp_fwstate != FW_READY) {
			xpt_print_path(ccb->ccb_h.path);
			printf("could not get a good port database read\n");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			return;
		}
	}

	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we enable/disable target mode but don't do any lun enabling.
	 */
	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
		int av;
		tptr = &isp->isp_osinfo.tsdflt;
		if (cel->enable) {
			if (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) {
				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
				return;
			}
			ccb->ccb_h.status =
			    xpt_create_path(&tptr->owner, NULL,
			    xpt_path_path_id(ccb->ccb_h.path),
			    xpt_path_target_id(ccb->ccb_h.path),
			    xpt_path_lun_id(ccb->ccb_h.path));
			if (ccb->ccb_h.status != CAM_REQ_CMP) {
				return;
			}
			SLIST_INIT(&tptr->atios);
			SLIST_INIT(&tptr->inots);
			av = 1;
			ISP_LOCK(isp);
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				xpt_free_path(tptr->owner);
				ISP_UNLOCK(isp);
				return;
			}
			isp->isp_osinfo.tmflags |= TM_TMODE_ENABLED;
			ISP_UNLOCK(isp);
		} else {
			if ((isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
				ccb->ccb_h.status = CAM_LUN_INVALID;
				return;
			}
			if (are_any_luns_enabled(isp)) {
				ccb->ccb_h.status = CAM_SCSI_BUSY;
				return;
			}
			av = 0;
			ISP_LOCK(isp);
			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
			if (av) {
				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
				ISP_UNLOCK(isp);
				return;
			}
			isp->isp_osinfo.tmflags &= ~TM_TMODE_ENABLED;
			ISP_UNLOCK(isp);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf(lfmt, (cel->enable) ? "en" : "dis");
		return;
	}

	/*
	 * Do some sanity checking first.
	 */

	if (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return;
	}
	if (IS_SCSI(isp)) {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != ((sdparam *) isp->isp_param)->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	} else {
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != ((fcparam *) isp->isp_param)->isp_loopid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return;
		}
	}

	if (cel->enable) {
		ccb->ccb_h.status =
		    create_lun_state(isp, ccb->ccb_h.path, &tptr);
		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			return;
		}
	} else {
		tptr = get_lun_statep(isp, lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}
	}

	if (isp_psema_sig_rqe(isp)) {
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		return;
	}

	ISP_LOCK(isp);
	if (cel->enable) {
		u_int32_t seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;
		if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for ENABLE LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("ENABLE LUN returned 0x%x\n", rstat);
			goto out;
		}
	} else {
		u_int32_t seq;

		seq = isp->isp_osinfo.rollinfo++;
		rstat = LUN_ERR;

		if (isp_lun_cmd(isp, -RQSTYPE_MODIFY_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for MODIFY LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("MODIFY LUN returned 0x%x\n", rstat);
			goto out;
		}
		rstat = LUN_ERR;
		seq = isp->isp_osinfo.rollinfo++;

		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_lun_cmd failed\n");
			goto out;
		}
		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("wait for ENABLE LUN timed out\n");
			goto out;
		}
		rstat = isp->isp_osinfo.rstatus;
		if (rstat != LUN_OK) {
			xpt_print_path(ccb->ccb_h.path);
			printf("ENABLE LUN returned 0x%x\n", rstat);
			goto out;
		}
	}
out:
	isp_vsema_rqe(isp);
	ISP_UNLOCK(isp);

	if (rstat != LUN_OK) {
		xpt_print_path(ccb->ccb_h.path);
		printf("lun %sable failed\n", (cel->enable) ? "en" : "dis");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		rls_lun_statep(isp, tptr);
		if (cel->enable)
			destroy_lun_state(isp, tptr);
	} else {
		xpt_print_path(ccb->ccb_h.path);
		printf(lfmt, (cel->enable) ? "en" : "dis");
"en" : "dis"); 629 rls_lun_statep(isp, tptr); 630 if (cel->enable == 0) { 631 destroy_lun_state(isp, tptr); 632 } 633 ccb->ccb_h.status = CAM_REQ_CMP; 634 } 635 } 636 637 static cam_status 638 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb) 639 { 640 tstate_t *tptr; 641 struct ccb_hdr_slist *lp; 642 struct ccb_hdr *curelm; 643 int found; 644 union ccb *accb = ccb->cab.abort_ccb; 645 646 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 647 if (IS_FC(isp) && (accb->ccb_h.target_id != 648 ((fcparam *) isp->isp_param)->isp_loopid)) { 649 return (CAM_PATH_INVALID); 650 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 651 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 652 return (CAM_PATH_INVALID); 653 } 654 } 655 tptr = get_lun_statep(isp, accb->ccb_h.target_lun); 656 if (tptr == NULL) { 657 return (CAM_PATH_INVALID); 658 } 659 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 660 lp = &tptr->atios; 661 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 662 lp = &tptr->inots; 663 } else { 664 rls_lun_statep(isp, tptr); 665 return (CAM_UA_ABORT); 666 } 667 curelm = SLIST_FIRST(lp); 668 found = 0; 669 if (curelm == &accb->ccb_h) { 670 found = 1; 671 SLIST_REMOVE_HEAD(lp, sim_links.sle); 672 } else { 673 while(curelm != NULL) { 674 struct ccb_hdr *nextelm; 675 676 nextelm = SLIST_NEXT(curelm, sim_links.sle); 677 if (nextelm == &accb->ccb_h) { 678 found = 1; 679 SLIST_NEXT(curelm, sim_links.sle) = 680 SLIST_NEXT(nextelm, sim_links.sle); 681 break; 682 } 683 curelm = nextelm; 684 } 685 } 686 rls_lun_statep(isp, tptr); 687 if (found) { 688 accb->ccb_h.status = CAM_REQ_ABORTED; 689 return (CAM_REQ_CMP); 690 } 691 return(CAM_PATH_INVALID); 692 } 693 694 static cam_status 695 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb) 696 { 697 void *qe; 698 struct ccb_scsiio *cso = &ccb->csio; 699 u_int32_t *hp, save_handle; 700 u_int16_t iptr, optr; 701 702 703 if (isp_getrqentry(isp, &iptr, &optr, &qe)) { 704 xpt_print_path(ccb->ccb_h.path); 705 printf("Request Queue Overflow in isp_target_start_ctio\n"); 706 return (CAM_RESRC_UNAVAIL); 707 } 708 bzero(qe, QENTRY_LEN); 709 710 /* 711 * We're either moving data or completing a command here. 712 */ 713 714 if (IS_FC(isp)) { 715 struct ccb_accept_tio *atiop; 716 ct2_entry_t *cto = qe; 717 718 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 719 cto->ct_header.rqs_entry_count = 1; 720 cto->ct_iid = cso->init_id; 721 if (isp->isp_maxluns <= 16) { 722 cto->ct_lun = ccb->ccb_h.target_lun; 723 } 724 /* 725 * Start with a residual based on what the original datalength 726 * was supposed to be. Basically, we ignore what CAM has set 727 * for residuals. The data transfer routines will knock off 728 * the residual for each byte actually moved- and also will 729 * be responsible for setting the underrun flag. 730 */ 731 /* HACK! HACK! */ 732 if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) { 733 cto->ct_resid = atiop->ccb_h.spriv_field0; 734 } 735 736 /* 737 * We always have to use the tag_id- it has the RX_ID 738 * for this exchage. 
		 */
		cto->ct_rxid = cso->tag_id;
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
				int m = min(cso->sense_len, MAXRESPLEN);
				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
				cto->rsp.m1.ct_senselen = m;
				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
			}
		} else {
			cto->ct_flags |= CT2_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT2_DATA_IN;
			} else {
				cto->ct_flags |= CT2_DATA_OUT;
			}
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
			}
			/*
			 * If we're sending data and status back together,
			 * we can't also send back sense data as well.
			 */
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}
		if (isp_tdebug > 1 && (cto->ct_flags & CT2_SENDSTATUS)) {
			printf("%s:CTIO2 RX_ID 0x%x SCSI STATUS 0x%x "
			    "datalength %u\n", isp->isp_name, cto->ct_rxid,
			    cso->scsi_status, cto->ct_resid);
		}
		hp = &cto->ct_reserved;
	} else {
		ct_entry_t *cto = qe;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		cto->ct_tgt = ccb->ccb_h.target_id;
		cto->ct_lun = ccb->ccb_h.target_lun;
		if (cso->tag_id && cso->tag_action) {
			/*
			 * We don't specify a tag type for regular SCSI.
			 * Just the tag value and set the flag.
			 */
			cto->ct_tag_val = cso->tag_id;
			cto->ct_flags |= CT_TQAE;
		}
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			cto->ct_flags |= CT_NODISC;
		}
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT_NO_DATA;
		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
			cto->ct_flags |= CT_SENDSTATUS;
			cto->ct_scsi_status = cso->scsi_status;
			cto->ct_resid = cso->resid;
		}
		if (isp_tdebug > 1 && (cto->ct_flags & CT_SENDSTATUS)) {
			printf("%s:CTIO SCSI STATUS 0x%x resid %d\n",
			    isp->isp_name, cso->scsi_status, cso->resid);
		}
		hp = &cto->ct_reserved;
		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
	}

	if (isp_save_xs(isp, (ISP_SCSI_XFER_T *)ccb, hp)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("No XFLIST pointers for isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}

	/*
	 * Call the dma setup routines for this entry (and any subsequent
	 * CTIOs) if there's data to move, and then tell the f/w it's got
	 * new things to play with. As with ispscsicmd's usage of DMA setup,
	 * any swizzling is done in the machine dependent layer. Because
	 * of this, we put the request onto the queue area first in native
	 * format.
	 */

	save_handle = *hp;
	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
	case CMD_QUEUED:
		MemoryBarrier();
		ISP_ADD_REQUEST(isp, iptr);
		return (CAM_REQ_INPROG);

	case CMD_EAGAIN:
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		isp_destroy_handle(isp, save_handle);
		return (CAM_RESRC_UNAVAIL);

	default:
		isp_destroy_handle(isp, save_handle);
		return (XS_ERR(ccb));
	}
}

static cam_status
isp_target_putback_atio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_accept_tio *atiop;
	u_int16_t iptr, optr;

	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_putback_atio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(qe, QENTRY_LEN);
	atiop = (struct ccb_accept_tio *) ccb;
	if (IS_FC(isp)) {
		at2_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		if (isp->isp_maxluns > 16) {
			at->at_scclun = (uint16_t) atiop->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) atiop->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = atiop->tag_id;
		ISP_SWIZ_ATIO2(isp, qe, qe);
	} else {
		at_entry_t *at = qe;
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = atiop->init_id;
		at->at_tgt = atiop->ccb_h.target_id;
		at->at_lun = atiop->ccb_h.target_lun;
		at->at_status = CT_OK;
		if (atiop->ccb_h.status & CAM_TAG_ACTION_VALID) {
			at->at_tag_type = atiop->tag_action;
		}
		at->at_tag_val = atiop->tag_id;
		ISP_SWIZ_ATIO(isp, qe, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	MemoryBarrier();
	ISP_ADD_REQUEST(isp, iptr);
	return (CAM_REQ_CMP);
}

static void
isp_refire_putback_atio(void *arg)
{
	union ccb *ccb = arg;
	int s = splcam();
	if (isp_target_putback_atio(XS_ISP(ccb), ccb) != CAM_REQ_CMP) {
		(void) timeout(isp_refire_putback_atio, ccb, 10);
	} else {
		isp_handle_platform_ctio_part2(XS_ISP(ccb), ccb);
	}
	splx(s);
}

/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus - i.e. the initiator
	 * did not set DiscPriv in the identify message. We don't care
	 * about this so it's ignored.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
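		 * For now we just return a BUSY status to the initiator.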
		 */
		printf("%s: PHASE ERROR\n", isp->isp_name);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		printf("%s: bogus atio (0x%x) leaked to platform\n",
		    isp->isp_name, status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	tptr = get_lun_statep(isp, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		printf("no ATIOS for lun %d from initiator %d\n",
		    aep->at_lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	if (tptr == &isp->isp_osinfo.tsdflt) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_tag_val;
	if ((atiop->tag_action = aep->at_tag_type) != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb *)atiop);
	if (isp_tdebug > 1) {
		printf("%s:ATIO CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x %s",
		    isp->isp_name, aep->at_cdb[0] & 0xff, aep->at_iid,
		    aep->at_lun, aep->at_tag_val & 0xff, aep->at_tag_type,
		    (aep->at_flags & AT_NODISC)? "nondisc\n" : "\n");
	}
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		printf("%s: bogus atio (0x%x) leaked to platform\n",
		    isp->isp_name, aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	if (isp->isp_maxluns > 16) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, CAM_LUN_WILDCARD);
	}

	if (tptr == NULL) {
		/*
		 * What we'd like to know is whether or not we have a listener
		 * upstream that really hasn't configured yet. If we do, then
		 * we can give a more sensible reply here. If not, then we can
		 * reject this out of hand.
		 *
		 * Choices for what to send were
		 *
		 *	Not Ready, Unit Not Self-Configured Yet
		 *	(0x2,0x3e,0x00)
		 *
		 * for the former and
		 *
		 *	Illegal Request, Logical Unit Not Supported
		 *	(0x5,0x25,0x00)
		 *
		 * for the latter.
		 *
		 * We used to decide whether there was at least one listener
		 * based upon whether the black hole driver was configured.
		 * However, recent config(8) changes have made this hard to do
		 * at this time.
		 *
		 */
		u_int32_t ccode = SCSI_STATUS_BUSY;

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, ccode, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		printf("no ATIOS for lun %d from initiator %d\n",
		    lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);

	if (tptr == &isp->isp_osinfo.tsdflt) {
		atiop->ccb_h.target_id =
		    ((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	if (aep->at_status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
	case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ACAQ:	/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	if (atiop->tag_action != 0) {
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}

	/*
	 * Preserve overall command datalength in private field.
	 */
	atiop->ccb_h.spriv_field0 = aep->at_datalen;

	xpt_done((union ccb *)atiop);
	if (isp_tdebug > 1) {
		printf("%s:ATIO2 RX_ID 0x%x CDB=0x%x iid%d->lun%d tattr 0x%x "
		    "datalen %u\n",
		    isp->isp_name, aep->at_rxid & 0xffff, aep->at_cdb[0] & 0xff,
		    aep->at_iid, lun, aep->at_taskflags, aep->at_datalen);
	}
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_reserved);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_reserved);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		if (isp_tdebug > 1) {
			printf("%s:CTIO2 RX_ID 0x%x sts 0x%x flg 0x%x sns "
			    "%d FIN\n", isp->isp_name, ct->ct_rxid,
			    ct->ct_status, ct->ct_flags,
			    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0);
		}
		notify_cam = ct->ct_header.rqs_seqno;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (isp_tdebug > 1) {
			printf("%s:CTIO tag 0x%x sts 0x%x flg 0x%x FIN\n",
			    isp->isp_name, ct->ct_tag_val, ct->ct_status,
			    ct->ct_flags);
		}
		notify_cam = ct->ct_header.rqs_seqno;
	}

	/*
	 * We're here either because data transfers are done (and
	 * it's time to send a final status CTIO) or because the final
	 * status CTIO is done. We don't get called for all intermediate
	 * CTIOs that happen for a large data transfer.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. The exception is that we clear
	 * the notion of handling a non-disconnecting command here.
	 */

	if (sentstatus) {
		/*
		 * Data transfer done. See if all went okay.
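		 * A residual of zero means everything moved; otherwise the
		 * entire transfer length is reported back as the residual.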
		 */
		if (ok) {
			ccb->csio.resid = 0;
		} else {
			ccb->csio.resid = ccb->csio.dxfer_len;
		}
	}

	if (notify_cam == 0) {
		if (isp_tdebug > 1) {
			printf("%s:Intermediate CTIO done\n", isp->isp_name);
		}
		return (0);
	}
	if (isp_tdebug > 1) {
		printf("%s:Final CTIO done\n", isp->isp_name);
	}
	if (isp_target_putback_atio(isp, ccb) != CAM_REQ_CMP) {
		(void) timeout(isp_refire_putback_atio, ccb, 10);
	} else {
		isp_handle_platform_ctio_part2(isp, ccb);
	}
	return (0);
}

static void
isp_handle_platform_ctio_part2(struct ispsoftc *isp, union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				IDPRINTF(3, ("%s: isp_done -> relsimq\n",
				    isp->isp_name));
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				IDPRINTF(3, ("%s: isp_done -> devq frozen\n",
				    isp->isp_name));
			}
		} else {
			IDPRINTF(3, ("%s: isp_done -> simqfrozen = %x\n",
			    isp->isp_name, isp->isp_osinfo.simqfrozen));
		}
	}
	xpt_done(ccb);
}
#endif

static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int rvf, tgt;

			tgt = xpt_path_target_id(path);
			rvf = ISP_FW_REVX(isp->isp_fwrev);
			ISP_LOCK(isp);
			sdp += cam_sim_bus(sim);
			isp->isp_update |= (1 << cam_sim_bus(sim));
			nflags = DPARM_SAFE_DFLT;
			if (rvf >= ISP_FW_REV(7, 55, 0) ||
			    (ISP_FW_REV(4, 55, 0) <= rvf &&
			    (rvf < ISP_FW_REV(5, 0, 0)))) {
				nflags |= DPARM_NARROW | DPARM_ASYNC;
			}
			oflags = sdp->isp_devparam[tgt].dev_flags;
			sdp->isp_devparam[tgt].dev_flags = nflags;
			sdp->isp_devparam[tgt].dev_update = 1;
			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
			sdp->isp_devparam[tgt].dev_flags = oflags;
			ISP_UNLOCK(isp);
		}
		break;
	default:
		printf("%s: isp_attach Async Code 0x%x\n", isp->isp_name, code);
		break;
	}
}

static void
isp_poll(struct cam_sim *sim)
{
	isp_intr((struct ispsoftc *) cam_sim_softc(sim));
}

static void
isp_relsim(void *arg)
{
	struct ispsoftc *isp = arg;
	ISP_LOCK(isp);
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			xpt_release_simq(isp->isp_sim, 1);
			IDPRINTF(3, ("%s: timed relsimq\n", isp->isp_name));
		}
	}
	ISP_UNLOCK(isp);
}

static void
isp_watchdog(void *arg)
{
	ISP_SCSI_XFER_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r;

		if (XS_CMD_DONE_P(xs)) {
			PRINTF("%s: watchdog found done cmd (handle 0x%x)\n",
			    isp->isp_name, handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			PRINTF("%s: recursive watchdog (handle 0x%x)\n",
			    isp->isp_name, handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

		r = ISP_READ(isp, BIU_ISR);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			IDPRINTF(2, ("%s: watchdog cleanup (%x, %x)\n",
			    isp->isp_name, handle, r));
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			printf("%s: watchdog timeout (%x, %x)\n",
			    isp->isp_name, handle, r);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;

			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			MemoryBarrier();
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		IDPRINTF(2, ("%s: watchdog with no command\n", isp->isp_name));
	}
	ISP_UNLOCK(isp);
}

static void
isp_action(struct cam_sim *sim, union ccb *ccb)
{
	int bus, tgt, error;
	struct ispsoftc *isp;
	struct ccb_trans_settings *cts;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));

	isp = (struct ispsoftc *)cam_sim_softc(sim);
	ccb->ccb_h.sim_priv.entries[0].field = 0;
	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
	if (isp->isp_state != ISP_RUNSTATE &&
	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
		ISP_LOCK(isp);
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_UNLOCK(isp);
			/*
			 * Lie. Say it was a selection timeout.
			 */
			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
		ISP_UNLOCK(isp);
	}
	IDPRINTF(4, ("%s: isp_action code %x\n", isp->isp_name,
	    ccb->ccb_h.func_code));

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
		}
#ifdef DIAGNOSTIC
		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		}
		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
			printf("%s: invalid tgt/lun (%d.%d) in XPT_SCSI_IO\n",
			    isp->isp_name, ccb->ccb_h.target_id,
			    ccb->ccb_h.target_lun);
			xpt_done(ccb);
			break;
		}
#endif
		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
		ISP_LOCK(isp);
		error = ispscsicmd((ISP_SCSI_XFER_T *) ccb);
		ISP_UNLOCK(isp);
		switch (error) {
		case CMD_QUEUED:
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
				int ticks;
				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
					ticks = 60 * 1000 * hz;
				else
					ticks = ccb->ccb_h.timeout * hz;
				ticks = ((ticks + 999) / 1000) + hz + hz;
				ccb->ccb_h.timeout_ch =
				    timeout(isp_watchdog, (caddr_t)ccb, ticks);
			} else {
				callout_handle_init(&ccb->ccb_h.timeout_ch);
			}
			break;
		case CMD_RQLATER:
			if (isp->isp_osinfo.simqfrozen == 0) {
				IDPRINTF(3, ("%s: RQLATER freeze simq\n",
				    isp->isp_name));
				isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED;
				timeout(isp_relsim, isp, 500);
				xpt_freeze_simq(sim, 1);
			}
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
			break;
		case CMD_EAGAIN:
			if (isp->isp_osinfo.simqfrozen == 0) {
				xpt_freeze_simq(sim, 1);
				IDPRINTF(3, ("%s: EAGAIN freeze simq\n",
				    isp->isp_name));
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
			break;
		case CMD_COMPLETE:
			isp_done((struct ccb_scsiio *) ccb);
			break;
		default:
			printf("%s: What's this? 0x%x at %d in file %s\n",
			    isp->isp_name, error, __LINE__, __FILE__);
			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
			xpt_done(ccb);
		}
		break;

#ifdef ISP_TARGET_MODE
	case XPT_EN_LUN:		/* Enable LUN as a target */
		isp_en_lun(isp, ccb);
		xpt_done(ccb);
		break;

	case XPT_NOTIFY_ACK:		/* recycle notify ack */
	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
	{
		tstate_t *tptr = get_lun_statep(isp, ccb->ccb_h.target_lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			xpt_done(ccb);
			break;
		}
		ccb->ccb_h.sim_priv.entries[0].field = 0;
		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
		ISP_LOCK(isp);
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
#if 0
			(void) isp_target_putback_atio(isp, ccb);
#endif
			SLIST_INSERT_HEAD(&tptr->atios,
			    &ccb->ccb_h, sim_links.sle);
		} else {
			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
			    sim_links.sle);
		}
		ISP_UNLOCK(isp);
		rls_lun_statep(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		ISP_LOCK(isp);
		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
			if (isp->isp_osinfo.simqfrozen == 0) {
				xpt_freeze_simq(sim, 1);
				xpt_print_path(ccb->ccb_h.path);
				printf("XPT_CONT_TARGET_IO freeze simq\n");
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
		} else {
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
		}
		ISP_UNLOCK(isp);
		break;
	}
#endif
	case XPT_RESET_DEV:		/* BDR the specified SCSI device */

		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tgt = ccb->ccb_h.target_id;
		tgt |= (bus << 16);

		ISP_LOCK(isp);
		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
		ISP_UNLOCK(isp);
		if (error) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		union ccb *accb = ccb->cab.abort_ccb;
		switch (accb->ccb_h.func_code) {
#ifdef ISP_TARGET_MODE
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			PRINTF("%s: cannot abort CTIOs yet\n", isp->isp_name);
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
#endif
		case XPT_SCSI_IO:
			ISP_LOCK(isp);
			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
			ISP_UNLOCK(isp);
			if (error) {
				ccb->ccb_h.status = CAM_UA_ABORT;
			} else {
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		default:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */

		cts = &ccb->cts;
		tgt = cts->ccb_h.target_id;
		ISP_LOCK(isp);
		if (IS_SCSI(isp)) {
			sdparam *sdp = isp->isp_param;
			u_int16_t *dptr;

			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

			sdp += bus;
#if 0
			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS)
				dptr = &sdp->isp_devparam[tgt].cur_dflags;
			else
				dptr = &sdp->isp_devparam[tgt].dev_flags;
#else
			/*
			 * We always update (internally) from dev_flags
			 * so any request to change settings just gets
			 * vectored to that location.
			 */
			dptr = &sdp->isp_devparam[tgt].dev_flags;
#endif

			/*
			 * Note that these operations affect the
			 * goal flags (dev_flags)- not
			 * the current state flags. Then we mark
			 * things so that the next operation to
			 * this HBA will cause the update to occur.
			 */
			if (cts->valid & CCB_TRANS_DISC_VALID) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					*dptr |= DPARM_DISC;
				} else {
					*dptr &= ~DPARM_DISC;
				}
			}
			if (cts->valid & CCB_TRANS_TQ_VALID) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					*dptr |= DPARM_TQING;
				} else {
					*dptr &= ~DPARM_TQING;
				}
			}
			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_16_BIT:
					*dptr |= DPARM_WIDE;
					break;
				default:
					*dptr &= ~DPARM_WIDE;
				}
			}
			/*
			 * Any SYNC RATE of nonzero and SYNC_OFFSET
			 * of nonzero will cause us to go to the
			 * selected (from NVRAM) maximum value for
			 * this device. At a later point, we'll
			 * allow finer control.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
			    (cts->sync_offset > 0)) {
				*dptr |= DPARM_SYNC;
			} else {
				*dptr &= ~DPARM_SYNC;
			}
			*dptr |= DPARM_SAFE_DFLT;
			if (bootverbose || isp->isp_dblev >= 3)
				printf("%s: %d.%d set %s period 0x%x offset "
				    "0x%x flags 0x%x\n", isp->isp_name, bus,
				    tgt,
				    (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
				    "current" : "user",
				    sdp->isp_devparam[tgt].sync_period,
				    sdp->isp_devparam[tgt].sync_offset,
				    sdp->isp_devparam[tgt].dev_flags);
			sdp->isp_devparam[tgt].dev_update = 1;
			isp->isp_update |= (1 << bus);
		}
		ISP_UNLOCK(isp);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:

		cts = &ccb->cts;
		tgt = cts->ccb_h.target_id;
		if (IS_FC(isp)) {
			/*
			 * a lot of normal SCSI things don't make sense.
			 */
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			/*
			 * How do you measure the width of a high
			 * speed serial bus? Well, in bytes.
			 *
			 * Offset and period make no sense, though, so we set
			 * (above) a 'base' transfer speed to be gigabit.
			 */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		} else {
			sdparam *sdp = isp->isp_param;
			u_int16_t dval, pval, oval;
			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

			sdp += bus;
			if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) {
				ISP_LOCK(isp);
				sdp->isp_devparam[tgt].dev_refresh = 1;
				isp->isp_update |= (1 << bus);
				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
				    NULL);
				ISP_UNLOCK(isp);
				dval = sdp->isp_devparam[tgt].cur_dflags;
				oval = sdp->isp_devparam[tgt].cur_offset;
				pval = sdp->isp_devparam[tgt].cur_period;
			} else {
				dval = sdp->isp_devparam[tgt].dev_flags;
				oval = sdp->isp_devparam[tgt].sync_offset;
				pval = sdp->isp_devparam[tgt].sync_period;
			}

			ISP_LOCK(isp);
			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

			if (dval & DPARM_DISC) {
				cts->flags |= CCB_TRANS_DISC_ENB;
			}
			if (dval & DPARM_TQING) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			if (dval & DPARM_WIDE) {
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			} else {
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			}
			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;

			if ((dval & DPARM_SYNC) && oval != 0) {
				cts->sync_period = pval;
				cts->sync_offset = oval;
				cts->valid |=
				    CCB_TRANS_SYNC_RATE_VALID |
				    CCB_TRANS_SYNC_OFFSET_VALID;
			}
			ISP_UNLOCK(isp);
			if (bootverbose || isp->isp_dblev >= 3)
				printf("%s: %d.%d get %s period 0x%x offset "
				    "0x%x flags 0x%x\n", isp->isp_name, bus,
				    tgt,
				    (cts->flags & CCB_TRANS_CURRENT_SETTINGS)?
				    "current" : "user", pval, oval, dval);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t secs_per_cylinder;
		u_int32_t size_mb;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			printf("%s: %d.%d XPT_CALC_GEOMETRY block size 0?\n",
			    isp->isp_name, ccg->ccb_h.target_id,
			    ccg->ccb_h.target_lun);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
		size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
		if (size_mb > 1024) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified bus */
		bus = cam_sim_bus(sim);
		ISP_LOCK(isp);
		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
		ISP_UNLOCK(isp);
		if (error)
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		else {
			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
			else if (isp->isp_path != NULL)
				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
#ifdef ISP_TARGET_MODE
		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
#else
		cpi->target_sprt = 0;
#endif
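		/* No HBA engines; report target/lun limits and the bus id. */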
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
		cpi->bus_id = cam_sim_bus(sim);
		if (IS_FC(isp)) {
			cpi->hba_misc = PIM_NOBUSRESET;
			/*
			 * Because our loop ID can shift from time to time,
			 * make our initiator ID out of range of our bus.
			 */
			cpi->initiator_id = cpi->max_target + 1;

			/*
			 * Set base transfer capabilities for Fibre Channel.
			 * Technically not correct because we don't know
			 * what media we're running on top of- but we'll
			 * look good if we always say 100MB/s.
			 */
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else {
			sdparam *sdp = isp->isp_param;
			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->hba_misc = 0;
			cpi->initiator_id = sdp->isp_initiator_id;
			cpi->base_transfer_speed = 3300;
		}
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

#define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
void
isp_done(struct ccb_scsiio *sccb)
{
	struct ispsoftc *isp = XS_ISP(sccb);

	if (XS_NOERR(sccb))
		XS_SETERR(sccb, CAM_REQ_CMP);

	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
	    (sccb->scsi_status != SCSI_STATUS_OK)) {
		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		} else {
			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		}
	}

	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
			sccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(sccb->ccb_h.path, 1);
			if (sccb->scsi_status != SCSI_STATUS_OK)
				IDPRINTF(3, ("%s: fdevq %d.%d %x %x\n",
				    isp->isp_name, sccb->ccb_h.target_id,
				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
				    sccb->scsi_status));
		}
	}

	/*
	 * If we were frozen waiting for resources, clear that we were frozen
	 * waiting for resources. If we are no longer frozen, and the devq
	 * isn't frozen, mark the completing CCB to have the XPT layer
	 * release the simq.
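	 * (The target mode completion path in isp_handle_platform_ctio_part2
	 * applies the same SIMQFRZ_RESOURCE handling.)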
#define ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
void
isp_done(struct ccb_scsiio *sccb)
{
	struct ispsoftc *isp = XS_ISP(sccb);

	if (XS_NOERR(sccb))
		XS_SETERR(sccb, CAM_REQ_CMP);

	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
	    (sccb->scsi_status != SCSI_STATUS_OK)) {
		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		} else {
			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		}
	}

	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
			sccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(sccb->ccb_h.path, 1);
			if (sccb->scsi_status != SCSI_STATUS_OK)
				IDPRINTF(3, ("%s: fdevq %d.%d %x %x\n",
				    isp->isp_name, sccb->ccb_h.target_id,
				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
				    sccb->scsi_status));
		}
	}

	/*
	 * If we were frozen waiting for resources, clear that condition.
	 * If we are no longer frozen, and the devq isn't frozen either,
	 * mark the completing CCB to have the XPT layer release the simq.
	 */
	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
		if (isp->isp_osinfo.simqfrozen == 0) {
			if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
				IDPRINTF(3, ("%s: isp_done -> relsimq\n",
				    isp->isp_name));
				sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			} else {
				IDPRINTF(3, ("%s: isp_done -> devq frozen\n",
				    isp->isp_name));
			}
		} else {
			IDPRINTF(3, ("%s: isp_done -> simqfrozen = %x\n",
			    isp->isp_name, isp->isp_osinfo.simqfrozen));
		}
	}
	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		xpt_print_path(sccb->ccb_h.path);
		printf("cam completion status 0x%x\n", sccb->ccb_h.status);
	}

	XS_CMD_S_DONE(sccb);
	if (XS_CMD_WDOG_P(sccb) == 0) {
		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
		if (XS_CMD_GRACE_P(sccb)) {
			IDPRINTF(2, ("%s: finished command on borrowed time\n",
			    isp->isp_name));
		}
		XS_CMD_S_CLEAR(sccb);
		xpt_done((union ccb *) sccb);
	}
}
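
/*
 * isp_async() is called by the bus-independent core code to report
 * asynchronous events.  For ISPASYNC_NEW_TGT_PARAMS the argument points
 * to an int that packs the bus number into bits 16-31 and the target
 * number into bits 0-15; for example (hypothetical values) bus 1,
 * target 3 arrives as 0x00010003 and is unpacked below before the new
 * settings are announced with xpt_async(AC_TRANSFER_NEG, ...).
 */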
int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, rv = 0;
	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
	{
		int flags, tgt;
		sdparam *sdp = isp->isp_param;
		struct ccb_trans_settings neg;
		struct cam_path *tmppath;

		tgt = *((int *)arg);
		bus = (tgt >> 16) & 0xffff;
		tgt &= 0xffff;
		sdp += bus;
		if (xpt_create_path(&tmppath, NULL,
		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_print_path(isp->isp_path);
			printf("isp_async cannot make temp path for "
			    "target %d bus %d\n", tgt, bus);
			rv = -1;
			break;
		}
		flags = sdp->isp_devparam[tgt].cur_dflags;
		neg.flags = 0;
		neg.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
		if (flags & DPARM_DISC) {
			neg.flags |= CCB_TRANS_DISC_ENB;
		}
		if (flags & DPARM_TQING) {
			neg.flags |= CCB_TRANS_TAG_ENB;
		}
		neg.valid |= CCB_TRANS_BUS_WIDTH_VALID;
		neg.bus_width = (flags & DPARM_WIDE)?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
		neg.sync_period = sdp->isp_devparam[tgt].cur_period;
		neg.sync_offset = sdp->isp_devparam[tgt].cur_offset;
		if (flags & DPARM_SYNC) {
			neg.valid |=
			    CCB_TRANS_SYNC_RATE_VALID |
			    CCB_TRANS_SYNC_OFFSET_VALID;
		}
		IDPRINTF(3, ("%s: NEW_TGT_PARAMS bus %d tgt %d period "
		    "0x%x offset 0x%x flags 0x%x\n", isp->isp_name,
		    bus, tgt, neg.sync_period, neg.sync_offset, flags));
		xpt_setup_ccb(&neg.ccb_h, tmppath, 1);
		xpt_async(AC_TRANSFER_NEG, tmppath, &neg);
		xpt_free_path(tmppath);
		break;
	}
	case ISPASYNC_BUS_RESET:
		bus = *((int *)arg);
		printf("%s: SCSI bus reset on bus %d detected\n",
		    isp->isp_name, bus);
		if (bus > 0 && isp->isp_path2) {
			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
		} else if (isp->isp_path) {
			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
		}
		break;
	case ISPASYNC_LOOP_DOWN:
		if (isp->isp_path) {
			if (isp->isp_osinfo.simqfrozen == 0) {
				IDPRINTF(3, ("%s: loop down freeze simq\n",
				    isp->isp_name));
				xpt_freeze_simq(isp->isp_sim, 1);
			}
			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
		}
		printf("%s: Loop DOWN\n", isp->isp_name);
		break;
	case ISPASYNC_LOOP_UP:
		if (isp->isp_path) {
			int wasfrozen =
			    isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
			isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
			if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
				xpt_release_simq(isp->isp_sim, 1);
				IDPRINTF(3, ("%s: loop up release simq\n",
				    isp->isp_name));
			}
		}
		printf("%s: Loop UP\n", isp->isp_name);
		break;
	case ISPASYNC_PDB_CHANGED:
	{
		const char *fmt = "%s: Target %d (Loop 0x%x) Port ID 0x%x "
		    "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x\n";
		static const char *roles[4] = {
			"(none)", "Target", "Initiator", "Target/Initiator"
		};
		char *ptr;
		fcparam *fcp = isp->isp_param;
		int tgt = *((int *) arg);
		struct lportdb *lp = &fcp->portdb[tgt];

		if (lp->valid) {
			ptr = "arrived";
		} else {
			ptr = "disappeared";
		}
		printf(fmt, isp->isp_name, tgt, lp->loopid, lp->portid,
		    roles[lp->roles & 0x3], ptr,
		    (u_int32_t) (lp->port_wwn >> 32),
		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
		    (u_int32_t) (lp->node_wwn >> 32),
		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
		break;
	}
	case ISPASYNC_CHANGE_NOTIFY:
		printf("%s: Name Server Database Changed\n", isp->isp_name);
		break;
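
	/*
	 * The fabric name server GA_NXT response handled below carries the
	 * 24-bit port ID and the 64-bit port/node world wide names as
	 * big-endian byte arrays; they are folded into integers one byte at
	 * a time.  For example (hypothetical bytes) a port ID of
	 * { 0x01, 0x02, 0x03 } becomes 0x010203.  A generic form of this
	 * fold is sketched at the end of this file.
	 */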
#ifdef ISP2100_FABRIC
	case ISPASYNC_FABRIC_DEV:
	{
		int target;
		struct lportdb *lp;
		char *pt;
		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwpn, wwnn;
		fcparam *fcp = isp->isp_param;

		rv = -1;

		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));

		wwpn =
		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
		    (((u_int64_t)resp->snscb_portname[6]) << 8) |
		    (((u_int64_t)resp->snscb_portname[7]));

		wwnn =
		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
		    (((u_int64_t)resp->snscb_nodename[6]) << 8) |
		    (((u_int64_t)resp->snscb_nodename[7]));
		if (portid == 0 || wwpn == 0) {
			rv = 0;
			break;
		}

		switch (resp->snscb_port_type) {
		case 1:
			pt = " N_Port";
			break;
		case 2:
			pt = " NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = " Nx_Port";
			break;
		case 0x81:
			pt = " F_port";
			break;
		case 0x82:
			pt = " FL_Port";
			break;
		case 0x84:
			pt = " E_port";
			break;
		default:
			pt = "?";
			break;
		}
		CFGPRINTF("%s: %s @ 0x%x, Node 0x%08x%08x Port %08x%08x\n",
		    isp->isp_name, pt, portid,
		    ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
#if 0
		if ((resp->snscb_fc4_types[1] & 0x1) == 0) {
			rv = 0;
			printf("Types 0..3: 0x%x 0x%x 0x%x 0x%x\n",
			    resp->snscb_fc4_types[0], resp->snscb_fc4_types[1],
			    resp->snscb_fc4_types[2], resp->snscb_fc4_types[3]);
			break;
		}
#endif
		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn)
				break;
		}
		if (target < MAX_FC_TARG) {
			rv = 0;
			break;
		}
		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0)
				break;
		}
		if (target == MAX_FC_TARG) {
			printf("%s: no more space for fabric devices\n",
			    isp->isp_name);
			break;
		}
		lp->node_wwn = wwnn;
		lp->port_wwn = wwpn;
		lp->portid = portid;
		rv = 0;
		break;
	}
#endif
#ifdef ISP_TARGET_MODE
	case ISPASYNC_TARGET_MESSAGE:
	{
		tmd_msg_t *mp = arg;
		ITDEBUG(2, ("%s: bus %d iid %d tgt %d lun %d ttype %x tval %x"
		    " msg[0]=0x%x\n", isp->isp_name, mp->nt_bus,
		    (int) mp->nt_iid, (int) mp->nt_tgt, (int) mp->nt_lun,
		    mp->nt_tagtype, mp->nt_tagval, mp->nt_msg[0]));
		break;
	}
	case ISPASYNC_TARGET_EVENT:
	{
		tmd_event_t *ep = arg;
		ITDEBUG(2, ("%s: bus %d event code 0x%x\n", isp->isp_name,
		    ep->ev_bus, ep->ev_event));
		break;
	}
	case ISPASYNC_TARGET_ACTION:
		switch (((isphdr_t *)arg)->rqs_entry_type) {
		default:
			printf("%s: event 0x%x for unhandled target action\n",
			    isp->isp_name, ((isphdr_t *)arg)->rqs_entry_type);
			break;
		case RQSTYPE_ATIO:
			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
			break;
		case RQSTYPE_ATIO2:
			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
			break;
		case RQSTYPE_CTIO2:
		case RQSTYPE_CTIO:
			rv = isp_handle_platform_ctio(isp, arg);
			break;
		case RQSTYPE_ENABLE_LUN:
		case RQSTYPE_MODIFY_LUN:
			isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status);
			break;
		}
		break;
#endif
	default:
		PRINTF("%s: unknown isp_async event %d\n", isp->isp_name, cmd);
		rv = -1;
		break;
	}
	return (rv);
}


/*
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
	DISABLE_INTS(isp);
}
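
/*
 * Illustrative sketch only: a generic form of the big-endian byte fold
 * used in the ISPASYNC_FABRIC_DEV case above to build the 24-bit port ID
 * and the 64-bit world wide names.  The helper name and the guarding
 * macro are hypothetical; the code is normally compiled out and is not
 * referenced by the driver.
 */
#ifdef ISP_EXAMPLE_SKETCH
static u_int64_t
isp_get_be(const u_int8_t *bytes, int nbytes)
{
	u_int64_t val = 0;
	int i;

	/* Fold up to eight big-endian bytes into a single integer value. */
	for (i = 0; i < nbytes; i++) {
		val = (val << 8) | bytes[i];
	}
	return (val);
}
#endif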