1 /* $FreeBSD$ */ 2 /* 3 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 4 * 5 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 #include <dev/isp/isp_freebsd.h> 29 #include <sys/unistd.h> 30 #include <sys/kthread.h> 31 #include <machine/stdarg.h> /* for use by isp_prt below */ 32 #include <sys/conf.h> 33 #include <sys/ioccom.h> 34 #include <dev/isp/isp_ioctl.h> 35 36 37 static d_ioctl_t ispioctl; 38 static void isp_intr_enable(void *); 39 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *); 40 static void isp_poll(struct cam_sim *); 41 #if 0 42 static void isp_relsim(void *); 43 #endif 44 static timeout_t isp_watchdog; 45 static void isp_kthread(void *); 46 static void isp_action(struct cam_sim *, union ccb *); 47 48 49 #define ISP_CDEV_MAJOR 248 50 static struct cdevsw isp_cdevsw = { 51 /* open */ nullopen, 52 /* close */ nullclose, 53 /* read */ noread, 54 /* write */ nowrite, 55 /* ioctl */ ispioctl, 56 /* poll */ nopoll, 57 /* mmap */ nommap, 58 /* strategy */ nostrategy, 59 /* name */ "isp", 60 /* maj */ ISP_CDEV_MAJOR, 61 /* dump */ nodump, 62 /* psize */ nopsize, 63 /* flags */ D_TAPE, 64 }; 65 66 static struct ispsoftc *isplist = NULL; 67 68 void 69 isp_attach(struct ispsoftc *isp) 70 { 71 int primary, secondary; 72 struct ccb_setasync csa; 73 struct cam_devq *devq; 74 struct cam_sim *sim; 75 struct cam_path *path; 76 77 /* 78 * Establish (in case of 12X0) which bus is the primary. 79 */ 80 81 primary = 0; 82 secondary = 1; 83 84 /* 85 * Create the device queue for our SIM(s). 86 */ 87 devq = cam_simq_alloc(isp->isp_maxcmds); 88 if (devq == NULL) { 89 return; 90 } 91 92 /* 93 * Construct our SIM entry. 94 */ 95 ISPLOCK_2_CAMLOCK(isp); 96 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 97 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 98 if (sim == NULL) { 99 cam_simq_free(devq); 100 CAMLOCK_2_ISPLOCK(isp); 101 return; 102 } 103 CAMLOCK_2_ISPLOCK(isp); 104 105 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 106 isp->isp_osinfo.ehook.ich_arg = isp; 107 ISPLOCK_2_CAMLOCK(isp); 108 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 109 cam_sim_free(sim, TRUE); 110 CAMLOCK_2_ISPLOCK(isp); 111 isp_prt(isp, ISP_LOGERR, 112 "could not establish interrupt enable hook"); 113 return; 114 } 115 116 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 117 cam_sim_free(sim, TRUE); 118 CAMLOCK_2_ISPLOCK(isp); 119 return; 120 } 121 122 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 123 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 124 xpt_bus_deregister(cam_sim_path(sim)); 125 cam_sim_free(sim, TRUE); 126 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 127 CAMLOCK_2_ISPLOCK(isp); 128 return; 129 } 130 131 xpt_setup_ccb(&csa.ccb_h, path, 5); 132 csa.ccb_h.func_code = XPT_SASYNC_CB; 133 csa.event_enable = AC_LOST_DEVICE; 134 csa.callback = isp_cam_async; 135 csa.callback_arg = sim; 136 xpt_action((union ccb *)&csa); 137 CAMLOCK_2_ISPLOCK(isp); 138 isp->isp_sim = sim; 139 isp->isp_path = path; 140 /* 141 * Create a kernel thread for fibre channel instances. We 142 * don't have dual channel FC cards. 143 */ 144 if (IS_FC(isp)) { 145 ISPLOCK_2_CAMLOCK(isp); 146 /* XXX: LOCK VIOLATION */ 147 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); 148 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 149 RFHIGHPID, "%s: fc_thrd", 150 device_get_nameunit(isp->isp_dev))) { 151 xpt_bus_deregister(cam_sim_path(sim)); 152 cam_sim_free(sim, TRUE); 153 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 154 CAMLOCK_2_ISPLOCK(isp); 155 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 156 return; 157 } 158 CAMLOCK_2_ISPLOCK(isp); 159 } 160 161 162 /* 163 * If we have a second channel, construct SIM entry for that. 164 */ 165 if (IS_DUALBUS(isp)) { 166 ISPLOCK_2_CAMLOCK(isp); 167 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 168 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 169 if (sim == NULL) { 170 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 171 xpt_free_path(isp->isp_path); 172 cam_simq_free(devq); 173 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 174 return; 175 } 176 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 177 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 178 xpt_free_path(isp->isp_path); 179 cam_sim_free(sim, TRUE); 180 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 181 CAMLOCK_2_ISPLOCK(isp); 182 return; 183 } 184 185 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 186 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 187 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 188 xpt_free_path(isp->isp_path); 189 xpt_bus_deregister(cam_sim_path(sim)); 190 cam_sim_free(sim, TRUE); 191 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 192 CAMLOCK_2_ISPLOCK(isp); 193 return; 194 } 195 196 xpt_setup_ccb(&csa.ccb_h, path, 5); 197 csa.ccb_h.func_code = XPT_SASYNC_CB; 198 csa.event_enable = AC_LOST_DEVICE; 199 csa.callback = isp_cam_async; 200 csa.callback_arg = sim; 201 xpt_action((union ccb *)&csa); 202 CAMLOCK_2_ISPLOCK(isp); 203 isp->isp_sim2 = sim; 204 isp->isp_path2 = path; 205 } 206 207 #ifdef ISP_TARGET_MODE 208 cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a"); 209 cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b"); 210 cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a"); 211 cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b"); 212 #endif 213 /* 214 * Create device nodes 215 */ 216 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 217 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 218 219 if (isp->isp_role != ISP_ROLE_NONE) { 220 isp->isp_state = ISP_RUNSTATE; 221 ENABLE_INTS(isp); 222 } 223 if (isplist == NULL) { 224 isplist = isp; 225 } else { 226 struct ispsoftc *tmp = isplist; 227 while (tmp->isp_osinfo.next) { 228 tmp = tmp->isp_osinfo.next; 229 } 230 tmp->isp_osinfo.next = isp; 231 } 232 233 } 234 235 static int 236 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 237 { 238 struct ispsoftc *isp; 239 int retval = ENOTTY; 240 241 isp = isplist; 242 while (isp) { 243 if (minor(dev) == device_get_unit(isp->isp_dev)) { 244 break; 245 } 246 isp = isp->isp_osinfo.next; 247 } 248 if (isp == NULL) 249 return (ENXIO); 250 251 switch (cmd) { 252 case ISP_SDBLEV: 253 { 254 int olddblev = isp->isp_dblev; 255 isp->isp_dblev = *(int *)addr; 256 *(int *)addr = olddblev; 257 retval = 0; 258 break; 259 } 260 case ISP_RESETHBA: 261 ISP_LOCK(isp); 262 isp_reinit(isp); 263 ISP_UNLOCK(isp); 264 retval = 0; 265 break; 266 case ISP_FC_RESCAN: 267 if (IS_FC(isp)) { 268 ISP_LOCK(isp); 269 if (isp_fc_runstate(isp, 5 * 1000000)) { 270 retval = EIO; 271 } else { 272 retval = 0; 273 } 274 ISP_UNLOCK(isp); 275 } 276 break; 277 case ISP_FC_LIP: 278 if (IS_FC(isp)) { 279 ISP_LOCK(isp); 280 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 281 retval = EIO; 282 } else { 283 retval = 0; 284 } 285 ISP_UNLOCK(isp); 286 } 287 break; 288 case ISP_FC_GETDINFO: 289 { 290 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 291 struct lportdb *lp; 292 293 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 294 retval = EINVAL; 295 break; 296 } 297 ISP_LOCK(isp); 298 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 299 if (lp->valid) { 300 ifc->loopid = lp->loopid; 301 ifc->portid = lp->portid; 302 ifc->node_wwn = lp->node_wwn; 303 ifc->port_wwn = lp->port_wwn; 304 retval = 0; 305 } else { 306 retval = ENODEV; 307 } 308 ISP_UNLOCK(isp); 309 break; 310 } 311 default: 312 break; 313 } 314 return (retval); 315 } 316 317 static void 318 isp_intr_enable(void *arg) 319 { 320 struct ispsoftc *isp = arg; 321 if (isp->isp_role != ISP_ROLE_NONE) { 322 ENABLE_INTS(isp); 323 isp->isp_osinfo.intsok = 1; 324 } 325 /* Release our hook so that the boot can continue. */ 326 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 327 } 328 329 /* 330 * Put the target mode functions here, because some are inlines 331 */ 332 333 #ifdef ISP_TARGET_MODE 334 335 static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t); 336 static __inline int are_any_luns_enabled(struct ispsoftc *, int); 337 static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t); 338 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *); 339 static __inline int isp_psema_sig_rqe(struct ispsoftc *, int); 340 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int); 341 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int, int); 342 static __inline void isp_vsema_rqe(struct ispsoftc *, int); 343 static __inline atio_private_data_t *isp_get_atpd(struct ispsoftc *, int); 344 static cam_status 345 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **); 346 static void destroy_lun_state(struct ispsoftc *, tstate_t *); 347 static void isp_en_lun(struct ispsoftc *, union ccb *); 348 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *); 349 static timeout_t isp_refire_putback_atio; 350 static void isp_complete_ctio(union ccb *); 351 static void isp_target_putback_atio(union ccb *); 352 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *); 353 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *); 354 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *); 355 static int isp_handle_platform_ctio(struct ispsoftc *, void *); 356 357 static __inline int 358 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun) 359 { 360 tstate_t *tptr; 361 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 362 if (tptr == NULL) { 363 return (0); 364 } 365 do { 366 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { 367 return (1); 368 } 369 } while ((tptr = tptr->next) != NULL); 370 return (0); 371 } 372 373 static __inline int 374 are_any_luns_enabled(struct ispsoftc *isp, int port) 375 { 376 int lo, hi; 377 if (IS_DUALBUS(isp)) { 378 lo = (port * (LUN_HASH_SIZE >> 1)); 379 hi = lo + (LUN_HASH_SIZE >> 1); 380 } else { 381 lo = 0; 382 hi = LUN_HASH_SIZE; 383 } 384 for (lo = 0; lo < hi; lo++) { 385 if (isp->isp_osinfo.lun_hash[lo]) { 386 return (1); 387 } 388 } 389 return (0); 390 } 391 392 static __inline tstate_t * 393 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun) 394 { 395 tstate_t *tptr = NULL; 396 397 if (lun == CAM_LUN_WILDCARD) { 398 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 399 tptr = &isp->isp_osinfo.tsdflt[bus]; 400 tptr->hold++; 401 return (tptr); 402 } 403 } else { 404 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 405 if (tptr == NULL) { 406 return (NULL); 407 } 408 } 409 410 do { 411 if (tptr->lun == lun && tptr->bus == bus) { 412 tptr->hold++; 413 return (tptr); 414 } 415 } while ((tptr = tptr->next) != NULL); 416 return (tptr); 417 } 418 419 static __inline void 420 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr) 421 { 422 if (tptr->hold) 423 tptr->hold--; 424 } 425 426 static __inline int 427 isp_psema_sig_rqe(struct ispsoftc *isp, int bus) 428 { 429 while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) { 430 isp->isp_osinfo.tmflags[bus] |= TM_WANTED; 431 if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) { 432 return (-1); 433 } 434 isp->isp_osinfo.tmflags[bus] |= TM_BUSY; 435 } 436 return (0); 437 } 438 439 static __inline int 440 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo) 441 { 442 if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) { 443 return (-1); 444 } 445 return (0); 446 } 447 448 static __inline void 449 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status) 450 { 451 isp->isp_osinfo.rstatus[bus] = status; 452 cv_signal(&isp->isp_osinfo.tgtcv1[bus]); 453 } 454 455 static __inline void 456 isp_vsema_rqe(struct ispsoftc *isp, int bus) 457 { 458 if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) { 459 isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED; 460 cv_signal(&isp->isp_osinfo.tgtcv0[bus]); 461 } 462 isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY; 463 } 464 465 static __inline atio_private_data_t * 466 isp_get_atpd(struct ispsoftc *isp, int tag) 467 { 468 atio_private_data_t *atp; 469 for (atp = isp->isp_osinfo.atpdp; 470 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 471 if (atp->tag == tag) 472 return (atp); 473 } 474 return (NULL); 475 } 476 477 static cam_status 478 create_lun_state(struct ispsoftc *isp, int bus, 479 struct cam_path *path, tstate_t **rslt) 480 { 481 cam_status status; 482 lun_id_t lun; 483 int hfx; 484 tstate_t *tptr, *new; 485 486 lun = xpt_path_lun_id(path); 487 if (lun < 0) { 488 return (CAM_LUN_INVALID); 489 } 490 if (is_lun_enabled(isp, bus, lun)) { 491 return (CAM_LUN_ALRDY_ENA); 492 } 493 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 494 if (new == NULL) { 495 return (CAM_RESRC_UNAVAIL); 496 } 497 498 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 499 xpt_path_target_id(path), xpt_path_lun_id(path)); 500 if (status != CAM_REQ_CMP) { 501 free(new, M_DEVBUF); 502 return (status); 503 } 504 new->bus = bus; 505 new->lun = lun; 506 SLIST_INIT(&new->atios); 507 SLIST_INIT(&new->inots); 508 new->hold = 1; 509 510 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 511 tptr = isp->isp_osinfo.lun_hash[hfx]; 512 if (tptr == NULL) { 513 isp->isp_osinfo.lun_hash[hfx] = new; 514 } else { 515 while (tptr->next) 516 tptr = tptr->next; 517 tptr->next = new; 518 } 519 *rslt = new; 520 return (CAM_REQ_CMP); 521 } 522 523 static __inline void 524 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr) 525 { 526 int hfx; 527 tstate_t *lw, *pw; 528 529 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 530 if (tptr->hold) { 531 return; 532 } 533 pw = isp->isp_osinfo.lun_hash[hfx]; 534 if (pw == NULL) { 535 return; 536 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 537 isp->isp_osinfo.lun_hash[hfx] = pw->next; 538 } else { 539 lw = pw; 540 pw = lw->next; 541 while (pw) { 542 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 543 lw->next = pw->next; 544 break; 545 } 546 lw = pw; 547 pw = pw->next; 548 } 549 if (pw == NULL) { 550 return; 551 } 552 } 553 free(tptr, M_DEVBUF); 554 } 555 556 /* 557 * we enter with our locks held. 558 */ 559 static void 560 isp_en_lun(struct ispsoftc *isp, union ccb *ccb) 561 { 562 const char lfmt[] = "Lun now %sabled for target mode on channel %d"; 563 struct ccb_en_lun *cel = &ccb->cel; 564 tstate_t *tptr; 565 u_int16_t rstat; 566 int bus, cmd, av, wildcard; 567 lun_id_t lun; 568 target_id_t tgt; 569 570 571 bus = XS_CHANNEL(ccb) & 0x1; 572 tgt = ccb->ccb_h.target_id; 573 lun = ccb->ccb_h.target_lun; 574 575 /* 576 * Do some sanity checking first. 577 */ 578 579 if ((lun != CAM_LUN_WILDCARD) && 580 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 581 ccb->ccb_h.status = CAM_LUN_INVALID; 582 return; 583 } 584 585 if (IS_SCSI(isp)) { 586 sdparam *sdp = isp->isp_param; 587 sdp += bus; 588 if (tgt != CAM_TARGET_WILDCARD && 589 tgt != sdp->isp_initiator_id) { 590 ccb->ccb_h.status = CAM_TID_INVALID; 591 return; 592 } 593 } else { 594 if (tgt != CAM_TARGET_WILDCARD && 595 tgt != FCPARAM(isp)->isp_iid) { 596 ccb->ccb_h.status = CAM_TID_INVALID; 597 return; 598 } 599 /* 600 * This is as a good a place as any to check f/w capabilities. 601 */ 602 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) { 603 isp_prt(isp, ISP_LOGERR, 604 "firmware does not support target mode"); 605 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 606 return; 607 } 608 /* 609 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 610 * XXX: dorks with our already fragile enable/disable code. 611 */ 612 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 613 isp_prt(isp, ISP_LOGERR, 614 "firmware not SCCLUN capable"); 615 } 616 } 617 618 if (tgt == CAM_TARGET_WILDCARD) { 619 if (lun == CAM_LUN_WILDCARD) { 620 wildcard = 1; 621 } else { 622 ccb->ccb_h.status = CAM_LUN_INVALID; 623 return; 624 } 625 } else { 626 wildcard = 0; 627 } 628 629 /* 630 * Next check to see whether this is a target/lun wildcard action. 631 * 632 * If so, we know that we can accept commands for luns that haven't 633 * been enabled yet and send them upstream. Otherwise, we have to 634 * handle them locally (if we see them at all). 635 */ 636 637 if (wildcard) { 638 tptr = &isp->isp_osinfo.tsdflt[bus]; 639 if (cel->enable) { 640 if (isp->isp_osinfo.tmflags[bus] & 641 TM_WILDCARD_ENABLED) { 642 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 643 return; 644 } 645 ccb->ccb_h.status = 646 xpt_create_path(&tptr->owner, NULL, 647 xpt_path_path_id(ccb->ccb_h.path), 648 xpt_path_target_id(ccb->ccb_h.path), 649 xpt_path_lun_id(ccb->ccb_h.path)); 650 if (ccb->ccb_h.status != CAM_REQ_CMP) { 651 return; 652 } 653 SLIST_INIT(&tptr->atios); 654 SLIST_INIT(&tptr->inots); 655 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 656 } else { 657 if ((isp->isp_osinfo.tmflags[bus] & 658 TM_WILDCARD_ENABLED) == 0) { 659 ccb->ccb_h.status = CAM_REQ_CMP; 660 return; 661 } 662 if (tptr->hold) { 663 ccb->ccb_h.status = CAM_SCSI_BUSY; 664 return; 665 } 666 xpt_free_path(tptr->owner); 667 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 668 } 669 } 670 671 /* 672 * Now check to see whether this bus needs to be 673 * enabled/disabled with respect to target mode. 674 */ 675 av = bus << 31; 676 if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) { 677 av |= ENABLE_TARGET_FLAG; 678 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 679 if (av) { 680 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 681 if (wildcard) { 682 isp->isp_osinfo.tmflags[bus] &= 683 ~TM_WILDCARD_ENABLED; 684 xpt_free_path(tptr->owner); 685 } 686 return; 687 } 688 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 689 isp_prt(isp, ISP_LOGINFO, 690 "Target Mode enabled on channel %d", bus); 691 } else if (cel->enable == 0 && 692 (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) { 693 if (are_any_luns_enabled(isp, bus)) { 694 ccb->ccb_h.status = CAM_SCSI_BUSY; 695 return; 696 } 697 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 698 if (av) { 699 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 700 return; 701 } 702 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 703 isp_prt(isp, ISP_LOGINFO, 704 "Target Mode disabled on channel %d", bus); 705 } 706 707 if (wildcard) { 708 ccb->ccb_h.status = CAM_REQ_CMP; 709 return; 710 } 711 712 if (cel->enable) { 713 ccb->ccb_h.status = 714 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 715 if (ccb->ccb_h.status != CAM_REQ_CMP) { 716 return; 717 } 718 } else { 719 tptr = get_lun_statep(isp, bus, lun); 720 if (tptr == NULL) { 721 ccb->ccb_h.status = CAM_LUN_INVALID; 722 return; 723 } 724 } 725 726 if (isp_psema_sig_rqe(isp, bus)) { 727 rls_lun_statep(isp, tptr); 728 if (cel->enable) 729 destroy_lun_state(isp, tptr); 730 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 731 return; 732 } 733 734 if (cel->enable) { 735 u_int32_t seq = isp->isp_osinfo.rollinfo++; 736 int c, n, ulun = lun; 737 738 cmd = RQSTYPE_ENABLE_LUN; 739 c = DFLT_CMND_CNT; 740 n = DFLT_INOT_CNT; 741 if (IS_FC(isp) && lun != 0) { 742 cmd = RQSTYPE_MODIFY_LUN; 743 n = 0; 744 /* 745 * For SCC firmware, we only deal with setting 746 * (enabling or modifying) lun 0. 747 */ 748 ulun = 0; 749 } 750 rstat = LUN_ERR; 751 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { 752 xpt_print_path(ccb->ccb_h.path); 753 isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed"); 754 goto out; 755 } 756 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 757 xpt_print_path(ccb->ccb_h.path); 758 isp_prt(isp, ISP_LOGERR, 759 "wait for ENABLE/MODIFY LUN timed out"); 760 goto out; 761 } 762 rstat = isp->isp_osinfo.rstatus[bus]; 763 if (rstat != LUN_OK) { 764 xpt_print_path(ccb->ccb_h.path); 765 isp_prt(isp, ISP_LOGERR, 766 "ENABLE/MODIFY LUN returned 0x%x", rstat); 767 goto out; 768 } 769 } else { 770 int c, n, ulun = lun; 771 u_int32_t seq; 772 773 rstat = LUN_ERR; 774 seq = isp->isp_osinfo.rollinfo++; 775 cmd = -RQSTYPE_MODIFY_LUN; 776 777 c = DFLT_CMND_CNT; 778 n = DFLT_INOT_CNT; 779 if (IS_FC(isp) && lun != 0) { 780 n = 0; 781 /* 782 * For SCC firmware, we only deal with setting 783 * (enabling or modifying) lun 0. 784 */ 785 ulun = 0; 786 } 787 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { 788 xpt_print_path(ccb->ccb_h.path); 789 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); 790 goto out; 791 } 792 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 793 xpt_print_path(ccb->ccb_h.path); 794 isp_prt(isp, ISP_LOGERR, 795 "wait for MODIFY LUN timed out"); 796 goto out; 797 } 798 rstat = isp->isp_osinfo.rstatus[bus]; 799 if (rstat != LUN_OK) { 800 xpt_print_path(ccb->ccb_h.path); 801 isp_prt(isp, ISP_LOGERR, 802 "MODIFY LUN returned 0x%x", rstat); 803 goto out; 804 } 805 if (IS_FC(isp) && lun) { 806 goto out; 807 } 808 809 seq = isp->isp_osinfo.rollinfo++; 810 811 rstat = LUN_ERR; 812 cmd = -RQSTYPE_ENABLE_LUN; 813 if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) { 814 xpt_print_path(ccb->ccb_h.path); 815 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); 816 goto out; 817 } 818 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 819 xpt_print_path(ccb->ccb_h.path); 820 isp_prt(isp, ISP_LOGERR, 821 "wait for DISABLE LUN timed out"); 822 goto out; 823 } 824 rstat = isp->isp_osinfo.rstatus[bus]; 825 if (rstat != LUN_OK) { 826 xpt_print_path(ccb->ccb_h.path); 827 isp_prt(isp, ISP_LOGWARN, 828 "DISABLE LUN returned 0x%x", rstat); 829 goto out; 830 } 831 if (are_any_luns_enabled(isp, bus) == 0) { 832 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 833 if (av) { 834 isp_prt(isp, ISP_LOGWARN, 835 "disable target mode on channel %d failed", 836 bus); 837 goto out; 838 } 839 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 840 xpt_print_path(ccb->ccb_h.path); 841 isp_prt(isp, ISP_LOGINFO, 842 "Target Mode disabled on channel %d", bus); 843 } 844 } 845 846 out: 847 isp_vsema_rqe(isp, bus); 848 849 if (rstat != LUN_OK) { 850 xpt_print_path(ccb->ccb_h.path); 851 isp_prt(isp, ISP_LOGWARN, 852 "lun %sable failed", (cel->enable) ? "en" : "dis"); 853 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 854 rls_lun_statep(isp, tptr); 855 if (cel->enable) 856 destroy_lun_state(isp, tptr); 857 } else { 858 xpt_print_path(ccb->ccb_h.path); 859 isp_prt(isp, ISP_LOGINFO, lfmt, 860 (cel->enable) ? "en" : "dis", bus); 861 rls_lun_statep(isp, tptr); 862 if (cel->enable == 0) { 863 destroy_lun_state(isp, tptr); 864 } 865 ccb->ccb_h.status = CAM_REQ_CMP; 866 } 867 } 868 869 static cam_status 870 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb) 871 { 872 tstate_t *tptr; 873 struct ccb_hdr_slist *lp; 874 struct ccb_hdr *curelm; 875 int found; 876 union ccb *accb = ccb->cab.abort_ccb; 877 878 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 879 if (IS_FC(isp) && (accb->ccb_h.target_id != 880 ((fcparam *) isp->isp_param)->isp_loopid)) { 881 return (CAM_PATH_INVALID); 882 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 883 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 884 return (CAM_PATH_INVALID); 885 } 886 } 887 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 888 if (tptr == NULL) { 889 return (CAM_PATH_INVALID); 890 } 891 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 892 lp = &tptr->atios; 893 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 894 lp = &tptr->inots; 895 } else { 896 rls_lun_statep(isp, tptr); 897 return (CAM_UA_ABORT); 898 } 899 curelm = SLIST_FIRST(lp); 900 found = 0; 901 if (curelm == &accb->ccb_h) { 902 found = 1; 903 SLIST_REMOVE_HEAD(lp, sim_links.sle); 904 } else { 905 while(curelm != NULL) { 906 struct ccb_hdr *nextelm; 907 908 nextelm = SLIST_NEXT(curelm, sim_links.sle); 909 if (nextelm == &accb->ccb_h) { 910 found = 1; 911 SLIST_NEXT(curelm, sim_links.sle) = 912 SLIST_NEXT(nextelm, sim_links.sle); 913 break; 914 } 915 curelm = nextelm; 916 } 917 } 918 rls_lun_statep(isp, tptr); 919 if (found) { 920 accb->ccb_h.status = CAM_REQ_ABORTED; 921 return (CAM_REQ_CMP); 922 } 923 return(CAM_PATH_INVALID); 924 } 925 926 static cam_status 927 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb) 928 { 929 void *qe; 930 struct ccb_scsiio *cso = &ccb->csio; 931 u_int16_t *hp, save_handle; 932 u_int16_t nxti, optr; 933 u_int8_t local[QENTRY_LEN]; 934 935 936 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 937 xpt_print_path(ccb->ccb_h.path); 938 printf("Request Queue Overflow in isp_target_start_ctio\n"); 939 return (CAM_RESRC_UNAVAIL); 940 } 941 bzero(local, QENTRY_LEN); 942 943 /* 944 * We're either moving data or completing a command here. 945 */ 946 947 if (IS_FC(isp)) { 948 atio_private_data_t *atp; 949 ct2_entry_t *cto = (ct2_entry_t *) local; 950 951 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 952 cto->ct_header.rqs_entry_count = 1; 953 cto->ct_iid = cso->init_id; 954 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 955 cto->ct_lun = ccb->ccb_h.target_lun; 956 } 957 958 atp = isp_get_atpd(isp, cso->tag_id); 959 if (atp == NULL) { 960 panic("cannot find private data adjunct for tag %x", 961 cso->tag_id); 962 } 963 964 cto->ct_rxid = cso->tag_id; 965 if (cso->dxfer_len == 0) { 966 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 967 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 968 cto->ct_flags |= CT2_SENDSTATUS; 969 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 970 cto->ct_resid = 971 atp->orig_datalen - atp->bytes_xfered; 972 } 973 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 974 int m = min(cso->sense_len, MAXRESPLEN); 975 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m); 976 cto->rsp.m1.ct_senselen = m; 977 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 978 } 979 } else { 980 cto->ct_flags |= CT2_FLAG_MODE0; 981 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 982 cto->ct_flags |= CT2_DATA_IN; 983 } else { 984 cto->ct_flags |= CT2_DATA_OUT; 985 } 986 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 987 cto->ct_flags |= CT2_SENDSTATUS; 988 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 989 cto->ct_resid = 990 atp->orig_datalen - 991 (atp->bytes_xfered + cso->dxfer_len); 992 } else { 993 atp->last_xframt = cso->dxfer_len; 994 } 995 /* 996 * If we're sending data and status back together, 997 * we can't also send back sense data as well. 998 */ 999 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1000 } 1001 1002 if (cto->ct_flags & CT2_SENDSTATUS) { 1003 isp_prt(isp, ISP_LOGTDEBUG0, 1004 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1005 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1006 cso->dxfer_len, cto->ct_resid); 1007 cto->ct_flags |= CT2_CCINCR; 1008 } 1009 cto->ct_timeout = 10; 1010 hp = &cto->ct_syshandle; 1011 } else { 1012 ct_entry_t *cto = (ct_entry_t *) local; 1013 1014 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1015 cto->ct_header.rqs_entry_count = 1; 1016 cto->ct_iid = cso->init_id; 1017 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1018 cto->ct_tgt = ccb->ccb_h.target_id; 1019 cto->ct_lun = ccb->ccb_h.target_lun; 1020 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1021 if (AT_HAS_TAG(cso->tag_id)) { 1022 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id); 1023 cto->ct_flags |= CT_TQAE; 1024 } 1025 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1026 cto->ct_flags |= CT_NODISC; 1027 } 1028 if (cso->dxfer_len == 0) { 1029 cto->ct_flags |= CT_NO_DATA; 1030 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1031 cto->ct_flags |= CT_DATA_IN; 1032 } else { 1033 cto->ct_flags |= CT_DATA_OUT; 1034 } 1035 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1036 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1037 cto->ct_scsi_status = cso->scsi_status; 1038 cto->ct_resid = cso->resid; 1039 isp_prt(isp, ISP_LOGTDEBUG0, 1040 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1041 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1042 cso->tag_id); 1043 } 1044 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1045 cto->ct_timeout = 10; 1046 hp = &cto->ct_syshandle; 1047 } 1048 1049 if (isp_save_xs(isp, (XS_T *)ccb, hp)) { 1050 xpt_print_path(ccb->ccb_h.path); 1051 printf("No XFLIST pointers for isp_target_start_ctio\n"); 1052 return (CAM_RESRC_UNAVAIL); 1053 } 1054 1055 1056 /* 1057 * Call the dma setup routines for this entry (and any subsequent 1058 * CTIOs) if there's data to move, and then tell the f/w it's got 1059 * new things to play with. As with isp_start's usage of DMA setup, 1060 * any swizzling is done in the machine dependent layer. Because 1061 * of this, we put the request onto the queue area first in native 1062 * format. 1063 */ 1064 1065 save_handle = *hp; 1066 1067 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1068 case CMD_QUEUED: 1069 ISP_ADD_REQUEST(isp, nxti); 1070 return (CAM_REQ_INPROG); 1071 1072 case CMD_EAGAIN: 1073 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1074 isp_destroy_handle(isp, save_handle); 1075 return (CAM_RESRC_UNAVAIL); 1076 1077 default: 1078 isp_destroy_handle(isp, save_handle); 1079 return (XS_ERR(ccb)); 1080 } 1081 } 1082 1083 static void 1084 isp_refire_putback_atio(void *arg) 1085 { 1086 int s = splcam(); 1087 isp_target_putback_atio(arg); 1088 splx(s); 1089 } 1090 1091 static void 1092 isp_target_putback_atio(union ccb *ccb) 1093 { 1094 struct ispsoftc *isp; 1095 struct ccb_scsiio *cso; 1096 u_int16_t nxti, optr; 1097 void *qe; 1098 1099 isp = XS_ISP(ccb); 1100 1101 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1102 (void) timeout(isp_refire_putback_atio, ccb, 10); 1103 isp_prt(isp, ISP_LOGWARN, 1104 "isp_target_putback_atio: Request Queue Overflow"); 1105 return; 1106 } 1107 bzero(qe, QENTRY_LEN); 1108 cso = &ccb->csio; 1109 if (IS_FC(isp)) { 1110 at2_entry_t local, *at = &local; 1111 MEMZERO(at, sizeof (at2_entry_t)); 1112 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1113 at->at_header.rqs_entry_count = 1; 1114 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1115 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1116 } else { 1117 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1118 } 1119 at->at_status = CT_OK; 1120 at->at_rxid = cso->tag_id; 1121 isp_put_atio2(isp, at, qe); 1122 } else { 1123 at_entry_t local, *at = &local; 1124 MEMZERO(at, sizeof (at_entry_t)); 1125 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1126 at->at_header.rqs_entry_count = 1; 1127 at->at_iid = cso->init_id; 1128 at->at_iid |= XS_CHANNEL(ccb) << 7; 1129 at->at_tgt = cso->ccb_h.target_id; 1130 at->at_lun = cso->ccb_h.target_lun; 1131 at->at_status = CT_OK; 1132 at->at_tag_val = AT_GET_TAG(cso->tag_id); 1133 at->at_handle = AT_GET_HANDLE(cso->tag_id); 1134 isp_put_atio(isp, at, qe); 1135 } 1136 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); 1137 ISP_ADD_REQUEST(isp, nxti); 1138 isp_complete_ctio(ccb); 1139 } 1140 1141 static void 1142 isp_complete_ctio(union ccb *ccb) 1143 { 1144 struct ispsoftc *isp = XS_ISP(ccb); 1145 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1146 ccb->ccb_h.status |= CAM_REQ_CMP; 1147 } 1148 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1149 if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) { 1150 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE; 1151 if (isp->isp_osinfo.simqfrozen == 0) { 1152 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 1153 isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq"); 1154 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1155 } else { 1156 isp_prt(isp, ISP_LOGWARN, "ctio->devqfrozen"); 1157 } 1158 } else { 1159 isp_prt(isp, ISP_LOGWARN, 1160 "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen); 1161 } 1162 } 1163 xpt_done(ccb); 1164 } 1165 1166 /* 1167 * Handle ATIO stuff that the generic code can't. 1168 * This means handling CDBs. 1169 */ 1170 1171 static int 1172 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep) 1173 { 1174 tstate_t *tptr; 1175 int status, bus, iswildcard; 1176 struct ccb_accept_tio *atiop; 1177 1178 /* 1179 * The firmware status (except for the QLTM_SVALID bit) 1180 * indicates why this ATIO was sent to us. 1181 * 1182 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1183 * 1184 * If the DISCONNECTS DISABLED bit is set in the flags field, 1185 * we're still connected on the SCSI bus. 1186 */ 1187 status = aep->at_status; 1188 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 1189 /* 1190 * Bus Phase Sequence error. We should have sense data 1191 * suggested by the f/w. I'm not sure quite yet what 1192 * to do about this for CAM. 1193 */ 1194 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1195 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1196 return (0); 1197 } 1198 if ((status & ~QLTM_SVALID) != AT_CDB) { 1199 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1200 status); 1201 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1202 return (0); 1203 } 1204 1205 bus = GET_BUS_VAL(aep->at_iid); 1206 tptr = get_lun_statep(isp, bus, aep->at_lun); 1207 if (tptr == NULL) { 1208 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1209 iswildcard = 1; 1210 } else { 1211 iswildcard = 0; 1212 } 1213 1214 if (tptr == NULL) { 1215 /* 1216 * Because we can't autofeed sense data back with 1217 * a command for parallel SCSI, we can't give back 1218 * a CHECK CONDITION. We'll give back a BUSY status 1219 * instead. This works out okay because the only 1220 * time we should, in fact, get this, is in the 1221 * case that somebody configured us without the 1222 * blackhole driver, so they get what they deserve. 1223 */ 1224 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1225 return (0); 1226 } 1227 1228 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1229 if (atiop == NULL) { 1230 /* 1231 * Because we can't autofeed sense data back with 1232 * a command for parallel SCSI, we can't give back 1233 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1234 * instead. This works out okay because the only time we 1235 * should, in fact, get this, is in the case that we've 1236 * run out of ATIOS. 1237 */ 1238 xpt_print_path(tptr->owner); 1239 isp_prt(isp, ISP_LOGWARN, 1240 "no ATIOS for lun %d from initiator %d on channel %d", 1241 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1242 if (aep->at_flags & AT_TQAE) 1243 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1244 else 1245 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1246 rls_lun_statep(isp, tptr); 1247 return (0); 1248 } 1249 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1250 if (iswildcard) { 1251 atiop->ccb_h.target_id = aep->at_tgt; 1252 atiop->ccb_h.target_lun = aep->at_lun; 1253 } 1254 if (aep->at_flags & AT_NODISC) { 1255 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1256 } else { 1257 atiop->ccb_h.flags = 0; 1258 } 1259 1260 if (status & QLTM_SVALID) { 1261 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1262 atiop->sense_len = amt; 1263 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1264 } else { 1265 atiop->sense_len = 0; 1266 } 1267 1268 atiop->init_id = GET_IID_VAL(aep->at_iid); 1269 atiop->cdb_len = aep->at_cdblen; 1270 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1271 atiop->ccb_h.status = CAM_CDB_RECVD; 1272 /* 1273 * Construct a tag 'id' based upon tag value (which may be 0..255) 1274 * and the handle (which we have to preserve). 1275 */ 1276 AT_MAKE_TAGID(atiop->tag_id, aep); 1277 if (aep->at_flags & AT_TQAE) { 1278 atiop->tag_action = aep->at_tag_type; 1279 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1280 } 1281 xpt_done((union ccb*)atiop); 1282 isp_prt(isp, ISP_LOGTDEBUG0, 1283 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", 1284 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), 1285 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, 1286 aep->at_tag_type, (aep->at_flags & AT_NODISC)? 1287 "nondisc" : "disconnecting"); 1288 rls_lun_statep(isp, tptr); 1289 return (0); 1290 } 1291 1292 static int 1293 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep) 1294 { 1295 lun_id_t lun; 1296 tstate_t *tptr; 1297 struct ccb_accept_tio *atiop; 1298 atio_private_data_t *atp; 1299 1300 /* 1301 * The firmware status (except for the QLTM_SVALID bit) 1302 * indicates why this ATIO was sent to us. 1303 * 1304 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1305 */ 1306 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1307 isp_prt(isp, ISP_LOGWARN, 1308 "bogus atio (0x%x) leaked to platform", aep->at_status); 1309 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1310 return (0); 1311 } 1312 1313 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1314 lun = aep->at_scclun; 1315 } else { 1316 lun = aep->at_lun; 1317 } 1318 tptr = get_lun_statep(isp, 0, lun); 1319 if (tptr == NULL) { 1320 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1321 } 1322 1323 if (tptr == NULL) { 1324 /* 1325 * What we'd like to know is whether or not we have a listener 1326 * upstream that really hasn't configured yet. If we do, then 1327 * we can give a more sensible reply here. If not, then we can 1328 * reject this out of hand. 1329 * 1330 * Choices for what to send were 1331 * 1332 * Not Ready, Unit Not Self-Configured Yet 1333 * (0x2,0x3e,0x00) 1334 * 1335 * for the former and 1336 * 1337 * Illegal Request, Logical Unit Not Supported 1338 * (0x5,0x25,0x00) 1339 * 1340 * for the latter. 1341 * 1342 * We used to decide whether there was at least one listener 1343 * based upon whether the black hole driver was configured. 1344 * However, recent config(8) changes have made this hard to do 1345 * at this time. 1346 * 1347 */ 1348 u_int32_t ccode = SCSI_STATUS_BUSY; 1349 1350 /* 1351 * Because we can't autofeed sense data back with 1352 * a command for parallel SCSI, we can't give back 1353 * a CHECK CONDITION. We'll give back a BUSY status 1354 * instead. This works out okay because the only 1355 * time we should, in fact, get this, is in the 1356 * case that somebody configured us without the 1357 * blackhole driver, so they get what they deserve. 1358 */ 1359 isp_endcmd(isp, aep, ccode, 0); 1360 return (0); 1361 } 1362 1363 atp = isp_get_atpd(isp, 0); 1364 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1365 if (atiop == NULL || atp == NULL) { 1366 /* 1367 * Because we can't autofeed sense data back with 1368 * a command for parallel SCSI, we can't give back 1369 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1370 * instead. This works out okay because the only time we 1371 * should, in fact, get this, is in the case that we've 1372 * run out of ATIOS. 1373 */ 1374 xpt_print_path(tptr->owner); 1375 isp_prt(isp, ISP_LOGWARN, 1376 "no ATIOS for lun %d from initiator %d", lun, aep->at_iid); 1377 rls_lun_statep(isp, tptr); 1378 if (aep->at_flags & AT_TQAE) 1379 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1380 else 1381 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1382 return (0); 1383 } 1384 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1385 1386 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1387 atiop->ccb_h.target_id = 1388 ((fcparam *)isp->isp_param)->isp_loopid; 1389 atiop->ccb_h.target_lun = lun; 1390 } 1391 /* 1392 * We don't get 'suggested' sense data as we do with SCSI cards. 1393 */ 1394 atiop->sense_len = 0; 1395 1396 atiop->init_id = aep->at_iid; 1397 atiop->cdb_len = ATIO2_CDBLEN; 1398 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1399 atiop->ccb_h.status = CAM_CDB_RECVD; 1400 atiop->tag_id = aep->at_rxid; 1401 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1402 case ATIO2_TC_ATTR_SIMPLEQ: 1403 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1404 break; 1405 case ATIO2_TC_ATTR_HEADOFQ: 1406 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1407 break; 1408 case ATIO2_TC_ATTR_ORDERED: 1409 atiop->tag_action = MSG_ORDERED_Q_TAG; 1410 break; 1411 case ATIO2_TC_ATTR_ACAQ: /* ?? */ 1412 case ATIO2_TC_ATTR_UNTAGGED: 1413 default: 1414 atiop->tag_action = 0; 1415 break; 1416 } 1417 if (atiop->tag_action != 0) { 1418 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1419 } 1420 1421 atp->tag = atiop->tag_id; 1422 atp->orig_datalen = aep->at_datalen; 1423 atp->last_xframt = 0; 1424 atp->bytes_xfered = 0; 1425 1426 xpt_done((union ccb*)atiop); 1427 isp_prt(isp, ISP_LOGTDEBUG0, 1428 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u", 1429 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid, 1430 lun, aep->at_taskflags, aep->at_datalen); 1431 rls_lun_statep(isp, tptr); 1432 return (0); 1433 } 1434 1435 static int 1436 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg) 1437 { 1438 union ccb *ccb; 1439 int sentstatus, ok, notify_cam, resid = 0; 1440 u_int16_t tval; 1441 1442 /* 1443 * CTIO and CTIO2 are close enough.... 1444 */ 1445 1446 ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle); 1447 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio")); 1448 isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle); 1449 1450 if (IS_FC(isp)) { 1451 ct2_entry_t *ct = arg; 1452 sentstatus = ct->ct_flags & CT2_SENDSTATUS; 1453 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1454 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { 1455 ccb->ccb_h.status |= CAM_SENT_SENSE; 1456 } 1457 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1458 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { 1459 atio_private_data_t *atp = 1460 isp_get_atpd(isp, ct->ct_rxid); 1461 if (atp == NULL) { 1462 panic("cannot find adjunct after I/O"); 1463 } 1464 resid = ct->ct_resid; 1465 atp->bytes_xfered += (atp->last_xframt - resid); 1466 atp->last_xframt = 0; 1467 if (sentstatus) { 1468 atp->tag = 0; 1469 } 1470 } 1471 isp_prt(isp, ISP_LOGTDEBUG0, 1472 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", 1473 ct->ct_rxid, ct->ct_status, ct->ct_flags, 1474 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, 1475 resid, sentstatus? "FIN" : "MID"); 1476 tval = ct->ct_rxid; 1477 } else { 1478 ct_entry_t *ct = arg; 1479 sentstatus = ct->ct_flags & CT_SENDSTATUS; 1480 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1481 /* 1482 * We *ought* to be able to get back to the original ATIO 1483 * here, but for some reason this gets lost. It's just as 1484 * well because it's squirrelled away as part of periph 1485 * private data. 1486 * 1487 * We can live without it as long as we continue to use 1488 * the auto-replenish feature for CTIOs. 1489 */ 1490 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1491 if (ct->ct_status & QLTM_SVALID) { 1492 char *sp = (char *)ct; 1493 sp += CTIO_SENSE_OFFSET; 1494 ccb->csio.sense_len = 1495 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); 1496 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); 1497 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1498 } 1499 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 1500 resid = ct->ct_resid; 1501 } 1502 isp_prt(isp, ISP_LOGTDEBUG0, 1503 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", 1504 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, 1505 ct->ct_status, ct->ct_flags, resid, 1506 sentstatus? "FIN" : "MID"); 1507 tval = ct->ct_fwhandle; 1508 } 1509 ccb->csio.resid += resid; 1510 1511 /* 1512 * We're here either because intermediate data transfers are done 1513 * and/or the final status CTIO (which may have joined with a 1514 * Data Transfer) is done. 1515 * 1516 * In any case, for this platform, the upper layers figure out 1517 * what to do next, so all we do here is collect status and 1518 * pass information along. Any DMA handles have already been 1519 * freed. 1520 */ 1521 if (notify_cam == 0) { 1522 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1523 return (0); 1524 } 1525 1526 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1527 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1528 1529 if (!ok) { 1530 isp_target_putback_atio(ccb); 1531 } else { 1532 isp_complete_ctio(ccb); 1533 1534 } 1535 return (0); 1536 } 1537 #endif 1538 1539 static void 1540 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg) 1541 { 1542 struct cam_sim *sim; 1543 struct ispsoftc *isp; 1544 1545 sim = (struct cam_sim *)cbarg; 1546 isp = (struct ispsoftc *) cam_sim_softc(sim); 1547 switch (code) { 1548 case AC_LOST_DEVICE: 1549 if (IS_SCSI(isp)) { 1550 u_int16_t oflags, nflags; 1551 sdparam *sdp = isp->isp_param; 1552 int tgt; 1553 1554 tgt = xpt_path_target_id(path); 1555 ISP_LOCK(isp); 1556 sdp += cam_sim_bus(sim); 1557 nflags = sdp->isp_devparam[tgt].nvrm_flags; 1558 #ifndef ISP_TARGET_MODE 1559 nflags &= DPARM_SAFE_DFLT; 1560 if (isp->isp_loaded_fw) { 1561 nflags |= DPARM_NARROW | DPARM_ASYNC; 1562 } 1563 #else 1564 nflags = DPARM_DEFAULT; 1565 #endif 1566 oflags = sdp->isp_devparam[tgt].goal_flags; 1567 sdp->isp_devparam[tgt].goal_flags = nflags; 1568 sdp->isp_devparam[tgt].dev_update = 1; 1569 isp->isp_update |= (1 << cam_sim_bus(sim)); 1570 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL); 1571 sdp->isp_devparam[tgt].goal_flags = oflags; 1572 ISP_UNLOCK(isp); 1573 } 1574 break; 1575 default: 1576 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); 1577 break; 1578 } 1579 } 1580 1581 static void 1582 isp_poll(struct cam_sim *sim) 1583 { 1584 struct ispsoftc *isp = cam_sim_softc(sim); 1585 u_int16_t isr, sema, mbox; 1586 1587 ISP_LOCK(isp); 1588 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 1589 isp_intr(isp, isr, sema, mbox); 1590 } 1591 ISP_UNLOCK(isp); 1592 } 1593 1594 #if 0 1595 static void 1596 isp_relsim(void *arg) 1597 { 1598 struct ispsoftc *isp = arg; 1599 ISP_LOCK(isp); 1600 if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) { 1601 int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED; 1602 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED; 1603 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 1604 xpt_release_simq(isp->isp_sim, 1); 1605 isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq"); 1606 } 1607 } 1608 ISP_UNLOCK(isp); 1609 } 1610 #endif 1611 1612 static void 1613 isp_watchdog(void *arg) 1614 { 1615 XS_T *xs = arg; 1616 struct ispsoftc *isp = XS_ISP(xs); 1617 u_int32_t handle; 1618 1619 /* 1620 * We've decided this command is dead. Make sure we're not trying 1621 * to kill a command that's already dead by getting it's handle and 1622 * and seeing whether it's still alive. 1623 */ 1624 ISP_LOCK(isp); 1625 handle = isp_find_handle(isp, xs); 1626 if (handle) { 1627 u_int16_t isr, sema, mbox; 1628 1629 if (XS_CMD_DONE_P(xs)) { 1630 isp_prt(isp, ISP_LOGDEBUG1, 1631 "watchdog found done cmd (handle 0x%x)", handle); 1632 ISP_UNLOCK(isp); 1633 return; 1634 } 1635 1636 if (XS_CMD_WDOG_P(xs)) { 1637 isp_prt(isp, ISP_LOGDEBUG2, 1638 "recursive watchdog (handle 0x%x)", handle); 1639 ISP_UNLOCK(isp); 1640 return; 1641 } 1642 1643 XS_CMD_S_WDOG(xs); 1644 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 1645 isp_intr(isp, isr, sema, mbox); 1646 } 1647 if (XS_CMD_DONE_P(xs)) { 1648 isp_prt(isp, ISP_LOGDEBUG2, 1649 "watchdog cleanup for handle 0x%x", handle); 1650 xpt_done((union ccb *) xs); 1651 } else if (XS_CMD_GRACE_P(xs)) { 1652 /* 1653 * Make sure the command is *really* dead before we 1654 * release the handle (and DMA resources) for reuse. 1655 */ 1656 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg); 1657 1658 /* 1659 * After this point, the comamnd is really dead. 1660 */ 1661 if (XS_XFRLEN(xs)) { 1662 ISP_DMAFREE(isp, xs, handle); 1663 } 1664 isp_destroy_handle(isp, handle); 1665 xpt_print_path(xs->ccb_h.path); 1666 isp_prt(isp, ISP_LOGWARN, 1667 "watchdog timeout for handle %x", handle); 1668 XS_SETERR(xs, CAM_CMD_TIMEOUT); 1669 XS_CMD_C_WDOG(xs); 1670 isp_done(xs); 1671 } else { 1672 u_int16_t nxti, optr; 1673 ispreq_t local, *mp= &local, *qe; 1674 1675 XS_CMD_C_WDOG(xs); 1676 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 1677 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) { 1678 ISP_UNLOCK(isp); 1679 return; 1680 } 1681 XS_CMD_S_GRACE(xs); 1682 MEMZERO((void *) mp, sizeof (*mp)); 1683 mp->req_header.rqs_entry_count = 1; 1684 mp->req_header.rqs_entry_type = RQSTYPE_MARKER; 1685 mp->req_modifier = SYNC_ALL; 1686 mp->req_target = XS_CHANNEL(xs) << 7; 1687 isp_put_request(isp, mp, qe); 1688 ISP_ADD_REQUEST(isp, nxti); 1689 } 1690 } else { 1691 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command"); 1692 } 1693 ISP_UNLOCK(isp); 1694 } 1695 1696 static int isp_ktmature = 0; 1697 1698 static void 1699 isp_kthread(void *arg) 1700 { 1701 int wasfrozen; 1702 struct ispsoftc *isp = arg; 1703 1704 mtx_lock(&isp->isp_lock); 1705 for (;;) { 1706 isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state"); 1707 while (isp_fc_runstate(isp, 2 * 1000000) != 0) { 1708 if (FCPARAM(isp)->isp_fwstate != FW_READY || 1709 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) { 1710 if (FCPARAM(isp)->loop_seen_once == 0 || 1711 isp_ktmature == 0) { 1712 break; 1713 } 1714 } 1715 msleep(isp_kthread, &isp->isp_lock, 1716 PRIBIO, "isp_fcthrd", hz); 1717 } 1718 /* 1719 * Even if we didn't get good loop state we may be 1720 * unfreezing the SIMQ so that we can kill off 1721 * commands (if we've never seen loop before, e.g.) 1722 */ 1723 isp_ktmature = 1; 1724 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 1725 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 1726 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 1727 isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq"); 1728 ISPLOCK_2_CAMLOCK(isp); 1729 xpt_release_simq(isp->isp_sim, 1); 1730 CAMLOCK_2_ISPLOCK(isp); 1731 } 1732 cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock); 1733 } 1734 } 1735 1736 static void 1737 isp_action(struct cam_sim *sim, union ccb *ccb) 1738 { 1739 int bus, tgt, error; 1740 struct ispsoftc *isp; 1741 struct ccb_trans_settings *cts; 1742 1743 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 1744 1745 isp = (struct ispsoftc *)cam_sim_softc(sim); 1746 ccb->ccb_h.sim_priv.entries[0].field = 0; 1747 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 1748 if (isp->isp_state != ISP_RUNSTATE && 1749 ccb->ccb_h.func_code == XPT_SCSI_IO) { 1750 CAMLOCK_2_ISPLOCK(isp); 1751 isp_init(isp); 1752 if (isp->isp_state != ISP_INITSTATE) { 1753 ISP_UNLOCK(isp); 1754 /* 1755 * Lie. Say it was a selection timeout. 1756 */ 1757 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 1758 xpt_freeze_devq(ccb->ccb_h.path, 1); 1759 xpt_done(ccb); 1760 return; 1761 } 1762 isp->isp_state = ISP_RUNSTATE; 1763 ISPLOCK_2_CAMLOCK(isp); 1764 } 1765 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 1766 1767 1768 switch (ccb->ccb_h.func_code) { 1769 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 1770 /* 1771 * Do a couple of preliminary checks... 1772 */ 1773 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 1774 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 1775 ccb->ccb_h.status = CAM_REQ_INVALID; 1776 xpt_done(ccb); 1777 break; 1778 } 1779 } 1780 #ifdef DIAGNOSTIC 1781 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 1782 ccb->ccb_h.status = CAM_PATH_INVALID; 1783 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 1784 ccb->ccb_h.status = CAM_PATH_INVALID; 1785 } 1786 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 1787 isp_prt(isp, ISP_LOGERR, 1788 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO", 1789 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 1790 xpt_done(ccb); 1791 break; 1792 } 1793 #endif 1794 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 1795 CAMLOCK_2_ISPLOCK(isp); 1796 error = isp_start((XS_T *) ccb); 1797 switch (error) { 1798 case CMD_QUEUED: 1799 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1800 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1801 u_int64_t ticks = (u_int64_t) hz; 1802 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) 1803 ticks = 60 * 1000 * ticks; 1804 else 1805 ticks = ccb->ccb_h.timeout * hz; 1806 ticks = ((ticks + 999) / 1000) + hz + hz; 1807 if (ticks >= 0x80000000) { 1808 isp_prt(isp, ISP_LOGERR, 1809 "timeout overflow"); 1810 ticks = 0x80000000; 1811 } 1812 ccb->ccb_h.timeout_ch = timeout(isp_watchdog, 1813 (caddr_t)ccb, (int)ticks); 1814 } else { 1815 callout_handle_init(&ccb->ccb_h.timeout_ch); 1816 } 1817 ISPLOCK_2_CAMLOCK(isp); 1818 break; 1819 case CMD_RQLATER: 1820 /* 1821 * This can only happen for Fibre Channel 1822 */ 1823 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 1824 if (FCPARAM(isp)->loop_seen_once == 0 && isp_ktmature) { 1825 ISPLOCK_2_CAMLOCK(isp); 1826 XS_SETERR(ccb, CAM_SEL_TIMEOUT); 1827 xpt_done(ccb); 1828 break; 1829 } 1830 cv_signal(&isp->isp_osinfo.kthread_cv); 1831 if (isp->isp_osinfo.simqfrozen == 0) { 1832 isp_prt(isp, ISP_LOGDEBUG2, 1833 "RQLATER freeze simq"); 1834 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 1835 ISPLOCK_2_CAMLOCK(isp); 1836 xpt_freeze_simq(sim, 1); 1837 } else { 1838 ISPLOCK_2_CAMLOCK(isp); 1839 } 1840 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1841 xpt_done(ccb); 1842 break; 1843 case CMD_EAGAIN: 1844 if (isp->isp_osinfo.simqfrozen == 0) { 1845 xpt_freeze_simq(sim, 1); 1846 isp_prt(isp, ISP_LOGDEBUG2, 1847 "EAGAIN freeze simq"); 1848 } 1849 isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE; 1850 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1851 ISPLOCK_2_CAMLOCK(isp); 1852 xpt_done(ccb); 1853 break; 1854 case CMD_COMPLETE: 1855 isp_done((struct ccb_scsiio *) ccb); 1856 ISPLOCK_2_CAMLOCK(isp); 1857 break; 1858 default: 1859 isp_prt(isp, ISP_LOGERR, 1860 "What's this? 0x%x at %d in file %s", 1861 error, __LINE__, __FILE__); 1862 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 1863 xpt_done(ccb); 1864 ISPLOCK_2_CAMLOCK(isp); 1865 } 1866 break; 1867 1868 #ifdef ISP_TARGET_MODE 1869 case XPT_EN_LUN: /* Enable LUN as a target */ 1870 { 1871 int iok; 1872 CAMLOCK_2_ISPLOCK(isp); 1873 iok = isp->isp_osinfo.intsok; 1874 isp->isp_osinfo.intsok = 0; 1875 isp_en_lun(isp, ccb); 1876 isp->isp_osinfo.intsok = iok; 1877 ISPLOCK_2_CAMLOCK(isp); 1878 xpt_done(ccb); 1879 break; 1880 } 1881 case XPT_NOTIFY_ACK: /* recycle notify ack */ 1882 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 1883 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 1884 { 1885 tstate_t *tptr = 1886 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 1887 if (tptr == NULL) { 1888 ccb->ccb_h.status = CAM_LUN_INVALID; 1889 xpt_done(ccb); 1890 break; 1891 } 1892 ccb->ccb_h.sim_priv.entries[0].field = 0; 1893 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 1894 CAMLOCK_2_ISPLOCK(isp); 1895 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1896 SLIST_INSERT_HEAD(&tptr->atios, 1897 &ccb->ccb_h, sim_links.sle); 1898 } else { 1899 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 1900 sim_links.sle); 1901 } 1902 rls_lun_statep(isp, tptr); 1903 ccb->ccb_h.status = CAM_REQ_INPROG; 1904 ISPLOCK_2_CAMLOCK(isp); 1905 break; 1906 } 1907 case XPT_CONT_TARGET_IO: 1908 { 1909 CAMLOCK_2_ISPLOCK(isp); 1910 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb); 1911 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 1912 if (isp->isp_osinfo.simqfrozen == 0) { 1913 xpt_freeze_simq(sim, 1); 1914 xpt_print_path(ccb->ccb_h.path); 1915 isp_prt(isp, ISP_LOGINFO, 1916 "XPT_CONT_TARGET_IO freeze simq"); 1917 } 1918 isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE; 1919 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1920 ISPLOCK_2_CAMLOCK(isp); 1921 xpt_done(ccb); 1922 } else { 1923 ISPLOCK_2_CAMLOCK(isp); 1924 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1925 } 1926 break; 1927 } 1928 #endif 1929 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 1930 1931 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 1932 tgt = ccb->ccb_h.target_id; 1933 tgt |= (bus << 16); 1934 1935 CAMLOCK_2_ISPLOCK(isp); 1936 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 1937 ISPLOCK_2_CAMLOCK(isp); 1938 if (error) { 1939 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1940 } else { 1941 ccb->ccb_h.status = CAM_REQ_CMP; 1942 } 1943 xpt_done(ccb); 1944 break; 1945 case XPT_ABORT: /* Abort the specified CCB */ 1946 { 1947 union ccb *accb = ccb->cab.abort_ccb; 1948 CAMLOCK_2_ISPLOCK(isp); 1949 switch (accb->ccb_h.func_code) { 1950 #ifdef ISP_TARGET_MODE 1951 case XPT_ACCEPT_TARGET_IO: 1952 case XPT_IMMED_NOTIFY: 1953 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 1954 break; 1955 case XPT_CONT_TARGET_IO: 1956 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 1957 ccb->ccb_h.status = CAM_UA_ABORT; 1958 break; 1959 #endif 1960 case XPT_SCSI_IO: 1961 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 1962 if (error) { 1963 ccb->ccb_h.status = CAM_UA_ABORT; 1964 } else { 1965 ccb->ccb_h.status = CAM_REQ_CMP; 1966 } 1967 break; 1968 default: 1969 ccb->ccb_h.status = CAM_REQ_INVALID; 1970 break; 1971 } 1972 ISPLOCK_2_CAMLOCK(isp); 1973 xpt_done(ccb); 1974 break; 1975 } 1976 #ifdef CAM_NEW_TRAN_CODE 1977 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 1978 #else 1979 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 1980 #endif 1981 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 1982 cts = &ccb->cts; 1983 if (!IS_CURRENT_SETTINGS(cts)) { 1984 ccb->ccb_h.status = CAM_REQ_INVALID; 1985 xpt_done(ccb); 1986 break; 1987 } 1988 tgt = cts->ccb_h.target_id; 1989 CAMLOCK_2_ISPLOCK(isp); 1990 if (IS_SCSI(isp)) { 1991 #ifndef CAM_NEW_TRAN_CODE 1992 sdparam *sdp = isp->isp_param; 1993 u_int16_t *dptr; 1994 1995 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 1996 1997 sdp += bus; 1998 /* 1999 * We always update (internally) from goal_flags 2000 * so any request to change settings just gets 2001 * vectored to that location. 2002 */ 2003 dptr = &sdp->isp_devparam[tgt].goal_flags; 2004 2005 /* 2006 * Note that these operations affect the 2007 * the goal flags (goal_flags)- not 2008 * the current state flags. Then we mark 2009 * things so that the next operation to 2010 * this HBA will cause the update to occur. 2011 */ 2012 if (cts->valid & CCB_TRANS_DISC_VALID) { 2013 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2014 *dptr |= DPARM_DISC; 2015 } else { 2016 *dptr &= ~DPARM_DISC; 2017 } 2018 } 2019 if (cts->valid & CCB_TRANS_TQ_VALID) { 2020 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2021 *dptr |= DPARM_TQING; 2022 } else { 2023 *dptr &= ~DPARM_TQING; 2024 } 2025 } 2026 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2027 switch (cts->bus_width) { 2028 case MSG_EXT_WDTR_BUS_16_BIT: 2029 *dptr |= DPARM_WIDE; 2030 break; 2031 default: 2032 *dptr &= ~DPARM_WIDE; 2033 } 2034 } 2035 /* 2036 * Any SYNC RATE of nonzero and SYNC_OFFSET 2037 * of nonzero will cause us to go to the 2038 * selected (from NVRAM) maximum value for 2039 * this device. At a later point, we'll 2040 * allow finer control. 2041 */ 2042 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2043 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2044 (cts->sync_offset > 0)) { 2045 *dptr |= DPARM_SYNC; 2046 } else { 2047 *dptr &= ~DPARM_SYNC; 2048 } 2049 *dptr |= DPARM_SAFE_DFLT; 2050 #else 2051 struct ccb_trans_settings_scsi *scsi = 2052 &cts->proto_specific.scsi; 2053 struct ccb_trans_settings_spi *spi = 2054 &cts->xport_specific.spi; 2055 sdparam *sdp = isp->isp_param; 2056 u_int16_t *dptr; 2057 2058 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2059 sdp += bus; 2060 /* 2061 * We always update (internally) from goal_flags 2062 * so any request to change settings just gets 2063 * vectored to that location. 2064 */ 2065 dptr = &sdp->isp_devparam[tgt].goal_flags; 2066 2067 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2068 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2069 *dptr |= DPARM_DISC; 2070 else 2071 *dptr &= ~DPARM_DISC; 2072 } 2073 2074 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2075 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2076 *dptr |= DPARM_TQING; 2077 else 2078 *dptr &= ~DPARM_TQING; 2079 } 2080 2081 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2082 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2083 *dptr |= DPARM_WIDE; 2084 else 2085 *dptr &= ~DPARM_WIDE; 2086 } 2087 2088 /* 2089 * XXX: FIX ME 2090 */ 2091 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2092 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2093 (spi->sync_period && spi->sync_offset)) { 2094 *dptr |= DPARM_SYNC; 2095 /* 2096 * XXX: CHECK FOR LEGALITY 2097 */ 2098 sdp->isp_devparam[tgt].goal_period = 2099 spi->sync_period; 2100 sdp->isp_devparam[tgt].goal_offset = 2101 spi->sync_offset; 2102 } else { 2103 *dptr &= ~DPARM_SYNC; 2104 } 2105 #endif 2106 isp_prt(isp, ISP_LOGDEBUG0, 2107 "SET bus %d targ %d to flags %x off %x per %x", 2108 bus, tgt, sdp->isp_devparam[tgt].goal_flags, 2109 sdp->isp_devparam[tgt].goal_offset, 2110 sdp->isp_devparam[tgt].goal_period); 2111 sdp->isp_devparam[tgt].dev_update = 1; 2112 isp->isp_update |= (1 << bus); 2113 } 2114 ISPLOCK_2_CAMLOCK(isp); 2115 ccb->ccb_h.status = CAM_REQ_CMP; 2116 xpt_done(ccb); 2117 break; 2118 case XPT_GET_TRAN_SETTINGS: 2119 cts = &ccb->cts; 2120 tgt = cts->ccb_h.target_id; 2121 CAMLOCK_2_ISPLOCK(isp); 2122 if (IS_FC(isp)) { 2123 #ifndef CAM_NEW_TRAN_CODE 2124 /* 2125 * a lot of normal SCSI things don't make sense. 2126 */ 2127 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2128 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2129 /* 2130 * How do you measure the width of a high 2131 * speed serial bus? Well, in bytes. 2132 * 2133 * Offset and period make no sense, though, so we set 2134 * (above) a 'base' transfer speed to be gigabit. 2135 */ 2136 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2137 #else 2138 fcparam *fcp = isp->isp_param; 2139 struct ccb_trans_settings_fc *fc = 2140 &cts->xport_specific.fc; 2141 2142 cts->protocol = PROTO_SCSI; 2143 cts->protocol_version = SCSI_REV_2; 2144 cts->transport = XPORT_FC; 2145 cts->transport_version = 0; 2146 2147 fc->valid = CTS_FC_VALID_SPEED; 2148 if (fcp->isp_gbspeed == 2) 2149 fc->bitrate = 200000; 2150 else 2151 fc->bitrate = 100000; 2152 if (tgt > 0 && tgt < MAX_FC_TARG) { 2153 struct lportdb *lp = &fcp->portdb[tgt]; 2154 fc->wwnn = lp->node_wwn; 2155 fc->wwpn = lp->port_wwn; 2156 fc->port = lp->portid; 2157 fc->valid |= CTS_FC_VALID_WWNN | 2158 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2159 } 2160 #endif 2161 } else { 2162 #ifdef CAM_NEW_TRAN_CODE 2163 struct ccb_trans_settings_scsi *scsi = 2164 &cts->proto_specific.scsi; 2165 struct ccb_trans_settings_spi *spi = 2166 &cts->xport_specific.spi; 2167 #endif 2168 sdparam *sdp = isp->isp_param; 2169 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2170 u_int16_t dval, pval, oval; 2171 2172 sdp += bus; 2173 2174 if (IS_CURRENT_SETTINGS(cts)) { 2175 sdp->isp_devparam[tgt].dev_refresh = 1; 2176 isp->isp_update |= (1 << bus); 2177 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2178 NULL); 2179 dval = sdp->isp_devparam[tgt].actv_flags; 2180 oval = sdp->isp_devparam[tgt].actv_offset; 2181 pval = sdp->isp_devparam[tgt].actv_period; 2182 } else { 2183 dval = sdp->isp_devparam[tgt].nvrm_flags; 2184 oval = sdp->isp_devparam[tgt].nvrm_offset; 2185 pval = sdp->isp_devparam[tgt].nvrm_period; 2186 } 2187 2188 #ifndef CAM_NEW_TRAN_CODE 2189 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2190 2191 if (dval & DPARM_DISC) { 2192 cts->flags |= CCB_TRANS_DISC_ENB; 2193 } 2194 if (dval & DPARM_TQING) { 2195 cts->flags |= CCB_TRANS_TAG_ENB; 2196 } 2197 if (dval & DPARM_WIDE) { 2198 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2199 } else { 2200 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2201 } 2202 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2203 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2204 2205 if ((dval & DPARM_SYNC) && oval != 0) { 2206 cts->sync_period = pval; 2207 cts->sync_offset = oval; 2208 cts->valid |= 2209 CCB_TRANS_SYNC_RATE_VALID | 2210 CCB_TRANS_SYNC_OFFSET_VALID; 2211 } 2212 #else 2213 cts->protocol = PROTO_SCSI; 2214 cts->protocol_version = SCSI_REV_2; 2215 cts->transport = XPORT_SPI; 2216 cts->transport_version = 2; 2217 2218 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 2219 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 2220 if (dval & DPARM_DISC) { 2221 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2222 } 2223 if (dval & DPARM_TQING) { 2224 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2225 } 2226 if ((dval & DPARM_SYNC) && oval && pval) { 2227 spi->sync_offset = oval; 2228 spi->sync_period = pval; 2229 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2230 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2231 } 2232 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2233 if (dval & DPARM_WIDE) { 2234 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2235 } else { 2236 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2237 } 2238 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2239 scsi->valid = CTS_SCSI_VALID_TQ; 2240 spi->valid |= CTS_SPI_VALID_DISC; 2241 } else { 2242 scsi->valid = 0; 2243 } 2244 #endif 2245 isp_prt(isp, ISP_LOGDEBUG0, 2246 "GET %s bus %d targ %d to flags %x off %x per %x", 2247 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 2248 bus, tgt, dval, oval, pval); 2249 } 2250 ISPLOCK_2_CAMLOCK(isp); 2251 ccb->ccb_h.status = CAM_REQ_CMP; 2252 xpt_done(ccb); 2253 break; 2254 2255 case XPT_CALC_GEOMETRY: 2256 { 2257 struct ccb_calc_geometry *ccg; 2258 u_int32_t secs_per_cylinder; 2259 u_int32_t size_mb; 2260 2261 ccg = &ccb->ccg; 2262 if (ccg->block_size == 0) { 2263 isp_prt(isp, ISP_LOGERR, 2264 "%d.%d XPT_CALC_GEOMETRY block size 0?", 2265 ccg->ccb_h.target_id, ccg->ccb_h.target_lun); 2266 ccb->ccb_h.status = CAM_REQ_INVALID; 2267 xpt_done(ccb); 2268 break; 2269 } 2270 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2271 if (size_mb > 1024) { 2272 ccg->heads = 255; 2273 ccg->secs_per_track = 63; 2274 } else { 2275 ccg->heads = 64; 2276 ccg->secs_per_track = 32; 2277 } 2278 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2279 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2280 ccb->ccb_h.status = CAM_REQ_CMP; 2281 xpt_done(ccb); 2282 break; 2283 } 2284 case XPT_RESET_BUS: /* Reset the specified bus */ 2285 bus = cam_sim_bus(sim); 2286 CAMLOCK_2_ISPLOCK(isp); 2287 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2288 ISPLOCK_2_CAMLOCK(isp); 2289 if (error) 2290 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2291 else { 2292 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2293 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2294 else if (isp->isp_path != NULL) 2295 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2296 ccb->ccb_h.status = CAM_REQ_CMP; 2297 } 2298 xpt_done(ccb); 2299 break; 2300 2301 case XPT_TERM_IO: /* Terminate the I/O process */ 2302 ccb->ccb_h.status = CAM_REQ_INVALID; 2303 xpt_done(ccb); 2304 break; 2305 2306 case XPT_PATH_INQ: /* Path routing inquiry */ 2307 { 2308 struct ccb_pathinq *cpi = &ccb->cpi; 2309 2310 cpi->version_num = 1; 2311 #ifdef ISP_TARGET_MODE 2312 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2313 #else 2314 cpi->target_sprt = 0; 2315 #endif 2316 cpi->hba_eng_cnt = 0; 2317 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2318 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2319 cpi->bus_id = cam_sim_bus(sim); 2320 if (IS_FC(isp)) { 2321 cpi->hba_misc = PIM_NOBUSRESET; 2322 /* 2323 * Because our loop ID can shift from time to time, 2324 * make our initiator ID out of range of our bus. 2325 */ 2326 cpi->initiator_id = cpi->max_target + 1; 2327 2328 /* 2329 * Set base transfer capabilities for Fibre Channel. 2330 * Technically not correct because we don't know 2331 * what media we're running on top of- but we'll 2332 * look good if we always say 100MB/s. 2333 */ 2334 if (FCPARAM(isp)->isp_gbspeed == 2) 2335 cpi->base_transfer_speed = 200000; 2336 else 2337 cpi->base_transfer_speed = 100000; 2338 cpi->hba_inquiry = PI_TAG_ABLE; 2339 #ifdef CAM_NEW_TRAN_CODE 2340 cpi->transport = XPORT_FC; 2341 cpi->transport_version = 0; /* WHAT'S THIS FOR? */ 2342 #endif 2343 } else { 2344 sdparam *sdp = isp->isp_param; 2345 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2346 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2347 cpi->hba_misc = 0; 2348 cpi->initiator_id = sdp->isp_initiator_id; 2349 cpi->base_transfer_speed = 3300; 2350 #ifdef CAM_NEW_TRAN_CODE 2351 cpi->transport = XPORT_SPI; 2352 cpi->transport_version = 2; /* WHAT'S THIS FOR? */ 2353 #endif 2354 } 2355 #ifdef CAM_NEW_TRAN_CODE 2356 cpi->protocol = PROTO_SCSI; 2357 cpi->protocol_version = SCSI_REV_2; 2358 #endif 2359 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2360 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2361 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2362 cpi->unit_number = cam_sim_unit(sim); 2363 cpi->ccb_h.status = CAM_REQ_CMP; 2364 xpt_done(ccb); 2365 break; 2366 } 2367 default: 2368 ccb->ccb_h.status = CAM_REQ_INVALID; 2369 xpt_done(ccb); 2370 break; 2371 } 2372 } 2373 2374 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 2375 void 2376 isp_done(struct ccb_scsiio *sccb) 2377 { 2378 struct ispsoftc *isp = XS_ISP(sccb); 2379 2380 if (XS_NOERR(sccb)) 2381 XS_SETERR(sccb, CAM_REQ_CMP); 2382 2383 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2384 (sccb->scsi_status != SCSI_STATUS_OK)) { 2385 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2386 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 2387 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2388 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2389 } else { 2390 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2391 } 2392 } 2393 2394 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2395 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2396 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2397 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2398 xpt_freeze_devq(sccb->ccb_h.path, 1); 2399 if (sccb->scsi_status != SCSI_STATUS_OK) 2400 isp_prt(isp, ISP_LOGDEBUG2, 2401 "freeze devq %d.%d %x %x", 2402 sccb->ccb_h.target_id, 2403 sccb->ccb_h.target_lun, sccb->ccb_h.status, 2404 sccb->scsi_status); 2405 } 2406 } 2407 2408 /* 2409 * If we were frozen waiting resources, clear that we were frozen 2410 * waiting for resources. If we are no longer frozen, and the devq 2411 * isn't frozen, mark the completing CCB to have the XPT layer 2412 * release the simq. 2413 */ 2414 if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) { 2415 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE; 2416 if (isp->isp_osinfo.simqfrozen == 0) { 2417 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2418 isp_prt(isp, ISP_LOGDEBUG2, 2419 "isp_done->relsimq"); 2420 sccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2421 } else { 2422 isp_prt(isp, ISP_LOGDEBUG2, 2423 "isp_done->devq frozen"); 2424 } 2425 } else { 2426 isp_prt(isp, ISP_LOGDEBUG2, 2427 "isp_done -> simqfrozen = %x", 2428 isp->isp_osinfo.simqfrozen); 2429 } 2430 } 2431 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 2432 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2433 xpt_print_path(sccb->ccb_h.path); 2434 isp_prt(isp, ISP_LOGINFO, 2435 "cam completion status 0x%x", sccb->ccb_h.status); 2436 } 2437 2438 XS_CMD_S_DONE(sccb); 2439 if (XS_CMD_WDOG_P(sccb) == 0) { 2440 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 2441 if (XS_CMD_GRACE_P(sccb)) { 2442 isp_prt(isp, ISP_LOGDEBUG2, 2443 "finished command on borrowed time"); 2444 } 2445 XS_CMD_S_CLEAR(sccb); 2446 ISPLOCK_2_CAMLOCK(isp); 2447 xpt_done((union ccb *) sccb); 2448 CAMLOCK_2_ISPLOCK(isp); 2449 } 2450 } 2451 2452 int 2453 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg) 2454 { 2455 int bus, rv = 0; 2456 switch (cmd) { 2457 case ISPASYNC_NEW_TGT_PARAMS: 2458 { 2459 #ifdef CAM_NEW_TRAN_CODE 2460 struct ccb_trans_settings_scsi *scsi; 2461 struct ccb_trans_settings_spi *spi; 2462 #endif 2463 int flags, tgt; 2464 sdparam *sdp = isp->isp_param; 2465 struct ccb_trans_settings cts; 2466 struct cam_path *tmppath; 2467 2468 bzero(&cts, sizeof (struct ccb_trans_settings)); 2469 2470 tgt = *((int *)arg); 2471 bus = (tgt >> 16) & 0xffff; 2472 tgt &= 0xffff; 2473 sdp += bus; 2474 ISPLOCK_2_CAMLOCK(isp); 2475 if (xpt_create_path(&tmppath, NULL, 2476 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), 2477 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2478 CAMLOCK_2_ISPLOCK(isp); 2479 isp_prt(isp, ISP_LOGWARN, 2480 "isp_async cannot make temp path for %d.%d", 2481 tgt, bus); 2482 rv = -1; 2483 break; 2484 } 2485 CAMLOCK_2_ISPLOCK(isp); 2486 flags = sdp->isp_devparam[tgt].actv_flags; 2487 #ifdef CAM_NEW_TRAN_CODE 2488 cts.type = CTS_TYPE_CURRENT_SETTINGS; 2489 cts.protocol = PROTO_SCSI; 2490 cts.transport = XPORT_SPI; 2491 2492 scsi = &cts.proto_specific.scsi; 2493 spi = &cts.xport_specific.spi; 2494 2495 if (flags & DPARM_TQING) { 2496 scsi->valid |= CTS_SCSI_VALID_TQ; 2497 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2498 spi->flags |= CTS_SPI_FLAGS_TAG_ENB; 2499 } 2500 2501 if (flags & DPARM_DISC) { 2502 spi->valid |= CTS_SPI_VALID_DISC; 2503 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2504 } 2505 spi->flags |= CTS_SPI_VALID_BUS_WIDTH; 2506 if (flags & DPARM_WIDE) { 2507 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2508 } else { 2509 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2510 } 2511 if (flags & DPARM_SYNC) { 2512 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2513 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2514 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 2515 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 2516 } 2517 #else 2518 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 2519 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2520 if (flags & DPARM_DISC) { 2521 cts.flags |= CCB_TRANS_DISC_ENB; 2522 } 2523 if (flags & DPARM_TQING) { 2524 cts.flags |= CCB_TRANS_TAG_ENB; 2525 } 2526 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 2527 cts.bus_width = (flags & DPARM_WIDE)? 2528 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; 2529 cts.sync_period = sdp->isp_devparam[tgt].actv_period; 2530 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; 2531 if (flags & DPARM_SYNC) { 2532 cts.valid |= 2533 CCB_TRANS_SYNC_RATE_VALID | 2534 CCB_TRANS_SYNC_OFFSET_VALID; 2535 } 2536 #endif 2537 isp_prt(isp, ISP_LOGDEBUG2, 2538 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 2539 bus, tgt, sdp->isp_devparam[tgt].actv_period, 2540 sdp->isp_devparam[tgt].actv_offset, flags); 2541 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 2542 ISPLOCK_2_CAMLOCK(isp); 2543 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 2544 xpt_free_path(tmppath); 2545 CAMLOCK_2_ISPLOCK(isp); 2546 break; 2547 } 2548 case ISPASYNC_BUS_RESET: 2549 bus = *((int *)arg); 2550 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 2551 bus); 2552 if (bus > 0 && isp->isp_path2) { 2553 ISPLOCK_2_CAMLOCK(isp); 2554 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2555 CAMLOCK_2_ISPLOCK(isp); 2556 } else if (isp->isp_path) { 2557 ISPLOCK_2_CAMLOCK(isp); 2558 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2559 CAMLOCK_2_ISPLOCK(isp); 2560 } 2561 break; 2562 case ISPASYNC_LIP: 2563 if (isp->isp_path) { 2564 if (isp->isp_osinfo.simqfrozen == 0) { 2565 isp_prt(isp, ISP_LOGDEBUG0, "LIP freeze simq"); 2566 ISPLOCK_2_CAMLOCK(isp); 2567 xpt_freeze_simq(isp->isp_sim, 1); 2568 CAMLOCK_2_ISPLOCK(isp); 2569 } 2570 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 2571 } 2572 isp_prt(isp, ISP_LOGINFO, "LIP Received"); 2573 break; 2574 case ISPASYNC_LOOP_RESET: 2575 if (isp->isp_path) { 2576 if (isp->isp_osinfo.simqfrozen == 0) { 2577 isp_prt(isp, ISP_LOGDEBUG0, 2578 "Loop Reset freeze simq"); 2579 ISPLOCK_2_CAMLOCK(isp); 2580 xpt_freeze_simq(isp->isp_sim, 1); 2581 CAMLOCK_2_ISPLOCK(isp); 2582 } 2583 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 2584 } 2585 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received"); 2586 break; 2587 case ISPASYNC_LOOP_DOWN: 2588 if (isp->isp_path) { 2589 if (isp->isp_osinfo.simqfrozen == 0) { 2590 isp_prt(isp, ISP_LOGDEBUG0, 2591 "loop down freeze simq"); 2592 ISPLOCK_2_CAMLOCK(isp); 2593 xpt_freeze_simq(isp->isp_sim, 1); 2594 CAMLOCK_2_ISPLOCK(isp); 2595 } 2596 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 2597 } 2598 isp_prt(isp, ISP_LOGINFO, "Loop DOWN"); 2599 break; 2600 case ISPASYNC_LOOP_UP: 2601 /* 2602 * Now we just note that Loop has come up. We don't 2603 * actually do anything because we're waiting for a 2604 * Change Notify before activating the FC cleanup 2605 * thread to look at the state of the loop again. 2606 */ 2607 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 2608 break; 2609 case ISPASYNC_PROMENADE: 2610 { 2611 struct cam_path *tmppath; 2612 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x " 2613 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x"; 2614 static const char *roles[4] = { 2615 "(none)", "Target", "Initiator", "Target/Initiator" 2616 }; 2617 fcparam *fcp = isp->isp_param; 2618 int tgt = *((int *) arg); 2619 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT); 2620 struct lportdb *lp = &fcp->portdb[tgt]; 2621 2622 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid, 2623 roles[lp->roles & 0x3], 2624 (lp->valid)? "Arrived" : "Departed", 2625 (u_int32_t) (lp->port_wwn >> 32), 2626 (u_int32_t) (lp->port_wwn & 0xffffffffLL), 2627 (u_int32_t) (lp->node_wwn >> 32), 2628 (u_int32_t) (lp->node_wwn & 0xffffffffLL)); 2629 2630 ISPLOCK_2_CAMLOCK(isp); 2631 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim), 2632 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2633 CAMLOCK_2_ISPLOCK(isp); 2634 break; 2635 } 2636 /* 2637 * Policy: only announce targets. 2638 */ 2639 if (lp->roles & is_tgt_mask) { 2640 if (lp->valid) { 2641 xpt_async(AC_FOUND_DEVICE, tmppath, NULL); 2642 } else { 2643 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2644 } 2645 } 2646 xpt_free_path(tmppath); 2647 CAMLOCK_2_ISPLOCK(isp); 2648 break; 2649 } 2650 case ISPASYNC_CHANGE_NOTIFY: 2651 if (arg == ISPASYNC_CHANGE_PDB) { 2652 isp_prt(isp, ISP_LOGINFO, 2653 "Port Database Changed"); 2654 } else if (arg == ISPASYNC_CHANGE_SNS) { 2655 isp_prt(isp, ISP_LOGINFO, 2656 "Name Server Database Changed"); 2657 } 2658 cv_signal(&isp->isp_osinfo.kthread_cv); 2659 break; 2660 case ISPASYNC_FABRIC_DEV: 2661 { 2662 int target, lrange; 2663 struct lportdb *lp = NULL; 2664 char *pt; 2665 sns_ganrsp_t *resp = (sns_ganrsp_t *) arg; 2666 u_int32_t portid; 2667 u_int64_t wwpn, wwnn; 2668 fcparam *fcp = isp->isp_param; 2669 2670 portid = 2671 (((u_int32_t) resp->snscb_port_id[0]) << 16) | 2672 (((u_int32_t) resp->snscb_port_id[1]) << 8) | 2673 (((u_int32_t) resp->snscb_port_id[2])); 2674 2675 wwpn = 2676 (((u_int64_t)resp->snscb_portname[0]) << 56) | 2677 (((u_int64_t)resp->snscb_portname[1]) << 48) | 2678 (((u_int64_t)resp->snscb_portname[2]) << 40) | 2679 (((u_int64_t)resp->snscb_portname[3]) << 32) | 2680 (((u_int64_t)resp->snscb_portname[4]) << 24) | 2681 (((u_int64_t)resp->snscb_portname[5]) << 16) | 2682 (((u_int64_t)resp->snscb_portname[6]) << 8) | 2683 (((u_int64_t)resp->snscb_portname[7])); 2684 2685 wwnn = 2686 (((u_int64_t)resp->snscb_nodename[0]) << 56) | 2687 (((u_int64_t)resp->snscb_nodename[1]) << 48) | 2688 (((u_int64_t)resp->snscb_nodename[2]) << 40) | 2689 (((u_int64_t)resp->snscb_nodename[3]) << 32) | 2690 (((u_int64_t)resp->snscb_nodename[4]) << 24) | 2691 (((u_int64_t)resp->snscb_nodename[5]) << 16) | 2692 (((u_int64_t)resp->snscb_nodename[6]) << 8) | 2693 (((u_int64_t)resp->snscb_nodename[7])); 2694 if (portid == 0 || wwpn == 0) { 2695 break; 2696 } 2697 2698 switch (resp->snscb_port_type) { 2699 case 1: 2700 pt = " N_Port"; 2701 break; 2702 case 2: 2703 pt = " NL_Port"; 2704 break; 2705 case 3: 2706 pt = "F/NL_Port"; 2707 break; 2708 case 0x7f: 2709 pt = " Nx_Port"; 2710 break; 2711 case 0x81: 2712 pt = " F_port"; 2713 break; 2714 case 0x82: 2715 pt = " FL_Port"; 2716 break; 2717 case 0x84: 2718 pt = " E_port"; 2719 break; 2720 default: 2721 pt = "?"; 2722 break; 2723 } 2724 isp_prt(isp, ISP_LOGINFO, 2725 "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x", 2726 pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn), 2727 ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn)); 2728 /* 2729 * We're only interested in SCSI_FCP types (for now) 2730 */ 2731 if ((resp->snscb_fc4_types[2] & 1) == 0) { 2732 break; 2733 } 2734 if (fcp->isp_topo != TOPO_F_PORT) 2735 lrange = FC_SNS_ID+1; 2736 else 2737 lrange = 0; 2738 /* 2739 * Is it already in our list? 2740 */ 2741 for (target = lrange; target < MAX_FC_TARG; target++) { 2742 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2743 continue; 2744 } 2745 lp = &fcp->portdb[target]; 2746 if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) { 2747 lp->fabric_dev = 1; 2748 break; 2749 } 2750 } 2751 if (target < MAX_FC_TARG) { 2752 break; 2753 } 2754 for (target = lrange; target < MAX_FC_TARG; target++) { 2755 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2756 continue; 2757 } 2758 lp = &fcp->portdb[target]; 2759 if (lp->port_wwn == 0) { 2760 break; 2761 } 2762 } 2763 if (target == MAX_FC_TARG) { 2764 isp_prt(isp, ISP_LOGWARN, 2765 "no more space for fabric devices"); 2766 break; 2767 } 2768 lp->node_wwn = wwnn; 2769 lp->port_wwn = wwpn; 2770 lp->portid = portid; 2771 lp->fabric_dev = 1; 2772 break; 2773 } 2774 #ifdef ISP_TARGET_MODE 2775 case ISPASYNC_TARGET_MESSAGE: 2776 { 2777 tmd_msg_t *mp = arg; 2778 isp_prt(isp, ISP_LOGALL, 2779 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x", 2780 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt, 2781 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval, 2782 mp->nt_msg[0]); 2783 break; 2784 } 2785 case ISPASYNC_TARGET_EVENT: 2786 { 2787 tmd_event_t *ep = arg; 2788 isp_prt(isp, ISP_LOGALL, 2789 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event); 2790 break; 2791 } 2792 case ISPASYNC_TARGET_ACTION: 2793 switch (((isphdr_t *)arg)->rqs_entry_type) { 2794 default: 2795 isp_prt(isp, ISP_LOGWARN, 2796 "event 0x%x for unhandled target action", 2797 ((isphdr_t *)arg)->rqs_entry_type); 2798 break; 2799 case RQSTYPE_ATIO: 2800 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 2801 break; 2802 case RQSTYPE_ATIO2: 2803 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 2804 break; 2805 case RQSTYPE_CTIO2: 2806 case RQSTYPE_CTIO: 2807 rv = isp_handle_platform_ctio(isp, arg); 2808 break; 2809 case RQSTYPE_ENABLE_LUN: 2810 case RQSTYPE_MODIFY_LUN: 2811 if (IS_DUALBUS(isp)) { 2812 bus = 2813 GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd); 2814 } else { 2815 bus = 0; 2816 } 2817 isp_cv_signal_rqe(isp, bus, 2818 ((lun_entry_t *)arg)->le_status); 2819 break; 2820 } 2821 break; 2822 #endif 2823 case ISPASYNC_FW_CRASH: 2824 { 2825 u_int16_t mbox1, mbox6; 2826 mbox1 = ISP_READ(isp, OUTMAILBOX1); 2827 if (IS_DUALBUS(isp)) { 2828 mbox6 = ISP_READ(isp, OUTMAILBOX6); 2829 } else { 2830 mbox6 = 0; 2831 } 2832 isp_prt(isp, ISP_LOGERR, 2833 "Internal Firmware on bus %d Error @ RISC Address 0x%x", 2834 mbox6, mbox1); 2835 isp_reinit(isp); 2836 break; 2837 } 2838 case ISPASYNC_UNHANDLED_RESPONSE: 2839 break; 2840 default: 2841 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 2842 break; 2843 } 2844 return (rv); 2845 } 2846 2847 2848 /* 2849 * Locks are held before coming here. 2850 */ 2851 void 2852 isp_uninit(struct ispsoftc *isp) 2853 { 2854 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 2855 DISABLE_INTS(isp); 2856 } 2857 2858 void 2859 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...) 2860 { 2861 va_list ap; 2862 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 2863 return; 2864 } 2865 printf("%s: ", device_get_nameunit(isp->isp_dev)); 2866 va_start(ap, fmt); 2867 vprintf(fmt, ap); 2868 va_end(ap); 2869 printf("\n"); 2870 } 2871