1 /* $FreeBSD$ */ 2 /* 3 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 4 * 5 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 #include <dev/isp/isp_freebsd.h> 29 #include <sys/unistd.h> 30 #include <sys/kthread.h> 31 #include <machine/stdarg.h> /* for use by isp_prt below */ 32 #include <sys/conf.h> 33 #include <sys/module.h> 34 #include <sys/ioccom.h> 35 #include <dev/isp/isp_ioctl.h> 36 37 38 MODULE_VERSION(isp, 1); 39 int isp_announced = 0; 40 ispfwfunc *isp_get_firmware_p = NULL; 41 42 static d_ioctl_t ispioctl; 43 static void isp_intr_enable(void *); 44 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *); 45 static void isp_poll(struct cam_sim *); 46 static timeout_t isp_watchdog; 47 static void isp_kthread(void *); 48 static void isp_action(struct cam_sim *, union ccb *); 49 50 51 #define ISP_CDEV_MAJOR 248 52 static struct cdevsw isp_cdevsw = { 53 .d_open = nullopen, 54 .d_close = nullclose, 55 .d_ioctl = ispioctl, 56 .d_name = "isp", 57 .d_maj = ISP_CDEV_MAJOR, 58 .d_flags = D_TAPE, 59 }; 60 61 static struct ispsoftc *isplist = NULL; 62 63 void 64 isp_attach(struct ispsoftc *isp) 65 { 66 int primary, secondary; 67 struct ccb_setasync csa; 68 struct cam_devq *devq; 69 struct cam_sim *sim; 70 struct cam_path *path; 71 72 /* 73 * Establish (in case of 12X0) which bus is the primary. 74 */ 75 76 primary = 0; 77 secondary = 1; 78 79 /* 80 * Create the device queue for our SIM(s). 81 */ 82 devq = cam_simq_alloc(isp->isp_maxcmds); 83 if (devq == NULL) { 84 return; 85 } 86 87 /* 88 * Construct our SIM entry. 
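	 * The SIM is created with our isp_action/isp_poll entry points
	 * and draws on the devq allocated above: one untagged opening
	 * and up to isp_maxcmds tagged transactions per device.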
89 */ 90 ISPLOCK_2_CAMLOCK(isp); 91 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 92 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 93 if (sim == NULL) { 94 cam_simq_free(devq); 95 CAMLOCK_2_ISPLOCK(isp); 96 return; 97 } 98 CAMLOCK_2_ISPLOCK(isp); 99 100 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 101 isp->isp_osinfo.ehook.ich_arg = isp; 102 ISPLOCK_2_CAMLOCK(isp); 103 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 104 cam_sim_free(sim, TRUE); 105 CAMLOCK_2_ISPLOCK(isp); 106 isp_prt(isp, ISP_LOGERR, 107 "could not establish interrupt enable hook"); 108 return; 109 } 110 111 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 112 cam_sim_free(sim, TRUE); 113 CAMLOCK_2_ISPLOCK(isp); 114 return; 115 } 116 117 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 118 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 119 xpt_bus_deregister(cam_sim_path(sim)); 120 cam_sim_free(sim, TRUE); 121 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 122 CAMLOCK_2_ISPLOCK(isp); 123 return; 124 } 125 126 xpt_setup_ccb(&csa.ccb_h, path, 5); 127 csa.ccb_h.func_code = XPT_SASYNC_CB; 128 csa.event_enable = AC_LOST_DEVICE; 129 csa.callback = isp_cam_async; 130 csa.callback_arg = sim; 131 xpt_action((union ccb *)&csa); 132 CAMLOCK_2_ISPLOCK(isp); 133 isp->isp_sim = sim; 134 isp->isp_path = path; 135 /* 136 * Create a kernel thread for fibre channel instances. We 137 * don't have dual channel FC cards. 138 */ 139 if (IS_FC(isp)) { 140 ISPLOCK_2_CAMLOCK(isp); 141 /* XXX: LOCK VIOLATION */ 142 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); 143 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 144 RFHIGHPID, 0, "%s: fc_thrd", 145 device_get_nameunit(isp->isp_dev))) { 146 xpt_bus_deregister(cam_sim_path(sim)); 147 cam_sim_free(sim, TRUE); 148 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 149 CAMLOCK_2_ISPLOCK(isp); 150 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 151 return; 152 } 153 CAMLOCK_2_ISPLOCK(isp); 154 } 155 156 157 /* 158 * If we have a second channel, construct SIM entry for that. 
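	 * The second bus shares the devq allocated above but gets its
	 * own SIM, path and AC_LOST_DEVICE async callback, mirroring
	 * the primary bus setup done earlier.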
159 */ 160 if (IS_DUALBUS(isp)) { 161 ISPLOCK_2_CAMLOCK(isp); 162 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 163 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 164 if (sim == NULL) { 165 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 166 xpt_free_path(isp->isp_path); 167 cam_simq_free(devq); 168 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 169 return; 170 } 171 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 172 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 173 xpt_free_path(isp->isp_path); 174 cam_sim_free(sim, TRUE); 175 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 176 CAMLOCK_2_ISPLOCK(isp); 177 return; 178 } 179 180 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 181 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 182 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 183 xpt_free_path(isp->isp_path); 184 xpt_bus_deregister(cam_sim_path(sim)); 185 cam_sim_free(sim, TRUE); 186 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 187 CAMLOCK_2_ISPLOCK(isp); 188 return; 189 } 190 191 xpt_setup_ccb(&csa.ccb_h, path, 5); 192 csa.ccb_h.func_code = XPT_SASYNC_CB; 193 csa.event_enable = AC_LOST_DEVICE; 194 csa.callback = isp_cam_async; 195 csa.callback_arg = sim; 196 xpt_action((union ccb *)&csa); 197 CAMLOCK_2_ISPLOCK(isp); 198 isp->isp_sim2 = sim; 199 isp->isp_path2 = path; 200 } 201 202 #ifdef ISP_TARGET_MODE 203 cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a"); 204 cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b"); 205 cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a"); 206 cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b"); 207 #endif 208 /* 209 * Create device nodes 210 */ 211 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 212 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 213 214 if (isp->isp_role != ISP_ROLE_NONE) { 215 isp->isp_state = ISP_RUNSTATE; 216 ENABLE_INTS(isp); 217 } 218 if (isplist == NULL) { 219 isplist = isp; 220 } else { 221 struct ispsoftc *tmp = isplist; 222 while (tmp->isp_osinfo.next) { 223 tmp = tmp->isp_osinfo.next; 224 } 225 tmp->isp_osinfo.next = isp; 226 } 227 228 } 229 230 static INLINE void 231 isp_freeze_loopdown(struct ispsoftc *isp, char *msg) 232 { 233 if (isp->isp_osinfo.simqfrozen == 0) { 234 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 235 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 236 ISPLOCK_2_CAMLOCK(isp); 237 xpt_freeze_simq(isp->isp_sim, 1); 238 CAMLOCK_2_ISPLOCK(isp); 239 } else { 240 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 241 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 242 } 243 } 244 245 static int 246 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 247 { 248 struct ispsoftc *isp; 249 int retval = ENOTTY; 250 251 isp = isplist; 252 while (isp) { 253 if (minor(dev) == device_get_unit(isp->isp_dev)) { 254 break; 255 } 256 isp = isp->isp_osinfo.next; 257 } 258 if (isp == NULL) 259 return (ENXIO); 260 261 switch (cmd) { 262 #ifdef ISP_FW_CRASH_DUMP 263 case ISP_GET_FW_CRASH_DUMP: 264 { 265 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data; 266 size_t sz; 267 268 retval = 0; 269 if (IS_2200(isp)) 270 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 271 else 272 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 273 ISP_LOCK(isp); 274 if (ptr && *ptr) { 275 void *uaddr = *((void **) addr); 276 if (copyout(ptr, uaddr, sz)) { 277 retval = EFAULT; 278 } else { 279 *ptr = 0; 280 } 281 } else { 282 retval = ENXIO; 283 } 284 ISP_UNLOCK(isp); 285 break; 286 } 287 288 case ISP_FORCE_CRASH_DUMP: 289 ISP_LOCK(isp); 290 
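		/*
		 * Freeze the SIM queue, force the firmware crash dump
		 * and then reinitialize the chip before unlocking.
		 */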
		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
		isp_fw_dump(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
#endif
	case ISP_SDBLEV:
	{
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_RESCAN:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_LIP:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		struct lportdb *lp;

		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		ISP_LOCK(isp);
		lp = &FCPARAM(isp)->portdb[ifc->loopid];
		if (lp->valid) {
			ifc->loopid = lp->loopid;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		ISP_UNLOCK(isp);
		break;
	}
	case ISP_GET_STATS:
	{
		isp_stats_t *sp = (isp_stats_t *) addr;

		MEMZERO(sp, sizeof (*sp));
		sp->isp_stat_version = ISP_STATS_VERSION;
		sp->isp_type = isp->isp_type;
		sp->isp_revision = isp->isp_revision;
		ISP_LOCK(isp);
		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_CLR_STATS:
		ISP_LOCK(isp);
		isp->isp_intcnt = 0;
		isp->isp_intbogus = 0;
		isp->isp_intmboxc = 0;
		isp->isp_intoasync = 0;
		isp->isp_rsltccmplt = 0;
		isp->isp_fphccmplt = 0;
		isp->isp_rscchiwater = 0;
		isp->isp_fpcchiwater = 0;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_FC_GETHINFO:
	{
		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
		MEMZERO(hba, sizeof (*hba));
		ISP_LOCK(isp);
		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
		hba->fc_scsi_supported = 1;
		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_GET_FC_PARAM:
	{
		struct isp_fc_param *f = (struct isp_fc_param *) addr;

		if (!IS_FC(isp)) {
			retval = EINVAL;
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			f->parameter = FCPARAM(isp)->isp_execthrottle;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
				f->parameter = 1;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			f->parameter = FCPARAM(isp)->isp_loopid;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	case ISP_SET_FC_PARAM:
	{
		struct isp_fc_param *f = (struct isp_fc_param *) addr;
		u_int32_t param = f->parameter;

		if (!IS_FC(isp)) {
			retval = EINVAL;
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			if (param != 512 && param != 1024 && param != 2048) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_maxfrmlen = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			if (param < 16 || param > 255) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_execthrottle = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (param != 0 && param != 1) {
				retval = EINVAL;
				break;
			}
			if (param) {
				FCPARAM(isp)->isp_fwoptions |=
				    ICBOPT_FULL_DUPLEX;
			} else {
				FCPARAM(isp)->isp_fwoptions &=
				    ~ICBOPT_FULL_DUPLEX;
			}
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			if (param < 0 || param > 125) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_loopid = param;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	default:
		break;
	}
	return (retval);
}

static void
isp_intr_enable(void *arg)
{
	struct ispsoftc *isp = arg;
	if (isp->isp_role != ISP_ROLE_NONE) {
		ENABLE_INTS(isp);
		isp->isp_osinfo.intsok = 1;
	}
	/* Release our hook so that the boot can continue. */
	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
}

/*
 * Put the target mode functions here, because some are inlines
 */

#ifdef ISP_TARGET_MODE

static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
static cam_status
create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
static void destroy_lun_state(struct ispsoftc *, tstate_t *);
static void isp_en_lun(struct ispsoftc *, union ccb *);
static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
static timeout_t isp_refire_putback_atio;
static void isp_complete_ctio(union ccb *);
static void isp_target_putback_atio(union ccb *);
static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
static int isp_handle_platform_ctio(struct ispsoftc *, void *);
static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);

static INLINE int
is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr;
	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
	if (tptr ==
	    NULL) {
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	return (0);
}

static INLINE int
are_any_luns_enabled(struct ispsoftc *isp, int port)
{
	int lo, hi;
	if (IS_DUALBUS(isp)) {
		lo = (port * (LUN_HASH_SIZE >> 1));
		hi = lo + (LUN_HASH_SIZE >> 1);
	} else {
		lo = 0;
		hi = LUN_HASH_SIZE;
	}
	for (; lo < hi; lo++) {
		if (isp->isp_osinfo.lun_hash[lo]) {
			return (1);
		}
	}
	return (0);
}

static INLINE tstate_t *
get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr = NULL;

	if (lun == CAM_LUN_WILDCARD) {
		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
			tptr = &isp->isp_osinfo.tsdflt[bus];
			tptr->hold++;
			return (tptr);
		}
	} else {
		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
		if (tptr == NULL) {
			return (NULL);
		}
	}

	do {
		if (tptr->lun == lun && tptr->bus == bus) {
			tptr->hold++;
			return (tptr);
		}
	} while ((tptr = tptr->next) != NULL);
	return (tptr);
}

static INLINE void
rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
{
	if (tptr->hold)
		tptr->hold--;
}

static INLINE int
isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
{
	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
#ifdef ISP_SMPLOCK
		if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
			return (-1);
		}
#else
		if (tsleep(&isp->isp_osinfo.tgtcv0[bus], PZERO, "cv_isp", 0)) {
			return (-1);
		}
#endif
	}
	isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
	return (0);
}

static INLINE int
isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
{
#ifdef ISP_SMPLOCK
	if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
		return (-1);
	}
#else
	if (tsleep(&isp->isp_osinfo.tgtcv1[bus], PZERO, "cv_isp1", timo)) {
		return (-1);
	}
#endif
	return (0);
}

static INLINE void
isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
{
	isp->isp_osinfo.rstatus[bus] = status;
#ifdef ISP_SMPLOCK
	cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
#else
	wakeup(&isp->isp_osinfo.tgtcv1[bus]);
#endif
}

static INLINE void
isp_vsema_rqe(struct ispsoftc *isp, int bus)
{
	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
#ifdef ISP_SMPLOCK
		cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
#else
		wakeup(&isp->isp_osinfo.tgtcv0[bus]);
#endif
	}
	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
}

static INLINE atio_private_data_t *
isp_get_atpd(struct ispsoftc *isp, int tag)
{
	atio_private_data_t *atp;
	for (atp = isp->isp_osinfo.atpdp;
	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
		if (atp->tag == tag)
			return (atp);
	}
	return (NULL);
}

static cam_status
create_lun_state(struct ispsoftc *isp, int bus,
    struct cam_path *path, tstate_t **rslt)
{
	cam_status status;
	lun_id_t lun;
	int hfx;
	tstate_t *tptr, *new;

	lun = xpt_path_lun_id(path);
	if (lun < 0) {
		return (CAM_LUN_INVALID);
	}
	if (is_lun_enabled(isp, bus, lun)) {
		return (CAM_LUN_ALRDY_ENA);
	}
	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF,
M_NOWAIT|M_ZERO); 698 if (new == NULL) { 699 return (CAM_RESRC_UNAVAIL); 700 } 701 702 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 703 xpt_path_target_id(path), xpt_path_lun_id(path)); 704 if (status != CAM_REQ_CMP) { 705 free(new, M_DEVBUF); 706 return (status); 707 } 708 new->bus = bus; 709 new->lun = lun; 710 SLIST_INIT(&new->atios); 711 SLIST_INIT(&new->inots); 712 new->hold = 1; 713 714 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 715 tptr = isp->isp_osinfo.lun_hash[hfx]; 716 if (tptr == NULL) { 717 isp->isp_osinfo.lun_hash[hfx] = new; 718 } else { 719 while (tptr->next) 720 tptr = tptr->next; 721 tptr->next = new; 722 } 723 *rslt = new; 724 return (CAM_REQ_CMP); 725 } 726 727 static INLINE void 728 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr) 729 { 730 int hfx; 731 tstate_t *lw, *pw; 732 733 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 734 if (tptr->hold) { 735 return; 736 } 737 pw = isp->isp_osinfo.lun_hash[hfx]; 738 if (pw == NULL) { 739 return; 740 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 741 isp->isp_osinfo.lun_hash[hfx] = pw->next; 742 } else { 743 lw = pw; 744 pw = lw->next; 745 while (pw) { 746 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 747 lw->next = pw->next; 748 break; 749 } 750 lw = pw; 751 pw = pw->next; 752 } 753 if (pw == NULL) { 754 return; 755 } 756 } 757 free(tptr, M_DEVBUF); 758 } 759 760 /* 761 * we enter with our locks held. 762 */ 763 static void 764 isp_en_lun(struct ispsoftc *isp, union ccb *ccb) 765 { 766 const char lfmt[] = "Lun now %sabled for target mode on channel %d"; 767 struct ccb_en_lun *cel = &ccb->cel; 768 tstate_t *tptr; 769 u_int16_t rstat; 770 int bus, cmd, av, wildcard; 771 lun_id_t lun; 772 target_id_t tgt; 773 774 775 bus = XS_CHANNEL(ccb) & 0x1; 776 tgt = ccb->ccb_h.target_id; 777 lun = ccb->ccb_h.target_lun; 778 779 /* 780 * Do some sanity checking first. 781 */ 782 783 if ((lun != CAM_LUN_WILDCARD) && 784 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 785 ccb->ccb_h.status = CAM_LUN_INVALID; 786 return; 787 } 788 789 if (IS_SCSI(isp)) { 790 sdparam *sdp = isp->isp_param; 791 sdp += bus; 792 if (tgt != CAM_TARGET_WILDCARD && 793 tgt != sdp->isp_initiator_id) { 794 ccb->ccb_h.status = CAM_TID_INVALID; 795 return; 796 } 797 } else { 798 if (tgt != CAM_TARGET_WILDCARD && 799 tgt != FCPARAM(isp)->isp_iid) { 800 ccb->ccb_h.status = CAM_TID_INVALID; 801 return; 802 } 803 /* 804 * This is as a good a place as any to check f/w capabilities. 805 */ 806 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) { 807 isp_prt(isp, ISP_LOGERR, 808 "firmware does not support target mode"); 809 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 810 return; 811 } 812 /* 813 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 814 * XXX: dorks with our already fragile enable/disable code. 815 */ 816 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 817 isp_prt(isp, ISP_LOGERR, 818 "firmware not SCCLUN capable"); 819 } 820 } 821 822 if (tgt == CAM_TARGET_WILDCARD) { 823 if (lun == CAM_LUN_WILDCARD) { 824 wildcard = 1; 825 } else { 826 ccb->ccb_h.status = CAM_LUN_INVALID; 827 return; 828 } 829 } else { 830 wildcard = 0; 831 } 832 833 /* 834 * Next check to see whether this is a target/lun wildcard action. 835 * 836 * If so, we know that we can accept commands for luns that haven't 837 * been enabled yet and send them upstream. Otherwise, we have to 838 * handle them locally (if we see them at all). 
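	 * A wildcard enable only sets up the default (tsdflt) state for
	 * the bus and marks TM_WILDCARD_ENABLED; per-lun state is built
	 * via create_lun_state further below for non-wildcard enables.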
839 */ 840 841 if (wildcard) { 842 tptr = &isp->isp_osinfo.tsdflt[bus]; 843 if (cel->enable) { 844 if (isp->isp_osinfo.tmflags[bus] & 845 TM_WILDCARD_ENABLED) { 846 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 847 return; 848 } 849 ccb->ccb_h.status = 850 xpt_create_path(&tptr->owner, NULL, 851 xpt_path_path_id(ccb->ccb_h.path), 852 xpt_path_target_id(ccb->ccb_h.path), 853 xpt_path_lun_id(ccb->ccb_h.path)); 854 if (ccb->ccb_h.status != CAM_REQ_CMP) { 855 return; 856 } 857 SLIST_INIT(&tptr->atios); 858 SLIST_INIT(&tptr->inots); 859 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 860 } else { 861 if ((isp->isp_osinfo.tmflags[bus] & 862 TM_WILDCARD_ENABLED) == 0) { 863 ccb->ccb_h.status = CAM_REQ_CMP; 864 return; 865 } 866 if (tptr->hold) { 867 ccb->ccb_h.status = CAM_SCSI_BUSY; 868 return; 869 } 870 xpt_free_path(tptr->owner); 871 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 872 } 873 } 874 875 /* 876 * Now check to see whether this bus needs to be 877 * enabled/disabled with respect to target mode. 878 */ 879 av = bus << 31; 880 if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) { 881 av |= ENABLE_TARGET_FLAG; 882 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 883 if (av) { 884 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 885 if (wildcard) { 886 isp->isp_osinfo.tmflags[bus] &= 887 ~TM_WILDCARD_ENABLED; 888 xpt_free_path(tptr->owner); 889 } 890 return; 891 } 892 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 893 isp_prt(isp, ISP_LOGINFO, 894 "Target Mode enabled on channel %d", bus); 895 } else if (cel->enable == 0 && 896 (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) { 897 if (are_any_luns_enabled(isp, bus)) { 898 ccb->ccb_h.status = CAM_SCSI_BUSY; 899 return; 900 } 901 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 902 if (av) { 903 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 904 return; 905 } 906 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 907 isp_prt(isp, ISP_LOGINFO, 908 "Target Mode disabled on channel %d", bus); 909 } 910 911 if (wildcard) { 912 ccb->ccb_h.status = CAM_REQ_CMP; 913 return; 914 } 915 916 if (cel->enable) { 917 ccb->ccb_h.status = 918 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 919 if (ccb->ccb_h.status != CAM_REQ_CMP) { 920 return; 921 } 922 } else { 923 tptr = get_lun_statep(isp, bus, lun); 924 if (tptr == NULL) { 925 ccb->ccb_h.status = CAM_LUN_INVALID; 926 return; 927 } 928 } 929 930 if (isp_psema_sig_rqe(isp, bus)) { 931 rls_lun_statep(isp, tptr); 932 if (cel->enable) 933 destroy_lun_state(isp, tptr); 934 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 935 return; 936 } 937 938 if (cel->enable) { 939 u_int32_t seq = isp->isp_osinfo.rollinfo++; 940 int c, n, ulun = lun; 941 942 cmd = RQSTYPE_ENABLE_LUN; 943 c = DFLT_CMND_CNT; 944 n = DFLT_INOT_CNT; 945 if (IS_FC(isp) && lun != 0) { 946 cmd = RQSTYPE_MODIFY_LUN; 947 n = 0; 948 /* 949 * For SCC firmware, we only deal with setting 950 * (enabling or modifying) lun 0. 
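			 * An enable or disable of any other lun therefore
			 * becomes a MODIFY of lun 0, adjusting its command
			 * count but adding no immediate notify resources.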
951 */ 952 ulun = 0; 953 } 954 rstat = LUN_ERR; 955 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { 956 xpt_print_path(ccb->ccb_h.path); 957 isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed"); 958 goto out; 959 } 960 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 961 xpt_print_path(ccb->ccb_h.path); 962 isp_prt(isp, ISP_LOGERR, 963 "wait for ENABLE/MODIFY LUN timed out"); 964 goto out; 965 } 966 rstat = isp->isp_osinfo.rstatus[bus]; 967 if (rstat != LUN_OK) { 968 xpt_print_path(ccb->ccb_h.path); 969 isp_prt(isp, ISP_LOGERR, 970 "ENABLE/MODIFY LUN returned 0x%x", rstat); 971 goto out; 972 } 973 } else { 974 int c, n, ulun = lun; 975 u_int32_t seq; 976 977 rstat = LUN_ERR; 978 seq = isp->isp_osinfo.rollinfo++; 979 cmd = -RQSTYPE_MODIFY_LUN; 980 981 c = DFLT_CMND_CNT; 982 n = DFLT_INOT_CNT; 983 if (IS_FC(isp) && lun != 0) { 984 n = 0; 985 /* 986 * For SCC firmware, we only deal with setting 987 * (enabling or modifying) lun 0. 988 */ 989 ulun = 0; 990 } 991 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { 992 xpt_print_path(ccb->ccb_h.path); 993 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); 994 goto out; 995 } 996 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 997 xpt_print_path(ccb->ccb_h.path); 998 isp_prt(isp, ISP_LOGERR, 999 "wait for MODIFY LUN timed out"); 1000 goto out; 1001 } 1002 rstat = isp->isp_osinfo.rstatus[bus]; 1003 if (rstat != LUN_OK) { 1004 xpt_print_path(ccb->ccb_h.path); 1005 isp_prt(isp, ISP_LOGERR, 1006 "MODIFY LUN returned 0x%x", rstat); 1007 goto out; 1008 } 1009 if (IS_FC(isp) && lun) { 1010 goto out; 1011 } 1012 1013 seq = isp->isp_osinfo.rollinfo++; 1014 1015 rstat = LUN_ERR; 1016 cmd = -RQSTYPE_ENABLE_LUN; 1017 if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) { 1018 xpt_print_path(ccb->ccb_h.path); 1019 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); 1020 goto out; 1021 } 1022 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 1023 xpt_print_path(ccb->ccb_h.path); 1024 isp_prt(isp, ISP_LOGERR, 1025 "wait for DISABLE LUN timed out"); 1026 goto out; 1027 } 1028 rstat = isp->isp_osinfo.rstatus[bus]; 1029 if (rstat != LUN_OK) { 1030 xpt_print_path(ccb->ccb_h.path); 1031 isp_prt(isp, ISP_LOGWARN, 1032 "DISABLE LUN returned 0x%x", rstat); 1033 goto out; 1034 } 1035 if (are_any_luns_enabled(isp, bus) == 0) { 1036 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1037 if (av) { 1038 isp_prt(isp, ISP_LOGWARN, 1039 "disable target mode on channel %d failed", 1040 bus); 1041 goto out; 1042 } 1043 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1044 xpt_print_path(ccb->ccb_h.path); 1045 isp_prt(isp, ISP_LOGINFO, 1046 "Target Mode disabled on channel %d", bus); 1047 } 1048 } 1049 1050 out: 1051 isp_vsema_rqe(isp, bus); 1052 1053 if (rstat != LUN_OK) { 1054 xpt_print_path(ccb->ccb_h.path); 1055 isp_prt(isp, ISP_LOGWARN, 1056 "lun %sable failed", (cel->enable) ? "en" : "dis"); 1057 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1058 rls_lun_statep(isp, tptr); 1059 if (cel->enable) 1060 destroy_lun_state(isp, tptr); 1061 } else { 1062 xpt_print_path(ccb->ccb_h.path); 1063 isp_prt(isp, ISP_LOGINFO, lfmt, 1064 (cel->enable) ? 
"en" : "dis", bus); 1065 rls_lun_statep(isp, tptr); 1066 if (cel->enable == 0) { 1067 destroy_lun_state(isp, tptr); 1068 } 1069 ccb->ccb_h.status = CAM_REQ_CMP; 1070 } 1071 } 1072 1073 static cam_status 1074 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb) 1075 { 1076 tstate_t *tptr; 1077 struct ccb_hdr_slist *lp; 1078 struct ccb_hdr *curelm; 1079 int found; 1080 union ccb *accb = ccb->cab.abort_ccb; 1081 1082 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 1083 if (IS_FC(isp) && (accb->ccb_h.target_id != 1084 ((fcparam *) isp->isp_param)->isp_loopid)) { 1085 return (CAM_PATH_INVALID); 1086 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 1087 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 1088 return (CAM_PATH_INVALID); 1089 } 1090 } 1091 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 1092 if (tptr == NULL) { 1093 return (CAM_PATH_INVALID); 1094 } 1095 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1096 lp = &tptr->atios; 1097 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 1098 lp = &tptr->inots; 1099 } else { 1100 rls_lun_statep(isp, tptr); 1101 return (CAM_UA_ABORT); 1102 } 1103 curelm = SLIST_FIRST(lp); 1104 found = 0; 1105 if (curelm == &accb->ccb_h) { 1106 found = 1; 1107 SLIST_REMOVE_HEAD(lp, sim_links.sle); 1108 } else { 1109 while(curelm != NULL) { 1110 struct ccb_hdr *nextelm; 1111 1112 nextelm = SLIST_NEXT(curelm, sim_links.sle); 1113 if (nextelm == &accb->ccb_h) { 1114 found = 1; 1115 SLIST_NEXT(curelm, sim_links.sle) = 1116 SLIST_NEXT(nextelm, sim_links.sle); 1117 break; 1118 } 1119 curelm = nextelm; 1120 } 1121 } 1122 rls_lun_statep(isp, tptr); 1123 if (found) { 1124 accb->ccb_h.status = CAM_REQ_ABORTED; 1125 return (CAM_REQ_CMP); 1126 } 1127 return(CAM_PATH_INVALID); 1128 } 1129 1130 static cam_status 1131 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb) 1132 { 1133 void *qe; 1134 struct ccb_scsiio *cso = &ccb->csio; 1135 u_int16_t *hp, save_handle; 1136 u_int16_t nxti, optr; 1137 u_int8_t local[QENTRY_LEN]; 1138 1139 1140 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1141 xpt_print_path(ccb->ccb_h.path); 1142 printf("Request Queue Overflow in isp_target_start_ctio\n"); 1143 return (CAM_RESRC_UNAVAIL); 1144 } 1145 bzero(local, QENTRY_LEN); 1146 1147 /* 1148 * We're either moving data or completing a command here. 
1149 */ 1150 1151 if (IS_FC(isp)) { 1152 atio_private_data_t *atp; 1153 ct2_entry_t *cto = (ct2_entry_t *) local; 1154 1155 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1156 cto->ct_header.rqs_entry_count = 1; 1157 cto->ct_iid = cso->init_id; 1158 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 1159 cto->ct_lun = ccb->ccb_h.target_lun; 1160 } 1161 1162 atp = isp_get_atpd(isp, cso->tag_id); 1163 if (atp == NULL) { 1164 isp_prt(isp, ISP_LOGERR, 1165 "cannot find private data adjunct for tag %x", 1166 cso->tag_id); 1167 return (-1); 1168 } 1169 1170 cto->ct_rxid = cso->tag_id; 1171 if (cso->dxfer_len == 0) { 1172 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1173 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1174 cto->ct_flags |= CT2_SENDSTATUS; 1175 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1176 cto->ct_resid = 1177 atp->orig_datalen - atp->bytes_xfered; 1178 if (cto->ct_resid < 0) { 1179 cto->rsp.m1.ct_scsi_status |= 1180 CT2_DATA_OVER; 1181 } else if (cto->ct_resid > 0) { 1182 cto->rsp.m1.ct_scsi_status |= 1183 CT2_DATA_UNDER; 1184 } 1185 } 1186 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1187 int m = min(cso->sense_len, MAXRESPLEN); 1188 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m); 1189 cto->rsp.m1.ct_senselen = m; 1190 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1191 } 1192 } else { 1193 cto->ct_flags |= CT2_FLAG_MODE0; 1194 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1195 cto->ct_flags |= CT2_DATA_IN; 1196 } else { 1197 cto->ct_flags |= CT2_DATA_OUT; 1198 } 1199 cto->ct_reloff = atp->bytes_xfered; 1200 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1201 cto->ct_flags |= CT2_SENDSTATUS; 1202 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1203 cto->ct_resid = 1204 atp->orig_datalen - 1205 (atp->bytes_xfered + cso->dxfer_len); 1206 if (cto->ct_resid < 0) { 1207 cto->rsp.m0.ct_scsi_status |= 1208 CT2_DATA_OVER; 1209 } else if (cto->ct_resid > 0) { 1210 cto->rsp.m0.ct_scsi_status |= 1211 CT2_DATA_UNDER; 1212 } 1213 } else { 1214 atp->last_xframt = cso->dxfer_len; 1215 } 1216 /* 1217 * If we're sending data and status back together, 1218 * we can't also send back sense data as well. 
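			 * A mode 0 (data) CTIO2 carries no sense area, so
			 * any sense data has to go out with a final,
			 * status-only mode 1 CTIO2 instead.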
1219 */ 1220 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1221 } 1222 1223 if (cto->ct_flags & CT2_SENDSTATUS) { 1224 isp_prt(isp, ISP_LOGTDEBUG0, 1225 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1226 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1227 cso->dxfer_len, cto->ct_resid); 1228 cto->ct_flags |= CT2_CCINCR; 1229 atp->state = ATPD_STATE_LAST_CTIO; 1230 } else 1231 atp->state = ATPD_STATE_CTIO; 1232 cto->ct_timeout = 10; 1233 hp = &cto->ct_syshandle; 1234 } else { 1235 ct_entry_t *cto = (ct_entry_t *) local; 1236 1237 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1238 cto->ct_header.rqs_entry_count = 1; 1239 cto->ct_iid = cso->init_id; 1240 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1241 cto->ct_tgt = ccb->ccb_h.target_id; 1242 cto->ct_lun = ccb->ccb_h.target_lun; 1243 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1244 if (AT_HAS_TAG(cso->tag_id)) { 1245 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id); 1246 cto->ct_flags |= CT_TQAE; 1247 } 1248 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1249 cto->ct_flags |= CT_NODISC; 1250 } 1251 if (cso->dxfer_len == 0) { 1252 cto->ct_flags |= CT_NO_DATA; 1253 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1254 cto->ct_flags |= CT_DATA_IN; 1255 } else { 1256 cto->ct_flags |= CT_DATA_OUT; 1257 } 1258 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1259 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1260 cto->ct_scsi_status = cso->scsi_status; 1261 cto->ct_resid = cso->resid; 1262 isp_prt(isp, ISP_LOGTDEBUG0, 1263 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1264 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1265 cso->tag_id); 1266 } 1267 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1268 cto->ct_timeout = 10; 1269 hp = &cto->ct_syshandle; 1270 } 1271 1272 if (isp_save_xs(isp, (XS_T *)ccb, hp)) { 1273 xpt_print_path(ccb->ccb_h.path); 1274 printf("No XFLIST pointers for isp_target_start_ctio\n"); 1275 return (CAM_RESRC_UNAVAIL); 1276 } 1277 1278 1279 /* 1280 * Call the dma setup routines for this entry (and any subsequent 1281 * CTIOs) if there's data to move, and then tell the f/w it's got 1282 * new things to play with. As with isp_start's usage of DMA setup, 1283 * any swizzling is done in the machine dependent layer. Because 1284 * of this, we put the request onto the queue area first in native 1285 * format. 
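	 * ISP_DMASETUP returns CMD_QUEUED once this entry (plus any
	 * continuation entries) is on the request queue; on CMD_EAGAIN
	 * or other errors we tear down the handle we just saved.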
1286 */ 1287 1288 save_handle = *hp; 1289 1290 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1291 case CMD_QUEUED: 1292 ISP_ADD_REQUEST(isp, nxti); 1293 return (CAM_REQ_INPROG); 1294 1295 case CMD_EAGAIN: 1296 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1297 isp_destroy_handle(isp, save_handle); 1298 return (CAM_RESRC_UNAVAIL); 1299 1300 default: 1301 isp_destroy_handle(isp, save_handle); 1302 return (XS_ERR(ccb)); 1303 } 1304 } 1305 1306 static void 1307 isp_refire_putback_atio(void *arg) 1308 { 1309 int s = splcam(); 1310 isp_target_putback_atio(arg); 1311 splx(s); 1312 } 1313 1314 static void 1315 isp_target_putback_atio(union ccb *ccb) 1316 { 1317 struct ispsoftc *isp; 1318 struct ccb_scsiio *cso; 1319 u_int16_t nxti, optr; 1320 void *qe; 1321 1322 isp = XS_ISP(ccb); 1323 1324 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1325 (void) timeout(isp_refire_putback_atio, ccb, 10); 1326 isp_prt(isp, ISP_LOGWARN, 1327 "isp_target_putback_atio: Request Queue Overflow"); 1328 return; 1329 } 1330 bzero(qe, QENTRY_LEN); 1331 cso = &ccb->csio; 1332 if (IS_FC(isp)) { 1333 at2_entry_t local, *at = &local; 1334 MEMZERO(at, sizeof (at2_entry_t)); 1335 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1336 at->at_header.rqs_entry_count = 1; 1337 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1338 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1339 } else { 1340 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1341 } 1342 at->at_status = CT_OK; 1343 at->at_rxid = cso->tag_id; 1344 at->at_iid = cso->ccb_h.target_id; 1345 isp_put_atio2(isp, at, qe); 1346 } else { 1347 at_entry_t local, *at = &local; 1348 MEMZERO(at, sizeof (at_entry_t)); 1349 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1350 at->at_header.rqs_entry_count = 1; 1351 at->at_iid = cso->init_id; 1352 at->at_iid |= XS_CHANNEL(ccb) << 7; 1353 at->at_tgt = cso->ccb_h.target_id; 1354 at->at_lun = cso->ccb_h.target_lun; 1355 at->at_status = CT_OK; 1356 at->at_tag_val = AT_GET_TAG(cso->tag_id); 1357 at->at_handle = AT_GET_HANDLE(cso->tag_id); 1358 isp_put_atio(isp, at, qe); 1359 } 1360 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); 1361 ISP_ADD_REQUEST(isp, nxti); 1362 isp_complete_ctio(ccb); 1363 } 1364 1365 static void 1366 isp_complete_ctio(union ccb *ccb) 1367 { 1368 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1369 ccb->ccb_h.status |= CAM_REQ_CMP; 1370 } 1371 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1372 xpt_done(ccb); 1373 } 1374 1375 /* 1376 * Handle ATIO stuff that the generic code can't. 1377 * This means handling CDBs. 1378 */ 1379 1380 static int 1381 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep) 1382 { 1383 tstate_t *tptr; 1384 int status, bus, iswildcard; 1385 struct ccb_accept_tio *atiop; 1386 1387 /* 1388 * The firmware status (except for the QLTM_SVALID bit) 1389 * indicates why this ATIO was sent to us. 1390 * 1391 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1392 * 1393 * If the DISCONNECTS DISABLED bit is set in the flags field, 1394 * we're still connected on the SCSI bus. 1395 */ 1396 status = aep->at_status; 1397 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 1398 /* 1399 * Bus Phase Sequence error. We should have sense data 1400 * suggested by the f/w. I'm not sure quite yet what 1401 * to do about this for CAM. 
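		 * For now we just log it and complete the exchange with
		 * BUSY status so that the initiator will retry.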
1402 */ 1403 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1404 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1405 return (0); 1406 } 1407 if ((status & ~QLTM_SVALID) != AT_CDB) { 1408 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1409 status); 1410 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1411 return (0); 1412 } 1413 1414 bus = GET_BUS_VAL(aep->at_iid); 1415 tptr = get_lun_statep(isp, bus, aep->at_lun); 1416 if (tptr == NULL) { 1417 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1418 iswildcard = 1; 1419 } else { 1420 iswildcard = 0; 1421 } 1422 1423 if (tptr == NULL) { 1424 /* 1425 * Because we can't autofeed sense data back with 1426 * a command for parallel SCSI, we can't give back 1427 * a CHECK CONDITION. We'll give back a BUSY status 1428 * instead. This works out okay because the only 1429 * time we should, in fact, get this, is in the 1430 * case that somebody configured us without the 1431 * blackhole driver, so they get what they deserve. 1432 */ 1433 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1434 return (0); 1435 } 1436 1437 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1438 if (atiop == NULL) { 1439 /* 1440 * Because we can't autofeed sense data back with 1441 * a command for parallel SCSI, we can't give back 1442 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1443 * instead. This works out okay because the only time we 1444 * should, in fact, get this, is in the case that we've 1445 * run out of ATIOS. 1446 */ 1447 xpt_print_path(tptr->owner); 1448 isp_prt(isp, ISP_LOGWARN, 1449 "no ATIOS for lun %d from initiator %d on channel %d", 1450 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1451 if (aep->at_flags & AT_TQAE) 1452 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1453 else 1454 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1455 rls_lun_statep(isp, tptr); 1456 return (0); 1457 } 1458 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1459 if (iswildcard) { 1460 atiop->ccb_h.target_id = aep->at_tgt; 1461 atiop->ccb_h.target_lun = aep->at_lun; 1462 } 1463 if (aep->at_flags & AT_NODISC) { 1464 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1465 } else { 1466 atiop->ccb_h.flags = 0; 1467 } 1468 1469 if (status & QLTM_SVALID) { 1470 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1471 atiop->sense_len = amt; 1472 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1473 } else { 1474 atiop->sense_len = 0; 1475 } 1476 1477 atiop->init_id = GET_IID_VAL(aep->at_iid); 1478 atiop->cdb_len = aep->at_cdblen; 1479 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1480 atiop->ccb_h.status = CAM_CDB_RECVD; 1481 /* 1482 * Construct a tag 'id' based upon tag value (which may be 0..255) 1483 * and the handle (which we have to preserve). 1484 */ 1485 AT_MAKE_TAGID(atiop->tag_id, aep); 1486 if (aep->at_flags & AT_TQAE) { 1487 atiop->tag_action = aep->at_tag_type; 1488 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1489 } 1490 xpt_done((union ccb*)atiop); 1491 isp_prt(isp, ISP_LOGTDEBUG0, 1492 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", 1493 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), 1494 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, 1495 aep->at_tag_type, (aep->at_flags & AT_NODISC)? 
1496 "nondisc" : "disconnecting"); 1497 rls_lun_statep(isp, tptr); 1498 return (0); 1499 } 1500 1501 static int 1502 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep) 1503 { 1504 lun_id_t lun; 1505 tstate_t *tptr; 1506 struct ccb_accept_tio *atiop; 1507 atio_private_data_t *atp; 1508 1509 /* 1510 * The firmware status (except for the QLTM_SVALID bit) 1511 * indicates why this ATIO was sent to us. 1512 * 1513 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1514 */ 1515 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1516 isp_prt(isp, ISP_LOGWARN, 1517 "bogus atio (0x%x) leaked to platform", aep->at_status); 1518 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1519 return (0); 1520 } 1521 1522 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1523 lun = aep->at_scclun; 1524 } else { 1525 lun = aep->at_lun; 1526 } 1527 tptr = get_lun_statep(isp, 0, lun); 1528 if (tptr == NULL) { 1529 isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun); 1530 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1531 } 1532 1533 if (tptr == NULL) { 1534 /* 1535 * What we'd like to know is whether or not we have a listener 1536 * upstream that really hasn't configured yet. If we do, then 1537 * we can give a more sensible reply here. If not, then we can 1538 * reject this out of hand. 1539 * 1540 * Choices for what to send were 1541 * 1542 * Not Ready, Unit Not Self-Configured Yet 1543 * (0x2,0x3e,0x00) 1544 * 1545 * for the former and 1546 * 1547 * Illegal Request, Logical Unit Not Supported 1548 * (0x5,0x25,0x00) 1549 * 1550 * for the latter. 1551 * 1552 * We used to decide whether there was at least one listener 1553 * based upon whether the black hole driver was configured. 1554 * However, recent config(8) changes have made this hard to do 1555 * at this time. 1556 * 1557 */ 1558 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1559 return (0); 1560 } 1561 1562 atp = isp_get_atpd(isp, 0); 1563 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1564 if (atiop == NULL || atp == NULL) { 1565 /* 1566 * Because we can't autofeed sense data back with 1567 * a command for parallel SCSI, we can't give back 1568 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1569 * instead. This works out okay because the only time we 1570 * should, in fact, get this, is in the case that we've 1571 * run out of ATIOS. 1572 */ 1573 xpt_print_path(tptr->owner); 1574 isp_prt(isp, ISP_LOGWARN, 1575 "no %s for lun %d from initiator %d", 1576 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : 1577 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid); 1578 rls_lun_statep(isp, tptr); 1579 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1580 return (0); 1581 } 1582 atp->state = ATPD_STATE_ATIO; 1583 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1584 tptr->atio_count--; 1585 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d", 1586 lun, tptr->atio_count); 1587 1588 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1589 atiop->ccb_h.target_id = 1590 ((fcparam *)isp->isp_param)->isp_loopid; 1591 atiop->ccb_h.target_lun = lun; 1592 } 1593 /* 1594 * We don't get 'suggested' sense data as we do with SCSI cards. 
1595 */ 1596 atiop->sense_len = 0; 1597 1598 atiop->init_id = aep->at_iid; 1599 atiop->cdb_len = ATIO2_CDBLEN; 1600 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1601 atiop->ccb_h.status = CAM_CDB_RECVD; 1602 atiop->tag_id = aep->at_rxid; 1603 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1604 case ATIO2_TC_ATTR_SIMPLEQ: 1605 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1606 break; 1607 case ATIO2_TC_ATTR_HEADOFQ: 1608 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1609 break; 1610 case ATIO2_TC_ATTR_ORDERED: 1611 atiop->tag_action = MSG_ORDERED_Q_TAG; 1612 break; 1613 case ATIO2_TC_ATTR_ACAQ: /* ?? */ 1614 case ATIO2_TC_ATTR_UNTAGGED: 1615 default: 1616 atiop->tag_action = 0; 1617 break; 1618 } 1619 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 1620 1621 atp->tag = atiop->tag_id; 1622 atp->lun = lun; 1623 atp->orig_datalen = aep->at_datalen; 1624 atp->last_xframt = 0; 1625 atp->bytes_xfered = 0; 1626 atp->state = ATPD_STATE_CAM; 1627 xpt_done((union ccb*)atiop); 1628 1629 isp_prt(isp, ISP_LOGTDEBUG0, 1630 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u", 1631 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid, 1632 lun, aep->at_taskflags, aep->at_datalen); 1633 rls_lun_statep(isp, tptr); 1634 return (0); 1635 } 1636 1637 static int 1638 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg) 1639 { 1640 union ccb *ccb; 1641 int sentstatus, ok, notify_cam, resid = 0; 1642 u_int16_t tval; 1643 1644 /* 1645 * CTIO and CTIO2 are close enough.... 1646 */ 1647 1648 ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle); 1649 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio")); 1650 isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle); 1651 1652 if (IS_FC(isp)) { 1653 ct2_entry_t *ct = arg; 1654 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid); 1655 if (atp == NULL) { 1656 isp_prt(isp, ISP_LOGERR, 1657 "cannot find adjunct for %x after I/O", 1658 ct->ct_rxid); 1659 return (0); 1660 } 1661 sentstatus = ct->ct_flags & CT2_SENDSTATUS; 1662 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1663 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { 1664 ccb->ccb_h.status |= CAM_SENT_SENSE; 1665 } 1666 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1667 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { 1668 resid = ct->ct_resid; 1669 atp->bytes_xfered += (atp->last_xframt - resid); 1670 atp->last_xframt = 0; 1671 } 1672 if (sentstatus || !ok) { 1673 atp->tag = 0; 1674 } 1675 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, 1676 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", 1677 ct->ct_rxid, ct->ct_status, ct->ct_flags, 1678 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, 1679 resid, sentstatus? "FIN" : "MID"); 1680 tval = ct->ct_rxid; 1681 1682 /* XXX: should really come after isp_complete_ctio */ 1683 atp->state = ATPD_STATE_PDON; 1684 } else { 1685 ct_entry_t *ct = arg; 1686 sentstatus = ct->ct_flags & CT_SENDSTATUS; 1687 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1688 /* 1689 * We *ought* to be able to get back to the original ATIO 1690 * here, but for some reason this gets lost. It's just as 1691 * well because it's squirrelled away as part of periph 1692 * private data. 1693 * 1694 * We can live without it as long as we continue to use 1695 * the auto-replenish feature for CTIOs. 
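		 * (The low bit of rqs_seqno tells us whether this is the
		 * CTIO that CAM needs to hear about; intermediate CTIOs
		 * are swallowed further below.)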
1696 */ 1697 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1698 if (ct->ct_status & QLTM_SVALID) { 1699 char *sp = (char *)ct; 1700 sp += CTIO_SENSE_OFFSET; 1701 ccb->csio.sense_len = 1702 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); 1703 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); 1704 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1705 } 1706 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 1707 resid = ct->ct_resid; 1708 } 1709 isp_prt(isp, ISP_LOGTDEBUG0, 1710 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", 1711 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, 1712 ct->ct_status, ct->ct_flags, resid, 1713 sentstatus? "FIN" : "MID"); 1714 tval = ct->ct_fwhandle; 1715 } 1716 ccb->csio.resid += resid; 1717 1718 /* 1719 * We're here either because intermediate data transfers are done 1720 * and/or the final status CTIO (which may have joined with a 1721 * Data Transfer) is done. 1722 * 1723 * In any case, for this platform, the upper layers figure out 1724 * what to do next, so all we do here is collect status and 1725 * pass information along. Any DMA handles have already been 1726 * freed. 1727 */ 1728 if (notify_cam == 0) { 1729 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1730 return (0); 1731 } 1732 1733 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1734 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1735 1736 if (!ok) { 1737 isp_target_putback_atio(ccb); 1738 } else { 1739 isp_complete_ctio(ccb); 1740 1741 } 1742 return (0); 1743 } 1744 1745 static int 1746 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp) 1747 { 1748 return (0); /* XXXX */ 1749 } 1750 1751 static int 1752 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp) 1753 { 1754 1755 switch (inp->in_status) { 1756 case IN_PORT_LOGOUT: 1757 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1758 inp->in_iid); 1759 break; 1760 case IN_PORT_CHANGED: 1761 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1762 inp->in_iid); 1763 break; 1764 case IN_GLOBAL_LOGO: 1765 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1766 break; 1767 case IN_ABORT_TASK: 1768 { 1769 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1770 struct ccb_immed_notify *inot = NULL; 1771 1772 if (atp) { 1773 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1774 if (tptr) { 1775 inot = (struct ccb_immed_notify *) 1776 SLIST_FIRST(&tptr->inots); 1777 if (inot) { 1778 SLIST_REMOVE_HEAD(&tptr->inots, 1779 sim_links.sle); 1780 } 1781 } 1782 isp_prt(isp, ISP_LOGWARN, 1783 "abort task RX_ID %x IID %d state %d", 1784 inp->in_seqid, inp->in_iid, atp->state); 1785 } else { 1786 isp_prt(isp, ISP_LOGWARN, 1787 "abort task RX_ID %x from iid %d, state unknown", 1788 inp->in_seqid, inp->in_iid); 1789 } 1790 if (inot) { 1791 inot->initiator_id = inp->in_iid; 1792 inot->sense_len = 0; 1793 inot->message_args[0] = MSG_ABORT_TAG; 1794 inot->message_args[1] = inp->in_seqid & 0xff; 1795 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; 1796 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 1797 xpt_done((union ccb *)inot); 1798 } 1799 break; 1800 } 1801 default: 1802 break; 1803 } 1804 return (0); 1805 } 1806 #endif 1807 1808 static void 1809 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg) 1810 { 1811 struct cam_sim *sim; 1812 struct ispsoftc *isp; 1813 1814 sim = (struct cam_sim *)cbarg; 1815 isp = (struct ispsoftc *) cam_sim_softc(sim); 1816 switch (code) { 1817 case AC_LOST_DEVICE: 1818 if (IS_SCSI(isp)) { 1819 u_int16_t oflags, nflags; 
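			/*
			 * Temporarily push known-safe negotiation flags as
			 * the goal for this target, run ISPCTL_UPDATE_PARAMS,
			 * and then restore the previous goal settings.
			 */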
1820 sdparam *sdp = isp->isp_param; 1821 int tgt; 1822 1823 tgt = xpt_path_target_id(path); 1824 if (tgt >= 0) { 1825 sdp += cam_sim_bus(sim); 1826 ISP_LOCK(isp); 1827 nflags = sdp->isp_devparam[tgt].nvrm_flags; 1828 #ifndef ISP_TARGET_MODE 1829 nflags &= DPARM_SAFE_DFLT; 1830 if (isp->isp_loaded_fw) { 1831 nflags |= DPARM_NARROW | DPARM_ASYNC; 1832 } 1833 #else 1834 nflags = DPARM_DEFAULT; 1835 #endif 1836 oflags = sdp->isp_devparam[tgt].goal_flags; 1837 sdp->isp_devparam[tgt].goal_flags = nflags; 1838 sdp->isp_devparam[tgt].dev_update = 1; 1839 isp->isp_update |= (1 << cam_sim_bus(sim)); 1840 (void) isp_control(isp, 1841 ISPCTL_UPDATE_PARAMS, NULL); 1842 sdp->isp_devparam[tgt].goal_flags = oflags; 1843 ISP_UNLOCK(isp); 1844 } 1845 } 1846 break; 1847 default: 1848 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); 1849 break; 1850 } 1851 } 1852 1853 static void 1854 isp_poll(struct cam_sim *sim) 1855 { 1856 struct ispsoftc *isp = cam_sim_softc(sim); 1857 u_int16_t isr, sema, mbox; 1858 1859 ISP_LOCK(isp); 1860 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 1861 isp_intr(isp, isr, sema, mbox); 1862 } 1863 ISP_UNLOCK(isp); 1864 } 1865 1866 1867 static void 1868 isp_watchdog(void *arg) 1869 { 1870 XS_T *xs = arg; 1871 struct ispsoftc *isp = XS_ISP(xs); 1872 u_int32_t handle; 1873 int iok; 1874 1875 /* 1876 * We've decided this command is dead. Make sure we're not trying 1877 * to kill a command that's already dead by getting it's handle and 1878 * and seeing whether it's still alive. 1879 */ 1880 ISP_LOCK(isp); 1881 iok = isp->isp_osinfo.intsok; 1882 isp->isp_osinfo.intsok = 0; 1883 handle = isp_find_handle(isp, xs); 1884 if (handle) { 1885 u_int16_t isr, sema, mbox; 1886 1887 if (XS_CMD_DONE_P(xs)) { 1888 isp_prt(isp, ISP_LOGDEBUG1, 1889 "watchdog found done cmd (handle 0x%x)", handle); 1890 ISP_UNLOCK(isp); 1891 return; 1892 } 1893 1894 if (XS_CMD_WDOG_P(xs)) { 1895 isp_prt(isp, ISP_LOGDEBUG2, 1896 "recursive watchdog (handle 0x%x)", handle); 1897 ISP_UNLOCK(isp); 1898 return; 1899 } 1900 1901 XS_CMD_S_WDOG(xs); 1902 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 1903 isp_intr(isp, isr, sema, mbox); 1904 } 1905 if (XS_CMD_DONE_P(xs)) { 1906 isp_prt(isp, ISP_LOGDEBUG2, 1907 "watchdog cleanup for handle 0x%x", handle); 1908 xpt_done((union ccb *) xs); 1909 } else if (XS_CMD_GRACE_P(xs)) { 1910 /* 1911 * Make sure the command is *really* dead before we 1912 * release the handle (and DMA resources) for reuse. 1913 */ 1914 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg); 1915 1916 /* 1917 * After this point, the comamnd is really dead. 
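			 * Free any DMA mapping, release the handle, mark
			 * the command with CAM_CMD_TIMEOUT and complete it
			 * through isp_done().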
1918 */ 1919 if (XS_XFRLEN(xs)) { 1920 ISP_DMAFREE(isp, xs, handle); 1921 } 1922 isp_destroy_handle(isp, handle); 1923 xpt_print_path(xs->ccb_h.path); 1924 isp_prt(isp, ISP_LOGWARN, 1925 "watchdog timeout for handle 0x%x", handle); 1926 XS_SETERR(xs, CAM_CMD_TIMEOUT); 1927 XS_CMD_C_WDOG(xs); 1928 isp_done(xs); 1929 } else { 1930 u_int16_t nxti, optr; 1931 ispreq_t local, *mp= &local, *qe; 1932 1933 XS_CMD_C_WDOG(xs); 1934 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 1935 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) { 1936 ISP_UNLOCK(isp); 1937 return; 1938 } 1939 XS_CMD_S_GRACE(xs); 1940 MEMZERO((void *) mp, sizeof (*mp)); 1941 mp->req_header.rqs_entry_count = 1; 1942 mp->req_header.rqs_entry_type = RQSTYPE_MARKER; 1943 mp->req_modifier = SYNC_ALL; 1944 mp->req_target = XS_CHANNEL(xs) << 7; 1945 isp_put_request(isp, mp, qe); 1946 ISP_ADD_REQUEST(isp, nxti); 1947 } 1948 } else { 1949 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command"); 1950 } 1951 isp->isp_osinfo.intsok = iok; 1952 ISP_UNLOCK(isp); 1953 } 1954 1955 static void 1956 isp_kthread(void *arg) 1957 { 1958 struct ispsoftc *isp = arg; 1959 1960 #ifdef ISP_SMPLOCK 1961 mtx_lock(&isp->isp_lock); 1962 #else 1963 mtx_lock(&Giant); 1964 #endif 1965 /* 1966 * The first loop is for our usage where we have yet to have 1967 * gotten good fibre channel state. 1968 */ 1969 for (;;) { 1970 int wasfrozen; 1971 1972 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state"); 1973 while (isp_fc_runstate(isp, 2 * 1000000) != 0) { 1974 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood"); 1975 if (FCPARAM(isp)->isp_fwstate != FW_READY || 1976 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) { 1977 if (FCPARAM(isp)->loop_seen_once == 0 || 1978 isp->isp_osinfo.ktmature == 0) { 1979 break; 1980 } 1981 } 1982 #ifdef ISP_SMPLOCK 1983 msleep(isp_kthread, &isp->isp_lock, 1984 PRIBIO, "isp_fcthrd", hz); 1985 #else 1986 (void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz); 1987 #endif 1988 } 1989 1990 /* 1991 * Even if we didn't get good loop state we may be 1992 * unfreezing the SIMQ so that we can kill off 1993 * commands (if we've never seen loop before, for example). 1994 */ 1995 isp->isp_osinfo.ktmature = 1; 1996 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 1997 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 1998 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 1999 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq"); 2000 ISPLOCK_2_CAMLOCK(isp); 2001 xpt_release_simq(isp->isp_sim, 1); 2002 CAMLOCK_2_ISPLOCK(isp); 2003 } 2004 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called"); 2005 #ifdef ISP_SMPLOCK 2006 cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock); 2007 #else 2008 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0); 2009 #endif 2010 } 2011 } 2012 2013 static void 2014 isp_action(struct cam_sim *sim, union ccb *ccb) 2015 { 2016 int bus, tgt, error; 2017 struct ispsoftc *isp; 2018 struct ccb_trans_settings *cts; 2019 2020 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2021 2022 isp = (struct ispsoftc *)cam_sim_softc(sim); 2023 ccb->ccb_h.sim_priv.entries[0].field = 0; 2024 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2025 if (isp->isp_state != ISP_RUNSTATE && 2026 ccb->ccb_h.func_code == XPT_SCSI_IO) { 2027 CAMLOCK_2_ISPLOCK(isp); 2028 isp_init(isp); 2029 if (isp->isp_state != ISP_INITSTATE) { 2030 ISP_UNLOCK(isp); 2031 /* 2032 * Lie. Say it was a selection timeout. 
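			 * The chip would not come to init state, so fail
			 * the command as if the device never answered
			 * selection and freeze the device queue.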
2033 */ 2034 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 2035 xpt_freeze_devq(ccb->ccb_h.path, 1); 2036 xpt_done(ccb); 2037 return; 2038 } 2039 isp->isp_state = ISP_RUNSTATE; 2040 ISPLOCK_2_CAMLOCK(isp); 2041 } 2042 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2043 2044 2045 switch (ccb->ccb_h.func_code) { 2046 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2047 /* 2048 * Do a couple of preliminary checks... 2049 */ 2050 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2051 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2052 ccb->ccb_h.status = CAM_REQ_INVALID; 2053 xpt_done(ccb); 2054 break; 2055 } 2056 } 2057 #ifdef DIAGNOSTIC 2058 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2059 ccb->ccb_h.status = CAM_PATH_INVALID; 2060 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2061 ccb->ccb_h.status = CAM_PATH_INVALID; 2062 } 2063 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2064 isp_prt(isp, ISP_LOGERR, 2065 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO", 2066 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 2067 xpt_done(ccb); 2068 break; 2069 } 2070 #endif 2071 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 2072 CAMLOCK_2_ISPLOCK(isp); 2073 error = isp_start((XS_T *) ccb); 2074 switch (error) { 2075 case CMD_QUEUED: 2076 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2077 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2078 u_int64_t ticks = (u_int64_t) hz; 2079 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) 2080 ticks = 60 * 1000 * ticks; 2081 else 2082 ticks = ccb->ccb_h.timeout * hz; 2083 ticks = ((ticks + 999) / 1000) + hz + hz; 2084 if (ticks >= 0x80000000) { 2085 isp_prt(isp, ISP_LOGERR, 2086 "timeout overflow"); 2087 ticks = 0x7fffffff; 2088 } 2089 ccb->ccb_h.timeout_ch = timeout(isp_watchdog, 2090 (caddr_t)ccb, (int)ticks); 2091 } else { 2092 callout_handle_init(&ccb->ccb_h.timeout_ch); 2093 } 2094 ISPLOCK_2_CAMLOCK(isp); 2095 break; 2096 case CMD_RQLATER: 2097 /* 2098 * This can only happen for Fibre Channel 2099 */ 2100 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 2101 if (FCPARAM(isp)->loop_seen_once == 0 && 2102 isp->isp_osinfo.ktmature) { 2103 ISPLOCK_2_CAMLOCK(isp); 2104 XS_SETERR(ccb, CAM_SEL_TIMEOUT); 2105 xpt_done(ccb); 2106 break; 2107 } 2108 #ifdef ISP_SMPLOCK 2109 cv_signal(&isp->isp_osinfo.kthread_cv); 2110 #else 2111 wakeup(&isp->isp_osinfo.kthread_cv); 2112 #endif 2113 isp_freeze_loopdown(isp, "isp_action(RQLATER)"); 2114 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2115 ISPLOCK_2_CAMLOCK(isp); 2116 xpt_done(ccb); 2117 break; 2118 case CMD_EAGAIN: 2119 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2120 ISPLOCK_2_CAMLOCK(isp); 2121 xpt_done(ccb); 2122 break; 2123 case CMD_COMPLETE: 2124 isp_done((struct ccb_scsiio *) ccb); 2125 ISPLOCK_2_CAMLOCK(isp); 2126 break; 2127 default: 2128 isp_prt(isp, ISP_LOGERR, 2129 "What's this? 
0x%x at %d in file %s", 2130 error, __LINE__, __FILE__); 2131 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2132 xpt_done(ccb); 2133 ISPLOCK_2_CAMLOCK(isp); 2134 } 2135 break; 2136 2137 #ifdef ISP_TARGET_MODE 2138 case XPT_EN_LUN: /* Enable LUN as a target */ 2139 { 2140 int iok; 2141 CAMLOCK_2_ISPLOCK(isp); 2142 iok = isp->isp_osinfo.intsok; 2143 isp->isp_osinfo.intsok = 0; 2144 isp_en_lun(isp, ccb); 2145 isp->isp_osinfo.intsok = iok; 2146 ISPLOCK_2_CAMLOCK(isp); 2147 xpt_done(ccb); 2148 break; 2149 } 2150 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2151 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2152 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2153 { 2154 tstate_t *tptr = 2155 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2156 if (tptr == NULL) { 2157 ccb->ccb_h.status = CAM_LUN_INVALID; 2158 xpt_done(ccb); 2159 break; 2160 } 2161 ccb->ccb_h.sim_priv.entries[0].field = 0; 2162 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2163 ccb->ccb_h.flags = 0; 2164 2165 CAMLOCK_2_ISPLOCK(isp); 2166 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2167 /* 2168 * Note that the command itself may not be done- 2169 * it may not even have had the first CTIO sent. 2170 */ 2171 tptr->atio_count++; 2172 isp_prt(isp, ISP_LOGTDEBUG0, 2173 "Put FREE ATIO2, lun %d, count now %d", 2174 ccb->ccb_h.target_lun, tptr->atio_count); 2175 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2176 sim_links.sle); 2177 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2178 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2179 sim_links.sle); 2180 } else { 2181 ; 2182 } 2183 rls_lun_statep(isp, tptr); 2184 ccb->ccb_h.status = CAM_REQ_INPROG; 2185 ISPLOCK_2_CAMLOCK(isp); 2186 break; 2187 } 2188 case XPT_CONT_TARGET_IO: 2189 { 2190 CAMLOCK_2_ISPLOCK(isp); 2191 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb); 2192 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 2193 isp_prt(isp, ISP_LOGWARN, 2194 "XPT_CONT_TARGET_IO: status 0x%x", 2195 ccb->ccb_h.status); 2196 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2197 ISPLOCK_2_CAMLOCK(isp); 2198 xpt_done(ccb); 2199 } else { 2200 ISPLOCK_2_CAMLOCK(isp); 2201 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2202 } 2203 break; 2204 } 2205 #endif 2206 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2207 2208 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2209 tgt = ccb->ccb_h.target_id; 2210 tgt |= (bus << 16); 2211 2212 CAMLOCK_2_ISPLOCK(isp); 2213 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2214 ISPLOCK_2_CAMLOCK(isp); 2215 if (error) { 2216 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2217 } else { 2218 ccb->ccb_h.status = CAM_REQ_CMP; 2219 } 2220 xpt_done(ccb); 2221 break; 2222 case XPT_ABORT: /* Abort the specified CCB */ 2223 { 2224 union ccb *accb = ccb->cab.abort_ccb; 2225 CAMLOCK_2_ISPLOCK(isp); 2226 switch (accb->ccb_h.func_code) { 2227 #ifdef ISP_TARGET_MODE 2228 case XPT_ACCEPT_TARGET_IO: 2229 case XPT_IMMED_NOTIFY: 2230 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2231 break; 2232 case XPT_CONT_TARGET_IO: 2233 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2234 ccb->ccb_h.status = CAM_UA_ABORT; 2235 break; 2236 #endif 2237 case XPT_SCSI_IO: 2238 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2239 if (error) { 2240 ccb->ccb_h.status = CAM_UA_ABORT; 2241 } else { 2242 ccb->ccb_h.status = CAM_REQ_CMP; 2243 } 2244 break; 2245 default: 2246 ccb->ccb_h.status = CAM_REQ_INVALID; 2247 break; 2248 } 2249 ISPLOCK_2_CAMLOCK(isp); 2250 xpt_done(ccb); 2251 break; 2252 } 2253 #ifdef CAM_NEW_TRAN_CODE 2254 #define IS_CURRENT_SETTINGS(c) (c->type == 
CTS_TYPE_CURRENT_SETTINGS)
2255 #else
2256 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2257 #endif
2258 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2259 		cts = &ccb->cts;
2260 		if (!IS_CURRENT_SETTINGS(cts)) {
2261 			ccb->ccb_h.status = CAM_REQ_INVALID;
2262 			xpt_done(ccb);
2263 			break;
2264 		}
2265 		tgt = cts->ccb_h.target_id;
2266 		CAMLOCK_2_ISPLOCK(isp);
2267 		if (IS_SCSI(isp)) {
2268 #ifndef	CAM_NEW_TRAN_CODE
2269 			sdparam *sdp = isp->isp_param;
2270 			u_int16_t *dptr;
2271 
2272 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2273 
2274 			sdp += bus;
2275 			/*
2276 			 * We always update (internally) from goal_flags
2277 			 * so any request to change settings just gets
2278 			 * vectored to that location.
2279 			 */
2280 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2281 
2282 			/*
2283 			 * Note that these operations affect
2284 			 * the goal flags (goal_flags)- not
2285 			 * the current state flags. Then we mark
2286 			 * things so that the next operation to
2287 			 * this HBA will cause the update to occur.
2288 			 */
2289 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2290 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2291 					*dptr |= DPARM_DISC;
2292 				} else {
2293 					*dptr &= ~DPARM_DISC;
2294 				}
2295 			}
2296 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2297 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2298 					*dptr |= DPARM_TQING;
2299 				} else {
2300 					*dptr &= ~DPARM_TQING;
2301 				}
2302 			}
2303 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2304 				switch (cts->bus_width) {
2305 				case MSG_EXT_WDTR_BUS_16_BIT:
2306 					*dptr |= DPARM_WIDE;
2307 					break;
2308 				default:
2309 					*dptr &= ~DPARM_WIDE;
2310 				}
2311 			}
2312 			/*
2313 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2314 			 * of nonzero will cause us to go to the
2315 			 * selected (from NVRAM) maximum value for
2316 			 * this device. At a later point, we'll
2317 			 * allow finer control.
2318 			 */
2319 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2320 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2321 			    (cts->sync_offset > 0)) {
2322 				*dptr |= DPARM_SYNC;
2323 			} else {
2324 				*dptr &= ~DPARM_SYNC;
2325 			}
2326 			*dptr |= DPARM_SAFE_DFLT;
2327 #else
2328 			struct ccb_trans_settings_scsi *scsi =
2329 			    &cts->proto_specific.scsi;
2330 			struct ccb_trans_settings_spi *spi =
2331 			    &cts->xport_specific.spi;
2332 			sdparam *sdp = isp->isp_param;
2333 			u_int16_t *dptr;
2334 
2335 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2336 			sdp += bus;
2337 			/*
2338 			 * We always update (internally) from goal_flags
2339 			 * so any request to change settings just gets
2340 			 * vectored to that location.
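			 * The goal bits set here are pushed out to the chip
			 * on the next update cycle (dev_update and isp_update
			 * are marked further down).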
2341 */ 2342 dptr = &sdp->isp_devparam[tgt].goal_flags; 2343 2344 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2345 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2346 *dptr |= DPARM_DISC; 2347 else 2348 *dptr &= ~DPARM_DISC; 2349 } 2350 2351 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2352 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2353 *dptr |= DPARM_TQING; 2354 else 2355 *dptr &= ~DPARM_TQING; 2356 } 2357 2358 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2359 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2360 *dptr |= DPARM_WIDE; 2361 else 2362 *dptr &= ~DPARM_WIDE; 2363 } 2364 2365 /* 2366 * XXX: FIX ME 2367 */ 2368 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2369 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2370 (spi->sync_period && spi->sync_offset)) { 2371 *dptr |= DPARM_SYNC; 2372 /* 2373 * XXX: CHECK FOR LEGALITY 2374 */ 2375 sdp->isp_devparam[tgt].goal_period = 2376 spi->sync_period; 2377 sdp->isp_devparam[tgt].goal_offset = 2378 spi->sync_offset; 2379 } else { 2380 *dptr &= ~DPARM_SYNC; 2381 } 2382 #endif 2383 isp_prt(isp, ISP_LOGDEBUG0, 2384 "SET bus %d targ %d to flags %x off %x per %x", 2385 bus, tgt, sdp->isp_devparam[tgt].goal_flags, 2386 sdp->isp_devparam[tgt].goal_offset, 2387 sdp->isp_devparam[tgt].goal_period); 2388 sdp->isp_devparam[tgt].dev_update = 1; 2389 isp->isp_update |= (1 << bus); 2390 } 2391 ISPLOCK_2_CAMLOCK(isp); 2392 ccb->ccb_h.status = CAM_REQ_CMP; 2393 xpt_done(ccb); 2394 break; 2395 case XPT_GET_TRAN_SETTINGS: 2396 cts = &ccb->cts; 2397 tgt = cts->ccb_h.target_id; 2398 CAMLOCK_2_ISPLOCK(isp); 2399 if (IS_FC(isp)) { 2400 #ifndef CAM_NEW_TRAN_CODE 2401 /* 2402 * a lot of normal SCSI things don't make sense. 2403 */ 2404 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2405 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2406 /* 2407 * How do you measure the width of a high 2408 * speed serial bus? Well, in bytes. 2409 * 2410 * Offset and period make no sense, though, so we set 2411 * (above) a 'base' transfer speed to be gigabit. 
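			 * (With CAM_NEW_TRAN_CODE the code below reports the
			 * link speed explicitly via CTS_FC_VALID_SPEED
			 * instead.)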
2412 */ 2413 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2414 #else 2415 fcparam *fcp = isp->isp_param; 2416 struct ccb_trans_settings_fc *fc = 2417 &cts->xport_specific.fc; 2418 2419 cts->protocol = PROTO_SCSI; 2420 cts->protocol_version = SCSI_REV_2; 2421 cts->transport = XPORT_FC; 2422 cts->transport_version = 0; 2423 2424 fc->valid = CTS_FC_VALID_SPEED; 2425 if (fcp->isp_gbspeed == 2) 2426 fc->bitrate = 200000; 2427 else 2428 fc->bitrate = 100000; 2429 if (tgt > 0 && tgt < MAX_FC_TARG) { 2430 struct lportdb *lp = &fcp->portdb[tgt]; 2431 fc->wwnn = lp->node_wwn; 2432 fc->wwpn = lp->port_wwn; 2433 fc->port = lp->portid; 2434 fc->valid |= CTS_FC_VALID_WWNN | 2435 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2436 } 2437 #endif 2438 } else { 2439 #ifdef CAM_NEW_TRAN_CODE 2440 struct ccb_trans_settings_scsi *scsi = 2441 &cts->proto_specific.scsi; 2442 struct ccb_trans_settings_spi *spi = 2443 &cts->xport_specific.spi; 2444 #endif 2445 sdparam *sdp = isp->isp_param; 2446 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2447 u_int16_t dval, pval, oval; 2448 2449 sdp += bus; 2450 2451 if (IS_CURRENT_SETTINGS(cts)) { 2452 sdp->isp_devparam[tgt].dev_refresh = 1; 2453 isp->isp_update |= (1 << bus); 2454 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2455 NULL); 2456 dval = sdp->isp_devparam[tgt].actv_flags; 2457 oval = sdp->isp_devparam[tgt].actv_offset; 2458 pval = sdp->isp_devparam[tgt].actv_period; 2459 } else { 2460 dval = sdp->isp_devparam[tgt].nvrm_flags; 2461 oval = sdp->isp_devparam[tgt].nvrm_offset; 2462 pval = sdp->isp_devparam[tgt].nvrm_period; 2463 } 2464 2465 #ifndef CAM_NEW_TRAN_CODE 2466 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2467 2468 if (dval & DPARM_DISC) { 2469 cts->flags |= CCB_TRANS_DISC_ENB; 2470 } 2471 if (dval & DPARM_TQING) { 2472 cts->flags |= CCB_TRANS_TAG_ENB; 2473 } 2474 if (dval & DPARM_WIDE) { 2475 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2476 } else { 2477 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2478 } 2479 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2480 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2481 2482 if ((dval & DPARM_SYNC) && oval != 0) { 2483 cts->sync_period = pval; 2484 cts->sync_offset = oval; 2485 cts->valid |= 2486 CCB_TRANS_SYNC_RATE_VALID | 2487 CCB_TRANS_SYNC_OFFSET_VALID; 2488 } 2489 #else 2490 cts->protocol = PROTO_SCSI; 2491 cts->protocol_version = SCSI_REV_2; 2492 cts->transport = XPORT_SPI; 2493 cts->transport_version = 2; 2494 2495 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 2496 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 2497 if (dval & DPARM_DISC) { 2498 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2499 } 2500 if (dval & DPARM_TQING) { 2501 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2502 } 2503 if ((dval & DPARM_SYNC) && oval && pval) { 2504 spi->sync_offset = oval; 2505 spi->sync_period = pval; 2506 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2507 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2508 } 2509 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2510 if (dval & DPARM_WIDE) { 2511 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2512 } else { 2513 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2514 } 2515 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2516 scsi->valid = CTS_SCSI_VALID_TQ; 2517 spi->valid |= CTS_SPI_VALID_DISC; 2518 } else { 2519 scsi->valid = 0; 2520 } 2521 #endif 2522 isp_prt(isp, ISP_LOGDEBUG0, 2523 "GET %s bus %d targ %d to flags %x off %x per %x", 2524 IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM", 2525 bus, tgt, dval, oval, pval); 2526 } 2527 ISPLOCK_2_CAMLOCK(isp); 2528 ccb->ccb_h.status = CAM_REQ_CMP; 2529 xpt_done(ccb); 2530 break; 2531 2532 case XPT_CALC_GEOMETRY: 2533 { 2534 struct ccb_calc_geometry *ccg; 2535 u_int32_t secs_per_cylinder; 2536 u_int32_t size_mb; 2537 2538 ccg = &ccb->ccg; 2539 if (ccg->block_size == 0) { 2540 isp_prt(isp, ISP_LOGERR, 2541 "%d.%d XPT_CALC_GEOMETRY block size 0?", 2542 ccg->ccb_h.target_id, ccg->ccb_h.target_lun); 2543 ccb->ccb_h.status = CAM_REQ_INVALID; 2544 xpt_done(ccb); 2545 break; 2546 } 2547 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2548 if (size_mb > 1024) { 2549 ccg->heads = 255; 2550 ccg->secs_per_track = 63; 2551 } else { 2552 ccg->heads = 64; 2553 ccg->secs_per_track = 32; 2554 } 2555 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2556 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2557 ccb->ccb_h.status = CAM_REQ_CMP; 2558 xpt_done(ccb); 2559 break; 2560 } 2561 case XPT_RESET_BUS: /* Reset the specified bus */ 2562 bus = cam_sim_bus(sim); 2563 CAMLOCK_2_ISPLOCK(isp); 2564 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2565 ISPLOCK_2_CAMLOCK(isp); 2566 if (error) 2567 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2568 else { 2569 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2570 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2571 else if (isp->isp_path != NULL) 2572 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2573 ccb->ccb_h.status = CAM_REQ_CMP; 2574 } 2575 xpt_done(ccb); 2576 break; 2577 2578 case XPT_TERM_IO: /* Terminate the I/O process */ 2579 ccb->ccb_h.status = CAM_REQ_INVALID; 2580 xpt_done(ccb); 2581 break; 2582 2583 case XPT_PATH_INQ: /* Path routing inquiry */ 2584 { 2585 struct ccb_pathinq *cpi = &ccb->cpi; 2586 2587 cpi->version_num = 1; 2588 #ifdef ISP_TARGET_MODE 2589 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2590 #else 2591 cpi->target_sprt = 0; 2592 #endif 2593 cpi->hba_eng_cnt = 0; 2594 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2595 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2596 cpi->bus_id = cam_sim_bus(sim); 2597 if (IS_FC(isp)) { 2598 cpi->hba_misc = PIM_NOBUSRESET; 2599 /* 2600 * Because our loop ID can shift from time to time, 2601 * make our initiator ID out of range of our bus. 2602 */ 2603 cpi->initiator_id = cpi->max_target + 1; 2604 2605 /* 2606 * Set base transfer capabilities for Fibre Channel. 2607 * Technically not correct because we don't know 2608 * what media we're running on top of- but we'll 2609 * look good if we always say 100MB/s. 2610 */ 2611 if (FCPARAM(isp)->isp_gbspeed == 2) 2612 cpi->base_transfer_speed = 200000; 2613 else 2614 cpi->base_transfer_speed = 100000; 2615 cpi->hba_inquiry = PI_TAG_ABLE; 2616 #ifdef CAM_NEW_TRAN_CODE 2617 cpi->transport = XPORT_FC; 2618 cpi->transport_version = 0; /* WHAT'S THIS FOR? */ 2619 #endif 2620 } else { 2621 sdparam *sdp = isp->isp_param; 2622 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2623 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2624 cpi->hba_misc = 0; 2625 cpi->initiator_id = sdp->isp_initiator_id; 2626 cpi->base_transfer_speed = 3300; 2627 #ifdef CAM_NEW_TRAN_CODE 2628 cpi->transport = XPORT_SPI; 2629 cpi->transport_version = 2; /* WHAT'S THIS FOR? 
*/ 2630 #endif 2631 } 2632 #ifdef CAM_NEW_TRAN_CODE 2633 cpi->protocol = PROTO_SCSI; 2634 cpi->protocol_version = SCSI_REV_2; 2635 #endif 2636 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2637 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2638 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2639 cpi->unit_number = cam_sim_unit(sim); 2640 cpi->ccb_h.status = CAM_REQ_CMP; 2641 xpt_done(ccb); 2642 break; 2643 } 2644 default: 2645 ccb->ccb_h.status = CAM_REQ_INVALID; 2646 xpt_done(ccb); 2647 break; 2648 } 2649 } 2650 2651 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 2652 void 2653 isp_done(struct ccb_scsiio *sccb) 2654 { 2655 struct ispsoftc *isp = XS_ISP(sccb); 2656 2657 if (XS_NOERR(sccb)) 2658 XS_SETERR(sccb, CAM_REQ_CMP); 2659 2660 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2661 (sccb->scsi_status != SCSI_STATUS_OK)) { 2662 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2663 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 2664 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2665 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2666 } else { 2667 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2668 } 2669 } 2670 2671 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2672 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2673 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2674 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2675 xpt_freeze_devq(sccb->ccb_h.path, 1); 2676 isp_prt(isp, ISP_LOGDEBUG0, 2677 "freeze devq %d.%d cam sts %x scsi sts %x", 2678 sccb->ccb_h.target_id, sccb->ccb_h.target_lun, 2679 sccb->ccb_h.status, sccb->scsi_status); 2680 } 2681 } 2682 2683 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 2684 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2685 xpt_print_path(sccb->ccb_h.path); 2686 isp_prt(isp, ISP_LOGINFO, 2687 "cam completion status 0x%x", sccb->ccb_h.status); 2688 } 2689 2690 XS_CMD_S_DONE(sccb); 2691 if (XS_CMD_WDOG_P(sccb) == 0) { 2692 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 2693 if (XS_CMD_GRACE_P(sccb)) { 2694 isp_prt(isp, ISP_LOGDEBUG2, 2695 "finished command on borrowed time"); 2696 } 2697 XS_CMD_S_CLEAR(sccb); 2698 ISPLOCK_2_CAMLOCK(isp); 2699 xpt_done((union ccb *) sccb); 2700 CAMLOCK_2_ISPLOCK(isp); 2701 } 2702 } 2703 2704 int 2705 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg) 2706 { 2707 int bus, rv = 0; 2708 switch (cmd) { 2709 case ISPASYNC_NEW_TGT_PARAMS: 2710 { 2711 #ifdef CAM_NEW_TRAN_CODE 2712 struct ccb_trans_settings_scsi *scsi; 2713 struct ccb_trans_settings_spi *spi; 2714 #endif 2715 int flags, tgt; 2716 sdparam *sdp = isp->isp_param; 2717 struct ccb_trans_settings cts; 2718 struct cam_path *tmppath; 2719 2720 bzero(&cts, sizeof (struct ccb_trans_settings)); 2721 2722 tgt = *((int *)arg); 2723 bus = (tgt >> 16) & 0xffff; 2724 tgt &= 0xffff; 2725 sdp += bus; 2726 ISPLOCK_2_CAMLOCK(isp); 2727 if (xpt_create_path(&tmppath, NULL, 2728 cam_sim_path(bus? 
isp->isp_sim2 : isp->isp_sim),
2729 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2730 			CAMLOCK_2_ISPLOCK(isp);
2731 			isp_prt(isp, ISP_LOGWARN,
2732 			    "isp_async cannot make temp path for %d.%d",
2733 			    tgt, bus);
2734 			rv = -1;
2735 			break;
2736 		}
2737 		CAMLOCK_2_ISPLOCK(isp);
2738 		flags = sdp->isp_devparam[tgt].actv_flags;
2739 #ifdef	CAM_NEW_TRAN_CODE
2740 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2741 		cts.protocol = PROTO_SCSI;
2742 		cts.transport = XPORT_SPI;
2743 
2744 		scsi = &cts.proto_specific.scsi;
2745 		spi = &cts.xport_specific.spi;
2746 
2747 		if (flags & DPARM_TQING) {
2748 			scsi->valid |= CTS_SCSI_VALID_TQ;
2749 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2750 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2751 		}
2752 
2753 		if (flags & DPARM_DISC) {
2754 			spi->valid |= CTS_SPI_VALID_DISC;
2755 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2756 		}
2757 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2758 		if (flags & DPARM_WIDE) {
2759 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2760 		} else {
2761 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2762 		}
2763 		if (flags & DPARM_SYNC) {
2764 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2765 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2766 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2767 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2768 		}
2769 #else
2770 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2771 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2772 		if (flags & DPARM_DISC) {
2773 			cts.flags |= CCB_TRANS_DISC_ENB;
2774 		}
2775 		if (flags & DPARM_TQING) {
2776 			cts.flags |= CCB_TRANS_TAG_ENB;
2777 		}
2778 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2779 		cts.bus_width = (flags & DPARM_WIDE)?
2780 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2781 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2782 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2783 		if (flags & DPARM_SYNC) {
2784 			cts.valid |=
2785 			    CCB_TRANS_SYNC_RATE_VALID |
2786 			    CCB_TRANS_SYNC_OFFSET_VALID;
2787 		}
2788 #endif
2789 		isp_prt(isp, ISP_LOGDEBUG2,
2790 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2791 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2792 		    sdp->isp_devparam[tgt].actv_offset, flags);
2793 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2794 		ISPLOCK_2_CAMLOCK(isp);
2795 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2796 		xpt_free_path(tmppath);
2797 		CAMLOCK_2_ISPLOCK(isp);
2798 		break;
2799 	}
2800 	case ISPASYNC_BUS_RESET:
2801 		bus = *((int *)arg);
2802 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2803 		    bus);
2804 		if (bus > 0 && isp->isp_path2) {
2805 			ISPLOCK_2_CAMLOCK(isp);
2806 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2807 			CAMLOCK_2_ISPLOCK(isp);
2808 		} else if (isp->isp_path) {
2809 			ISPLOCK_2_CAMLOCK(isp);
2810 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2811 			CAMLOCK_2_ISPLOCK(isp);
2812 		}
2813 		break;
2814 	case ISPASYNC_LIP:
2815 		if (isp->isp_path) {
2816 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2817 		}
2818 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2819 		break;
2820 	case ISPASYNC_LOOP_RESET:
2821 		if (isp->isp_path) {
2822 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2823 		}
2824 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2825 		break;
2826 	case ISPASYNC_LOOP_DOWN:
2827 		if (isp->isp_path) {
2828 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2829 		}
2830 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2831 		break;
2832 	case ISPASYNC_LOOP_UP:
2833 		/*
2834 		 * Now we just note that Loop has come up. We don't
2835 		 * actually do anything because we're waiting for a
2836 		 * Change Notify before activating the FC cleanup
2837 		 * thread to look at the state of the loop again.
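		 * (That thread is woken from the ISPASYNC_CHANGE_NOTIFY
		 * case below via the kthread_cv condition variable.)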
2838 */ 2839 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 2840 break; 2841 case ISPASYNC_PROMENADE: 2842 { 2843 struct cam_path *tmppath; 2844 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x " 2845 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x"; 2846 static const char *roles[4] = { 2847 "(none)", "Target", "Initiator", "Target/Initiator" 2848 }; 2849 fcparam *fcp = isp->isp_param; 2850 int tgt = *((int *) arg); 2851 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT); 2852 struct lportdb *lp = &fcp->portdb[tgt]; 2853 2854 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid, 2855 roles[lp->roles & 0x3], 2856 (lp->valid)? "Arrived" : "Departed", 2857 (u_int32_t) (lp->port_wwn >> 32), 2858 (u_int32_t) (lp->port_wwn & 0xffffffffLL), 2859 (u_int32_t) (lp->node_wwn >> 32), 2860 (u_int32_t) (lp->node_wwn & 0xffffffffLL)); 2861 2862 ISPLOCK_2_CAMLOCK(isp); 2863 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim), 2864 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2865 CAMLOCK_2_ISPLOCK(isp); 2866 break; 2867 } 2868 /* 2869 * Policy: only announce targets. 2870 */ 2871 if (lp->roles & is_tgt_mask) { 2872 if (lp->valid) { 2873 xpt_async(AC_FOUND_DEVICE, tmppath, NULL); 2874 } else { 2875 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2876 } 2877 } 2878 xpt_free_path(tmppath); 2879 CAMLOCK_2_ISPLOCK(isp); 2880 break; 2881 } 2882 case ISPASYNC_CHANGE_NOTIFY: 2883 if (arg == ISPASYNC_CHANGE_PDB) { 2884 isp_prt(isp, ISP_LOGINFO, 2885 "Port Database Changed"); 2886 } else if (arg == ISPASYNC_CHANGE_SNS) { 2887 isp_prt(isp, ISP_LOGINFO, 2888 "Name Server Database Changed"); 2889 } 2890 #ifdef ISP_SMPLOCK 2891 cv_signal(&isp->isp_osinfo.kthread_cv); 2892 #else 2893 wakeup(&isp->isp_osinfo.kthread_cv); 2894 #endif 2895 break; 2896 case ISPASYNC_FABRIC_DEV: 2897 { 2898 int target, base, lim; 2899 fcparam *fcp = isp->isp_param; 2900 struct lportdb *lp = NULL; 2901 struct lportdb *clp = (struct lportdb *) arg; 2902 char *pt; 2903 2904 switch (clp->port_type) { 2905 case 1: 2906 pt = " N_Port"; 2907 break; 2908 case 2: 2909 pt = " NL_Port"; 2910 break; 2911 case 3: 2912 pt = "F/NL_Port"; 2913 break; 2914 case 0x7f: 2915 pt = " Nx_Port"; 2916 break; 2917 case 0x81: 2918 pt = " F_port"; 2919 break; 2920 case 0x82: 2921 pt = " FL_Port"; 2922 break; 2923 case 0x84: 2924 pt = " E_port"; 2925 break; 2926 default: 2927 pt = " "; 2928 break; 2929 } 2930 2931 isp_prt(isp, ISP_LOGINFO, 2932 "%s Fabric Device @ PortID 0x%x", pt, clp->portid); 2933 2934 /* 2935 * If we don't have an initiator role we bail. 2936 * 2937 * We just use ISPASYNC_FABRIC_DEV for announcement purposes. 2938 */ 2939 2940 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) { 2941 break; 2942 } 2943 2944 /* 2945 * Is this entry for us? If so, we bail. 2946 */ 2947 2948 if (fcp->isp_portid == clp->portid) { 2949 break; 2950 } 2951 2952 /* 2953 * Else, the default policy is to find room for it in 2954 * our local port database. Later, when we execute 2955 * the call to isp_pdb_sync either this newly arrived 2956 * or already logged in device will be (re)announced. 2957 */ 2958 2959 if (fcp->isp_topo == TOPO_FL_PORT) 2960 base = FC_SNS_ID+1; 2961 else 2962 base = 0; 2963 2964 if (fcp->isp_topo == TOPO_N_PORT) 2965 lim = 1; 2966 else 2967 lim = MAX_FC_TARG; 2968 2969 /* 2970 * Is it already in our list? 
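		 * If so, just mark the existing entry as a fabric device
		 * and stop; otherwise fall through and claim a free slot.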
2971 */ 2972 for (target = base; target < lim; target++) { 2973 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2974 continue; 2975 } 2976 lp = &fcp->portdb[target]; 2977 if (lp->port_wwn == clp->port_wwn && 2978 lp->node_wwn == clp->node_wwn) { 2979 lp->fabric_dev = 1; 2980 break; 2981 } 2982 } 2983 if (target < lim) { 2984 break; 2985 } 2986 for (target = base; target < lim; target++) { 2987 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2988 continue; 2989 } 2990 lp = &fcp->portdb[target]; 2991 if (lp->port_wwn == 0) { 2992 break; 2993 } 2994 } 2995 if (target == lim) { 2996 isp_prt(isp, ISP_LOGWARN, 2997 "out of space for fabric devices"); 2998 break; 2999 } 3000 lp->port_type = clp->port_type; 3001 lp->fc4_type = clp->fc4_type; 3002 lp->node_wwn = clp->node_wwn; 3003 lp->port_wwn = clp->port_wwn; 3004 lp->portid = clp->portid; 3005 lp->fabric_dev = 1; 3006 break; 3007 } 3008 #ifdef ISP_TARGET_MODE 3009 case ISPASYNC_TARGET_MESSAGE: 3010 { 3011 tmd_msg_t *mp = arg; 3012 isp_prt(isp, ISP_LOGALL, 3013 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x", 3014 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt, 3015 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval, 3016 mp->nt_msg[0]); 3017 break; 3018 } 3019 case ISPASYNC_TARGET_EVENT: 3020 { 3021 tmd_event_t *ep = arg; 3022 isp_prt(isp, ISP_LOGALL, 3023 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event); 3024 break; 3025 } 3026 case ISPASYNC_TARGET_ACTION: 3027 switch (((isphdr_t *)arg)->rqs_entry_type) { 3028 default: 3029 isp_prt(isp, ISP_LOGWARN, 3030 "event 0x%x for unhandled target action", 3031 ((isphdr_t *)arg)->rqs_entry_type); 3032 break; 3033 case RQSTYPE_NOTIFY: 3034 if (IS_SCSI(isp)) { 3035 rv = isp_handle_platform_notify_scsi(isp, 3036 (in_entry_t *) arg); 3037 } else { 3038 rv = isp_handle_platform_notify_fc(isp, 3039 (in_fcentry_t *) arg); 3040 } 3041 break; 3042 case RQSTYPE_ATIO: 3043 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 3044 break; 3045 case RQSTYPE_ATIO2: 3046 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3047 break; 3048 case RQSTYPE_CTIO2: 3049 case RQSTYPE_CTIO: 3050 rv = isp_handle_platform_ctio(isp, arg); 3051 break; 3052 case RQSTYPE_ENABLE_LUN: 3053 case RQSTYPE_MODIFY_LUN: 3054 if (IS_DUALBUS(isp)) { 3055 bus = 3056 GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd); 3057 } else { 3058 bus = 0; 3059 } 3060 isp_cv_signal_rqe(isp, bus, 3061 ((lun_entry_t *)arg)->le_status); 3062 break; 3063 } 3064 break; 3065 #endif 3066 case ISPASYNC_FW_CRASH: 3067 { 3068 u_int16_t mbox1, mbox6; 3069 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3070 if (IS_DUALBUS(isp)) { 3071 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3072 } else { 3073 mbox6 = 0; 3074 } 3075 isp_prt(isp, ISP_LOGERR, 3076 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3077 mbox6, mbox1); 3078 #ifdef ISP_FW_CRASH_DUMP 3079 /* 3080 * XXX: really need a thread to do this right. 3081 */ 3082 if (IS_FC(isp)) { 3083 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3084 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3085 isp_freeze_loopdown(isp, "f/w crash"); 3086 isp_fw_dump(isp); 3087 } 3088 isp_reinit(isp); 3089 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3090 #endif 3091 break; 3092 } 3093 case ISPASYNC_UNHANDLED_RESPONSE: 3094 break; 3095 default: 3096 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3097 break; 3098 } 3099 return (rv); 3100 } 3101 3102 3103 /* 3104 * Locks are held before coming here. 
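 * isp_uninit simply resets the RISC processor and disables interrupts;
 * the caller handles the rest of the teardown.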
3105 */ 3106 void 3107 isp_uninit(struct ispsoftc *isp) 3108 { 3109 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 3110 DISABLE_INTS(isp); 3111 } 3112 3113 void 3114 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...) 3115 { 3116 va_list ap; 3117 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3118 return; 3119 } 3120 printf("%s: ", device_get_nameunit(isp->isp_dev)); 3121 va_start(ap, fmt); 3122 vprintf(fmt, ap); 3123 va_end(ap); 3124 printf("\n"); 3125 } 3126