/*
 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/isp/isp_freebsd.h>
#include <sys/unistd.h>
#include <sys/kthread.h>
#include <machine/stdarg.h>	/* for use by isp_prt below */
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/ioccom.h>
#include <dev/isp/isp_ioctl.h>


MODULE_VERSION(isp, 1);
MODULE_DEPEND(isp, cam, 1, 1, 1);
int isp_announced = 0;
ispfwfunc *isp_get_firmware_p = NULL;

static d_ioctl_t ispioctl;
static void isp_intr_enable(void *);
static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
static void isp_poll(struct cam_sim *);
static timeout_t isp_watchdog;
static void isp_kthread(void *);
static void isp_action(struct cam_sim *, union ccb *);


static struct cdevsw isp_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_ioctl =	ispioctl,
	.d_name =	"isp",
};

static struct ispsoftc *isplist = NULL;

void
isp_attach(struct ispsoftc *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */

	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	ISPLOCK_2_CAMLOCK(isp);
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}
	CAMLOCK_2_ISPLOCK(isp);

	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
	isp->isp_osinfo.ehook.ich_arg = isp;
	ISPLOCK_2_CAMLOCK(isp);
	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
		cam_sim_free(sim, TRUE);
		CAMLOCK_2_ISPLOCK(isp);
		isp_prt(isp, ISP_LOGERR,
		    "could not establish interrupt enable hook");
		return;
	}

	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	CAMLOCK_2_ISPLOCK(isp);
	isp->isp_sim = sim;
	isp->isp_path = path;
	/*
	 * Create a kernel thread for fibre channel instances. We
	 * don't have dual channel FC cards.
	 */
	if (IS_FC(isp)) {
		ISPLOCK_2_CAMLOCK(isp);
		/* XXX: LOCK VIOLATION */
		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
		    RFHIGHPID, 0, "%s: fc_thrd",
		    device_get_nameunit(isp->isp_dev))) {
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			isp_prt(isp, ISP_LOGERR, "could not create kthread");
			return;
		}
		CAMLOCK_2_ISPLOCK(isp);
	}


	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_DUALBUS(isp)) {
		ISPLOCK_2_CAMLOCK(isp);
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			return;
		}

		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		CAMLOCK_2_ISPLOCK(isp);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}

	/*
	 * Create device nodes
	 */
	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));

	if (isp->isp_role != ISP_ROLE_NONE) {
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	if (isplist == NULL) {
		isplist = isp;
	} else {
		struct ispsoftc *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}

}

static INLINE void
isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
{
	if (isp->isp_osinfo.simqfrozen == 0) {
		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
		ISPLOCK_2_CAMLOCK(isp);
		xpt_freeze_simq(isp->isp_sim, 1);
		CAMLOCK_2_ISPLOCK(isp);
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
	}
}

static int
ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct ispsoftc *isp;
	int nr, retval = ENOTTY;

	isp = isplist;
	while (isp) {
		if (minor(dev) == device_get_unit(isp->isp_dev)) {
			break;
		}
		isp = isp->isp_osinfo.next;
	}
	if (isp == NULL)
		return (ENXIO);

	switch (cmd) {
#ifdef ISP_FW_CRASH_DUMP
	case ISP_GET_FW_CRASH_DUMP:
	{
		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
		size_t sz;

		retval = 0;
		if (IS_2200(isp))
			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
		else
			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
		ISP_LOCK(isp);
		if (ptr && *ptr) {
			void *uaddr = *((void **) addr);
			if (copyout(ptr, uaddr, sz)) {
				retval = EFAULT;
			} else {
				*ptr = 0;
			}
		} else {
			retval = ENXIO;
		}
		ISP_UNLOCK(isp);
		break;
	}

	case ISP_FORCE_CRASH_DUMP:
		ISP_LOCK(isp);
		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
		isp_fw_dump(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
#endif
	case ISP_SDBLEV:
	{
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_GETROLE:
		*(int *)addr = isp->isp_role;
		retval = 0;
		break;
	case ISP_SETROLE:
		nr = *(int *)addr;
		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
			retval = EINVAL;
			break;
		}
		*(int *)addr = isp->isp_role;
		isp->isp_role = nr;
		/* FALLTHROUGH */
	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_RESCAN:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_LIP:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		struct lportdb *lp;

		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		ISP_LOCK(isp);
		lp = &FCPARAM(isp)->portdb[ifc->loopid];
		if (lp->valid) {
			ifc->loopid = lp->loopid;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		ISP_UNLOCK(isp);
		break;
	}
	case ISP_GET_STATS:
	{
		isp_stats_t *sp = (isp_stats_t *) addr;

		MEMZERO(sp, sizeof (*sp));
		sp->isp_stat_version = ISP_STATS_VERSION;
		sp->isp_type = isp->isp_type;
		sp->isp_revision = isp->isp_revision;
		ISP_LOCK(isp);
		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_CLR_STATS:
		ISP_LOCK(isp);
		isp->isp_intcnt = 0;
		isp->isp_intbogus = 0;
		isp->isp_intmboxc = 0;
		isp->isp_intoasync = 0;
		isp->isp_rsltccmplt = 0;
		isp->isp_fphccmplt = 0;
		isp->isp_rscchiwater = 0;
		isp->isp_fpcchiwater = 0;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_FC_GETHINFO:
	{
		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
		MEMZERO(hba, sizeof (*hba));
		ISP_LOCK(isp);
		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
		hba->fc_scsi_supported = 1;
		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
		hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
		hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
		hba->active_node_wwn = ISP_NODEWWN(isp);
		hba->active_port_wwn = ISP_PORTWWN(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_GET_FC_PARAM:
	{
		struct isp_fc_param *f = (struct isp_fc_param *) addr;

		if (!IS_FC(isp)) {
			retval = EINVAL;
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			f->parameter = FCPARAM(isp)->isp_execthrottle;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
				f->parameter = 1;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			f->parameter = FCPARAM(isp)->isp_loopid;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	case ISP_SET_FC_PARAM:
	{
		struct isp_fc_param *f = (struct isp_fc_param *) addr;
		u_int32_t param = f->parameter;

		if (!IS_FC(isp)) {
			retval = EINVAL;
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			if (param != 512 && param != 1024 && param != 2048) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_maxfrmlen = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			if (param < 16 || param > 255) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_execthrottle = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (param != 0 && param != 1) {
				retval = EINVAL;
				break;
			}
			if (param) {
				FCPARAM(isp)->isp_fwoptions |=
				    ICBOPT_FULL_DUPLEX;
			} else {
				FCPARAM(isp)->isp_fwoptions &=
				    ~ICBOPT_FULL_DUPLEX;
			}
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			if (param < 0 || param > 125) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_loopid = param;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	default:
		break;
	}
	return (retval);
}

static void
isp_intr_enable(void *arg)
{
	struct ispsoftc *isp = arg;
	if (isp->isp_role != ISP_ROLE_NONE) {
		ENABLE_INTS(isp);
		isp->isp_osinfo.intsok = 1;
	}
	/*
	 * Release our hook so that the boot can continue.
	 */
	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
}

/*
 * Put the target mode functions here, because some are inlines
 */

#ifdef ISP_TARGET_MODE

static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
static cam_status
create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
static void destroy_lun_state(struct ispsoftc *, tstate_t *);
static int isp_en_lun(struct ispsoftc *, union ccb *);
static void isp_ledone(struct ispsoftc *, lun_entry_t *);
static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
static timeout_t isp_refire_putback_atio;
static void isp_complete_ctio(union ccb *);
static void isp_target_putback_atio(union ccb *);
static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
static int isp_handle_platform_ctio(struct ispsoftc *, void *);
static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);

static INLINE int
is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr;
	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
	if (tptr == NULL) {
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	return (0);
}

static INLINE int
are_any_luns_enabled(struct ispsoftc *isp, int port)
{
	int lo, hi;
	if (IS_DUALBUS(isp)) {
		lo = (port * (LUN_HASH_SIZE >> 1));
		hi = lo + (LUN_HASH_SIZE >> 1);
	} else {
		lo = 0;
		hi = LUN_HASH_SIZE;
	}
	for (; lo < hi; lo++) {
		if (isp->isp_osinfo.lun_hash[lo]) {
			return (1);
		}
	}
	return (0);
}

static INLINE tstate_t *
get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr = NULL;

	if (lun == CAM_LUN_WILDCARD) {
		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
			tptr = &isp->isp_osinfo.tsdflt[bus];
			tptr->hold++;
			return (tptr);
		}
		return (NULL);
	} else {
		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
		if (tptr == NULL) {
			return (NULL);
		}
	}

	do {
		if (tptr->lun == lun && tptr->bus == bus) {
			tptr->hold++;
			return (tptr);
		}
	} while ((tptr = tptr->next) != NULL);
	return (tptr);
}

static INLINE void
rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
{
	if (tptr->hold)
		tptr->hold--;
}

static INLINE atio_private_data_t *
isp_get_atpd(struct ispsoftc *isp, int tag)
{
	atio_private_data_t *atp;
	for (atp = isp->isp_osinfo.atpdp;
	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
		if (atp->tag == tag)
			return (atp);
	}
	return (NULL);
}
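
/*
 * create_lun_state() allocates a tstate_t for a (bus, lun) pair, takes
 * an initial hold reference, and links it onto the singly linked chain
 * rooted at isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
 * destroy_lun_state() unlinks and frees it once the hold count has
 * dropped to zero.
 */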

static cam_status
create_lun_state(struct ispsoftc *isp, int bus,
    struct cam_path *path, tstate_t **rslt)
{
	cam_status status;
	lun_id_t lun;
	int hfx;
	tstate_t *tptr, *new;

	lun = xpt_path_lun_id(path);
	if (lun < 0) {
		return (CAM_LUN_INVALID);
	}
	if (is_lun_enabled(isp, bus, lun)) {
		return (CAM_LUN_ALRDY_ENA);
	}
	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (new == NULL) {
		return (CAM_RESRC_UNAVAIL);
	}

	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
	    xpt_path_target_id(path), xpt_path_lun_id(path));
	if (status != CAM_REQ_CMP) {
		free(new, M_DEVBUF);
		return (status);
	}
	new->bus = bus;
	new->lun = lun;
	SLIST_INIT(&new->atios);
	SLIST_INIT(&new->inots);
	new->hold = 1;

	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
	tptr = isp->isp_osinfo.lun_hash[hfx];
	if (tptr == NULL) {
		isp->isp_osinfo.lun_hash[hfx] = new;
	} else {
		while (tptr->next)
			tptr = tptr->next;
		tptr->next = new;
	}
	*rslt = new;
	return (CAM_REQ_CMP);
}

static INLINE void
destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
{
	int hfx;
	tstate_t *lw, *pw;

	if (tptr->hold) {
		return;
	}
	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
	pw = isp->isp_osinfo.lun_hash[hfx];
	if (pw == NULL) {
		return;
	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
		isp->isp_osinfo.lun_hash[hfx] = pw->next;
	} else {
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			return;
		}
	}
	free(tptr, M_DEVBUF);
}

/*
 * Enable luns.
 */
static int
isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
{
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	u_int32_t seq;
	int bus, cmd, av, wildcard, tm_on;
	lun_id_t lun;
	target_id_t tgt;

	bus = XS_CHANNEL(ccb);
	if (bus > 1) {
		xpt_print_path(ccb->ccb_h.path);
		printf("illegal bus %d\n", bus);
		ccb->ccb_h.status = CAM_PATH_INVALID;
		return (-1);
	}
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	isp_prt(isp, ISP_LOGTDEBUG0,
	    "isp_en_lun: %sabling lun 0x%x on channel %d",
	    cel->enable? "en" : "dis", lun, bus);


	if ((lun != CAM_LUN_WILDCARD) &&
	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return (-1);
	}

	if (IS_SCSI(isp)) {
		sdparam *sdp = isp->isp_param;
		sdp += bus;
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != sdp->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return (-1);
		}
	} else {
		/*
		 * There's really no point in doing this yet w/o multi-tid
		 * capability. Even then, it's problematic.
		 */
#if 0
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != FCPARAM(isp)->isp_iid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return (-1);
		}
#endif
		/*
		 * This is as good a place as any to check f/w capabilities.
		 */
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "firmware does not support target mode");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return (-1);
		}
		/*
		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
		 * XXX: dork with our already fragile enable/disable code.
		 */
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "firmware not SCCLUN capable");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return (-1);
		}
	}

	if (tgt == CAM_TARGET_WILDCARD) {
		if (lun == CAM_LUN_WILDCARD) {
			wildcard = 1;
		} else {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return (-1);
		}
	} else {
		wildcard = 0;
	}

	tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0;

	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we know that we can accept commands for luns that haven't
	 * been enabled yet and send them upstream. Otherwise, we have to
	 * handle them locally (if we see them at all).
	 */

	if (wildcard) {
		tptr = &isp->isp_osinfo.tsdflt[bus];
		if (cel->enable) {
			if (tm_on) {
				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
				return (-1);
			}
			ccb->ccb_h.status =
			    xpt_create_path(&tptr->owner, NULL,
			    xpt_path_path_id(ccb->ccb_h.path),
			    xpt_path_target_id(ccb->ccb_h.path),
			    xpt_path_lun_id(ccb->ccb_h.path));
			if (ccb->ccb_h.status != CAM_REQ_CMP) {
				return (-1);
			}
			SLIST_INIT(&tptr->atios);
			SLIST_INIT(&tptr->inots);
			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
		} else {
			if (tm_on == 0) {
				ccb->ccb_h.status = CAM_REQ_CMP;
				return (-1);
			}
			if (tptr->hold) {
				ccb->ccb_h.status = CAM_SCSI_BUSY;
				return (-1);
			}
			xpt_free_path(tptr->owner);
			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
		}
	}

	/*
	 * Now check to see whether this bus needs to be
	 * enabled/disabled with respect to target mode.
	 */
	av = bus << 31;
	if (cel->enable && tm_on == 0) {
		av |= ENABLE_TARGET_FLAG;
		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
		if (av) {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			if (wildcard) {
				isp->isp_osinfo.tmflags[bus] &=
				    ~TM_WILDCARD_ENABLED;
				xpt_free_path(tptr->owner);
			}
			return (-1);
		}
		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
		isp_prt(isp, ISP_LOGINFO,
		    "Target Mode enabled on channel %d", bus);
	} else if (cel->enable == 0 && tm_on && wildcard) {
		if (are_any_luns_enabled(isp, bus)) {
			ccb->ccb_h.status = CAM_SCSI_BUSY;
			return (-1);
		}
		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
		if (av) {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return (-1);
		}
		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
		isp_prt(isp, ISP_LOGINFO,
		    "Target Mode disabled on channel %d", bus);
	}

	if (wildcard) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		return (-1);
	}

	/*
	 * Find an empty slot
	 */
	for (seq = 0; seq < NLEACT; seq++) {
		if (isp->isp_osinfo.leact[seq] == 0) {
			break;
		}
	}
	if (seq >= NLEACT) {
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		return (-1);

	}
	isp->isp_osinfo.leact[seq] = ccb;

	if (cel->enable) {
		ccb->ccb_h.status =
		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			isp->isp_osinfo.leact[seq] = 0;
			return (-1);
		}
	} else {
		tptr = get_lun_statep(isp, bus, lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return (-1);
		}
	}

	if (cel->enable) {
		int c, n, ulun = lun;

		cmd = RQSTYPE_ENABLE_LUN;
		c = DFLT_CMND_CNT;
		n = DFLT_INOT_CNT;
		if (IS_FC(isp) && lun != 0) {
			cmd = RQSTYPE_MODIFY_LUN;
			n = 0;
			/*
			 * For SCC firmware, we only deal with setting
			 * (enabling or modifying) lun 0.
			 */
			ulun = 0;
		}
		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
			rls_lun_statep(isp, tptr);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			return (seq);
		}
	} else {
		int c, n, ulun = lun;

		cmd = -RQSTYPE_MODIFY_LUN;
		c = DFLT_CMND_CNT;
		n = DFLT_INOT_CNT;
		if (IS_FC(isp) && lun != 0) {
			n = 0;
			/*
			 * For SCC firmware, we only deal with setting
			 * (enabling or modifying) lun 0.
			 */
			ulun = 0;
		}
		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
			rls_lun_statep(isp, tptr);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			return (seq);
		}
	}
	rls_lun_statep(isp, tptr);
	xpt_print_path(ccb->ccb_h.path);
	printf("isp_lun_cmd failed\n");
	isp->isp_osinfo.leact[seq] = 0;
	ccb->ccb_h.status = CAM_REQ_CMP_ERR;
	return (-1);
}
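
/*
 * Note on sequencing: isp_en_lun() parks the XPT_EN_LUN CCB in
 * isp_osinfo.leact[seq] and passes seq+1 to isp_lun_cmd() so that the
 * firmware's ENABLE/MODIFY LUN response carries it back in le_reserved.
 * isp_ledone() below uses that value to find the original CCB and
 * complete (or continue) the enable/disable sequence.
 */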

static void
isp_ledone(struct ispsoftc *isp, lun_entry_t *lep)
{
	const char lfmt[] = "lun %d now %sabled for target mode on channel %d";
	union ccb *ccb;
	u_int32_t seq;
	tstate_t *tptr;
	int av;
	struct ccb_en_lun *cel;

	seq = lep->le_reserved - 1;
	if (seq >= NLEACT) {
		isp_prt(isp, ISP_LOGERR,
		    "seq out of range (%u) in isp_ledone", seq);
		return;
	}
	ccb = isp->isp_osinfo.leact[seq];
	if (ccb == 0) {
		isp_prt(isp, ISP_LOGERR,
		    "no ccb for seq %u in isp_ledone", seq);
		return;
	}
	cel = &ccb->cel;
	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb));
	if (tptr == NULL) {
		xpt_print_path(ccb->ccb_h.path);
		printf("null tptr in isp_ledone\n");
		isp->isp_osinfo.leact[seq] = 0;
		return;
	}

	if (lep->le_status != LUN_OK) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status);
err:
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_print_path(ccb->ccb_h.path);
		rls_lun_statep(isp, tptr);
		isp->isp_osinfo.leact[seq] = 0;
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done(ccb);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	} else {
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "isp_ledone: ENABLE/MODIFY done okay");
	}


	if (cel->enable) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		isp_prt(isp, /* ISP_LOGINFO */ ISP_LOGALL, lfmt,
		    XS_LUN(ccb), "en", XS_CHANNEL(ccb));
		rls_lun_statep(isp, tptr);
		isp->isp_osinfo.leact[seq] = 0;
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done(ccb);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) {
		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb),
		    XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) {
			xpt_print_path(ccb->ccb_h.path);
			printf("isp_ledone: isp_lun_cmd failed\n");
			goto err;
		}
		rls_lun_statep(isp, tptr);
		return;
	}

	isp_prt(isp, ISP_LOGINFO, lfmt, XS_LUN(ccb), "dis", XS_CHANNEL(ccb));
	rls_lun_statep(isp, tptr);
	destroy_lun_state(isp, tptr);
	ccb->ccb_h.status = CAM_REQ_CMP;
	isp->isp_osinfo.leact[seq] = 0;
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done(ccb);
	CAMLOCK_2_ISPLOCK(isp);
	if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) {
		int bus = XS_CHANNEL(ccb);
		av = bus << 31;
		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
		if (av) {
			isp_prt(isp, ISP_LOGWARN,
			    "disable target mode on channel %d failed", bus);
		} else {
			isp_prt(isp, ISP_LOGINFO,
			    "Target Mode disabled on channel %d", bus);
		}
		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
	}
}


static cam_status
isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
{
	tstate_t *tptr;
	struct ccb_hdr_slist *lp;
	struct ccb_hdr *curelm;
	int found, *ctr;
	union ccb *accb = ccb->cab.abort_ccb;

	isp_prt(isp, ISP_LOGTDEBUG0, "aborting ccb %p", accb);
	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		int badpath = 0;
		if (IS_FC(isp) && (accb->ccb_h.target_id !=
		    ((fcparam *) isp->isp_param)->isp_loopid)) {
			badpath = 1;
		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
			badpath = 1;
		}
		if (badpath) {
			/*
			 * Being restrictive about target ids is really about
			 * making sure we're aborting for the right multi-tid
			 * path. This doesn't really make much sense at present.
			 */
#if 0
			return (CAM_PATH_INVALID);
#endif
		}
	}
	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
	if (tptr == NULL) {
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "isp_abort_tgt_ccb: can't get statep");
		return (CAM_PATH_INVALID);
	}
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &tptr->atios;
		ctr = &tptr->atio_count;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &tptr->inots;
		ctr = &tptr->inot_count;
	} else {
		rls_lun_statep(isp, tptr);
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "isp_abort_tgt_ccb: bad func %d\n", accb->ccb_h.func_code);
		return (CAM_UA_ABORT);
	}
	curelm = SLIST_FIRST(lp);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(lp, sim_links.sle);
	} else {
		while (curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
				    SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	rls_lun_statep(isp, tptr);
	if (found) {
		(*ctr)--;
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "isp_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}

static cam_status
isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_scsiio *cso = &ccb->csio;
	u_int16_t *hp, save_handle;
	u_int16_t nxti, optr;
	u_int8_t local[QENTRY_LEN];


	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(local, QENTRY_LEN);

	/*
	 * We're either moving data or completing a command here.
	 */
1152 */ 1153 1154 if (IS_FC(isp)) { 1155 atio_private_data_t *atp; 1156 ct2_entry_t *cto = (ct2_entry_t *) local; 1157 1158 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1159 cto->ct_header.rqs_entry_count = 1; 1160 cto->ct_iid = cso->init_id; 1161 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 1162 cto->ct_lun = ccb->ccb_h.target_lun; 1163 } 1164 1165 atp = isp_get_atpd(isp, cso->tag_id); 1166 if (atp == NULL) { 1167 isp_prt(isp, ISP_LOGERR, 1168 "cannot find private data adjunct for tag %x", 1169 cso->tag_id); 1170 return (-1); 1171 } 1172 1173 cto->ct_rxid = cso->tag_id; 1174 if (cso->dxfer_len == 0) { 1175 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1176 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1177 cto->ct_flags |= CT2_SENDSTATUS; 1178 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1179 cto->ct_resid = 1180 atp->orig_datalen - atp->bytes_xfered; 1181 if (cto->ct_resid < 0) { 1182 cto->rsp.m1.ct_scsi_status |= 1183 CT2_DATA_OVER; 1184 } else if (cto->ct_resid > 0) { 1185 cto->rsp.m1.ct_scsi_status |= 1186 CT2_DATA_UNDER; 1187 } 1188 } 1189 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1190 int m = min(cso->sense_len, MAXRESPLEN); 1191 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m); 1192 cto->rsp.m1.ct_senselen = m; 1193 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1194 } 1195 } else { 1196 cto->ct_flags |= CT2_FLAG_MODE0; 1197 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1198 cto->ct_flags |= CT2_DATA_IN; 1199 } else { 1200 cto->ct_flags |= CT2_DATA_OUT; 1201 } 1202 cto->ct_reloff = atp->bytes_xfered; 1203 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1204 cto->ct_flags |= CT2_SENDSTATUS; 1205 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1206 cto->ct_resid = 1207 atp->orig_datalen - 1208 (atp->bytes_xfered + cso->dxfer_len); 1209 if (cto->ct_resid < 0) { 1210 cto->rsp.m0.ct_scsi_status |= 1211 CT2_DATA_OVER; 1212 } else if (cto->ct_resid > 0) { 1213 cto->rsp.m0.ct_scsi_status |= 1214 CT2_DATA_UNDER; 1215 } 1216 } else { 1217 atp->last_xframt = cso->dxfer_len; 1218 } 1219 /* 1220 * If we're sending data and status back together, 1221 * we can't also send back sense data as well. 
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}

		if (cto->ct_flags & CT2_SENDSTATUS) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
			    cso->dxfer_len, cto->ct_resid);
			cto->ct_flags |= CT2_CCINCR;
			atp->state = ATPD_STATE_LAST_CTIO;
		} else
			atp->state = ATPD_STATE_CTIO;
		cto->ct_timeout = 10;
		hp = &cto->ct_syshandle;
	} else {
		ct_entry_t *cto = (ct_entry_t *) local;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
		cto->ct_tgt = ccb->ccb_h.target_id;
		cto->ct_lun = ccb->ccb_h.target_lun;
		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
		if (AT_HAS_TAG(cso->tag_id)) {
			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
			cto->ct_flags |= CT_TQAE;
		}
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			cto->ct_flags |= CT_NODISC;
		}
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT_NO_DATA;
		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
			cto->ct_scsi_status = cso->scsi_status;
			cto->ct_resid = cso->resid;
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
			    cso->tag_id);
		}
		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		cto->ct_timeout = 10;
		hp = &cto->ct_syshandle;
	}

	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("No XFLIST pointers for isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}


	/*
	 * Call the dma setup routines for this entry (and any subsequent
	 * CTIOs) if there's data to move, and then tell the f/w it's got
	 * new things to play with. As with isp_start's usage of DMA setup,
	 * any swizzling is done in the machine dependent layer. Because
	 * of this, we put the request onto the queue area first in native
	 * format.
	 */
1289 */ 1290 1291 save_handle = *hp; 1292 1293 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1294 case CMD_QUEUED: 1295 ISP_ADD_REQUEST(isp, nxti); 1296 return (CAM_REQ_INPROG); 1297 1298 case CMD_EAGAIN: 1299 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1300 isp_destroy_handle(isp, save_handle); 1301 return (CAM_RESRC_UNAVAIL); 1302 1303 default: 1304 isp_destroy_handle(isp, save_handle); 1305 return (XS_ERR(ccb)); 1306 } 1307 } 1308 1309 static void 1310 isp_refire_putback_atio(void *arg) 1311 { 1312 int s = splcam(); 1313 isp_target_putback_atio(arg); 1314 splx(s); 1315 } 1316 1317 static void 1318 isp_target_putback_atio(union ccb *ccb) 1319 { 1320 struct ispsoftc *isp; 1321 struct ccb_scsiio *cso; 1322 u_int16_t nxti, optr; 1323 void *qe; 1324 1325 isp = XS_ISP(ccb); 1326 1327 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1328 (void) timeout(isp_refire_putback_atio, ccb, 10); 1329 isp_prt(isp, ISP_LOGWARN, 1330 "isp_target_putback_atio: Request Queue Overflow"); 1331 return; 1332 } 1333 bzero(qe, QENTRY_LEN); 1334 cso = &ccb->csio; 1335 if (IS_FC(isp)) { 1336 at2_entry_t local, *at = &local; 1337 MEMZERO(at, sizeof (at2_entry_t)); 1338 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1339 at->at_header.rqs_entry_count = 1; 1340 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1341 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1342 } else { 1343 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1344 } 1345 at->at_status = CT_OK; 1346 at->at_rxid = cso->tag_id; 1347 at->at_iid = cso->ccb_h.target_id; 1348 isp_put_atio2(isp, at, qe); 1349 } else { 1350 at_entry_t local, *at = &local; 1351 MEMZERO(at, sizeof (at_entry_t)); 1352 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1353 at->at_header.rqs_entry_count = 1; 1354 at->at_iid = cso->init_id; 1355 at->at_iid |= XS_CHANNEL(ccb) << 7; 1356 at->at_tgt = cso->ccb_h.target_id; 1357 at->at_lun = cso->ccb_h.target_lun; 1358 at->at_status = CT_OK; 1359 at->at_tag_val = AT_GET_TAG(cso->tag_id); 1360 at->at_handle = AT_GET_HANDLE(cso->tag_id); 1361 isp_put_atio(isp, at, qe); 1362 } 1363 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); 1364 ISP_ADD_REQUEST(isp, nxti); 1365 isp_complete_ctio(ccb); 1366 } 1367 1368 static void 1369 isp_complete_ctio(union ccb *ccb) 1370 { 1371 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1372 ccb->ccb_h.status |= CAM_REQ_CMP; 1373 } 1374 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1375 xpt_done(ccb); 1376 } 1377 1378 /* 1379 * Handle ATIO stuff that the generic code can't. 1380 * This means handling CDBs. 1381 */ 1382 1383 static int 1384 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep) 1385 { 1386 tstate_t *tptr; 1387 int status, bus, iswildcard; 1388 struct ccb_accept_tio *atiop; 1389 1390 /* 1391 * The firmware status (except for the QLTM_SVALID bit) 1392 * indicates why this ATIO was sent to us. 1393 * 1394 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1395 * 1396 * If the DISCONNECTS DISABLED bit is set in the flags field, 1397 * we're still connected on the SCSI bus. 1398 */ 1399 status = aep->at_status; 1400 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 1401 /* 1402 * Bus Phase Sequence error. We should have sense data 1403 * suggested by the f/w. I'm not sure quite yet what 1404 * to do about this for CAM. 

/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status, bus, iswildcard;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
		    status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	bus = GET_BUS_VAL(aep->at_iid);
	tptr = get_lun_statep(isp, bus, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
		if (tptr == NULL) {
			isp_endcmd(isp, aep,
			    SCSI_STATUS_CHECK_COND | ECMD_SVALID |
			    (0x5 << 12) | (0x25 << 16), 0);
			return (0);
		}
		iswildcard = 1;
	} else {
		iswildcard = 0;
	}

	if (tptr == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a BUSY status
		 * instead. This works out okay because the only
		 * time we should, in fact, get this, is in the
		 * case that somebody configured us without the
		 * blackhole driver, so they get what they deserve.
		 */
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no ATIOS for lun %d from initiator %d on channel %d",
		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		rls_lun_statep(isp, tptr);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	tptr->atio_count--;
	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
	    aep->at_lun, tptr->atio_count);
	if (iswildcard) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = GET_IID_VAL(aep->at_iid);
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	/*
	 * Construct a tag 'id' based upon tag value (which may be 0..255)
	 * and the handle (which we have to preserve).
	 */
	AT_MAKE_TAGID(atiop->tag_id, aep);
	if (aep->at_flags & AT_TQAE) {
		atiop->tag_action = aep->at_tag_type;
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
	    "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;
	atio_private_data_t *atp;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN,
		    "bogus atio (0x%x) leaked to platform", aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, 0, lun);
	if (tptr == NULL) {
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "[0x%x] no state pointer for lun %d", aep->at_rxid, lun);
		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
		if (tptr == NULL) {
			isp_endcmd(isp, aep,
			    SCSI_STATUS_CHECK_COND | ECMD_SVALID |
			    (0x5 << 12) | (0x25 << 16), 0);
			return (0);
		}
	}

	atp = isp_get_atpd(isp, 0);
	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL || atp == NULL) {

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no %s for lun %d from initiator %d",
		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		return (0);
	}
	atp->state = ATPD_STATE_ATIO;
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	tptr->atio_count--;
	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
	    lun, tptr->atio_count);

	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
		atiop->ccb_h.target_id =
		    ((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	/*
	 * We don't get 'suggested' sense data as we do with SCSI cards.
	 */
	atiop->sense_len = 0;

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
	case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;

	atp->tag = atiop->tag_id;
	atp->lun = lun;
	atp->orig_datalen = aep->at_datalen;
	atp->last_xframt = 0;
	atp->bytes_xfered = 0;
	atp->state = ATPD_STATE_CAM;
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done((union ccb*)atiop);
	CAMLOCK_2_ISPLOCK(isp);

	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;
	u_int16_t tval;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find adjunct for %x after I/O",
			    ct->ct_rxid);
			return (0);
		}
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			resid = ct->ct_resid;
			atp->bytes_xfered += (atp->last_xframt - resid);
			atp->last_xframt = 0;
		}
		if (sentstatus || !ok) {
			atp->tag = 0;
		}
		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    resid, sentstatus? "FIN" : "MID");
		tval = ct->ct_rxid;

		/* XXX: should really come after isp_complete_ctio */
		atp->state = ATPD_STATE_PDON;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
		 */
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if (ct->ct_status & QLTM_SVALID) {
			char *sp = (char *)ct;
			sp += CTIO_SENSE_OFFSET;
			ccb->csio.sense_len =
			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
			resid = ct->ct_resid;
		}
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
		    ct->ct_status, ct->ct_flags, resid,
		    sentstatus? "FIN" : "MID");
		tval = ct->ct_fwhandle;
	}
	ccb->csio.resid += resid;

	/*
	 * We're here either because intermediate data transfers are done
	 * and/or the final status CTIO (which may have joined with a
	 * Data Transfer) is done.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. Any DMA handles have already been
	 * freed.
	 */
	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
		return (0);
	}

	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
	    (sentstatus)? " FINAL " : "MIDTERM ", tval);

	if (!ok) {
		isp_target_putback_atio(ccb);
	} else {
		isp_complete_ctio(ccb);

	}
	return (0);
}

static int
isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
{
	return (0);	/* XXXX */
}

static int
isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
{

	switch (inp->in_status) {
	case IN_PORT_LOGOUT:
		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
		    inp->in_iid);
		break;
	case IN_PORT_CHANGED:
		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
		    inp->in_iid);
		break;
	case IN_GLOBAL_LOGO:
		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
		break;
	case IN_ABORT_TASK:
	{
		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
		struct ccb_immed_notify *inot = NULL;

		if (atp) {
			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
			if (tptr) {
				inot = (struct ccb_immed_notify *)
				    SLIST_FIRST(&tptr->inots);
				if (inot) {
					tptr->inot_count--;
					SLIST_REMOVE_HEAD(&tptr->inots,
					    sim_links.sle);
					isp_prt(isp, ISP_LOGTDEBUG0,
					    "Take FREE INOT count now %d",
					    tptr->inot_count);
				}
			}
			isp_prt(isp, ISP_LOGWARN,
			    "abort task RX_ID %x IID %d state %d",
			    inp->in_seqid, inp->in_iid, atp->state);
		} else {
			isp_prt(isp, ISP_LOGWARN,
			    "abort task RX_ID %x from iid %d, state unknown",
			    inp->in_seqid, inp->in_iid);
		}
		if (inot) {
			inot->initiator_id = inp->in_iid;
			inot->sense_len = 0;
			inot->message_args[0] = MSG_ABORT_TAG;
			inot->message_args[1] = inp->in_seqid & 0xff;
			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			xpt_done((union ccb *)inot);
		}
		break;
	}
	default:
		break;
	}
	return (0);
}
#endif

static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int tgt;

			tgt = xpt_path_target_id(path);
			if (tgt >= 0) {
				sdp += cam_sim_bus(sim);
				ISP_LOCK(isp);
				nflags = sdp->isp_devparam[tgt].nvrm_flags;
#ifndef	ISP_TARGET_MODE
				nflags &= DPARM_SAFE_DFLT;
				if (isp->isp_loaded_fw) {
					nflags |= DPARM_NARROW | DPARM_ASYNC;
				}
#else
				nflags = DPARM_DEFAULT;
#endif
				oflags = sdp->isp_devparam[tgt].goal_flags;
				sdp->isp_devparam[tgt].goal_flags = nflags;
				sdp->isp_devparam[tgt].dev_update = 1;
				isp->isp_update |= (1 << cam_sim_bus(sim));
				(void) isp_control(isp,
				    ISPCTL_UPDATE_PARAMS, NULL);
				sdp->isp_devparam[tgt].goal_flags = oflags;
				ISP_UNLOCK(isp);
			}
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}

static void
isp_poll(struct cam_sim *sim)
{
	struct ispsoftc *isp = cam_sim_softc(sim);
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}


static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;
	int iok;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	iok = isp->isp_osinfo.intsok;
	isp->isp_osinfo.intsok = 0;
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t isr, sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup for handle 0x%x", handle);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "watchdog timeout for handle 0x%x", handle);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			u_int16_t nxti, optr;
			ispreq_t local, *mp = &local, *qe;

			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			isp_put_request(isp, mp, qe);
			ISP_ADD_REQUEST(isp, nxti);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	isp->isp_osinfo.intsok = iok;
	ISP_UNLOCK(isp);
}
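
/*
 * isp_kthread is the per-instance Fibre Channel support thread: it
 * retries isp_fc_runstate() until loop/fabric state is good (or until
 * we give up on a loop we have never seen), clears SIMQFRZ_LOOPDOWN so
 * that a queue frozen by isp_freeze_loopdown() is released, and then
 * sleeps on kthread_cv until isp_action() wakes it for another try.
 */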

static void
isp_kthread(void *arg)
{
	struct ispsoftc *isp = arg;

#ifdef	ISP_SMPLOCK
	mtx_lock(&isp->isp_lock);
#else
	mtx_lock(&Giant);
#endif
	/*
	 * The first loop is for our usage where we have yet to have
	 * gotten good fibre channel state.
	 */
	for (;;) {
		int wasfrozen;

		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
				if (FCPARAM(isp)->loop_seen_once == 0 ||
				    isp->isp_osinfo.ktmature == 0) {
					break;
				}
			}
#ifdef	ISP_SMPLOCK
			msleep(isp_kthread, &isp->isp_lock,
			    PRIBIO, "isp_fcthrd", hz);
#else
			(void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
#endif
		}

		/*
		 * Even if we didn't get good loop state we may be
		 * unfreezing the SIMQ so that we can kill off
		 * commands (if we've never seen loop before, for example).
		 */
		isp->isp_osinfo.ktmature = 1;
		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
			ISPLOCK_2_CAMLOCK(isp);
			xpt_release_simq(isp->isp_sim, 1);
			CAMLOCK_2_ISPLOCK(isp);
		}
		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
#ifdef	ISP_SMPLOCK
		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
#else
		(void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
#endif
	}
}

static void
isp_action(struct cam_sim *sim, union ccb *ccb)
{
	int bus, tgt, error;
	struct ispsoftc *isp;
	struct ccb_trans_settings *cts;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));

	isp = (struct ispsoftc *)cam_sim_softc(sim);
	ccb->ccb_h.sim_priv.entries[0].field = 0;
	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
	if (isp->isp_state != ISP_RUNSTATE &&
	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
		CAMLOCK_2_ISPLOCK(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_UNLOCK(isp);
			/*
			 * Lie. Say it was a selection timeout.
			 */
			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISPLOCK_2_CAMLOCK(isp);
	}
	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);


	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
2045 */ 2046 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2047 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2048 ccb->ccb_h.status = CAM_REQ_INVALID; 2049 xpt_done(ccb); 2050 break; 2051 } 2052 } 2053 #ifdef DIAGNOSTIC 2054 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2055 ccb->ccb_h.status = CAM_PATH_INVALID; 2056 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2057 ccb->ccb_h.status = CAM_PATH_INVALID; 2058 } 2059 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2060 isp_prt(isp, ISP_LOGERR, 2061 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO", 2062 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 2063 xpt_done(ccb); 2064 break; 2065 } 2066 #endif 2067 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 2068 CAMLOCK_2_ISPLOCK(isp); 2069 error = isp_start((XS_T *) ccb); 2070 switch (error) { 2071 case CMD_QUEUED: 2072 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2073 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2074 u_int64_t ticks = (u_int64_t) hz; 2075 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) 2076 ticks = 60 * 1000 * ticks; 2077 else 2078 ticks = ccb->ccb_h.timeout * hz; 2079 ticks = ((ticks + 999) / 1000) + hz + hz; 2080 if (ticks >= 0x80000000) { 2081 isp_prt(isp, ISP_LOGERR, 2082 "timeout overflow"); 2083 ticks = 0x7fffffff; 2084 } 2085 ccb->ccb_h.timeout_ch = timeout(isp_watchdog, 2086 (caddr_t)ccb, (int)ticks); 2087 } else { 2088 callout_handle_init(&ccb->ccb_h.timeout_ch); 2089 } 2090 ISPLOCK_2_CAMLOCK(isp); 2091 break; 2092 case CMD_RQLATER: 2093 /* 2094 * This can only happen for Fibre Channel 2095 */ 2096 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 2097 if (FCPARAM(isp)->loop_seen_once == 0 && 2098 isp->isp_osinfo.ktmature) { 2099 ISPLOCK_2_CAMLOCK(isp); 2100 XS_SETERR(ccb, CAM_SEL_TIMEOUT); 2101 xpt_done(ccb); 2102 break; 2103 } 2104 #ifdef ISP_SMPLOCK 2105 cv_signal(&isp->isp_osinfo.kthread_cv); 2106 #else 2107 wakeup(&isp->isp_osinfo.kthread_cv); 2108 #endif 2109 isp_freeze_loopdown(isp, "isp_action(RQLATER)"); 2110 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2111 ISPLOCK_2_CAMLOCK(isp); 2112 xpt_done(ccb); 2113 break; 2114 case CMD_EAGAIN: 2115 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2116 ISPLOCK_2_CAMLOCK(isp); 2117 xpt_done(ccb); 2118 break; 2119 case CMD_COMPLETE: 2120 isp_done((struct ccb_scsiio *) ccb); 2121 ISPLOCK_2_CAMLOCK(isp); 2122 break; 2123 default: 2124 isp_prt(isp, ISP_LOGERR, 2125 "What's this? 

#ifdef ISP_TARGET_MODE
    case XPT_EN_LUN:        /* Enable LUN as a target */
    {
        int seq, iok, i;
        CAMLOCK_2_ISPLOCK(isp);
        iok = isp->isp_osinfo.intsok;
        isp->isp_osinfo.intsok = 0;
        seq = isp_en_lun(isp, ccb);
        if (seq < 0) {
            isp->isp_osinfo.intsok = iok;
            ISPLOCK_2_CAMLOCK(isp);
            xpt_done(ccb);
            break;
        }
        for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) {
            u_int16_t isr, sema, mbox;
            if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
                isp_intr(isp, isr, sema, mbox);
            }
            DELAY(1000);
        }
        isp->isp_osinfo.intsok = iok;
        ISPLOCK_2_CAMLOCK(isp);
        break;
    }
    case XPT_NOTIFY_ACK:        /* recycle notify ack */
    case XPT_IMMED_NOTIFY:      /* Add Immediate Notify Resource */
    case XPT_ACCEPT_TARGET_IO:  /* Add Accept Target IO Resource */
    {
        tstate_t *tptr =
            get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
        if (tptr == NULL) {
            ccb->ccb_h.status = CAM_LUN_INVALID;
            xpt_done(ccb);
            break;
        }
        ccb->ccb_h.sim_priv.entries[0].field = 0;
        ccb->ccb_h.sim_priv.entries[1].ptr = isp;
        ccb->ccb_h.flags = 0;

        CAMLOCK_2_ISPLOCK(isp);
        if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
            /*
             * Note that the command itself may not be done -
             * it may not even have had the first CTIO sent.
             */
            tptr->atio_count++;
            isp_prt(isp, ISP_LOGTDEBUG0,
                "Put FREE ATIO, lun %d, count now %d",
                ccb->ccb_h.target_lun, tptr->atio_count);
            SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
                sim_links.sle);
        } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
            tptr->inot_count++;
            isp_prt(isp, ISP_LOGTDEBUG0,
                "Put FREE INOT, lun %d, count now %d",
                ccb->ccb_h.target_lun, tptr->inot_count);
            SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
                sim_links.sle);
        } else {
            isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");
        }
        rls_lun_statep(isp, tptr);
        ccb->ccb_h.status = CAM_REQ_INPROG;
        ISPLOCK_2_CAMLOCK(isp);
        break;
    }
    case XPT_CONT_TARGET_IO:
    {
        CAMLOCK_2_ISPLOCK(isp);
        ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
        if (ccb->ccb_h.status != CAM_REQ_INPROG) {
            isp_prt(isp, ISP_LOGWARN,
                "XPT_CONT_TARGET_IO: status 0x%x",
                ccb->ccb_h.status);
            XS_SETERR(ccb, CAM_REQUEUE_REQ);
            ISPLOCK_2_CAMLOCK(isp);
            xpt_done(ccb);
        } else {
            ISPLOCK_2_CAMLOCK(isp);
            ccb->ccb_h.status |= CAM_SIM_QUEUED;
        }
        break;
    }
#endif
    case XPT_RESET_DEV:     /* BDR the specified SCSI device */

        bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
        tgt = ccb->ccb_h.target_id;
        tgt |= (bus << 16);

        CAMLOCK_2_ISPLOCK(isp);
        error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
        ISPLOCK_2_CAMLOCK(isp);
        if (error) {
            ccb->ccb_h.status = CAM_REQ_CMP_ERR;
        } else {
            ccb->ccb_h.status = CAM_REQ_CMP;
        }
        xpt_done(ccb);
        break;
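
    /*
     * A small worked example of the nexus encoding used just above
     * (illustrative only): a bus device reset aimed at bus 1, target 3
     * is handed to isp_control(isp, ISPCTL_RESET_DEV, &tgt) with
     * tgt = 3 | (1 << 16) = 0x00010003, i.e. target in the low 16 bits
     * and channel in the high 16 bits.
     */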
"cannot abort CTIOs yet"); 2246 ccb->ccb_h.status = CAM_UA_ABORT; 2247 break; 2248 #endif 2249 case XPT_SCSI_IO: 2250 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2251 if (error) { 2252 ccb->ccb_h.status = CAM_UA_ABORT; 2253 } else { 2254 ccb->ccb_h.status = CAM_REQ_CMP; 2255 } 2256 break; 2257 default: 2258 ccb->ccb_h.status = CAM_REQ_INVALID; 2259 break; 2260 } 2261 ISPLOCK_2_CAMLOCK(isp); 2262 xpt_done(ccb); 2263 break; 2264 } 2265 #ifdef CAM_NEW_TRAN_CODE 2266 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2267 #else 2268 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2269 #endif 2270 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2271 cts = &ccb->cts; 2272 if (!IS_CURRENT_SETTINGS(cts)) { 2273 ccb->ccb_h.status = CAM_REQ_INVALID; 2274 xpt_done(ccb); 2275 break; 2276 } 2277 tgt = cts->ccb_h.target_id; 2278 CAMLOCK_2_ISPLOCK(isp); 2279 if (IS_SCSI(isp)) { 2280 #ifndef CAM_NEW_TRAN_CODE 2281 sdparam *sdp = isp->isp_param; 2282 u_int16_t *dptr; 2283 2284 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2285 2286 sdp += bus; 2287 /* 2288 * We always update (internally) from goal_flags 2289 * so any request to change settings just gets 2290 * vectored to that location. 2291 */ 2292 dptr = &sdp->isp_devparam[tgt].goal_flags; 2293 2294 /* 2295 * Note that these operations affect the 2296 * the goal flags (goal_flags)- not 2297 * the current state flags. Then we mark 2298 * things so that the next operation to 2299 * this HBA will cause the update to occur. 2300 */ 2301 if (cts->valid & CCB_TRANS_DISC_VALID) { 2302 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2303 *dptr |= DPARM_DISC; 2304 } else { 2305 *dptr &= ~DPARM_DISC; 2306 } 2307 } 2308 if (cts->valid & CCB_TRANS_TQ_VALID) { 2309 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2310 *dptr |= DPARM_TQING; 2311 } else { 2312 *dptr &= ~DPARM_TQING; 2313 } 2314 } 2315 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2316 switch (cts->bus_width) { 2317 case MSG_EXT_WDTR_BUS_16_BIT: 2318 *dptr |= DPARM_WIDE; 2319 break; 2320 default: 2321 *dptr &= ~DPARM_WIDE; 2322 } 2323 } 2324 /* 2325 * Any SYNC RATE of nonzero and SYNC_OFFSET 2326 * of nonzero will cause us to go to the 2327 * selected (from NVRAM) maximum value for 2328 * this device. At a later point, we'll 2329 * allow finer control. 2330 */ 2331 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2332 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2333 (cts->sync_offset > 0)) { 2334 *dptr |= DPARM_SYNC; 2335 } else { 2336 *dptr &= ~DPARM_SYNC; 2337 } 2338 *dptr |= DPARM_SAFE_DFLT; 2339 #else 2340 struct ccb_trans_settings_scsi *scsi = 2341 &cts->proto_specific.scsi; 2342 struct ccb_trans_settings_spi *spi = 2343 &cts->xport_specific.spi; 2344 sdparam *sdp = isp->isp_param; 2345 u_int16_t *dptr; 2346 2347 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2348 sdp += bus; 2349 /* 2350 * We always update (internally) from goal_flags 2351 * so any request to change settings just gets 2352 * vectored to that location. 

    case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
        cts = &ccb->cts;
        if (!IS_CURRENT_SETTINGS(cts)) {
            ccb->ccb_h.status = CAM_REQ_INVALID;
            xpt_done(ccb);
            break;
        }
        tgt = cts->ccb_h.target_id;
        CAMLOCK_2_ISPLOCK(isp);
        if (IS_SCSI(isp)) {
#ifndef CAM_NEW_TRAN_CODE
            sdparam *sdp = isp->isp_param;
            u_int16_t *dptr;

            bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));

            sdp += bus;
            /*
             * We always update (internally) from goal_flags
             * so any request to change settings just gets
             * vectored to that location.
             */
            dptr = &sdp->isp_devparam[tgt].goal_flags;

            /*
             * Note that these operations affect the goal
             * flags (goal_flags), not the current state flags.
             * Then we mark things so that the next operation to
             * this HBA will cause the update to occur.
             */
            if (cts->valid & CCB_TRANS_DISC_VALID) {
                if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
                    *dptr |= DPARM_DISC;
                } else {
                    *dptr &= ~DPARM_DISC;
                }
            }
            if (cts->valid & CCB_TRANS_TQ_VALID) {
                if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
                    *dptr |= DPARM_TQING;
                } else {
                    *dptr &= ~DPARM_TQING;
                }
            }
            if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
                switch (cts->bus_width) {
                case MSG_EXT_WDTR_BUS_16_BIT:
                    *dptr |= DPARM_WIDE;
                    break;
                default:
                    *dptr &= ~DPARM_WIDE;
                }
            }
            /*
             * Any SYNC RATE of nonzero and SYNC_OFFSET
             * of nonzero will cause us to go to the
             * selected (from NVRAM) maximum value for
             * this device. At a later point, we'll
             * allow finer control.
             */
            if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
                (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
                (cts->sync_offset > 0)) {
                *dptr |= DPARM_SYNC;
            } else {
                *dptr &= ~DPARM_SYNC;
            }
            *dptr |= DPARM_SAFE_DFLT;
#else
            struct ccb_trans_settings_scsi *scsi =
                &cts->proto_specific.scsi;
            struct ccb_trans_settings_spi *spi =
                &cts->xport_specific.spi;
            sdparam *sdp = isp->isp_param;
            u_int16_t *dptr;

            bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
            sdp += bus;
            /*
             * We always update (internally) from goal_flags
             * so any request to change settings just gets
             * vectored to that location.
             */
            dptr = &sdp->isp_devparam[tgt].goal_flags;

            if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
                if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
                    *dptr |= DPARM_DISC;
                else
                    *dptr &= ~DPARM_DISC;
            }

            if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
                if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
                    *dptr |= DPARM_TQING;
                else
                    *dptr &= ~DPARM_TQING;
            }

            if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
                if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
                    *dptr |= DPARM_WIDE;
                else
                    *dptr &= ~DPARM_WIDE;
            }

            /*
             * XXX: FIX ME
             */
            if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
                (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
                (spi->sync_period && spi->sync_offset)) {
                *dptr |= DPARM_SYNC;
                /*
                 * XXX: CHECK FOR LEGALITY
                 */
                sdp->isp_devparam[tgt].goal_period =
                    spi->sync_period;
                sdp->isp_devparam[tgt].goal_offset =
                    spi->sync_offset;
            } else {
                *dptr &= ~DPARM_SYNC;
            }
#endif
            isp_prt(isp, ISP_LOGDEBUG0,
                "SET bus %d targ %d to flags %x off %x per %x",
                bus, tgt, sdp->isp_devparam[tgt].goal_flags,
                sdp->isp_devparam[tgt].goal_offset,
                sdp->isp_devparam[tgt].goal_period);
            sdp->isp_devparam[tgt].dev_update = 1;
            isp->isp_update |= (1 << bus);
        }
        ISPLOCK_2_CAMLOCK(isp);
        ccb->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
        break;
2424 */ 2425 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2426 #else 2427 fcparam *fcp = isp->isp_param; 2428 struct ccb_trans_settings_fc *fc = 2429 &cts->xport_specific.fc; 2430 2431 cts->protocol = PROTO_SCSI; 2432 cts->protocol_version = SCSI_REV_2; 2433 cts->transport = XPORT_FC; 2434 cts->transport_version = 0; 2435 2436 fc->valid = CTS_FC_VALID_SPEED; 2437 if (fcp->isp_gbspeed == 2) 2438 fc->bitrate = 200000; 2439 else 2440 fc->bitrate = 100000; 2441 if (tgt > 0 && tgt < MAX_FC_TARG) { 2442 struct lportdb *lp = &fcp->portdb[tgt]; 2443 fc->wwnn = lp->node_wwn; 2444 fc->wwpn = lp->port_wwn; 2445 fc->port = lp->portid; 2446 fc->valid |= CTS_FC_VALID_WWNN | 2447 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2448 } 2449 #endif 2450 } else { 2451 #ifdef CAM_NEW_TRAN_CODE 2452 struct ccb_trans_settings_scsi *scsi = 2453 &cts->proto_specific.scsi; 2454 struct ccb_trans_settings_spi *spi = 2455 &cts->xport_specific.spi; 2456 #endif 2457 sdparam *sdp = isp->isp_param; 2458 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2459 u_int16_t dval, pval, oval; 2460 2461 sdp += bus; 2462 2463 if (IS_CURRENT_SETTINGS(cts)) { 2464 sdp->isp_devparam[tgt].dev_refresh = 1; 2465 isp->isp_update |= (1 << bus); 2466 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2467 NULL); 2468 dval = sdp->isp_devparam[tgt].actv_flags; 2469 oval = sdp->isp_devparam[tgt].actv_offset; 2470 pval = sdp->isp_devparam[tgt].actv_period; 2471 } else { 2472 dval = sdp->isp_devparam[tgt].nvrm_flags; 2473 oval = sdp->isp_devparam[tgt].nvrm_offset; 2474 pval = sdp->isp_devparam[tgt].nvrm_period; 2475 } 2476 2477 #ifndef CAM_NEW_TRAN_CODE 2478 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2479 2480 if (dval & DPARM_DISC) { 2481 cts->flags |= CCB_TRANS_DISC_ENB; 2482 } 2483 if (dval & DPARM_TQING) { 2484 cts->flags |= CCB_TRANS_TAG_ENB; 2485 } 2486 if (dval & DPARM_WIDE) { 2487 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2488 } else { 2489 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2490 } 2491 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2492 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2493 2494 if ((dval & DPARM_SYNC) && oval != 0) { 2495 cts->sync_period = pval; 2496 cts->sync_offset = oval; 2497 cts->valid |= 2498 CCB_TRANS_SYNC_RATE_VALID | 2499 CCB_TRANS_SYNC_OFFSET_VALID; 2500 } 2501 #else 2502 cts->protocol = PROTO_SCSI; 2503 cts->protocol_version = SCSI_REV_2; 2504 cts->transport = XPORT_SPI; 2505 cts->transport_version = 2; 2506 2507 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 2508 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 2509 if (dval & DPARM_DISC) { 2510 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2511 } 2512 if (dval & DPARM_TQING) { 2513 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2514 } 2515 if ((dval & DPARM_SYNC) && oval && pval) { 2516 spi->sync_offset = oval; 2517 spi->sync_period = pval; 2518 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2519 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2520 } 2521 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2522 if (dval & DPARM_WIDE) { 2523 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2524 } else { 2525 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2526 } 2527 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2528 scsi->valid = CTS_SCSI_VALID_TQ; 2529 spi->valid |= CTS_SPI_VALID_DISC; 2530 } else { 2531 scsi->valid = 0; 2532 } 2533 #endif 2534 isp_prt(isp, ISP_LOGDEBUG0, 2535 "GET %s bus %d targ %d to flags %x off %x per %x", 2536 IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM", 2537 bus, tgt, dval, oval, pval); 2538 } 2539 ISPLOCK_2_CAMLOCK(isp); 2540 ccb->ccb_h.status = CAM_REQ_CMP; 2541 xpt_done(ccb); 2542 break; 2543 2544 case XPT_CALC_GEOMETRY: 2545 { 2546 struct ccb_calc_geometry *ccg; 2547 2548 ccg = &ccb->ccg; 2549 if (ccg->block_size == 0) { 2550 isp_prt(isp, ISP_LOGERR, 2551 "%d.%d XPT_CALC_GEOMETRY block size 0?", 2552 ccg->ccb_h.target_id, ccg->ccb_h.target_lun); 2553 ccb->ccb_h.status = CAM_REQ_INVALID; 2554 xpt_done(ccb); 2555 break; 2556 } 2557 cam_calc_geometry(ccg, /*extended*/1); 2558 xpt_done(ccb); 2559 break; 2560 } 2561 case XPT_RESET_BUS: /* Reset the specified bus */ 2562 bus = cam_sim_bus(sim); 2563 CAMLOCK_2_ISPLOCK(isp); 2564 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2565 ISPLOCK_2_CAMLOCK(isp); 2566 if (error) 2567 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2568 else { 2569 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2570 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2571 else if (isp->isp_path != NULL) 2572 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2573 ccb->ccb_h.status = CAM_REQ_CMP; 2574 } 2575 xpt_done(ccb); 2576 break; 2577 2578 case XPT_TERM_IO: /* Terminate the I/O process */ 2579 ccb->ccb_h.status = CAM_REQ_INVALID; 2580 xpt_done(ccb); 2581 break; 2582 2583 case XPT_PATH_INQ: /* Path routing inquiry */ 2584 { 2585 struct ccb_pathinq *cpi = &ccb->cpi; 2586 2587 cpi->version_num = 1; 2588 #ifdef ISP_TARGET_MODE 2589 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2590 #else 2591 cpi->target_sprt = 0; 2592 #endif 2593 cpi->hba_eng_cnt = 0; 2594 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2595 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2596 cpi->bus_id = cam_sim_bus(sim); 2597 if (IS_FC(isp)) { 2598 cpi->hba_misc = PIM_NOBUSRESET; 2599 /* 2600 * Because our loop ID can shift from time to time, 2601 * make our initiator ID out of range of our bus. 2602 */ 2603 cpi->initiator_id = cpi->max_target + 1; 2604 2605 /* 2606 * Set base transfer capabilities for Fibre Channel. 2607 * Technically not correct because we don't know 2608 * what media we're running on top of- but we'll 2609 * look good if we always say 100MB/s. 2610 */ 2611 if (FCPARAM(isp)->isp_gbspeed == 2) 2612 cpi->base_transfer_speed = 200000; 2613 else 2614 cpi->base_transfer_speed = 100000; 2615 cpi->hba_inquiry = PI_TAG_ABLE; 2616 #ifdef CAM_NEW_TRAN_CODE 2617 cpi->transport = XPORT_FC; 2618 cpi->transport_version = 0; /* WHAT'S THIS FOR? */ 2619 #endif 2620 } else { 2621 sdparam *sdp = isp->isp_param; 2622 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2623 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2624 cpi->hba_misc = 0; 2625 cpi->initiator_id = sdp->isp_initiator_id; 2626 cpi->base_transfer_speed = 3300; 2627 #ifdef CAM_NEW_TRAN_CODE 2628 cpi->transport = XPORT_SPI; 2629 cpi->transport_version = 2; /* WHAT'S THIS FOR? 

    case XPT_PATH_INQ:      /* Path routing inquiry */
    {
        struct ccb_pathinq *cpi = &ccb->cpi;

        cpi->version_num = 1;
#ifdef ISP_TARGET_MODE
        cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
#else
        cpi->target_sprt = 0;
#endif
        cpi->hba_eng_cnt = 0;
        cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
        cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
        cpi->bus_id = cam_sim_bus(sim);
        if (IS_FC(isp)) {
            cpi->hba_misc = PIM_NOBUSRESET;
            /*
             * Because our loop ID can shift from time to time,
             * make our initiator ID out of range of our bus.
             */
            cpi->initiator_id = cpi->max_target + 1;

            /*
             * Set base transfer capabilities for Fibre Channel.
             * Technically not correct because we don't know
             * what media we're running on top of- but we'll
             * look good if we always say 100MB/s.
             */
            if (FCPARAM(isp)->isp_gbspeed == 2)
                cpi->base_transfer_speed = 200000;
            else
                cpi->base_transfer_speed = 100000;
            cpi->hba_inquiry = PI_TAG_ABLE;
#ifdef CAM_NEW_TRAN_CODE
            cpi->transport = XPORT_FC;
            cpi->transport_version = 0;     /* WHAT'S THIS FOR? */
#endif
        } else {
            sdparam *sdp = isp->isp_param;
            sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
            cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
            cpi->hba_misc = 0;
            cpi->initiator_id = sdp->isp_initiator_id;
            cpi->base_transfer_speed = 3300;
#ifdef CAM_NEW_TRAN_CODE
            cpi->transport = XPORT_SPI;
            cpi->transport_version = 2;     /* WHAT'S THIS FOR? */
#endif
        }
#ifdef CAM_NEW_TRAN_CODE
        cpi->protocol = PROTO_SCSI;
        cpi->protocol_version = SCSI_REV_2;
#endif
        strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
        strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
        strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
        cpi->unit_number = cam_sim_unit(sim);
        cpi->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
        break;
    }
    default:
        ccb->ccb_h.status = CAM_REQ_INVALID;
        xpt_done(ccb);
        break;
    }
}

#define ISPDDB  (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
void
isp_done(struct ccb_scsiio *sccb)
{
    struct ispsoftc *isp = XS_ISP(sccb);

    if (XS_NOERR(sccb))
        XS_SETERR(sccb, CAM_REQ_CMP);

    if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
        (sccb->scsi_status != SCSI_STATUS_OK)) {
        sccb->ccb_h.status &= ~CAM_STATUS_MASK;
        if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
            (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
            sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
        } else {
            sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
        }
    }

    sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
        if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
            sccb->ccb_h.status |= CAM_DEV_QFRZN;
            xpt_freeze_devq(sccb->ccb_h.path, 1);
            isp_prt(isp, ISP_LOGDEBUG0,
                "freeze devq %d.%d cam sts %x scsi sts %x",
                sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
                sccb->ccb_h.status, sccb->scsi_status);
        }
    }

    if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
        (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
        xpt_print_path(sccb->ccb_h.path);
        isp_prt(isp, ISP_LOGINFO,
            "cam completion status 0x%x", sccb->ccb_h.status);
    }

    XS_CMD_S_DONE(sccb);
    if (XS_CMD_WDOG_P(sccb) == 0) {
        untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
        if (XS_CMD_GRACE_P(sccb)) {
            isp_prt(isp, ISP_LOGDEBUG2,
                "finished command on borrowed time");
        }
        XS_CMD_S_CLEAR(sccb);
        ISPLOCK_2_CAMLOCK(isp);
        xpt_done((union ccb *) sccb);
        CAMLOCK_2_ISPLOCK(isp);
    }
}
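
/*
 * Note on the interplay between isp_done() and isp_watchdog() above:
 * when XS_CMD_WDOG_P() is set, the watchdog is currently examining this
 * command, so isp_done() only marks it done and leaves the final
 * xpt_done() to the watchdog ("watchdog cleanup" path); otherwise the
 * watchdog callout is removed here and the CCB is completed directly.
 */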

int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
    int bus, rv = 0;
    switch (cmd) {
    case ISPASYNC_NEW_TGT_PARAMS:
    {
#ifdef CAM_NEW_TRAN_CODE
        struct ccb_trans_settings_scsi *scsi;
        struct ccb_trans_settings_spi *spi;
#endif
        int flags, tgt;
        sdparam *sdp = isp->isp_param;
        struct ccb_trans_settings cts;
        struct cam_path *tmppath;

        bzero(&cts, sizeof (struct ccb_trans_settings));

        tgt = *((int *)arg);
        bus = (tgt >> 16) & 0xffff;
        tgt &= 0xffff;
        sdp += bus;
        ISPLOCK_2_CAMLOCK(isp);
        if (xpt_create_path(&tmppath, NULL,
            cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
            tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
            CAMLOCK_2_ISPLOCK(isp);
            isp_prt(isp, ISP_LOGWARN,
                "isp_async cannot make temp path for %d.%d",
                tgt, bus);
            rv = -1;
            break;
        }
        CAMLOCK_2_ISPLOCK(isp);
        flags = sdp->isp_devparam[tgt].actv_flags;
#ifdef CAM_NEW_TRAN_CODE
        cts.type = CTS_TYPE_CURRENT_SETTINGS;
        cts.protocol = PROTO_SCSI;
        cts.transport = XPORT_SPI;

        scsi = &cts.proto_specific.scsi;
        spi = &cts.xport_specific.spi;

        if (flags & DPARM_TQING) {
            scsi->valid |= CTS_SCSI_VALID_TQ;
            scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
            spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
        }

        if (flags & DPARM_DISC) {
            spi->valid |= CTS_SPI_VALID_DISC;
            spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
        }
        spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
        if (flags & DPARM_WIDE) {
            spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
        } else {
            spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
        }
        if (flags & DPARM_SYNC) {
            spi->valid |= CTS_SPI_VALID_SYNC_RATE;
            spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
            spi->sync_period = sdp->isp_devparam[tgt].actv_period;
            spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
        }
#else
        cts.flags = CCB_TRANS_CURRENT_SETTINGS;
        cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
        if (flags & DPARM_DISC) {
            cts.flags |= CCB_TRANS_DISC_ENB;
        }
        if (flags & DPARM_TQING) {
            cts.flags |= CCB_TRANS_TAG_ENB;
        }
        cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
        cts.bus_width = (flags & DPARM_WIDE)?
            MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
        cts.sync_period = sdp->isp_devparam[tgt].actv_period;
        cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
        if (flags & DPARM_SYNC) {
            cts.valid |=
                CCB_TRANS_SYNC_RATE_VALID |
                CCB_TRANS_SYNC_OFFSET_VALID;
        }
#endif
        isp_prt(isp, ISP_LOGDEBUG2,
            "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
            bus, tgt, sdp->isp_devparam[tgt].actv_period,
            sdp->isp_devparam[tgt].actv_offset, flags);
        xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
        ISPLOCK_2_CAMLOCK(isp);
        xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
        xpt_free_path(tmppath);
        CAMLOCK_2_ISPLOCK(isp);
        break;
    }
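
    /*
     * The case above is how renegotiated parameters reach interested CAM
     * peripheral drivers: the active flags for the target are repackaged
     * into a ccb_trans_settings and broadcast with
     * xpt_async(AC_TRANSFER_NEG, ...) on a temporary path that is freed
     * immediately afterwards.
     */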
2838 */ 2839 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 2840 break; 2841 case ISPASYNC_PROMENADE: 2842 { 2843 struct cam_path *tmppath; 2844 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x " 2845 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x"; 2846 static const char *roles[4] = { 2847 "(none)", "Target", "Initiator", "Target/Initiator" 2848 }; 2849 fcparam *fcp = isp->isp_param; 2850 int tgt = *((int *) arg); 2851 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT); 2852 struct lportdb *lp = &fcp->portdb[tgt]; 2853 2854 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid, 2855 roles[lp->roles & 0x3], 2856 (lp->valid)? "Arrived" : "Departed", 2857 (u_int32_t) (lp->port_wwn >> 32), 2858 (u_int32_t) (lp->port_wwn & 0xffffffffLL), 2859 (u_int32_t) (lp->node_wwn >> 32), 2860 (u_int32_t) (lp->node_wwn & 0xffffffffLL)); 2861 2862 ISPLOCK_2_CAMLOCK(isp); 2863 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim), 2864 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2865 CAMLOCK_2_ISPLOCK(isp); 2866 break; 2867 } 2868 /* 2869 * Policy: only announce targets. 2870 */ 2871 if (lp->roles & is_tgt_mask) { 2872 if (lp->valid) { 2873 xpt_async(AC_FOUND_DEVICE, tmppath, NULL); 2874 } else { 2875 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2876 } 2877 } 2878 xpt_free_path(tmppath); 2879 CAMLOCK_2_ISPLOCK(isp); 2880 break; 2881 } 2882 case ISPASYNC_CHANGE_NOTIFY: 2883 if (arg == ISPASYNC_CHANGE_PDB) { 2884 isp_prt(isp, ISP_LOGINFO, 2885 "Port Database Changed"); 2886 } else if (arg == ISPASYNC_CHANGE_SNS) { 2887 isp_prt(isp, ISP_LOGINFO, 2888 "Name Server Database Changed"); 2889 } 2890 #ifdef ISP_SMPLOCK 2891 cv_signal(&isp->isp_osinfo.kthread_cv); 2892 #else 2893 wakeup(&isp->isp_osinfo.kthread_cv); 2894 #endif 2895 break; 2896 case ISPASYNC_FABRIC_DEV: 2897 { 2898 int target, base, lim; 2899 fcparam *fcp = isp->isp_param; 2900 struct lportdb *lp = NULL; 2901 struct lportdb *clp = (struct lportdb *) arg; 2902 char *pt; 2903 2904 switch (clp->port_type) { 2905 case 1: 2906 pt = " N_Port"; 2907 break; 2908 case 2: 2909 pt = " NL_Port"; 2910 break; 2911 case 3: 2912 pt = "F/NL_Port"; 2913 break; 2914 case 0x7f: 2915 pt = " Nx_Port"; 2916 break; 2917 case 0x81: 2918 pt = " F_port"; 2919 break; 2920 case 0x82: 2921 pt = " FL_Port"; 2922 break; 2923 case 0x84: 2924 pt = " E_port"; 2925 break; 2926 default: 2927 pt = " "; 2928 break; 2929 } 2930 2931 isp_prt(isp, ISP_LOGINFO, 2932 "%s Fabric Device @ PortID 0x%x", pt, clp->portid); 2933 2934 /* 2935 * If we don't have an initiator role we bail. 2936 * 2937 * We just use ISPASYNC_FABRIC_DEV for announcement purposes. 2938 */ 2939 2940 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) { 2941 break; 2942 } 2943 2944 /* 2945 * Is this entry for us? If so, we bail. 2946 */ 2947 2948 if (fcp->isp_portid == clp->portid) { 2949 break; 2950 } 2951 2952 /* 2953 * Else, the default policy is to find room for it in 2954 * our local port database. Later, when we execute 2955 * the call to isp_pdb_sync either this newly arrived 2956 * or already logged in device will be (re)announced. 2957 */ 2958 2959 if (fcp->isp_topo == TOPO_FL_PORT) 2960 base = FC_SNS_ID+1; 2961 else 2962 base = 0; 2963 2964 if (fcp->isp_topo == TOPO_N_PORT) 2965 lim = 1; 2966 else 2967 lim = MAX_FC_TARG; 2968 2969 /* 2970 * Is it already in our list? 

    case ISPASYNC_FABRIC_DEV:
    {
        int target, base, lim;
        fcparam *fcp = isp->isp_param;
        struct lportdb *lp = NULL;
        struct lportdb *clp = (struct lportdb *) arg;
        char *pt;

        switch (clp->port_type) {
        case 1:
            pt = " N_Port";
            break;
        case 2:
            pt = " NL_Port";
            break;
        case 3:
            pt = "F/NL_Port";
            break;
        case 0x7f:
            pt = " Nx_Port";
            break;
        case 0x81:
            pt = " F_port";
            break;
        case 0x82:
            pt = " FL_Port";
            break;
        case 0x84:
            pt = " E_port";
            break;
        default:
            pt = " ";
            break;
        }

        isp_prt(isp, ISP_LOGINFO,
            "%s Fabric Device @ PortID 0x%x", pt, clp->portid);

        /*
         * If we don't have an initiator role we bail.
         *
         * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
         */

        if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
            break;
        }

        /*
         * Is this entry for us? If so, we bail.
         */

        if (fcp->isp_portid == clp->portid) {
            break;
        }

        /*
         * Else, the default policy is to find room for it in
         * our local port database. Later, when we execute
         * the call to isp_pdb_sync either this newly arrived
         * or already logged in device will be (re)announced.
         */

        if (fcp->isp_topo == TOPO_FL_PORT)
            base = FC_SNS_ID+1;
        else
            base = 0;

        if (fcp->isp_topo == TOPO_N_PORT)
            lim = 1;
        else
            lim = MAX_FC_TARG;

        /*
         * Is it already in our list?
         */
        for (target = base; target < lim; target++) {
            if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
                continue;
            }
            lp = &fcp->portdb[target];
            if (lp->port_wwn == clp->port_wwn &&
                lp->node_wwn == clp->node_wwn) {
                lp->fabric_dev = 1;
                break;
            }
        }
        if (target < lim) {
            break;
        }
        for (target = base; target < lim; target++) {
            if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
                continue;
            }
            lp = &fcp->portdb[target];
            if (lp->port_wwn == 0) {
                break;
            }
        }
        if (target == lim) {
            isp_prt(isp, ISP_LOGWARN,
                "out of space for fabric devices");
            break;
        }
        lp->port_type = clp->port_type;
        lp->fc4_type = clp->fc4_type;
        lp->node_wwn = clp->node_wwn;
        lp->port_wwn = clp->port_wwn;
        lp->portid = clp->portid;
        lp->fabric_dev = 1;
        break;
    }
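
    /*
     * Slot allocation in the case above, in brief: the local port database
     * is scanned once for an entry whose port and node WWNs already match
     * (the device is known and just re-marked as a fabric device), and only
     * if that fails is it scanned again for the first slot with a zero port
     * WWN. The base/lim bounds depend on topology, e.g. TOPO_FL_PORT starts
     * scanning at FC_SNS_ID + 1 and TOPO_N_PORT limits the scan to a single
     * entry; the reserved FL_PORT_ID..FC_SNS_ID range is always skipped.
     */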

#ifdef ISP_TARGET_MODE
    case ISPASYNC_TARGET_MESSAGE:
    {
        tmd_msg_t *mp = arg;
        isp_prt(isp, ISP_LOGALL,
            "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
            mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
            (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
            mp->nt_msg[0]);
        break;
    }
    case ISPASYNC_TARGET_EVENT:
    {
        tmd_event_t *ep = arg;
        isp_prt(isp, ISP_LOGALL,
            "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
        break;
    }
    case ISPASYNC_TARGET_ACTION:
        switch (((isphdr_t *)arg)->rqs_entry_type) {
        default:
            isp_prt(isp, ISP_LOGWARN,
                "event 0x%x for unhandled target action",
                ((isphdr_t *)arg)->rqs_entry_type);
            break;
        case RQSTYPE_NOTIFY:
            if (IS_SCSI(isp)) {
                rv = isp_handle_platform_notify_scsi(isp,
                    (in_entry_t *) arg);
            } else {
                rv = isp_handle_platform_notify_fc(isp,
                    (in_fcentry_t *) arg);
            }
            break;
        case RQSTYPE_ATIO:
            rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
            break;
        case RQSTYPE_ATIO2:
            rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
            break;
        case RQSTYPE_CTIO2:
        case RQSTYPE_CTIO:
            rv = isp_handle_platform_ctio(isp, arg);
            break;
        case RQSTYPE_ENABLE_LUN:
        case RQSTYPE_MODIFY_LUN:
            isp_ledone(isp, (lun_entry_t *) arg);
            break;
        }
        break;
#endif
    case ISPASYNC_FW_CRASH:
    {
        u_int16_t mbox1, mbox6;
        mbox1 = ISP_READ(isp, OUTMAILBOX1);
        if (IS_DUALBUS(isp)) {
            mbox6 = ISP_READ(isp, OUTMAILBOX6);
        } else {
            mbox6 = 0;
        }
        isp_prt(isp, ISP_LOGERR,
            "Internal Firmware Error on bus %d @ RISC Address 0x%x",
            mbox6, mbox1);
#ifdef ISP_FW_CRASH_DUMP
        /*
         * XXX: really need a thread to do this right.
         */
        if (IS_FC(isp)) {
            FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
            FCPARAM(isp)->isp_loopstate = LOOP_NIL;
            isp_freeze_loopdown(isp, "f/w crash");
            isp_fw_dump(isp);
        }
        isp_reinit(isp);
        isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
#endif
        break;
    }
    case ISPASYNC_UNHANDLED_RESPONSE:
        break;
    default:
        isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
        break;
    }
    return (rv);
}


/*
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
    ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
    DISABLE_INTS(isp);
}

void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
    va_list ap;
    if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
        return;
    }
    printf("%s: ", device_get_nameunit(isp->isp_dev));
    va_start(ap, fmt);
    vprintf(fmt, ap);
    va_end(ap);
    printf("\n");
}
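
/*
 * A usage sketch for isp_prt() above, illustrative only: a message is
 * emitted when its level is ISP_LOGALL or when any of its level bits are
 * enabled in isp->isp_dblev. For example,
 *
 *  isp_prt(isp, ISP_LOGDEBUG0, "resetting bus %d", bus);
 *
 * prints only if ISP_LOGDEBUG0 is set in isp_dblev; isp_prt() itself
 * prefixes the device name and appends the newline.
 */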