1 /*- 2 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 3 * 4 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice immediately at the beginning of the file, without modification, 11 * this list of conditions, and the following disclaimer. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include <dev/isp/isp_freebsd.h> 32 #include <sys/unistd.h> 33 #include <sys/kthread.h> 34 #include <machine/stdarg.h> /* for use by isp_prt below */ 35 #include <sys/conf.h> 36 #include <sys/module.h> 37 #include <sys/ioccom.h> 38 #include <dev/isp/isp_ioctl.h> 39 40 41 MODULE_VERSION(isp, 1); 42 MODULE_DEPEND(isp, cam, 1, 1, 1); 43 int isp_announced = 0; 44 ispfwfunc *isp_get_firmware_p = NULL; 45 46 static d_ioctl_t ispioctl; 47 static void isp_intr_enable(void *); 48 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *); 49 static void isp_poll(struct cam_sim *); 50 static timeout_t isp_watchdog; 51 static void isp_kthread(void *); 52 static void isp_action(struct cam_sim *, union ccb *); 53 54 55 static struct cdevsw isp_cdevsw = { 56 .d_version = D_VERSION, 57 .d_flags = D_NEEDGIANT, 58 .d_ioctl = ispioctl, 59 .d_name = "isp", 60 }; 61 62 static struct ispsoftc *isplist = NULL; 63 64 void 65 isp_attach(struct ispsoftc *isp) 66 { 67 int primary, secondary; 68 struct ccb_setasync csa; 69 struct cam_devq *devq; 70 struct cam_sim *sim; 71 struct cam_path *path; 72 73 /* 74 * Establish (in case of 12X0) which bus is the primary. 75 */ 76 77 primary = 0; 78 secondary = 1; 79 80 /* 81 * Create the device queue for our SIM(s). 82 */ 83 devq = cam_simq_alloc(isp->isp_maxcmds); 84 if (devq == NULL) { 85 return; 86 } 87 88 /* 89 * Construct our SIM entry. 
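	 * A single devq, sized to isp_maxcmds, backs this SIM (and the
	 * second SIM on dual-bus adapters below), so CAM never queues
	 * more commands to the driver than the firmware said it could
	 * accept.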
90 */ 91 ISPLOCK_2_CAMLOCK(isp); 92 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 93 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 94 if (sim == NULL) { 95 cam_simq_free(devq); 96 CAMLOCK_2_ISPLOCK(isp); 97 return; 98 } 99 CAMLOCK_2_ISPLOCK(isp); 100 101 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 102 isp->isp_osinfo.ehook.ich_arg = isp; 103 ISPLOCK_2_CAMLOCK(isp); 104 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 105 cam_sim_free(sim, TRUE); 106 CAMLOCK_2_ISPLOCK(isp); 107 isp_prt(isp, ISP_LOGERR, 108 "could not establish interrupt enable hook"); 109 return; 110 } 111 112 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 113 cam_sim_free(sim, TRUE); 114 CAMLOCK_2_ISPLOCK(isp); 115 return; 116 } 117 118 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 119 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 120 xpt_bus_deregister(cam_sim_path(sim)); 121 cam_sim_free(sim, TRUE); 122 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 123 CAMLOCK_2_ISPLOCK(isp); 124 return; 125 } 126 127 xpt_setup_ccb(&csa.ccb_h, path, 5); 128 csa.ccb_h.func_code = XPT_SASYNC_CB; 129 csa.event_enable = AC_LOST_DEVICE; 130 csa.callback = isp_cam_async; 131 csa.callback_arg = sim; 132 xpt_action((union ccb *)&csa); 133 CAMLOCK_2_ISPLOCK(isp); 134 isp->isp_sim = sim; 135 isp->isp_path = path; 136 /* 137 * Create a kernel thread for fibre channel instances. We 138 * don't have dual channel FC cards. 139 */ 140 if (IS_FC(isp)) { 141 ISPLOCK_2_CAMLOCK(isp); 142 /* XXX: LOCK VIOLATION */ 143 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); 144 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 145 RFHIGHPID, 0, "%s: fc_thrd", 146 device_get_nameunit(isp->isp_dev))) { 147 xpt_bus_deregister(cam_sim_path(sim)); 148 cam_sim_free(sim, TRUE); 149 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 150 CAMLOCK_2_ISPLOCK(isp); 151 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 152 return; 153 } 154 CAMLOCK_2_ISPLOCK(isp); 155 } 156 157 158 /* 159 * If we have a second channel, construct SIM entry for that. 
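	 * The second SIM shares the devq allocated above and registers as
	 * bus 'secondary'; note that each failure path below also has to
	 * unwind the primary bus registration and path set up earlier.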
160 */ 161 if (IS_DUALBUS(isp)) { 162 ISPLOCK_2_CAMLOCK(isp); 163 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 164 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 165 if (sim == NULL) { 166 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 167 xpt_free_path(isp->isp_path); 168 cam_simq_free(devq); 169 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 170 return; 171 } 172 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 173 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 174 xpt_free_path(isp->isp_path); 175 cam_sim_free(sim, TRUE); 176 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 177 CAMLOCK_2_ISPLOCK(isp); 178 return; 179 } 180 181 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 182 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 183 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 184 xpt_free_path(isp->isp_path); 185 xpt_bus_deregister(cam_sim_path(sim)); 186 cam_sim_free(sim, TRUE); 187 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 188 CAMLOCK_2_ISPLOCK(isp); 189 return; 190 } 191 192 xpt_setup_ccb(&csa.ccb_h, path, 5); 193 csa.ccb_h.func_code = XPT_SASYNC_CB; 194 csa.event_enable = AC_LOST_DEVICE; 195 csa.callback = isp_cam_async; 196 csa.callback_arg = sim; 197 xpt_action((union ccb *)&csa); 198 CAMLOCK_2_ISPLOCK(isp); 199 isp->isp_sim2 = sim; 200 isp->isp_path2 = path; 201 } 202 203 /* 204 * Create device nodes 205 */ 206 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 207 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 208 209 if (isp->isp_role != ISP_ROLE_NONE) { 210 isp->isp_state = ISP_RUNSTATE; 211 ENABLE_INTS(isp); 212 } 213 if (isplist == NULL) { 214 isplist = isp; 215 } else { 216 struct ispsoftc *tmp = isplist; 217 while (tmp->isp_osinfo.next) { 218 tmp = tmp->isp_osinfo.next; 219 } 220 tmp->isp_osinfo.next = isp; 221 } 222 223 } 224 225 static INLINE void 226 isp_freeze_loopdown(struct ispsoftc *isp, char *msg) 227 { 228 if (isp->isp_osinfo.simqfrozen == 0) { 229 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 230 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 231 ISPLOCK_2_CAMLOCK(isp); 232 xpt_freeze_simq(isp->isp_sim, 1); 233 CAMLOCK_2_ISPLOCK(isp); 234 } else { 235 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 236 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 237 } 238 } 239 240 static int 241 ispioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 242 { 243 struct ispsoftc *isp; 244 int nr, retval = ENOTTY; 245 246 isp = isplist; 247 while (isp) { 248 if (minor(dev) == device_get_unit(isp->isp_dev)) { 249 break; 250 } 251 isp = isp->isp_osinfo.next; 252 } 253 if (isp == NULL) 254 return (ENXIO); 255 256 switch (cmd) { 257 #ifdef ISP_FW_CRASH_DUMP 258 case ISP_GET_FW_CRASH_DUMP: 259 { 260 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data; 261 size_t sz; 262 263 retval = 0; 264 if (IS_2200(isp)) 265 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 266 else 267 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 268 ISP_LOCK(isp); 269 if (ptr && *ptr) { 270 void *uaddr = *((void **) addr); 271 if (copyout(ptr, uaddr, sz)) { 272 retval = EFAULT; 273 } else { 274 *ptr = 0; 275 } 276 } else { 277 retval = ENXIO; 278 } 279 ISP_UNLOCK(isp); 280 break; 281 } 282 283 case ISP_FORCE_CRASH_DUMP: 284 ISP_LOCK(isp); 285 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)"); 286 isp_fw_dump(isp); 287 isp_reinit(isp); 288 ISP_UNLOCK(isp); 289 retval = 0; 290 break; 291 #endif 292 case ISP_SDBLEV: 293 { 294 int olddblev = isp->isp_dblev; 295 isp->isp_dblev = 
*(int *)addr; 296 *(int *)addr = olddblev; 297 retval = 0; 298 break; 299 } 300 case ISP_GETROLE: 301 *(int *)addr = isp->isp_role; 302 retval = 0; 303 break; 304 case ISP_SETROLE: 305 nr = *(int *)addr; 306 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 307 retval = EINVAL; 308 break; 309 } 310 *(int *)addr = isp->isp_role; 311 isp->isp_role = nr; 312 /* FALLTHROUGH */ 313 case ISP_RESETHBA: 314 ISP_LOCK(isp); 315 isp_reinit(isp); 316 ISP_UNLOCK(isp); 317 retval = 0; 318 break; 319 case ISP_RESCAN: 320 if (IS_FC(isp)) { 321 ISP_LOCK(isp); 322 if (isp_fc_runstate(isp, 5 * 1000000)) { 323 retval = EIO; 324 } else { 325 retval = 0; 326 } 327 ISP_UNLOCK(isp); 328 } 329 break; 330 case ISP_FC_LIP: 331 if (IS_FC(isp)) { 332 ISP_LOCK(isp); 333 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 334 retval = EIO; 335 } else { 336 retval = 0; 337 } 338 ISP_UNLOCK(isp); 339 } 340 break; 341 case ISP_FC_GETDINFO: 342 { 343 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 344 struct lportdb *lp; 345 346 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 347 retval = EINVAL; 348 break; 349 } 350 ISP_LOCK(isp); 351 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 352 if (lp->valid) { 353 ifc->loopid = lp->loopid; 354 ifc->portid = lp->portid; 355 ifc->node_wwn = lp->node_wwn; 356 ifc->port_wwn = lp->port_wwn; 357 retval = 0; 358 } else { 359 retval = ENODEV; 360 } 361 ISP_UNLOCK(isp); 362 break; 363 } 364 case ISP_GET_STATS: 365 { 366 isp_stats_t *sp = (isp_stats_t *) addr; 367 368 MEMZERO(sp, sizeof (*sp)); 369 sp->isp_stat_version = ISP_STATS_VERSION; 370 sp->isp_type = isp->isp_type; 371 sp->isp_revision = isp->isp_revision; 372 ISP_LOCK(isp); 373 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 374 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 375 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 376 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 377 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 378 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 379 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 380 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 381 ISP_UNLOCK(isp); 382 retval = 0; 383 break; 384 } 385 case ISP_CLR_STATS: 386 ISP_LOCK(isp); 387 isp->isp_intcnt = 0; 388 isp->isp_intbogus = 0; 389 isp->isp_intmboxc = 0; 390 isp->isp_intoasync = 0; 391 isp->isp_rsltccmplt = 0; 392 isp->isp_fphccmplt = 0; 393 isp->isp_rscchiwater = 0; 394 isp->isp_fpcchiwater = 0; 395 ISP_UNLOCK(isp); 396 retval = 0; 397 break; 398 case ISP_FC_GETHINFO: 399 { 400 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 401 MEMZERO(hba, sizeof (*hba)); 402 ISP_LOCK(isp); 403 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 404 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 405 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 406 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 407 hba->fc_scsi_supported = 1; 408 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 409 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 410 hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn; 411 hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn; 412 hba->active_node_wwn = ISP_NODEWWN(isp); 413 hba->active_port_wwn = ISP_PORTWWN(isp); 414 ISP_UNLOCK(isp); 415 retval = 0; 416 break; 417 } 418 case ISP_GET_FC_PARAM: 419 { 420 struct isp_fc_param *f = (struct isp_fc_param *) addr; 421 422 if (!IS_FC(isp)) { 423 retval = EINVAL; 424 break; 425 } 426 f->parameter = 0; 427 if (strcmp(f->param_name, "framelength") == 0) { 428 f->parameter = FCPARAM(isp)->isp_maxfrmlen; 429 retval = 0; 430 break; 431 } 432 if (strcmp(f->param_name, 
"exec_throttle") == 0) { 433 f->parameter = FCPARAM(isp)->isp_execthrottle; 434 retval = 0; 435 break; 436 } 437 if (strcmp(f->param_name, "fullduplex") == 0) { 438 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX) 439 f->parameter = 1; 440 retval = 0; 441 break; 442 } 443 if (strcmp(f->param_name, "loopid") == 0) { 444 f->parameter = FCPARAM(isp)->isp_loopid; 445 retval = 0; 446 break; 447 } 448 retval = EINVAL; 449 break; 450 } 451 case ISP_SET_FC_PARAM: 452 { 453 struct isp_fc_param *f = (struct isp_fc_param *) addr; 454 u_int32_t param = f->parameter; 455 456 if (!IS_FC(isp)) { 457 retval = EINVAL; 458 break; 459 } 460 f->parameter = 0; 461 if (strcmp(f->param_name, "framelength") == 0) { 462 if (param != 512 && param != 1024 && param != 1024) { 463 retval = EINVAL; 464 break; 465 } 466 FCPARAM(isp)->isp_maxfrmlen = param; 467 retval = 0; 468 break; 469 } 470 if (strcmp(f->param_name, "exec_throttle") == 0) { 471 if (param < 16 || param > 255) { 472 retval = EINVAL; 473 break; 474 } 475 FCPARAM(isp)->isp_execthrottle = param; 476 retval = 0; 477 break; 478 } 479 if (strcmp(f->param_name, "fullduplex") == 0) { 480 if (param != 0 && param != 1) { 481 retval = EINVAL; 482 break; 483 } 484 if (param) { 485 FCPARAM(isp)->isp_fwoptions |= 486 ICBOPT_FULL_DUPLEX; 487 } else { 488 FCPARAM(isp)->isp_fwoptions &= 489 ~ICBOPT_FULL_DUPLEX; 490 } 491 retval = 0; 492 break; 493 } 494 if (strcmp(f->param_name, "loopid") == 0) { 495 if (param < 0 || param > 125) { 496 retval = EINVAL; 497 break; 498 } 499 FCPARAM(isp)->isp_loopid = param; 500 retval = 0; 501 break; 502 } 503 retval = EINVAL; 504 break; 505 } 506 case ISP_TSK_MGMT: 507 { 508 int needmarker; 509 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; 510 mbreg_t mbs; 511 512 if (IS_SCSI(isp)) { 513 retval = EINVAL; 514 break; 515 } 516 517 memset(&mbs, 0, sizeof (mbs)); 518 needmarker = retval = 0; 519 520 switch (fct->action) { 521 case CLEAR_ACA: 522 mbs.param[0] = MBOX_CLEAR_ACA; 523 mbs.param[1] = fct->loopid << 8; 524 mbs.param[2] = fct->lun; 525 break; 526 case TARGET_RESET: 527 mbs.param[0] = MBOX_TARGET_RESET; 528 mbs.param[1] = fct->loopid << 8; 529 needmarker = 1; 530 break; 531 case LUN_RESET: 532 mbs.param[0] = MBOX_LUN_RESET; 533 mbs.param[1] = fct->loopid << 8; 534 mbs.param[2] = fct->lun; 535 needmarker = 1; 536 break; 537 case CLEAR_TASK_SET: 538 mbs.param[0] = MBOX_CLEAR_TASK_SET; 539 mbs.param[1] = fct->loopid << 8; 540 mbs.param[2] = fct->lun; 541 needmarker = 1; 542 break; 543 case ABORT_TASK_SET: 544 mbs.param[0] = MBOX_ABORT_TASK_SET; 545 mbs.param[1] = fct->loopid << 8; 546 mbs.param[2] = fct->lun; 547 needmarker = 1; 548 break; 549 default: 550 retval = EINVAL; 551 break; 552 } 553 if (retval == 0) { 554 ISP_LOCK(isp); 555 if (needmarker) { 556 isp->isp_sendmarker |= 1; 557 } 558 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); 559 ISP_UNLOCK(isp); 560 if (retval) 561 retval = EIO; 562 } 563 break; 564 } 565 default: 566 break; 567 } 568 return (retval); 569 } 570 571 static void 572 isp_intr_enable(void *arg) 573 { 574 struct ispsoftc *isp = arg; 575 if (isp->isp_role != ISP_ROLE_NONE) { 576 ENABLE_INTS(isp); 577 #if 0 578 isp->isp_osinfo.intsok = 1; 579 #endif 580 } 581 /* Release our hook so that the boot can continue. 
 */
	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
}

/*
 * Put the target mode functions here, because some are inlines
 */

#ifdef	ISP_TARGET_MODE

static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
static cam_status
create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
static void destroy_lun_state(struct ispsoftc *, tstate_t *);
static int isp_en_lun(struct ispsoftc *, union ccb *);
static void isp_ledone(struct ispsoftc *, lun_entry_t *);
static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
static timeout_t isp_refire_putback_atio;
static void isp_complete_ctio(union ccb *);
static void isp_target_putback_atio(union ccb *);
static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
static int isp_handle_platform_ctio(struct ispsoftc *, void *);
static void isp_handle_platform_ctio_fastpost(struct ispsoftc *, u_int32_t);
static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);

static INLINE int
is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr;
	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
	if (tptr == NULL) {
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	return (0);
}

static INLINE int
are_any_luns_enabled(struct ispsoftc *isp, int port)
{
	int lo, hi;
	if (IS_DUALBUS(isp)) {
		lo = (port * (LUN_HASH_SIZE >> 1));
		hi = lo + (LUN_HASH_SIZE >> 1);
	} else {
		lo = 0;
		hi = LUN_HASH_SIZE;
	}
	for (; lo < hi; lo++) {
		if (isp->isp_osinfo.lun_hash[lo]) {
			return (1);
		}
	}
	return (0);
}

static INLINE tstate_t *
get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr = NULL;

	if (lun == CAM_LUN_WILDCARD) {
		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
			tptr = &isp->isp_osinfo.tsdflt[bus];
			tptr->hold++;
			return (tptr);
		}
		return (NULL);
	} else {
		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
		if (tptr == NULL) {
			return (NULL);
		}
	}

	do {
		if (tptr->lun == lun && tptr->bus == bus) {
			tptr->hold++;
			return (tptr);
		}
	} while ((tptr = tptr->next) != NULL);
	return (tptr);
}

static INLINE void
rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
{
	if (tptr->hold)
		tptr->hold--;
}

static INLINE atio_private_data_t *
isp_get_atpd(struct ispsoftc *isp, int tag)
{
	atio_private_data_t *atp;
	for (atp = isp->isp_osinfo.atpdp;
	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
		if (atp->tag == tag)
			return (atp);
	}
	return (NULL);
}

static cam_status
create_lun_state(struct ispsoftc *isp, int bus,
    struct cam_path *path, tstate_t
**rslt) 698 { 699 cam_status status; 700 lun_id_t lun; 701 int hfx; 702 tstate_t *tptr, *new; 703 704 lun = xpt_path_lun_id(path); 705 if (lun < 0) { 706 return (CAM_LUN_INVALID); 707 } 708 if (is_lun_enabled(isp, bus, lun)) { 709 return (CAM_LUN_ALRDY_ENA); 710 } 711 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 712 if (new == NULL) { 713 return (CAM_RESRC_UNAVAIL); 714 } 715 716 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 717 xpt_path_target_id(path), xpt_path_lun_id(path)); 718 if (status != CAM_REQ_CMP) { 719 free(new, M_DEVBUF); 720 return (status); 721 } 722 new->bus = bus; 723 new->lun = lun; 724 SLIST_INIT(&new->atios); 725 SLIST_INIT(&new->inots); 726 new->hold = 1; 727 728 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 729 tptr = isp->isp_osinfo.lun_hash[hfx]; 730 if (tptr == NULL) { 731 isp->isp_osinfo.lun_hash[hfx] = new; 732 } else { 733 while (tptr->next) 734 tptr = tptr->next; 735 tptr->next = new; 736 } 737 *rslt = new; 738 return (CAM_REQ_CMP); 739 } 740 741 static INLINE void 742 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr) 743 { 744 int hfx; 745 tstate_t *lw, *pw; 746 747 if (tptr->hold) { 748 return; 749 } 750 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 751 pw = isp->isp_osinfo.lun_hash[hfx]; 752 if (pw == NULL) { 753 return; 754 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 755 isp->isp_osinfo.lun_hash[hfx] = pw->next; 756 } else { 757 lw = pw; 758 pw = lw->next; 759 while (pw) { 760 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 761 lw->next = pw->next; 762 break; 763 } 764 lw = pw; 765 pw = pw->next; 766 } 767 if (pw == NULL) { 768 return; 769 } 770 } 771 free(tptr, M_DEVBUF); 772 } 773 774 /* 775 * Enable luns. 776 */ 777 static int 778 isp_en_lun(struct ispsoftc *isp, union ccb *ccb) 779 { 780 struct ccb_en_lun *cel = &ccb->cel; 781 tstate_t *tptr; 782 u_int32_t seq; 783 int bus, cmd, av, wildcard, tm_on; 784 lun_id_t lun; 785 target_id_t tgt; 786 787 bus = XS_CHANNEL(ccb); 788 if (bus > 1) { 789 xpt_print_path(ccb->ccb_h.path); 790 printf("illegal bus %d\n", bus); 791 ccb->ccb_h.status = CAM_PATH_INVALID; 792 return (-1); 793 } 794 tgt = ccb->ccb_h.target_id; 795 lun = ccb->ccb_h.target_lun; 796 797 isp_prt(isp, ISP_LOGTDEBUG0, 798 "isp_en_lun: %sabling lun 0x%x on channel %d", 799 cel->enable? "en" : "dis", lun, bus); 800 801 802 if ((lun != CAM_LUN_WILDCARD) && 803 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 804 ccb->ccb_h.status = CAM_LUN_INVALID; 805 return (-1); 806 } 807 808 if (IS_SCSI(isp)) { 809 sdparam *sdp = isp->isp_param; 810 sdp += bus; 811 if (tgt != CAM_TARGET_WILDCARD && 812 tgt != sdp->isp_initiator_id) { 813 ccb->ccb_h.status = CAM_TID_INVALID; 814 return (-1); 815 } 816 } else { 817 /* 818 * There's really no point in doing this yet w/o multi-tid 819 * capability. Even then, it's problematic. 820 */ 821 #if 0 822 if (tgt != CAM_TARGET_WILDCARD && 823 tgt != FCPARAM(isp)->isp_iid) { 824 ccb->ccb_h.status = CAM_TID_INVALID; 825 return (-1); 826 } 827 #endif 828 /* 829 * This is as a good a place as any to check f/w capabilities. 830 */ 831 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) { 832 isp_prt(isp, ISP_LOGERR, 833 "firmware does not support target mode"); 834 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 835 return (-1); 836 } 837 /* 838 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 839 * XXX: dorks with our already fragile enable/disable code. 
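		 * XXX: (With SCC firmware the lun travels with each command
		 * XXX: and the ENABLE/MODIFY LUN requests issued below always
		 * XXX: name lun 0, so per-lun firmware would need a different
		 * XXX: enable/disable dance.)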
840 */ 841 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 842 isp_prt(isp, ISP_LOGERR, 843 "firmware not SCCLUN capable"); 844 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 845 return (-1); 846 } 847 } 848 849 if (tgt == CAM_TARGET_WILDCARD) { 850 if (lun == CAM_LUN_WILDCARD) { 851 wildcard = 1; 852 } else { 853 ccb->ccb_h.status = CAM_LUN_INVALID; 854 return (-1); 855 } 856 } else { 857 wildcard = 0; 858 } 859 860 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0; 861 862 /* 863 * Next check to see whether this is a target/lun wildcard action. 864 * 865 * If so, we know that we can accept commands for luns that haven't 866 * been enabled yet and send them upstream. Otherwise, we have to 867 * handle them locally (if we see them at all). 868 */ 869 870 if (wildcard) { 871 tptr = &isp->isp_osinfo.tsdflt[bus]; 872 if (cel->enable) { 873 if (tm_on) { 874 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 875 return (-1); 876 } 877 ccb->ccb_h.status = 878 xpt_create_path(&tptr->owner, NULL, 879 xpt_path_path_id(ccb->ccb_h.path), 880 xpt_path_target_id(ccb->ccb_h.path), 881 xpt_path_lun_id(ccb->ccb_h.path)); 882 if (ccb->ccb_h.status != CAM_REQ_CMP) { 883 return (-1); 884 } 885 SLIST_INIT(&tptr->atios); 886 SLIST_INIT(&tptr->inots); 887 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 888 } else { 889 if (tm_on == 0) { 890 ccb->ccb_h.status = CAM_REQ_CMP; 891 return (-1); 892 } 893 if (tptr->hold) { 894 ccb->ccb_h.status = CAM_SCSI_BUSY; 895 return (-1); 896 } 897 xpt_free_path(tptr->owner); 898 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 899 } 900 } 901 902 /* 903 * Now check to see whether this bus needs to be 904 * enabled/disabled with respect to target mode. 905 */ 906 av = bus << 31; 907 if (cel->enable && tm_on == 0) { 908 av |= ENABLE_TARGET_FLAG; 909 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 910 if (av) { 911 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 912 if (wildcard) { 913 isp->isp_osinfo.tmflags[bus] &= 914 ~TM_WILDCARD_ENABLED; 915 xpt_free_path(tptr->owner); 916 } 917 return (-1); 918 } 919 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 920 isp_prt(isp, ISP_LOGINFO, 921 "Target Mode enabled on channel %d", bus); 922 } else if (cel->enable == 0 && tm_on && wildcard) { 923 if (are_any_luns_enabled(isp, bus)) { 924 ccb->ccb_h.status = CAM_SCSI_BUSY; 925 return (-1); 926 } 927 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 928 if (av) { 929 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 930 return (-1); 931 } 932 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 933 isp_prt(isp, ISP_LOGINFO, 934 "Target Mode disabled on channel %d", bus); 935 } 936 937 if (wildcard) { 938 ccb->ccb_h.status = CAM_REQ_CMP; 939 return (-1); 940 } 941 942 /* 943 * Find an empty slot 944 */ 945 for (seq = 0; seq < NLEACT; seq++) { 946 if (isp->isp_osinfo.leact[seq] == 0) { 947 break; 948 } 949 } 950 if (seq >= NLEACT) { 951 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 952 return (-1); 953 954 } 955 isp->isp_osinfo.leact[seq] = ccb; 956 957 if (cel->enable) { 958 ccb->ccb_h.status = 959 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 960 if (ccb->ccb_h.status != CAM_REQ_CMP) { 961 isp->isp_osinfo.leact[seq] = 0; 962 return (-1); 963 } 964 } else { 965 tptr = get_lun_statep(isp, bus, lun); 966 if (tptr == NULL) { 967 ccb->ccb_h.status = CAM_LUN_INVALID; 968 return (-1); 969 } 970 } 971 972 if (cel->enable) { 973 int c, n, ulun = lun; 974 975 cmd = RQSTYPE_ENABLE_LUN; 976 c = DFLT_CMND_CNT; 977 n = DFLT_INOT_CNT; 978 if (IS_FC(isp) && lun != 0) { 979 cmd = RQSTYPE_MODIFY_LUN; 980 n = 0; 
981 /* 982 * For SCC firmware, we only deal with setting 983 * (enabling or modifying) lun 0. 984 */ 985 ulun = 0; 986 } 987 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 988 rls_lun_statep(isp, tptr); 989 ccb->ccb_h.status = CAM_REQ_INPROG; 990 return (seq); 991 } 992 } else { 993 int c, n, ulun = lun; 994 995 cmd = -RQSTYPE_MODIFY_LUN; 996 c = DFLT_CMND_CNT; 997 n = DFLT_INOT_CNT; 998 if (IS_FC(isp) && lun != 0) { 999 n = 0; 1000 /* 1001 * For SCC firmware, we only deal with setting 1002 * (enabling or modifying) lun 0. 1003 */ 1004 ulun = 0; 1005 } 1006 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1007 rls_lun_statep(isp, tptr); 1008 ccb->ccb_h.status = CAM_REQ_INPROG; 1009 return (seq); 1010 } 1011 } 1012 rls_lun_statep(isp, tptr); 1013 xpt_print_path(ccb->ccb_h.path); 1014 printf("isp_lun_cmd failed\n"); 1015 isp->isp_osinfo.leact[seq] = 0; 1016 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1017 return (-1); 1018 } 1019 1020 static void 1021 isp_ledone(struct ispsoftc *isp, lun_entry_t *lep) 1022 { 1023 const char lfmt[] = "lun %d now %sabled for target mode on channel %d"; 1024 union ccb *ccb; 1025 u_int32_t seq; 1026 tstate_t *tptr; 1027 int av; 1028 struct ccb_en_lun *cel; 1029 1030 seq = lep->le_reserved - 1; 1031 if (seq >= NLEACT) { 1032 isp_prt(isp, ISP_LOGERR, 1033 "seq out of range (%u) in isp_ledone", seq); 1034 return; 1035 } 1036 ccb = isp->isp_osinfo.leact[seq]; 1037 if (ccb == 0) { 1038 isp_prt(isp, ISP_LOGERR, 1039 "no ccb for seq %u in isp_ledone", seq); 1040 return; 1041 } 1042 cel = &ccb->cel; 1043 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); 1044 if (tptr == NULL) { 1045 xpt_print_path(ccb->ccb_h.path); 1046 printf("null tptr in isp_ledone\n"); 1047 isp->isp_osinfo.leact[seq] = 0; 1048 return; 1049 } 1050 1051 if (lep->le_status != LUN_OK) { 1052 xpt_print_path(ccb->ccb_h.path); 1053 printf("ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status); 1054 err: 1055 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1056 xpt_print_path(ccb->ccb_h.path); 1057 rls_lun_statep(isp, tptr); 1058 isp->isp_osinfo.leact[seq] = 0; 1059 ISPLOCK_2_CAMLOCK(isp); 1060 xpt_done(ccb); 1061 CAMLOCK_2_ISPLOCK(isp); 1062 return; 1063 } else { 1064 isp_prt(isp, ISP_LOGTDEBUG0, 1065 "isp_ledone: ENABLE/MODIFY done okay"); 1066 } 1067 1068 1069 if (cel->enable) { 1070 ccb->ccb_h.status = CAM_REQ_CMP; 1071 isp_prt(isp, /* ISP_LOGINFO */ ISP_LOGALL, lfmt, 1072 XS_LUN(ccb), "en", XS_CHANNEL(ccb)); 1073 rls_lun_statep(isp, tptr); 1074 isp->isp_osinfo.leact[seq] = 0; 1075 ISPLOCK_2_CAMLOCK(isp); 1076 xpt_done(ccb); 1077 CAMLOCK_2_ISPLOCK(isp); 1078 return; 1079 } 1080 1081 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) { 1082 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb), 1083 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) { 1084 xpt_print_path(ccb->ccb_h.path); 1085 printf("isp_ledone: isp_lun_cmd failed\n"); 1086 goto err; 1087 } 1088 rls_lun_statep(isp, tptr); 1089 return; 1090 } 1091 1092 isp_prt(isp, ISP_LOGINFO, lfmt, XS_LUN(ccb), "dis", XS_CHANNEL(ccb)); 1093 rls_lun_statep(isp, tptr); 1094 destroy_lun_state(isp, tptr); 1095 ccb->ccb_h.status = CAM_REQ_CMP; 1096 isp->isp_osinfo.leact[seq] = 0; 1097 ISPLOCK_2_CAMLOCK(isp); 1098 xpt_done(ccb); 1099 CAMLOCK_2_ISPLOCK(isp); 1100 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) { 1101 int bus = XS_CHANNEL(ccb); 1102 av = bus << 31; 1103 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1104 if (av) { 1105 isp_prt(isp, ISP_LOGWARN, 1106 "disable target mode on channel %d failed", bus); 1107 } else { 1108 
			isp_prt(isp, ISP_LOGINFO,
			    "Target Mode disabled on channel %d", bus);
		}
		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
	}
}


static cam_status
isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
{
	tstate_t *tptr;
	struct ccb_hdr_slist *lp;
	struct ccb_hdr *curelm;
	int found, *ctr;
	union ccb *accb = ccb->cab.abort_ccb;

	isp_prt(isp, ISP_LOGTDEBUG0, "aborting ccb %p", accb);
	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		int badpath = 0;
		if (IS_FC(isp) && (accb->ccb_h.target_id !=
		    ((fcparam *) isp->isp_param)->isp_loopid)) {
			badpath = 1;
		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
			badpath = 1;
		}
		if (badpath) {
			/*
			 * Being restrictive about target ids is really about
			 * making sure we're aborting for the right multi-tid
			 * path. This doesn't really make much sense at present.
			 */
#if 0
			return (CAM_PATH_INVALID);
#endif
		}
	}
	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
	if (tptr == NULL) {
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "isp_abort_tgt_ccb: can't get statep");
		return (CAM_PATH_INVALID);
	}
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &tptr->atios;
		ctr = &tptr->atio_count;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &tptr->inots;
		ctr = &tptr->inot_count;
	} else {
		rls_lun_statep(isp, tptr);
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "isp_abort_tgt_ccb: bad func %d\n", accb->ccb_h.func_code);
		return (CAM_UA_ABORT);
	}
	curelm = SLIST_FIRST(lp);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(lp, sim_links.sle);
	} else {
		while (curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
				    SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	rls_lun_statep(isp, tptr);
	if (found) {
		(*ctr)--;
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "isp_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}

static cam_status
isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_scsiio *cso = &ccb->csio;
	u_int16_t *hp, save_handle;
	u_int16_t nxti, optr;
	u_int8_t local[QENTRY_LEN];


	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_start_ctio\n");
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(local, QENTRY_LEN);

	/*
	 * We're either moving data or completing a command here.
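	 * For FC (CTIO2) that means either a mode 0 entry that moves data
	 * (optionally carrying final status) or a mode 1 entry that sends
	 * status and sense with no data; the parallel SCSI CTIO further
	 * below is the simpler single-format equivalent.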
1214 */ 1215 1216 if (IS_FC(isp)) { 1217 atio_private_data_t *atp; 1218 ct2_entry_t *cto = (ct2_entry_t *) local; 1219 1220 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1221 cto->ct_header.rqs_entry_count = 1; 1222 cto->ct_iid = cso->init_id; 1223 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 1224 cto->ct_lun = ccb->ccb_h.target_lun; 1225 } 1226 1227 atp = isp_get_atpd(isp, cso->tag_id); 1228 if (atp == NULL) { 1229 isp_prt(isp, ISP_LOGERR, 1230 "cannot find private data adjunct for tag %x", 1231 cso->tag_id); 1232 return (-1); 1233 } 1234 1235 cto->ct_rxid = cso->tag_id; 1236 if (cso->dxfer_len == 0) { 1237 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1238 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1239 cto->ct_flags |= CT2_SENDSTATUS; 1240 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1241 cto->ct_resid = 1242 atp->orig_datalen - atp->bytes_xfered; 1243 if (cto->ct_resid < 0) { 1244 cto->rsp.m1.ct_scsi_status |= 1245 CT2_DATA_OVER; 1246 } else if (cto->ct_resid > 0) { 1247 cto->rsp.m1.ct_scsi_status |= 1248 CT2_DATA_UNDER; 1249 } 1250 } 1251 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1252 int m = min(cso->sense_len, MAXRESPLEN); 1253 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m); 1254 cto->rsp.m1.ct_senselen = m; 1255 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1256 } 1257 } else { 1258 cto->ct_flags |= CT2_FLAG_MODE0; 1259 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1260 cto->ct_flags |= CT2_DATA_IN; 1261 } else { 1262 cto->ct_flags |= CT2_DATA_OUT; 1263 } 1264 cto->ct_reloff = atp->bytes_xfered; 1265 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1266 cto->ct_flags |= CT2_SENDSTATUS; 1267 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1268 cto->ct_resid = 1269 atp->orig_datalen - 1270 (atp->bytes_xfered + cso->dxfer_len); 1271 if (cto->ct_resid < 0) { 1272 cto->rsp.m0.ct_scsi_status |= 1273 CT2_DATA_OVER; 1274 } else if (cto->ct_resid > 0) { 1275 cto->rsp.m0.ct_scsi_status |= 1276 CT2_DATA_UNDER; 1277 } 1278 } else { 1279 atp->last_xframt = cso->dxfer_len; 1280 } 1281 /* 1282 * If we're sending data and status back together, 1283 * we can't also send back sense data as well. 
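			 * A mode 0 CTIO2 has no room for a sense payload, so
			 * CAM_SEND_SENSE is stripped here and the residual
			 * fields have to tell the story instead.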
1284 */ 1285 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1286 } 1287 1288 if (cto->ct_flags & CT2_SENDSTATUS) { 1289 isp_prt(isp, ISP_LOGTDEBUG0, 1290 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1291 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1292 cso->dxfer_len, cto->ct_resid); 1293 cto->ct_flags |= CT2_CCINCR; 1294 atp->state = ATPD_STATE_LAST_CTIO; 1295 } else 1296 atp->state = ATPD_STATE_CTIO; 1297 cto->ct_timeout = 10; 1298 hp = &cto->ct_syshandle; 1299 } else { 1300 ct_entry_t *cto = (ct_entry_t *) local; 1301 1302 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1303 cto->ct_header.rqs_entry_count = 1; 1304 cto->ct_iid = cso->init_id; 1305 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1306 cto->ct_tgt = ccb->ccb_h.target_id; 1307 cto->ct_lun = ccb->ccb_h.target_lun; 1308 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1309 if (AT_HAS_TAG(cso->tag_id)) { 1310 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id); 1311 cto->ct_flags |= CT_TQAE; 1312 } 1313 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1314 cto->ct_flags |= CT_NODISC; 1315 } 1316 if (cso->dxfer_len == 0) { 1317 cto->ct_flags |= CT_NO_DATA; 1318 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1319 cto->ct_flags |= CT_DATA_IN; 1320 } else { 1321 cto->ct_flags |= CT_DATA_OUT; 1322 } 1323 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1324 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1325 cto->ct_scsi_status = cso->scsi_status; 1326 cto->ct_resid = cso->resid; 1327 isp_prt(isp, ISP_LOGTDEBUG0, 1328 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1329 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1330 cso->tag_id); 1331 } 1332 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1333 cto->ct_timeout = 10; 1334 hp = &cto->ct_syshandle; 1335 } 1336 1337 if (isp_save_xs_tgt(isp, ccb, hp)) { 1338 xpt_print_path(ccb->ccb_h.path); 1339 printf("No XFLIST pointers for isp_target_start_ctio\n"); 1340 return (CAM_RESRC_UNAVAIL); 1341 } 1342 1343 1344 /* 1345 * Call the dma setup routines for this entry (and any subsequent 1346 * CTIOs) if there's data to move, and then tell the f/w it's got 1347 * new things to play with. As with isp_start's usage of DMA setup, 1348 * any swizzling is done in the machine dependent layer. Because 1349 * of this, we put the request onto the queue area first in native 1350 * format. 
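	 * The syshandle is noted off to one side first: the local entry may
	 * be rewritten (swizzled) during DMA setup, and on failure the saved
	 * copy is what gets handed back to isp_destroy_tgt_handle.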
1351 */ 1352 1353 save_handle = *hp; 1354 1355 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1356 case CMD_QUEUED: 1357 ISP_ADD_REQUEST(isp, nxti); 1358 return (CAM_REQ_INPROG); 1359 1360 case CMD_EAGAIN: 1361 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1362 isp_destroy_tgt_handle(isp, save_handle); 1363 return (CAM_RESRC_UNAVAIL); 1364 1365 default: 1366 isp_destroy_tgt_handle(isp, save_handle); 1367 return (XS_ERR(ccb)); 1368 } 1369 } 1370 1371 static void 1372 isp_refire_putback_atio(void *arg) 1373 { 1374 int s = splcam(); 1375 isp_target_putback_atio(arg); 1376 splx(s); 1377 } 1378 1379 static void 1380 isp_target_putback_atio(union ccb *ccb) 1381 { 1382 struct ispsoftc *isp; 1383 struct ccb_scsiio *cso; 1384 u_int16_t nxti, optr; 1385 void *qe; 1386 1387 isp = XS_ISP(ccb); 1388 1389 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1390 (void) timeout(isp_refire_putback_atio, ccb, 10); 1391 isp_prt(isp, ISP_LOGWARN, 1392 "isp_target_putback_atio: Request Queue Overflow"); 1393 return; 1394 } 1395 bzero(qe, QENTRY_LEN); 1396 cso = &ccb->csio; 1397 if (IS_FC(isp)) { 1398 at2_entry_t local, *at = &local; 1399 MEMZERO(at, sizeof (at2_entry_t)); 1400 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1401 at->at_header.rqs_entry_count = 1; 1402 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1403 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1404 } else { 1405 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1406 } 1407 at->at_status = CT_OK; 1408 at->at_rxid = cso->tag_id; 1409 at->at_iid = cso->ccb_h.target_id; 1410 isp_put_atio2(isp, at, qe); 1411 } else { 1412 at_entry_t local, *at = &local; 1413 MEMZERO(at, sizeof (at_entry_t)); 1414 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1415 at->at_header.rqs_entry_count = 1; 1416 at->at_iid = cso->init_id; 1417 at->at_iid |= XS_CHANNEL(ccb) << 7; 1418 at->at_tgt = cso->ccb_h.target_id; 1419 at->at_lun = cso->ccb_h.target_lun; 1420 at->at_status = CT_OK; 1421 at->at_tag_val = AT_GET_TAG(cso->tag_id); 1422 at->at_handle = AT_GET_HANDLE(cso->tag_id); 1423 isp_put_atio(isp, at, qe); 1424 } 1425 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); 1426 ISP_ADD_REQUEST(isp, nxti); 1427 isp_complete_ctio(ccb); 1428 } 1429 1430 static void 1431 isp_complete_ctio(union ccb *ccb) 1432 { 1433 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1434 ccb->ccb_h.status |= CAM_REQ_CMP; 1435 } 1436 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1437 xpt_done(ccb); 1438 } 1439 1440 /* 1441 * Handle ATIO stuff that the generic code can't. 1442 * This means handling CDBs. 1443 */ 1444 1445 static int 1446 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep) 1447 { 1448 tstate_t *tptr; 1449 int status, bus, iswildcard; 1450 struct ccb_accept_tio *atiop; 1451 1452 /* 1453 * The firmware status (except for the QLTM_SVALID bit) 1454 * indicates why this ATIO was sent to us. 1455 * 1456 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1457 * 1458 * If the DISCONNECTS DISABLED bit is set in the flags field, 1459 * we're still connected on the SCSI bus. 1460 */ 1461 status = aep->at_status; 1462 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 1463 /* 1464 * Bus Phase Sequence error. We should have sense data 1465 * suggested by the f/w. I'm not sure quite yet what 1466 * to do about this for CAM. 
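		 * For now the command is simply finished off with BUSY
		 * status so the initiator can retry it.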
1467 */ 1468 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1469 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1470 return (0); 1471 } 1472 if ((status & ~QLTM_SVALID) != AT_CDB) { 1473 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1474 status); 1475 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1476 return (0); 1477 } 1478 1479 bus = GET_BUS_VAL(aep->at_iid); 1480 tptr = get_lun_statep(isp, bus, aep->at_lun); 1481 if (tptr == NULL) { 1482 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1483 if (tptr == NULL) { 1484 isp_endcmd(isp, aep, 1485 SCSI_STATUS_CHECK_COND | ECMD_SVALID | 1486 (0x5 << 12) | (0x25 << 16), 0); 1487 return (0); 1488 } 1489 iswildcard = 1; 1490 } else { 1491 iswildcard = 0; 1492 } 1493 1494 if (tptr == NULL) { 1495 /* 1496 * Because we can't autofeed sense data back with 1497 * a command for parallel SCSI, we can't give back 1498 * a CHECK CONDITION. We'll give back a BUSY status 1499 * instead. This works out okay because the only 1500 * time we should, in fact, get this, is in the 1501 * case that somebody configured us without the 1502 * blackhole driver, so they get what they deserve. 1503 */ 1504 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1505 return (0); 1506 } 1507 1508 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1509 if (atiop == NULL) { 1510 /* 1511 * Because we can't autofeed sense data back with 1512 * a command for parallel SCSI, we can't give back 1513 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1514 * instead. This works out okay because the only time we 1515 * should, in fact, get this, is in the case that we've 1516 * run out of ATIOS. 1517 */ 1518 xpt_print_path(tptr->owner); 1519 isp_prt(isp, ISP_LOGWARN, 1520 "no ATIOS for lun %d from initiator %d on channel %d", 1521 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1522 if (aep->at_flags & AT_TQAE) 1523 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1524 else 1525 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1526 rls_lun_statep(isp, tptr); 1527 return (0); 1528 } 1529 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1530 tptr->atio_count--; 1531 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1532 aep->at_lun, tptr->atio_count); 1533 if (iswildcard) { 1534 atiop->ccb_h.target_id = aep->at_tgt; 1535 atiop->ccb_h.target_lun = aep->at_lun; 1536 } 1537 if (aep->at_flags & AT_NODISC) { 1538 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1539 } else { 1540 atiop->ccb_h.flags = 0; 1541 } 1542 1543 if (status & QLTM_SVALID) { 1544 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1545 atiop->sense_len = amt; 1546 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1547 } else { 1548 atiop->sense_len = 0; 1549 } 1550 1551 atiop->init_id = GET_IID_VAL(aep->at_iid); 1552 atiop->cdb_len = aep->at_cdblen; 1553 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1554 atiop->ccb_h.status = CAM_CDB_RECVD; 1555 /* 1556 * Construct a tag 'id' based upon tag value (which may be 0..255) 1557 * and the handle (which we have to preserve). 
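	 * The firmware handle has to come back to us in any CTIO we issue,
	 * so AT_MAKE_TAGID folds it into tag_id alongside the tag value;
	 * AT_GET_HANDLE and AT_GET_TAG recover the pieces later.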
1558 */ 1559 AT_MAKE_TAGID(atiop->tag_id, device_get_unit(isp->isp_dev), aep); 1560 if (aep->at_flags & AT_TQAE) { 1561 atiop->tag_action = aep->at_tag_type; 1562 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1563 } 1564 xpt_done((union ccb*)atiop); 1565 isp_prt(isp, ISP_LOGTDEBUG0, 1566 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", 1567 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), 1568 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, 1569 aep->at_tag_type, (aep->at_flags & AT_NODISC)? 1570 "nondisc" : "disconnecting"); 1571 rls_lun_statep(isp, tptr); 1572 return (0); 1573 } 1574 1575 static int 1576 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep) 1577 { 1578 lun_id_t lun; 1579 tstate_t *tptr; 1580 struct ccb_accept_tio *atiop; 1581 atio_private_data_t *atp; 1582 1583 /* 1584 * The firmware status (except for the QLTM_SVALID bit) 1585 * indicates why this ATIO was sent to us. 1586 * 1587 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1588 */ 1589 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1590 isp_prt(isp, ISP_LOGWARN, 1591 "bogus atio (0x%x) leaked to platform", aep->at_status); 1592 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1593 return (0); 1594 } 1595 1596 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1597 lun = aep->at_scclun; 1598 } else { 1599 lun = aep->at_lun; 1600 } 1601 tptr = get_lun_statep(isp, 0, lun); 1602 if (tptr == NULL) { 1603 isp_prt(isp, ISP_LOGTDEBUG0, 1604 "[0x%x] no state pointer for lun %d", aep->at_rxid, lun); 1605 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1606 if (tptr == NULL) { 1607 isp_endcmd(isp, aep, 1608 SCSI_STATUS_CHECK_COND | ECMD_SVALID | 1609 (0x5 << 12) | (0x25 << 16), 0); 1610 return (0); 1611 } 1612 } 1613 1614 atp = isp_get_atpd(isp, 0); 1615 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1616 if (atiop == NULL || atp == NULL) { 1617 1618 /* 1619 * Because we can't autofeed sense data back with 1620 * a command for parallel SCSI, we can't give back 1621 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1622 * instead. This works out okay because the only time we 1623 * should, in fact, get this, is in the case that we've 1624 * run out of ATIOS. 1625 */ 1626 xpt_print_path(tptr->owner); 1627 isp_prt(isp, ISP_LOGWARN, 1628 "no %s for lun %d from initiator %d", 1629 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : 1630 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid); 1631 rls_lun_statep(isp, tptr); 1632 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1633 return (0); 1634 } 1635 atp->state = ATPD_STATE_ATIO; 1636 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1637 tptr->atio_count--; 1638 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1639 lun, tptr->atio_count); 1640 1641 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1642 atiop->ccb_h.target_id = 1643 ((fcparam *)isp->isp_param)->isp_loopid; 1644 atiop->ccb_h.target_lun = lun; 1645 } 1646 /* 1647 * We don't get 'suggested' sense data as we do with SCSI cards. 
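	 * Any sense the peripheral driver wants delivered goes back later
	 * in a mode 1 status CTIO2 (see isp_target_start_ctio).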
	 */
	atiop->sense_len = 0;

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
	case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ACAQ:	/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;

	atp->tag = atiop->tag_id;
	atp->lun = lun;
	atp->orig_datalen = aep->at_datalen;
	atp->last_xframt = 0;
	atp->bytes_xfered = 0;
	atp->state = ATPD_STATE_CAM;
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done((union ccb*)atiop);

	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;
	u_int16_t tval;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find adjunct for %x after I/O",
			    ct->ct_rxid);
			return (0);
		}
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			resid = ct->ct_resid;
			atp->bytes_xfered += (atp->last_xframt - resid);
			atp->last_xframt = 0;
		}
		if (sentstatus || !ok) {
			atp->tag = 0;
		}
		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    resid, sentstatus? "FIN" : "MID");
		tval = ct->ct_rxid;

		/* XXX: should really come after isp_complete_ctio */
		atp->state = ATPD_STATE_PDON;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
1750 */ 1751 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1752 if (ct->ct_status & QLTM_SVALID) { 1753 char *sp = (char *)ct; 1754 sp += CTIO_SENSE_OFFSET; 1755 ccb->csio.sense_len = 1756 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); 1757 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); 1758 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1759 } 1760 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 1761 resid = ct->ct_resid; 1762 } 1763 isp_prt(isp, ISP_LOGTDEBUG0, 1764 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", 1765 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, 1766 ct->ct_status, ct->ct_flags, resid, 1767 sentstatus? "FIN" : "MID"); 1768 tval = ct->ct_fwhandle; 1769 } 1770 ccb->csio.resid += resid; 1771 1772 /* 1773 * We're here either because intermediate data transfers are done 1774 * and/or the final status CTIO (which may have joined with a 1775 * Data Transfer) is done. 1776 * 1777 * In any case, for this platform, the upper layers figure out 1778 * what to do next, so all we do here is collect status and 1779 * pass information along. Any DMA handles have already been 1780 * freed. 1781 */ 1782 if (notify_cam == 0) { 1783 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1784 return (0); 1785 } 1786 1787 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1788 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1789 1790 if (!ok) { 1791 isp_target_putback_atio(ccb); 1792 } else { 1793 isp_complete_ctio(ccb); 1794 1795 } 1796 return (0); 1797 } 1798 1799 static void 1800 isp_handle_platform_ctio_fastpost(struct ispsoftc *isp, u_int32_t token) 1801 { 1802 union ccb *ccb; 1803 ccb = isp_find_xs_tgt(isp, token & 0xffff); 1804 KASSERT((ccb != NULL), 1805 ("null ccb in isp_handle_platform_ctio_fastpost")); 1806 isp_destroy_tgt_handle(isp, token & 0xffff); 1807 isp_prt(isp, ISP_LOGTDEBUG1, "CTIOx[%x] fastpost complete", 1808 token & 0xffff); 1809 isp_complete_ctio(ccb); 1810 } 1811 1812 static int 1813 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp) 1814 { 1815 return (0); /* XXXX */ 1816 } 1817 1818 static int 1819 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp) 1820 { 1821 1822 switch (inp->in_status) { 1823 case IN_PORT_LOGOUT: 1824 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1825 inp->in_iid); 1826 break; 1827 case IN_PORT_CHANGED: 1828 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1829 inp->in_iid); 1830 break; 1831 case IN_GLOBAL_LOGO: 1832 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1833 break; 1834 case IN_ABORT_TASK: 1835 { 1836 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1837 struct ccb_immed_notify *inot = NULL; 1838 1839 if (atp) { 1840 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1841 if (tptr) { 1842 inot = (struct ccb_immed_notify *) 1843 SLIST_FIRST(&tptr->inots); 1844 if (inot) { 1845 tptr->inot_count--; 1846 SLIST_REMOVE_HEAD(&tptr->inots, 1847 sim_links.sle); 1848 isp_prt(isp, ISP_LOGTDEBUG0, 1849 "Take FREE INOT count now %d", 1850 tptr->inot_count); 1851 } 1852 } 1853 isp_prt(isp, ISP_LOGWARN, 1854 "abort task RX_ID %x IID %d state %d", 1855 inp->in_seqid, inp->in_iid, atp->state); 1856 } else { 1857 isp_prt(isp, ISP_LOGWARN, 1858 "abort task RX_ID %x from iid %d, state unknown", 1859 inp->in_seqid, inp->in_iid); 1860 } 1861 if (inot) { 1862 inot->initiator_id = inp->in_iid; 1863 inot->sense_len = 0; 1864 inot->message_args[0] = MSG_ABORT_TAG; 1865 inot->message_args[1] = inp->in_seqid & 0xff; 1866 inot->message_args[2] = (inp->in_seqid >> 8) & 
			    0xff;
			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			xpt_done((union ccb *)inot);
		}
		break;
	}
	default:
		break;
	}
	return (0);
}
#endif

static void
isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct ispsoftc *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (struct ispsoftc *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			u_int16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int tgt;

			tgt = xpt_path_target_id(path);
			if (tgt >= 0) {
				sdp += cam_sim_bus(sim);
				ISP_LOCK(isp);
				nflags = sdp->isp_devparam[tgt].nvrm_flags;
#ifndef	ISP_TARGET_MODE
				nflags &= DPARM_SAFE_DFLT;
				if (isp->isp_loaded_fw) {
					nflags |= DPARM_NARROW | DPARM_ASYNC;
				}
#else
				nflags = DPARM_DEFAULT;
#endif
				oflags = sdp->isp_devparam[tgt].goal_flags;
				sdp->isp_devparam[tgt].goal_flags = nflags;
				sdp->isp_devparam[tgt].dev_update = 1;
				isp->isp_update |= (1 << cam_sim_bus(sim));
				(void) isp_control(isp,
				    ISPCTL_UPDATE_PARAMS, NULL);
				sdp->isp_devparam[tgt].goal_flags = oflags;
				ISP_UNLOCK(isp);
			}
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}

static void
isp_poll(struct cam_sim *sim)
{
	struct ispsoftc *isp = cam_sim_softc(sim);
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}


static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;
	int iok;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	iok = isp->isp_osinfo.intsok;
	isp->isp_osinfo.intsok = 0;
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t isr, sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup for handle 0x%x", handle);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
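			 * Its DMA maps can be unloaded and its handle
			 * recycled without the firmware writing into the
			 * buffers behind us.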
1989 */ 1990 if (XS_XFRLEN(xs)) { 1991 ISP_DMAFREE(isp, xs, handle); 1992 } 1993 isp_destroy_handle(isp, handle); 1994 xpt_print_path(xs->ccb_h.path); 1995 isp_prt(isp, ISP_LOGWARN, 1996 "watchdog timeout for handle 0x%x", handle); 1997 XS_SETERR(xs, CAM_CMD_TIMEOUT); 1998 XS_CMD_C_WDOG(xs); 1999 isp_done(xs); 2000 } else { 2001 u_int16_t nxti, optr; 2002 ispreq_t local, *mp= &local, *qe; 2003 2004 XS_CMD_C_WDOG(xs); 2005 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 2006 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) { 2007 ISP_UNLOCK(isp); 2008 return; 2009 } 2010 XS_CMD_S_GRACE(xs); 2011 MEMZERO((void *) mp, sizeof (*mp)); 2012 mp->req_header.rqs_entry_count = 1; 2013 mp->req_header.rqs_entry_type = RQSTYPE_MARKER; 2014 mp->req_modifier = SYNC_ALL; 2015 mp->req_target = XS_CHANNEL(xs) << 7; 2016 isp_put_request(isp, mp, qe); 2017 ISP_ADD_REQUEST(isp, nxti); 2018 } 2019 } else { 2020 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command"); 2021 } 2022 isp->isp_osinfo.intsok = iok; 2023 ISP_UNLOCK(isp); 2024 } 2025 2026 static void 2027 isp_kthread(void *arg) 2028 { 2029 struct ispsoftc *isp = arg; 2030 2031 #ifdef ISP_SMPLOCK 2032 mtx_lock(&isp->isp_lock); 2033 #else 2034 mtx_lock(&Giant); 2035 #endif 2036 /* 2037 * The first loop is for our usage where we have yet to have 2038 * gotten good fibre channel state. 2039 */ 2040 for (;;) { 2041 int wasfrozen; 2042 2043 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state"); 2044 while (isp_fc_runstate(isp, 2 * 1000000) != 0) { 2045 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood"); 2046 if (FCPARAM(isp)->isp_fwstate != FW_READY || 2047 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) { 2048 if (FCPARAM(isp)->loop_seen_once == 0 || 2049 isp->isp_osinfo.ktmature == 0) { 2050 break; 2051 } 2052 } 2053 #ifdef ISP_SMPLOCK 2054 msleep(isp_kthread, &isp->isp_lock, 2055 PRIBIO, "isp_fcthrd", hz); 2056 #else 2057 (void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz); 2058 #endif 2059 } 2060 2061 /* 2062 * Even if we didn't get good loop state we may be 2063 * unfreezing the SIMQ so that we can kill off 2064 * commands (if we've never seen loop before, for example). 2065 */ 2066 isp->isp_osinfo.ktmature = 1; 2067 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 2068 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 2069 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 2070 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq"); 2071 ISPLOCK_2_CAMLOCK(isp); 2072 xpt_release_simq(isp->isp_sim, 1); 2073 CAMLOCK_2_ISPLOCK(isp); 2074 } 2075 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called"); 2076 #ifdef ISP_SMPLOCK 2077 cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock); 2078 #else 2079 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0); 2080 #endif 2081 } 2082 } 2083 2084 static void 2085 isp_action(struct cam_sim *sim, union ccb *ccb) 2086 { 2087 int bus, tgt, error; 2088 struct ispsoftc *isp; 2089 struct ccb_trans_settings *cts; 2090 2091 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2092 2093 isp = (struct ispsoftc *)cam_sim_softc(sim); 2094 ccb->ccb_h.sim_priv.entries[0].field = 0; 2095 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2096 if (isp->isp_state != ISP_RUNSTATE && 2097 ccb->ccb_h.func_code == XPT_SCSI_IO) { 2098 CAMLOCK_2_ISPLOCK(isp); 2099 isp_init(isp); 2100 if (isp->isp_state != ISP_INITSTATE) { 2101 ISP_UNLOCK(isp); 2102 /* 2103 * Lie. Say it was a selection timeout. 
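			 * CAM treats a selection timeout as a missing device,
			 * which retires the command quickly instead of
			 * retrying it against a chip that would not
			 * (re)initialize.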
			 */
			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISPLOCK_2_CAMLOCK(isp);
	}
	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);


	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
		}
#ifdef DIAGNOSTIC
		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
			ccb->ccb_h.status = CAM_PATH_INVALID;
		}
		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
			isp_prt(isp, ISP_LOGERR,
			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
			xpt_done(ccb);
			break;
		}
#endif
		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
		CAMLOCK_2_ISPLOCK(isp);
		error = isp_start((XS_T *) ccb);
		switch (error) {
		case CMD_QUEUED:
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
				u_int64_t ticks = (u_int64_t) hz;
				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
					ticks = 60 * 1000 * ticks;
				else
					ticks = ccb->ccb_h.timeout * hz;
				ticks = ((ticks + 999) / 1000) + hz + hz;
				if (ticks >= 0x80000000) {
					isp_prt(isp, ISP_LOGERR,
					    "timeout overflow");
					ticks = 0x7fffffff;
				}
				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
				    (caddr_t)ccb, (int)ticks);
			} else {
				callout_handle_init(&ccb->ccb_h.timeout_ch);
			}
			ISPLOCK_2_CAMLOCK(isp);
			break;
		case CMD_RQLATER:
			/*
			 * This can only happen for Fibre Channel
			 */
			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
			if (FCPARAM(isp)->loop_seen_once == 0 &&
			    isp->isp_osinfo.ktmature) {
				ISPLOCK_2_CAMLOCK(isp);
				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
				xpt_done(ccb);
				break;
			}
#ifdef ISP_SMPLOCK
			cv_signal(&isp->isp_osinfo.kthread_cv);
#else
			wakeup(&isp->isp_osinfo.kthread_cv);
#endif
			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			ISPLOCK_2_CAMLOCK(isp);
			xpt_done(ccb);
			break;
		case CMD_EAGAIN:
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			ISPLOCK_2_CAMLOCK(isp);
			xpt_done(ccb);
			break;
		case CMD_COMPLETE:
			isp_done((struct ccb_scsiio *) ccb);
			ISPLOCK_2_CAMLOCK(isp);
			break;
		default:
			isp_prt(isp, ISP_LOGERR,
			    "What's this? 0x%x at %d in file %s",
			    error, __LINE__, __FILE__);
			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
			xpt_done(ccb);
			ISPLOCK_2_CAMLOCK(isp);
		}
		break;

#ifdef ISP_TARGET_MODE
	case XPT_EN_LUN:		/* Enable LUN as a target */
	{
		int seq, iok, i;
		CAMLOCK_2_ISPLOCK(isp);
		iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		seq = isp_en_lun(isp, ccb);
		if (seq < 0) {
			isp->isp_osinfo.intsok = iok;
			ISPLOCK_2_CAMLOCK(isp);
			xpt_done(ccb);
			break;
		}
		for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) {
			u_int16_t isr, sema, mbox;
			if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
				isp_intr(isp, isr, sema, mbox);
			}
			DELAY(1000);
		}
		isp->isp_osinfo.intsok = iok;
		ISPLOCK_2_CAMLOCK(isp);
		break;
	}
	case XPT_NOTIFY_ACK:		/* recycle notify ack */
	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
	{
		tstate_t *tptr =
		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			xpt_done(ccb);
			break;
		}
		ccb->ccb_h.sim_priv.entries[0].field = 0;
		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
		ccb->ccb_h.flags = 0;

		CAMLOCK_2_ISPLOCK(isp);
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
			/*
			 * Note that the command itself may not be done-
			 * it may not even have had the first CTIO sent.
			 */
			tptr->atio_count++;
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "Put FREE ATIO, lun %d, count now %d",
			    ccb->ccb_h.target_lun, tptr->atio_count);
			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
			    sim_links.sle);
		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
			tptr->inot_count++;
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "Put FREE INOT, lun %d, count now %d",
			    ccb->ccb_h.target_lun, tptr->inot_count);
			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
			    sim_links.sle);
		} else {
			isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");
		}
		rls_lun_statep(isp, tptr);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ISPLOCK_2_CAMLOCK(isp);
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		CAMLOCK_2_ISPLOCK(isp);
		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
			isp_prt(isp, ISP_LOGWARN,
			    "XPT_CONT_TARGET_IO: status 0x%x",
			    ccb->ccb_h.status);
			XS_SETERR(ccb, CAM_REQUEUE_REQ);
			ISPLOCK_2_CAMLOCK(isp);
			xpt_done(ccb);
		} else {
			ISPLOCK_2_CAMLOCK(isp);
			ccb->ccb_h.status |= CAM_SIM_QUEUED;
		}
		break;
	}
#endif
	case XPT_RESET_DEV:		/* BDR the specified SCSI device */

		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tgt = ccb->ccb_h.target_id;
		tgt |= (bus << 16);

		CAMLOCK_2_ISPLOCK(isp);
		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
		ISPLOCK_2_CAMLOCK(isp);
		if (error) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		union ccb *accb = ccb->cab.abort_ccb;
		CAMLOCK_2_ISPLOCK(isp);
		switch (accb->ccb_h.func_code) {
#ifdef ISP_TARGET_MODE
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			isp_prt(isp, ISP_LOGERR,
			    "cannot abort CTIOs yet");
"cannot abort CTIOs yet"); 2321 ccb->ccb_h.status = CAM_UA_ABORT; 2322 break; 2323 #endif 2324 case XPT_SCSI_IO: 2325 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2326 if (error) { 2327 ccb->ccb_h.status = CAM_UA_ABORT; 2328 } else { 2329 ccb->ccb_h.status = CAM_REQ_CMP; 2330 } 2331 break; 2332 default: 2333 ccb->ccb_h.status = CAM_REQ_INVALID; 2334 break; 2335 } 2336 ISPLOCK_2_CAMLOCK(isp); 2337 xpt_done(ccb); 2338 break; 2339 } 2340 #ifdef CAM_NEW_TRAN_CODE 2341 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2342 #else 2343 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2344 #endif 2345 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2346 cts = &ccb->cts; 2347 if (!IS_CURRENT_SETTINGS(cts)) { 2348 ccb->ccb_h.status = CAM_REQ_INVALID; 2349 xpt_done(ccb); 2350 break; 2351 } 2352 tgt = cts->ccb_h.target_id; 2353 CAMLOCK_2_ISPLOCK(isp); 2354 if (IS_SCSI(isp)) { 2355 #ifndef CAM_NEW_TRAN_CODE 2356 sdparam *sdp = isp->isp_param; 2357 u_int16_t *dptr; 2358 2359 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2360 2361 sdp += bus; 2362 /* 2363 * We always update (internally) from goal_flags 2364 * so any request to change settings just gets 2365 * vectored to that location. 2366 */ 2367 dptr = &sdp->isp_devparam[tgt].goal_flags; 2368 2369 /* 2370 * Note that these operations affect the 2371 * the goal flags (goal_flags)- not 2372 * the current state flags. Then we mark 2373 * things so that the next operation to 2374 * this HBA will cause the update to occur. 2375 */ 2376 if (cts->valid & CCB_TRANS_DISC_VALID) { 2377 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2378 *dptr |= DPARM_DISC; 2379 } else { 2380 *dptr &= ~DPARM_DISC; 2381 } 2382 } 2383 if (cts->valid & CCB_TRANS_TQ_VALID) { 2384 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2385 *dptr |= DPARM_TQING; 2386 } else { 2387 *dptr &= ~DPARM_TQING; 2388 } 2389 } 2390 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2391 switch (cts->bus_width) { 2392 case MSG_EXT_WDTR_BUS_16_BIT: 2393 *dptr |= DPARM_WIDE; 2394 break; 2395 default: 2396 *dptr &= ~DPARM_WIDE; 2397 } 2398 } 2399 /* 2400 * Any SYNC RATE of nonzero and SYNC_OFFSET 2401 * of nonzero will cause us to go to the 2402 * selected (from NVRAM) maximum value for 2403 * this device. At a later point, we'll 2404 * allow finer control. 2405 */ 2406 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2407 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2408 (cts->sync_offset > 0)) { 2409 *dptr |= DPARM_SYNC; 2410 } else { 2411 *dptr &= ~DPARM_SYNC; 2412 } 2413 *dptr |= DPARM_SAFE_DFLT; 2414 #else 2415 struct ccb_trans_settings_scsi *scsi = 2416 &cts->proto_specific.scsi; 2417 struct ccb_trans_settings_spi *spi = 2418 &cts->xport_specific.spi; 2419 sdparam *sdp = isp->isp_param; 2420 u_int16_t *dptr; 2421 2422 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2423 sdp += bus; 2424 /* 2425 * We always update (internally) from goal_flags 2426 * so any request to change settings just gets 2427 * vectored to that location. 
2428 */ 2429 dptr = &sdp->isp_devparam[tgt].goal_flags; 2430 2431 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2432 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2433 *dptr |= DPARM_DISC; 2434 else 2435 *dptr &= ~DPARM_DISC; 2436 } 2437 2438 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2439 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2440 *dptr |= DPARM_TQING; 2441 else 2442 *dptr &= ~DPARM_TQING; 2443 } 2444 2445 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2446 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2447 *dptr |= DPARM_WIDE; 2448 else 2449 *dptr &= ~DPARM_WIDE; 2450 } 2451 2452 /* 2453 * XXX: FIX ME 2454 */ 2455 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2456 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2457 (spi->sync_period && spi->sync_offset)) { 2458 *dptr |= DPARM_SYNC; 2459 /* 2460 * XXX: CHECK FOR LEGALITY 2461 */ 2462 sdp->isp_devparam[tgt].goal_period = 2463 spi->sync_period; 2464 sdp->isp_devparam[tgt].goal_offset = 2465 spi->sync_offset; 2466 } else { 2467 *dptr &= ~DPARM_SYNC; 2468 } 2469 #endif 2470 isp_prt(isp, ISP_LOGDEBUG0, 2471 "SET bus %d targ %d to flags %x off %x per %x", 2472 bus, tgt, sdp->isp_devparam[tgt].goal_flags, 2473 sdp->isp_devparam[tgt].goal_offset, 2474 sdp->isp_devparam[tgt].goal_period); 2475 sdp->isp_devparam[tgt].dev_update = 1; 2476 isp->isp_update |= (1 << bus); 2477 } 2478 ISPLOCK_2_CAMLOCK(isp); 2479 ccb->ccb_h.status = CAM_REQ_CMP; 2480 xpt_done(ccb); 2481 break; 2482 case XPT_GET_TRAN_SETTINGS: 2483 cts = &ccb->cts; 2484 tgt = cts->ccb_h.target_id; 2485 CAMLOCK_2_ISPLOCK(isp); 2486 if (IS_FC(isp)) { 2487 #ifndef CAM_NEW_TRAN_CODE 2488 /* 2489 * a lot of normal SCSI things don't make sense. 2490 */ 2491 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2492 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2493 /* 2494 * How do you measure the width of a high 2495 * speed serial bus? Well, in bytes. 2496 * 2497 * Offset and period make no sense, though, so we set 2498 * (above) a 'base' transfer speed to be gigabit. 
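			 * (CAM expresses these speeds in KB/s, so 100000 lines
			 * up with the roughly 100MB/s of 1Gb Fibre Channel and
			 * 200000 with 2Gb, which is what the bitrate and
			 * base_transfer_speed settings below report.)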
2499 */ 2500 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2501 #else 2502 fcparam *fcp = isp->isp_param; 2503 struct ccb_trans_settings_fc *fc = 2504 &cts->xport_specific.fc; 2505 2506 cts->protocol = PROTO_SCSI; 2507 cts->protocol_version = SCSI_REV_2; 2508 cts->transport = XPORT_FC; 2509 cts->transport_version = 0; 2510 2511 fc->valid = CTS_FC_VALID_SPEED; 2512 if (fcp->isp_gbspeed == 2) 2513 fc->bitrate = 200000; 2514 else 2515 fc->bitrate = 100000; 2516 if (tgt > 0 && tgt < MAX_FC_TARG) { 2517 struct lportdb *lp = &fcp->portdb[tgt]; 2518 fc->wwnn = lp->node_wwn; 2519 fc->wwpn = lp->port_wwn; 2520 fc->port = lp->portid; 2521 fc->valid |= CTS_FC_VALID_WWNN | 2522 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2523 } 2524 #endif 2525 } else { 2526 #ifdef CAM_NEW_TRAN_CODE 2527 struct ccb_trans_settings_scsi *scsi = 2528 &cts->proto_specific.scsi; 2529 struct ccb_trans_settings_spi *spi = 2530 &cts->xport_specific.spi; 2531 #endif 2532 sdparam *sdp = isp->isp_param; 2533 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2534 u_int16_t dval, pval, oval; 2535 2536 sdp += bus; 2537 2538 if (IS_CURRENT_SETTINGS(cts)) { 2539 sdp->isp_devparam[tgt].dev_refresh = 1; 2540 isp->isp_update |= (1 << bus); 2541 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2542 NULL); 2543 dval = sdp->isp_devparam[tgt].actv_flags; 2544 oval = sdp->isp_devparam[tgt].actv_offset; 2545 pval = sdp->isp_devparam[tgt].actv_period; 2546 } else { 2547 dval = sdp->isp_devparam[tgt].nvrm_flags; 2548 oval = sdp->isp_devparam[tgt].nvrm_offset; 2549 pval = sdp->isp_devparam[tgt].nvrm_period; 2550 } 2551 2552 #ifndef CAM_NEW_TRAN_CODE 2553 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2554 2555 if (dval & DPARM_DISC) { 2556 cts->flags |= CCB_TRANS_DISC_ENB; 2557 } 2558 if (dval & DPARM_TQING) { 2559 cts->flags |= CCB_TRANS_TAG_ENB; 2560 } 2561 if (dval & DPARM_WIDE) { 2562 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2563 } else { 2564 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2565 } 2566 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2567 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2568 2569 if ((dval & DPARM_SYNC) && oval != 0) { 2570 cts->sync_period = pval; 2571 cts->sync_offset = oval; 2572 cts->valid |= 2573 CCB_TRANS_SYNC_RATE_VALID | 2574 CCB_TRANS_SYNC_OFFSET_VALID; 2575 } 2576 #else 2577 cts->protocol = PROTO_SCSI; 2578 cts->protocol_version = SCSI_REV_2; 2579 cts->transport = XPORT_SPI; 2580 cts->transport_version = 2; 2581 2582 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 2583 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 2584 if (dval & DPARM_DISC) { 2585 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2586 } 2587 if (dval & DPARM_TQING) { 2588 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2589 } 2590 if ((dval & DPARM_SYNC) && oval && pval) { 2591 spi->sync_offset = oval; 2592 spi->sync_period = pval; 2593 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2594 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2595 } 2596 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2597 if (dval & DPARM_WIDE) { 2598 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2599 } else { 2600 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2601 } 2602 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2603 scsi->valid = CTS_SCSI_VALID_TQ; 2604 spi->valid |= CTS_SPI_VALID_DISC; 2605 } else { 2606 scsi->valid = 0; 2607 } 2608 #endif 2609 isp_prt(isp, ISP_LOGDEBUG0, 2610 "GET %s bus %d targ %d to flags %x off %x per %x", 2611 IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM", 2612 bus, tgt, dval, oval, pval); 2613 } 2614 ISPLOCK_2_CAMLOCK(isp); 2615 ccb->ccb_h.status = CAM_REQ_CMP; 2616 xpt_done(ccb); 2617 break; 2618 2619 case XPT_CALC_GEOMETRY: 2620 { 2621 struct ccb_calc_geometry *ccg; 2622 2623 ccg = &ccb->ccg; 2624 if (ccg->block_size == 0) { 2625 isp_prt(isp, ISP_LOGERR, 2626 "%d.%d XPT_CALC_GEOMETRY block size 0?", 2627 ccg->ccb_h.target_id, ccg->ccb_h.target_lun); 2628 ccb->ccb_h.status = CAM_REQ_INVALID; 2629 xpt_done(ccb); 2630 break; 2631 } 2632 cam_calc_geometry(ccg, /*extended*/1); 2633 xpt_done(ccb); 2634 break; 2635 } 2636 case XPT_RESET_BUS: /* Reset the specified bus */ 2637 bus = cam_sim_bus(sim); 2638 CAMLOCK_2_ISPLOCK(isp); 2639 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2640 ISPLOCK_2_CAMLOCK(isp); 2641 if (error) 2642 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2643 else { 2644 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2645 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2646 else if (isp->isp_path != NULL) 2647 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2648 ccb->ccb_h.status = CAM_REQ_CMP; 2649 } 2650 xpt_done(ccb); 2651 break; 2652 2653 case XPT_TERM_IO: /* Terminate the I/O process */ 2654 ccb->ccb_h.status = CAM_REQ_INVALID; 2655 xpt_done(ccb); 2656 break; 2657 2658 case XPT_PATH_INQ: /* Path routing inquiry */ 2659 { 2660 struct ccb_pathinq *cpi = &ccb->cpi; 2661 2662 cpi->version_num = 1; 2663 #ifdef ISP_TARGET_MODE 2664 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2665 #else 2666 cpi->target_sprt = 0; 2667 #endif 2668 cpi->hba_eng_cnt = 0; 2669 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2670 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2671 cpi->bus_id = cam_sim_bus(sim); 2672 if (IS_FC(isp)) { 2673 cpi->hba_misc = PIM_NOBUSRESET; 2674 /* 2675 * Because our loop ID can shift from time to time, 2676 * make our initiator ID out of range of our bus. 2677 */ 2678 cpi->initiator_id = cpi->max_target + 1; 2679 2680 /* 2681 * Set base transfer capabilities for Fibre Channel. 2682 * Technically not correct because we don't know 2683 * what media we're running on top of- but we'll 2684 * look good if we always say 100MB/s. 2685 */ 2686 if (FCPARAM(isp)->isp_gbspeed == 2) 2687 cpi->base_transfer_speed = 200000; 2688 else 2689 cpi->base_transfer_speed = 100000; 2690 cpi->hba_inquiry = PI_TAG_ABLE; 2691 #ifdef CAM_NEW_TRAN_CODE 2692 cpi->transport = XPORT_FC; 2693 cpi->transport_version = 0; /* WHAT'S THIS FOR? */ 2694 #endif 2695 } else { 2696 sdparam *sdp = isp->isp_param; 2697 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2698 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2699 cpi->hba_misc = 0; 2700 cpi->initiator_id = sdp->isp_initiator_id; 2701 cpi->base_transfer_speed = 3300; 2702 #ifdef CAM_NEW_TRAN_CODE 2703 cpi->transport = XPORT_SPI; 2704 cpi->transport_version = 2; /* WHAT'S THIS FOR? 
*/ 2705 #endif 2706 } 2707 #ifdef CAM_NEW_TRAN_CODE 2708 cpi->protocol = PROTO_SCSI; 2709 cpi->protocol_version = SCSI_REV_2; 2710 #endif 2711 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2712 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2713 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2714 cpi->unit_number = cam_sim_unit(sim); 2715 cpi->ccb_h.status = CAM_REQ_CMP; 2716 xpt_done(ccb); 2717 break; 2718 } 2719 default: 2720 ccb->ccb_h.status = CAM_REQ_INVALID; 2721 xpt_done(ccb); 2722 break; 2723 } 2724 } 2725 2726 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 2727 void 2728 isp_done(struct ccb_scsiio *sccb) 2729 { 2730 struct ispsoftc *isp = XS_ISP(sccb); 2731 2732 if (XS_NOERR(sccb)) 2733 XS_SETERR(sccb, CAM_REQ_CMP); 2734 2735 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2736 (sccb->scsi_status != SCSI_STATUS_OK)) { 2737 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2738 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 2739 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2740 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2741 } else { 2742 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2743 } 2744 } 2745 2746 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2747 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2748 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2749 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2750 xpt_freeze_devq(sccb->ccb_h.path, 1); 2751 isp_prt(isp, ISP_LOGDEBUG0, 2752 "freeze devq %d.%d cam sts %x scsi sts %x", 2753 sccb->ccb_h.target_id, sccb->ccb_h.target_lun, 2754 sccb->ccb_h.status, sccb->scsi_status); 2755 } 2756 } 2757 2758 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 2759 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2760 xpt_print_path(sccb->ccb_h.path); 2761 isp_prt(isp, ISP_LOGINFO, 2762 "cam completion status 0x%x", sccb->ccb_h.status); 2763 } 2764 2765 XS_CMD_S_DONE(sccb); 2766 if (XS_CMD_WDOG_P(sccb) == 0) { 2767 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 2768 if (XS_CMD_GRACE_P(sccb)) { 2769 isp_prt(isp, ISP_LOGDEBUG2, 2770 "finished command on borrowed time"); 2771 } 2772 XS_CMD_S_CLEAR(sccb); 2773 ISPLOCK_2_CAMLOCK(isp); 2774 xpt_done((union ccb *) sccb); 2775 CAMLOCK_2_ISPLOCK(isp); 2776 } 2777 } 2778 2779 int 2780 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg) 2781 { 2782 int bus, rv = 0; 2783 switch (cmd) { 2784 case ISPASYNC_NEW_TGT_PARAMS: 2785 { 2786 #ifdef CAM_NEW_TRAN_CODE 2787 struct ccb_trans_settings_scsi *scsi; 2788 struct ccb_trans_settings_spi *spi; 2789 #endif 2790 int flags, tgt; 2791 sdparam *sdp = isp->isp_param; 2792 struct ccb_trans_settings cts; 2793 struct cam_path *tmppath; 2794 2795 bzero(&cts, sizeof (struct ccb_trans_settings)); 2796 2797 tgt = *((int *)arg); 2798 bus = (tgt >> 16) & 0xffff; 2799 tgt &= 0xffff; 2800 sdp += bus; 2801 ISPLOCK_2_CAMLOCK(isp); 2802 if (xpt_create_path(&tmppath, NULL, 2803 cam_sim_path(bus? 
isp->isp_sim2 : isp->isp_sim), 2804 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2805 CAMLOCK_2_ISPLOCK(isp); 2806 isp_prt(isp, ISP_LOGWARN, 2807 "isp_async cannot make temp path for %d.%d", 2808 tgt, bus); 2809 rv = -1; 2810 break; 2811 } 2812 CAMLOCK_2_ISPLOCK(isp); 2813 flags = sdp->isp_devparam[tgt].actv_flags; 2814 #ifdef CAM_NEW_TRAN_CODE 2815 cts.type = CTS_TYPE_CURRENT_SETTINGS; 2816 cts.protocol = PROTO_SCSI; 2817 cts.transport = XPORT_SPI; 2818 2819 scsi = &cts.proto_specific.scsi; 2820 spi = &cts.xport_specific.spi; 2821 2822 if (flags & DPARM_TQING) { 2823 scsi->valid |= CTS_SCSI_VALID_TQ; 2824 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2825 spi->flags |= CTS_SPI_FLAGS_TAG_ENB; 2826 } 2827 2828 if (flags & DPARM_DISC) { 2829 spi->valid |= CTS_SPI_VALID_DISC; 2830 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2831 } 2832 spi->flags |= CTS_SPI_VALID_BUS_WIDTH; 2833 if (flags & DPARM_WIDE) { 2834 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2835 } else { 2836 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2837 } 2838 if (flags & DPARM_SYNC) { 2839 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2840 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2841 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 2842 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 2843 } 2844 #else 2845 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 2846 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2847 if (flags & DPARM_DISC) { 2848 cts.flags |= CCB_TRANS_DISC_ENB; 2849 } 2850 if (flags & DPARM_TQING) { 2851 cts.flags |= CCB_TRANS_TAG_ENB; 2852 } 2853 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 2854 cts.bus_width = (flags & DPARM_WIDE)? 2855 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; 2856 cts.sync_period = sdp->isp_devparam[tgt].actv_period; 2857 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; 2858 if (flags & DPARM_SYNC) { 2859 cts.valid |= 2860 CCB_TRANS_SYNC_RATE_VALID | 2861 CCB_TRANS_SYNC_OFFSET_VALID; 2862 } 2863 #endif 2864 isp_prt(isp, ISP_LOGDEBUG2, 2865 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 2866 bus, tgt, sdp->isp_devparam[tgt].actv_period, 2867 sdp->isp_devparam[tgt].actv_offset, flags); 2868 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 2869 ISPLOCK_2_CAMLOCK(isp); 2870 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 2871 xpt_free_path(tmppath); 2872 CAMLOCK_2_ISPLOCK(isp); 2873 break; 2874 } 2875 case ISPASYNC_BUS_RESET: 2876 bus = *((int *)arg); 2877 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 2878 bus); 2879 if (bus > 0 && isp->isp_path2) { 2880 ISPLOCK_2_CAMLOCK(isp); 2881 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2882 CAMLOCK_2_ISPLOCK(isp); 2883 } else if (isp->isp_path) { 2884 ISPLOCK_2_CAMLOCK(isp); 2885 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2886 CAMLOCK_2_ISPLOCK(isp); 2887 } 2888 break; 2889 case ISPASYNC_LIP: 2890 if (isp->isp_path) { 2891 isp_freeze_loopdown(isp, "ISPASYNC_LIP"); 2892 } 2893 isp_prt(isp, ISP_LOGINFO, "LIP Received"); 2894 break; 2895 case ISPASYNC_LOOP_RESET: 2896 if (isp->isp_path) { 2897 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET"); 2898 } 2899 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received"); 2900 break; 2901 case ISPASYNC_LOOP_DOWN: 2902 if (isp->isp_path) { 2903 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN"); 2904 } 2905 isp_prt(isp, ISP_LOGINFO, "Loop DOWN"); 2906 break; 2907 case ISPASYNC_LOOP_UP: 2908 /* 2909 * Now we just note that Loop has come up. We don't 2910 * actually do anything because we're waiting for a 2911 * Change Notify before activating the FC cleanup 2912 * thread to look at the state of the loop again. 
2913 */ 2914 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 2915 break; 2916 case ISPASYNC_PROMENADE: 2917 { 2918 struct cam_path *tmppath; 2919 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x " 2920 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x"; 2921 static const char *roles[4] = { 2922 "(none)", "Target", "Initiator", "Target/Initiator" 2923 }; 2924 fcparam *fcp = isp->isp_param; 2925 int tgt = *((int *) arg); 2926 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT); 2927 struct lportdb *lp = &fcp->portdb[tgt]; 2928 2929 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid, 2930 roles[lp->roles & 0x3], 2931 (lp->valid)? "Arrived" : "Departed", 2932 (u_int32_t) (lp->port_wwn >> 32), 2933 (u_int32_t) (lp->port_wwn & 0xffffffffLL), 2934 (u_int32_t) (lp->node_wwn >> 32), 2935 (u_int32_t) (lp->node_wwn & 0xffffffffLL)); 2936 2937 ISPLOCK_2_CAMLOCK(isp); 2938 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim), 2939 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2940 CAMLOCK_2_ISPLOCK(isp); 2941 break; 2942 } 2943 /* 2944 * Policy: only announce targets. 2945 */ 2946 if (lp->roles & is_tgt_mask) { 2947 if (lp->valid) { 2948 xpt_async(AC_FOUND_DEVICE, tmppath, NULL); 2949 } else { 2950 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2951 } 2952 } 2953 xpt_free_path(tmppath); 2954 CAMLOCK_2_ISPLOCK(isp); 2955 break; 2956 } 2957 case ISPASYNC_CHANGE_NOTIFY: 2958 if (arg == ISPASYNC_CHANGE_PDB) { 2959 isp_prt(isp, ISP_LOGINFO, 2960 "Port Database Changed"); 2961 } else if (arg == ISPASYNC_CHANGE_SNS) { 2962 isp_prt(isp, ISP_LOGINFO, 2963 "Name Server Database Changed"); 2964 } 2965 #ifdef ISP_SMPLOCK 2966 cv_signal(&isp->isp_osinfo.kthread_cv); 2967 #else 2968 wakeup(&isp->isp_osinfo.kthread_cv); 2969 #endif 2970 break; 2971 case ISPASYNC_FABRIC_DEV: 2972 { 2973 int target, base, lim; 2974 fcparam *fcp = isp->isp_param; 2975 struct lportdb *lp = NULL; 2976 struct lportdb *clp = (struct lportdb *) arg; 2977 char *pt; 2978 2979 switch (clp->port_type) { 2980 case 1: 2981 pt = " N_Port"; 2982 break; 2983 case 2: 2984 pt = " NL_Port"; 2985 break; 2986 case 3: 2987 pt = "F/NL_Port"; 2988 break; 2989 case 0x7f: 2990 pt = " Nx_Port"; 2991 break; 2992 case 0x81: 2993 pt = " F_port"; 2994 break; 2995 case 0x82: 2996 pt = " FL_Port"; 2997 break; 2998 case 0x84: 2999 pt = " E_port"; 3000 break; 3001 default: 3002 pt = " "; 3003 break; 3004 } 3005 3006 isp_prt(isp, ISP_LOGINFO, 3007 "%s Fabric Device @ PortID 0x%x", pt, clp->portid); 3008 3009 /* 3010 * If we don't have an initiator role we bail. 3011 * 3012 * We just use ISPASYNC_FABRIC_DEV for announcement purposes. 3013 */ 3014 3015 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) { 3016 break; 3017 } 3018 3019 /* 3020 * Is this entry for us? If so, we bail. 3021 */ 3022 3023 if (fcp->isp_portid == clp->portid) { 3024 break; 3025 } 3026 3027 /* 3028 * Else, the default policy is to find room for it in 3029 * our local port database. Later, when we execute 3030 * the call to isp_pdb_sync either this newly arrived 3031 * or already logged in device will be (re)announced. 3032 */ 3033 3034 if (fcp->isp_topo == TOPO_FL_PORT) 3035 base = FC_SNS_ID+1; 3036 else 3037 base = 0; 3038 3039 if (fcp->isp_topo == TOPO_N_PORT) 3040 lim = 1; 3041 else 3042 lim = MAX_FC_TARG; 3043 3044 /* 3045 * Is it already in our list? 
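	 * (Two passes below: the first looks for an entry with the same
	 * WWNs and just re-marks it as a fabric device; the second hunts
	 * for a free slot, with both passes skipping the reserved loop
	 * IDs between FL_PORT_ID and FC_SNS_ID.)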
3046 */ 3047 for (target = base; target < lim; target++) { 3048 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 3049 continue; 3050 } 3051 lp = &fcp->portdb[target]; 3052 if (lp->port_wwn == clp->port_wwn && 3053 lp->node_wwn == clp->node_wwn) { 3054 lp->fabric_dev = 1; 3055 break; 3056 } 3057 } 3058 if (target < lim) { 3059 break; 3060 } 3061 for (target = base; target < lim; target++) { 3062 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 3063 continue; 3064 } 3065 lp = &fcp->portdb[target]; 3066 if (lp->port_wwn == 0) { 3067 break; 3068 } 3069 } 3070 if (target == lim) { 3071 isp_prt(isp, ISP_LOGWARN, 3072 "out of space for fabric devices"); 3073 break; 3074 } 3075 lp->port_type = clp->port_type; 3076 lp->fc4_type = clp->fc4_type; 3077 lp->node_wwn = clp->node_wwn; 3078 lp->port_wwn = clp->port_wwn; 3079 lp->portid = clp->portid; 3080 lp->fabric_dev = 1; 3081 break; 3082 } 3083 #ifdef ISP_TARGET_MODE 3084 case ISPASYNC_TARGET_MESSAGE: 3085 { 3086 tmd_msg_t *mp = arg; 3087 isp_prt(isp, ISP_LOGALL, 3088 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x", 3089 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt, 3090 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval, 3091 mp->nt_msg[0]); 3092 break; 3093 } 3094 case ISPASYNC_TARGET_EVENT: 3095 { 3096 tmd_event_t *ep = arg; 3097 if (ep->ev_event == ASYNC_CTIO_DONE) { 3098 /* 3099 * ACK the interrupt first 3100 */ 3101 ISP_WRITE(isp, BIU_SEMA, 0); 3102 ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT); 3103 isp_handle_platform_ctio_fastpost(isp, ep->ev_bus); 3104 break; 3105 } 3106 isp_prt(isp, ISP_LOGALL, 3107 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event); 3108 break; 3109 } 3110 case ISPASYNC_TARGET_ACTION: 3111 switch (((isphdr_t *)arg)->rqs_entry_type) { 3112 default: 3113 isp_prt(isp, ISP_LOGWARN, 3114 "event 0x%x for unhandled target action", 3115 ((isphdr_t *)arg)->rqs_entry_type); 3116 break; 3117 case RQSTYPE_NOTIFY: 3118 if (IS_SCSI(isp)) { 3119 rv = isp_handle_platform_notify_scsi(isp, 3120 (in_entry_t *) arg); 3121 } else { 3122 rv = isp_handle_platform_notify_fc(isp, 3123 (in_fcentry_t *) arg); 3124 } 3125 break; 3126 case RQSTYPE_ATIO: 3127 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 3128 break; 3129 case RQSTYPE_ATIO2: 3130 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3131 break; 3132 case RQSTYPE_CTIO2: 3133 case RQSTYPE_CTIO: 3134 rv = isp_handle_platform_ctio(isp, arg); 3135 break; 3136 case RQSTYPE_ENABLE_LUN: 3137 case RQSTYPE_MODIFY_LUN: 3138 isp_ledone(isp, (lun_entry_t *) arg); 3139 break; 3140 } 3141 break; 3142 #endif 3143 case ISPASYNC_FW_CRASH: 3144 { 3145 u_int16_t mbox1, mbox6; 3146 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3147 if (IS_DUALBUS(isp)) { 3148 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3149 } else { 3150 mbox6 = 0; 3151 } 3152 isp_prt(isp, ISP_LOGERR, 3153 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3154 mbox6, mbox1); 3155 #ifdef ISP_FW_CRASH_DUMP 3156 /* 3157 * XXX: really need a thread to do this right. 3158 */ 3159 if (IS_FC(isp)) { 3160 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3161 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3162 isp_freeze_loopdown(isp, "f/w crash"); 3163 isp_fw_dump(isp); 3164 } 3165 isp_reinit(isp); 3166 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3167 #endif 3168 break; 3169 } 3170 case ISPASYNC_UNHANDLED_RESPONSE: 3171 break; 3172 default: 3173 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3174 break; 3175 } 3176 return (rv); 3177 } 3178 3179 3180 /* 3181 * Locks are held before coming here. 
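 * (Presumably the ISP lock, so that the register pokes in isp_uninit()
 * below do not race the interrupt handler while the chip is being shut
 * down.)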
 */
void
isp_uninit(struct ispsoftc *isp)
{
	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
	DISABLE_INTS(isp);
}

void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", device_get_nameunit(isp->isp_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}