1 /* 2 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 3 * 4 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice immediately at the beginning of the file, without modification, 11 * this list of conditions, and the following disclaimer. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include <dev/isp/isp_freebsd.h> 32 #include <sys/unistd.h> 33 #include <sys/kthread.h> 34 #include <machine/stdarg.h> /* for use by isp_prt below */ 35 #include <sys/conf.h> 36 #include <sys/module.h> 37 #include <sys/ioccom.h> 38 #include <dev/isp/isp_ioctl.h> 39 40 41 MODULE_VERSION(isp, 1); 42 MODULE_DEPEND(isp, cam, 1, 1, 1); 43 int isp_announced = 0; 44 ispfwfunc *isp_get_firmware_p = NULL; 45 46 static d_ioctl_t ispioctl; 47 static void isp_intr_enable(void *); 48 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *); 49 static void isp_poll(struct cam_sim *); 50 static timeout_t isp_watchdog; 51 static void isp_kthread(void *); 52 static void isp_action(struct cam_sim *, union ccb *); 53 54 55 #define ISP_CDEV_MAJOR 248 56 static struct cdevsw isp_cdevsw = { 57 .d_open = nullopen, 58 .d_close = nullclose, 59 .d_ioctl = ispioctl, 60 .d_name = "isp", 61 .d_maj = ISP_CDEV_MAJOR, 62 .d_flags = D_TAPE, 63 }; 64 65 static struct ispsoftc *isplist = NULL; 66 67 void 68 isp_attach(struct ispsoftc *isp) 69 { 70 int primary, secondary; 71 struct ccb_setasync csa; 72 struct cam_devq *devq; 73 struct cam_sim *sim; 74 struct cam_path *path; 75 76 /* 77 * Establish (in case of 12X0) which bus is the primary. 78 */ 79 80 primary = 0; 81 secondary = 1; 82 83 /* 84 * Create the device queue for our SIM(s). 85 */ 86 devq = cam_simq_alloc(isp->isp_maxcmds); 87 if (devq == NULL) { 88 return; 89 } 90 91 /* 92 * Construct our SIM entry. 93 */ 94 ISPLOCK_2_CAMLOCK(isp); 95 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 96 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 97 if (sim == NULL) { 98 cam_simq_free(devq); 99 CAMLOCK_2_ISPLOCK(isp); 100 return; 101 } 102 CAMLOCK_2_ISPLOCK(isp); 103 104 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 105 isp->isp_osinfo.ehook.ich_arg = isp; 106 ISPLOCK_2_CAMLOCK(isp); 107 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 108 cam_sim_free(sim, TRUE); 109 CAMLOCK_2_ISPLOCK(isp); 110 isp_prt(isp, ISP_LOGERR, 111 "could not establish interrupt enable hook"); 112 return; 113 } 114 115 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 116 cam_sim_free(sim, TRUE); 117 CAMLOCK_2_ISPLOCK(isp); 118 return; 119 } 120 121 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 122 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 123 xpt_bus_deregister(cam_sim_path(sim)); 124 cam_sim_free(sim, TRUE); 125 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 126 CAMLOCK_2_ISPLOCK(isp); 127 return; 128 } 129 130 xpt_setup_ccb(&csa.ccb_h, path, 5); 131 csa.ccb_h.func_code = XPT_SASYNC_CB; 132 csa.event_enable = AC_LOST_DEVICE; 133 csa.callback = isp_cam_async; 134 csa.callback_arg = sim; 135 xpt_action((union ccb *)&csa); 136 CAMLOCK_2_ISPLOCK(isp); 137 isp->isp_sim = sim; 138 isp->isp_path = path; 139 /* 140 * Create a kernel thread for fibre channel instances. We 141 * don't have dual channel FC cards. 142 */ 143 if (IS_FC(isp)) { 144 ISPLOCK_2_CAMLOCK(isp); 145 /* XXX: LOCK VIOLATION */ 146 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); 147 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 148 RFHIGHPID, 0, "%s: fc_thrd", 149 device_get_nameunit(isp->isp_dev))) { 150 xpt_bus_deregister(cam_sim_path(sim)); 151 cam_sim_free(sim, TRUE); 152 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 153 CAMLOCK_2_ISPLOCK(isp); 154 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 155 return; 156 } 157 CAMLOCK_2_ISPLOCK(isp); 158 } 159 160 161 /* 162 * If we have a second channel, construct SIM entry for that. 163 */ 164 if (IS_DUALBUS(isp)) { 165 ISPLOCK_2_CAMLOCK(isp); 166 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 167 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 168 if (sim == NULL) { 169 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 170 xpt_free_path(isp->isp_path); 171 cam_simq_free(devq); 172 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 173 return; 174 } 175 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 176 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 177 xpt_free_path(isp->isp_path); 178 cam_sim_free(sim, TRUE); 179 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 180 CAMLOCK_2_ISPLOCK(isp); 181 return; 182 } 183 184 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 185 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 186 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 187 xpt_free_path(isp->isp_path); 188 xpt_bus_deregister(cam_sim_path(sim)); 189 cam_sim_free(sim, TRUE); 190 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 191 CAMLOCK_2_ISPLOCK(isp); 192 return; 193 } 194 195 xpt_setup_ccb(&csa.ccb_h, path, 5); 196 csa.ccb_h.func_code = XPT_SASYNC_CB; 197 csa.event_enable = AC_LOST_DEVICE; 198 csa.callback = isp_cam_async; 199 csa.callback_arg = sim; 200 xpt_action((union ccb *)&csa); 201 CAMLOCK_2_ISPLOCK(isp); 202 isp->isp_sim2 = sim; 203 isp->isp_path2 = path; 204 } 205 206 #ifdef ISP_TARGET_MODE 207 cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a"); 208 cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b"); 209 cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a"); 210 cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b"); 211 #endif 212 /* 213 * Create device nodes 214 */ 215 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 216 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 217 218 if (isp->isp_role != ISP_ROLE_NONE) { 219 isp->isp_state = ISP_RUNSTATE; 220 ENABLE_INTS(isp); 221 } 222 if (isplist == NULL) { 223 isplist = isp; 224 } else { 225 struct ispsoftc *tmp = isplist; 226 while (tmp->isp_osinfo.next) { 227 tmp = tmp->isp_osinfo.next; 228 } 229 tmp->isp_osinfo.next = isp; 230 } 231 232 } 233 234 static INLINE void 235 isp_freeze_loopdown(struct ispsoftc *isp, char *msg) 236 { 237 if (isp->isp_osinfo.simqfrozen == 0) { 238 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 239 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 240 ISPLOCK_2_CAMLOCK(isp); 241 xpt_freeze_simq(isp->isp_sim, 1); 242 CAMLOCK_2_ISPLOCK(isp); 243 } else { 244 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 245 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 246 } 247 } 248 249 static int 250 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 251 { 252 struct ispsoftc *isp; 253 int retval = ENOTTY; 254 255 isp = isplist; 256 while (isp) { 257 if (minor(dev) == device_get_unit(isp->isp_dev)) { 258 break; 259 } 260 isp = isp->isp_osinfo.next; 261 } 262 if (isp == NULL) 263 return (ENXIO); 264 265 switch (cmd) { 266 #ifdef ISP_FW_CRASH_DUMP 267 case ISP_GET_FW_CRASH_DUMP: 268 { 269 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data; 270 size_t sz; 271 272 retval = 0; 273 if (IS_2200(isp)) 274 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 275 else 276 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 277 ISP_LOCK(isp); 278 if (ptr && *ptr) { 279 void *uaddr = *((void **) addr); 280 if (copyout(ptr, uaddr, sz)) { 281 retval = EFAULT; 282 } else { 283 *ptr = 0; 284 } 285 } else { 286 retval = ENXIO; 287 } 288 ISP_UNLOCK(isp); 289 break; 290 } 291 292 case ISP_FORCE_CRASH_DUMP: 293 ISP_LOCK(isp); 294 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)"); 295 isp_fw_dump(isp); 296 isp_reinit(isp); 297 ISP_UNLOCK(isp); 298 retval = 0; 299 break; 300 #endif 301 case ISP_SDBLEV: 302 { 303 int olddblev = isp->isp_dblev; 304 isp->isp_dblev = *(int *)addr; 305 *(int *)addr = olddblev; 306 retval = 0; 307 break; 308 } 309 case ISP_RESETHBA: 310 ISP_LOCK(isp); 311 isp_reinit(isp); 312 ISP_UNLOCK(isp); 313 retval = 0; 314 break; 315 case ISP_RESCAN: 316 if (IS_FC(isp)) { 317 ISP_LOCK(isp); 318 if (isp_fc_runstate(isp, 5 * 1000000)) { 319 retval = EIO; 320 } else { 321 retval = 0; 322 } 323 ISP_UNLOCK(isp); 324 } 325 break; 326 case ISP_FC_LIP: 327 if (IS_FC(isp)) { 328 ISP_LOCK(isp); 329 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 330 retval = EIO; 331 } else { 332 retval = 0; 333 } 334 ISP_UNLOCK(isp); 335 } 336 break; 337 case ISP_FC_GETDINFO: 338 { 339 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 340 struct lportdb *lp; 341 342 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 343 retval = EINVAL; 344 break; 345 } 346 ISP_LOCK(isp); 347 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 348 if (lp->valid) { 349 ifc->loopid = lp->loopid; 350 ifc->portid = lp->portid; 351 ifc->node_wwn = lp->node_wwn; 352 ifc->port_wwn = lp->port_wwn; 353 retval = 0; 354 } else { 355 retval = ENODEV; 356 } 357 ISP_UNLOCK(isp); 358 break; 359 } 360 case ISP_GET_STATS: 361 { 362 isp_stats_t *sp = (isp_stats_t *) addr; 363 364 MEMZERO(sp, sizeof (*sp)); 365 sp->isp_stat_version = ISP_STATS_VERSION; 366 sp->isp_type = isp->isp_type; 367 sp->isp_revision = isp->isp_revision; 368 ISP_LOCK(isp); 369 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 370 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 371 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 372 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 373 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 374 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 375 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 376 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 377 ISP_UNLOCK(isp); 378 retval = 0; 379 break; 380 } 381 case ISP_CLR_STATS: 382 ISP_LOCK(isp); 383 isp->isp_intcnt = 0; 384 isp->isp_intbogus = 0; 385 isp->isp_intmboxc = 0; 386 isp->isp_intoasync = 0; 387 isp->isp_rsltccmplt = 0; 388 isp->isp_fphccmplt = 0; 389 isp->isp_rscchiwater = 0; 390 isp->isp_fpcchiwater = 0; 391 ISP_UNLOCK(isp); 392 retval = 0; 393 break; 394 case ISP_FC_GETHINFO: 395 { 396 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 397 MEMZERO(hba, sizeof (*hba)); 398 ISP_LOCK(isp); 399 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 400 hba->fc_scsi_supported = 1; 401 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 402 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 403 hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn; 404 hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn; 405 hba->active_node_wwn = ISP_NODEWWN(isp); 406 hba->active_port_wwn = ISP_PORTWWN(isp); 407 ISP_UNLOCK(isp); 408 retval = 0; 409 break; 410 } 411 case ISP_GET_FC_PARAM: 412 { 413 struct isp_fc_param *f = (struct isp_fc_param *) addr; 414 415 if (!IS_FC(isp)) { 416 retval = EINVAL; 417 break; 418 } 419 f->parameter = 0; 420 if (strcmp(f->param_name, "framelength") == 0) { 421 f->parameter = FCPARAM(isp)->isp_maxfrmlen; 422 retval = 0; 423 break; 424 } 425 if (strcmp(f->param_name, "exec_throttle") == 0) { 426 f->parameter = FCPARAM(isp)->isp_execthrottle; 427 retval = 0; 428 break; 429 } 430 if (strcmp(f->param_name, "fullduplex") == 0) { 431 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX) 432 f->parameter = 1; 433 retval = 0; 434 break; 435 } 436 if (strcmp(f->param_name, "loopid") == 0) { 437 f->parameter = FCPARAM(isp)->isp_loopid; 438 retval = 0; 439 break; 440 } 441 retval = EINVAL; 442 break; 443 } 444 case ISP_SET_FC_PARAM: 445 { 446 struct isp_fc_param *f = (struct isp_fc_param *) addr; 447 u_int32_t param = f->parameter; 448 449 if (!IS_FC(isp)) { 450 retval = EINVAL; 451 break; 452 } 453 f->parameter = 0; 454 if (strcmp(f->param_name, "framelength") == 0) { 455 if (param != 512 && param != 1024 && param != 1024) { 456 retval = EINVAL; 457 break; 458 } 459 FCPARAM(isp)->isp_maxfrmlen = param; 460 retval = 0; 461 break; 462 } 463 if (strcmp(f->param_name, "exec_throttle") == 0) { 464 if (param < 16 || param > 255) { 465 retval = EINVAL; 466 break; 467 } 468 FCPARAM(isp)->isp_execthrottle = param; 469 retval = 0; 470 break; 471 } 472 if (strcmp(f->param_name, "fullduplex") == 0) { 473 if (param != 0 && param != 1) { 474 retval = EINVAL; 475 break; 476 } 477 if (param) { 478 FCPARAM(isp)->isp_fwoptions |= 479 ICBOPT_FULL_DUPLEX; 480 } else { 481 FCPARAM(isp)->isp_fwoptions &= 482 ~ICBOPT_FULL_DUPLEX; 483 } 484 retval = 0; 485 break; 486 } 487 if (strcmp(f->param_name, "loopid") == 0) { 488 if (param < 0 || param > 125) { 489 retval = EINVAL; 490 break; 491 } 492 FCPARAM(isp)->isp_loopid = param; 493 retval = 0; 494 break; 495 } 496 retval = EINVAL; 497 break; 498 } 499 default: 500 break; 501 } 502 return (retval); 503 } 504 505 static void 506 isp_intr_enable(void *arg) 507 { 508 struct ispsoftc *isp = arg; 509 if (isp->isp_role != ISP_ROLE_NONE) { 510 ENABLE_INTS(isp); 511 isp->isp_osinfo.intsok = 1; 512 } 513 /* Release our hook so that the boot can continue. */ 514 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 515 } 516 517 /* 518 * Put the target mode functions here, because some are inlines 519 */ 520 521 #ifdef ISP_TARGET_MODE 522 523 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t); 524 static INLINE int are_any_luns_enabled(struct ispsoftc *, int); 525 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t); 526 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *); 527 static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int); 528 static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int); 529 static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int); 530 static INLINE void isp_vsema_rqe(struct ispsoftc *, int); 531 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int); 532 static cam_status 533 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **); 534 static void destroy_lun_state(struct ispsoftc *, tstate_t *); 535 static void isp_en_lun(struct ispsoftc *, union ccb *); 536 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *); 537 static timeout_t isp_refire_putback_atio; 538 static void isp_complete_ctio(union ccb *); 539 static void isp_target_putback_atio(union ccb *); 540 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *); 541 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *); 542 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *); 543 static int isp_handle_platform_ctio(struct ispsoftc *, void *); 544 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *); 545 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *); 546 547 static INLINE int 548 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun) 549 { 550 tstate_t *tptr; 551 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 552 if (tptr == NULL) { 553 return (0); 554 } 555 do { 556 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { 557 return (1); 558 } 559 } while ((tptr = tptr->next) != NULL); 560 return (0); 561 } 562 563 static INLINE int 564 are_any_luns_enabled(struct ispsoftc *isp, int port) 565 { 566 int lo, hi; 567 if (IS_DUALBUS(isp)) { 568 lo = (port * (LUN_HASH_SIZE >> 1)); 569 hi = lo + (LUN_HASH_SIZE >> 1); 570 } else { 571 lo = 0; 572 hi = LUN_HASH_SIZE; 573 } 574 for (lo = 0; lo < hi; lo++) { 575 if (isp->isp_osinfo.lun_hash[lo]) { 576 return (1); 577 } 578 } 579 return (0); 580 } 581 582 static INLINE tstate_t * 583 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun) 584 { 585 tstate_t *tptr = NULL; 586 587 if (lun == CAM_LUN_WILDCARD) { 588 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 589 tptr = &isp->isp_osinfo.tsdflt[bus]; 590 tptr->hold++; 591 return (tptr); 592 } 593 } else { 594 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 595 if (tptr == NULL) { 596 return (NULL); 597 } 598 } 599 600 do { 601 if (tptr->lun == lun && tptr->bus == bus) { 602 tptr->hold++; 603 return (tptr); 604 } 605 } while ((tptr = tptr->next) != NULL); 606 return (tptr); 607 } 608 609 static INLINE void 610 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr) 611 { 612 if (tptr->hold) 613 tptr->hold--; 614 } 615 616 static INLINE int 617 isp_psema_sig_rqe(struct ispsoftc *isp, int bus) 618 { 619 while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) { 620 isp->isp_osinfo.tmflags[bus] |= TM_WANTED; 621 #ifdef ISP_SMPLOCK 622 if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) { 623 return (-1); 624 } 625 #else 626 if (tsleep(&isp->isp_osinfo.tgtcv0[bus], PZERO, "cv_isp", 0)) { 627 return (-1); 628 } 629 #endif 630 isp->isp_osinfo.tmflags[bus] |= TM_BUSY; 631 } 632 return (0); 633 } 634 635 static INLINE int 636 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo) 637 { 638 #ifdef ISP_SMPLOCK 639 if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) { 640 return (-1); 641 } 642 #else 643 if (tsleep(&isp->isp_osinfo.tgtcv1[bus], PZERO, "cv_isp1", 0)) { 644 return (-1); 645 } 646 #endif 647 return (0); 648 } 649 650 static INLINE void 651 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status) 652 { 653 isp->isp_osinfo.rstatus[bus] = status; 654 #ifdef ISP_SMPLOCK 655 cv_signal(&isp->isp_osinfo.tgtcv1[bus]); 656 #else 657 wakeup(&isp->isp_osinfo.tgtcv1[bus]); 658 #endif 659 } 660 661 static INLINE void 662 isp_vsema_rqe(struct ispsoftc *isp, int bus) 663 { 664 if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) { 665 isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED; 666 #ifdef ISP_SMPLOCK 667 cv_signal(&isp->isp_osinfo.tgtcv0[bus]); 668 #else 669 cv_signal(&isp->isp_osinfo.tgtcv0[bus]); 670 #endif 671 } 672 isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY; 673 } 674 675 static INLINE atio_private_data_t * 676 isp_get_atpd(struct ispsoftc *isp, int tag) 677 { 678 atio_private_data_t *atp; 679 for (atp = isp->isp_osinfo.atpdp; 680 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 681 if (atp->tag == tag) 682 return (atp); 683 } 684 return (NULL); 685 } 686 687 static cam_status 688 create_lun_state(struct ispsoftc *isp, int bus, 689 struct cam_path *path, tstate_t **rslt) 690 { 691 cam_status status; 692 lun_id_t lun; 693 int hfx; 694 tstate_t *tptr, *new; 695 696 lun = xpt_path_lun_id(path); 697 if (lun < 0) { 698 return (CAM_LUN_INVALID); 699 } 700 if (is_lun_enabled(isp, bus, lun)) { 701 return (CAM_LUN_ALRDY_ENA); 702 } 703 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 704 if (new == NULL) { 705 return (CAM_RESRC_UNAVAIL); 706 } 707 708 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 709 xpt_path_target_id(path), xpt_path_lun_id(path)); 710 if (status != CAM_REQ_CMP) { 711 free(new, M_DEVBUF); 712 return (status); 713 } 714 new->bus = bus; 715 new->lun = lun; 716 SLIST_INIT(&new->atios); 717 SLIST_INIT(&new->inots); 718 new->hold = 1; 719 720 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 721 tptr = isp->isp_osinfo.lun_hash[hfx]; 722 if (tptr == NULL) { 723 isp->isp_osinfo.lun_hash[hfx] = new; 724 } else { 725 while (tptr->next) 726 tptr = tptr->next; 727 tptr->next = new; 728 } 729 *rslt = new; 730 return (CAM_REQ_CMP); 731 } 732 733 static INLINE void 734 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr) 735 { 736 int hfx; 737 tstate_t *lw, *pw; 738 739 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 740 if (tptr->hold) { 741 return; 742 } 743 pw = isp->isp_osinfo.lun_hash[hfx]; 744 if (pw == NULL) { 745 return; 746 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 747 isp->isp_osinfo.lun_hash[hfx] = pw->next; 748 } else { 749 lw = pw; 750 pw = lw->next; 751 while (pw) { 752 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 753 lw->next = pw->next; 754 break; 755 } 756 lw = pw; 757 pw = pw->next; 758 } 759 if (pw == NULL) { 760 return; 761 } 762 } 763 free(tptr, M_DEVBUF); 764 } 765 766 /* 767 * we enter with our locks held. 768 */ 769 static void 770 isp_en_lun(struct ispsoftc *isp, union ccb *ccb) 771 { 772 const char lfmt[] = "Lun now %sabled for target mode on channel %d"; 773 struct ccb_en_lun *cel = &ccb->cel; 774 tstate_t *tptr; 775 u_int16_t rstat; 776 int bus, cmd, av, wildcard; 777 lun_id_t lun; 778 target_id_t tgt; 779 780 781 bus = XS_CHANNEL(ccb) & 0x1; 782 tgt = ccb->ccb_h.target_id; 783 lun = ccb->ccb_h.target_lun; 784 785 /* 786 * Do some sanity checking first. 787 */ 788 789 if ((lun != CAM_LUN_WILDCARD) && 790 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 791 ccb->ccb_h.status = CAM_LUN_INVALID; 792 return; 793 } 794 795 if (IS_SCSI(isp)) { 796 sdparam *sdp = isp->isp_param; 797 sdp += bus; 798 if (tgt != CAM_TARGET_WILDCARD && 799 tgt != sdp->isp_initiator_id) { 800 ccb->ccb_h.status = CAM_TID_INVALID; 801 return; 802 } 803 } else { 804 if (tgt != CAM_TARGET_WILDCARD && 805 tgt != FCPARAM(isp)->isp_iid) { 806 ccb->ccb_h.status = CAM_TID_INVALID; 807 return; 808 } 809 /* 810 * This is as a good a place as any to check f/w capabilities. 811 */ 812 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) { 813 isp_prt(isp, ISP_LOGERR, 814 "firmware does not support target mode"); 815 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 816 return; 817 } 818 /* 819 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 820 * XXX: dorks with our already fragile enable/disable code. 821 */ 822 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 823 isp_prt(isp, ISP_LOGERR, 824 "firmware not SCCLUN capable"); 825 } 826 } 827 828 if (tgt == CAM_TARGET_WILDCARD) { 829 if (lun == CAM_LUN_WILDCARD) { 830 wildcard = 1; 831 } else { 832 ccb->ccb_h.status = CAM_LUN_INVALID; 833 return; 834 } 835 } else { 836 wildcard = 0; 837 } 838 839 /* 840 * Next check to see whether this is a target/lun wildcard action. 841 * 842 * If so, we know that we can accept commands for luns that haven't 843 * been enabled yet and send them upstream. Otherwise, we have to 844 * handle them locally (if we see them at all). 845 */ 846 847 if (wildcard) { 848 tptr = &isp->isp_osinfo.tsdflt[bus]; 849 if (cel->enable) { 850 if (isp->isp_osinfo.tmflags[bus] & 851 TM_WILDCARD_ENABLED) { 852 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 853 return; 854 } 855 ccb->ccb_h.status = 856 xpt_create_path(&tptr->owner, NULL, 857 xpt_path_path_id(ccb->ccb_h.path), 858 xpt_path_target_id(ccb->ccb_h.path), 859 xpt_path_lun_id(ccb->ccb_h.path)); 860 if (ccb->ccb_h.status != CAM_REQ_CMP) { 861 return; 862 } 863 SLIST_INIT(&tptr->atios); 864 SLIST_INIT(&tptr->inots); 865 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 866 } else { 867 if ((isp->isp_osinfo.tmflags[bus] & 868 TM_WILDCARD_ENABLED) == 0) { 869 ccb->ccb_h.status = CAM_REQ_CMP; 870 return; 871 } 872 if (tptr->hold) { 873 ccb->ccb_h.status = CAM_SCSI_BUSY; 874 return; 875 } 876 xpt_free_path(tptr->owner); 877 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 878 } 879 } 880 881 /* 882 * Now check to see whether this bus needs to be 883 * enabled/disabled with respect to target mode. 884 */ 885 av = bus << 31; 886 if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) { 887 av |= ENABLE_TARGET_FLAG; 888 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 889 if (av) { 890 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 891 if (wildcard) { 892 isp->isp_osinfo.tmflags[bus] &= 893 ~TM_WILDCARD_ENABLED; 894 xpt_free_path(tptr->owner); 895 } 896 return; 897 } 898 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 899 isp_prt(isp, ISP_LOGINFO, 900 "Target Mode enabled on channel %d", bus); 901 } else if (cel->enable == 0 && 902 (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) { 903 if (are_any_luns_enabled(isp, bus)) { 904 ccb->ccb_h.status = CAM_SCSI_BUSY; 905 return; 906 } 907 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 908 if (av) { 909 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 910 return; 911 } 912 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 913 isp_prt(isp, ISP_LOGINFO, 914 "Target Mode disabled on channel %d", bus); 915 } 916 917 if (wildcard) { 918 ccb->ccb_h.status = CAM_REQ_CMP; 919 return; 920 } 921 922 if (cel->enable) { 923 ccb->ccb_h.status = 924 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 925 if (ccb->ccb_h.status != CAM_REQ_CMP) { 926 return; 927 } 928 } else { 929 tptr = get_lun_statep(isp, bus, lun); 930 if (tptr == NULL) { 931 ccb->ccb_h.status = CAM_LUN_INVALID; 932 return; 933 } 934 } 935 936 if (isp_psema_sig_rqe(isp, bus)) { 937 rls_lun_statep(isp, tptr); 938 if (cel->enable) 939 destroy_lun_state(isp, tptr); 940 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 941 return; 942 } 943 944 if (cel->enable) { 945 u_int32_t seq = isp->isp_osinfo.rollinfo++; 946 int c, n, ulun = lun; 947 948 cmd = RQSTYPE_ENABLE_LUN; 949 c = DFLT_CMND_CNT; 950 n = DFLT_INOT_CNT; 951 if (IS_FC(isp) && lun != 0) { 952 cmd = RQSTYPE_MODIFY_LUN; 953 n = 0; 954 /* 955 * For SCC firmware, we only deal with setting 956 * (enabling or modifying) lun 0. 957 */ 958 ulun = 0; 959 } 960 rstat = LUN_ERR; 961 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { 962 xpt_print_path(ccb->ccb_h.path); 963 isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed"); 964 goto out; 965 } 966 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 967 xpt_print_path(ccb->ccb_h.path); 968 isp_prt(isp, ISP_LOGERR, 969 "wait for ENABLE/MODIFY LUN timed out"); 970 goto out; 971 } 972 rstat = isp->isp_osinfo.rstatus[bus]; 973 if (rstat != LUN_OK) { 974 xpt_print_path(ccb->ccb_h.path); 975 isp_prt(isp, ISP_LOGERR, 976 "ENABLE/MODIFY LUN returned 0x%x", rstat); 977 goto out; 978 } 979 } else { 980 int c, n, ulun = lun; 981 u_int32_t seq; 982 983 rstat = LUN_ERR; 984 seq = isp->isp_osinfo.rollinfo++; 985 cmd = -RQSTYPE_MODIFY_LUN; 986 987 c = DFLT_CMND_CNT; 988 n = DFLT_INOT_CNT; 989 if (IS_FC(isp) && lun != 0) { 990 n = 0; 991 /* 992 * For SCC firmware, we only deal with setting 993 * (enabling or modifying) lun 0. 994 */ 995 ulun = 0; 996 } 997 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { 998 xpt_print_path(ccb->ccb_h.path); 999 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); 1000 goto out; 1001 } 1002 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 1003 xpt_print_path(ccb->ccb_h.path); 1004 isp_prt(isp, ISP_LOGERR, 1005 "wait for MODIFY LUN timed out"); 1006 goto out; 1007 } 1008 rstat = isp->isp_osinfo.rstatus[bus]; 1009 if (rstat != LUN_OK) { 1010 xpt_print_path(ccb->ccb_h.path); 1011 isp_prt(isp, ISP_LOGERR, 1012 "MODIFY LUN returned 0x%x", rstat); 1013 goto out; 1014 } 1015 if (IS_FC(isp) && lun) { 1016 goto out; 1017 } 1018 1019 seq = isp->isp_osinfo.rollinfo++; 1020 1021 rstat = LUN_ERR; 1022 cmd = -RQSTYPE_ENABLE_LUN; 1023 if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) { 1024 xpt_print_path(ccb->ccb_h.path); 1025 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); 1026 goto out; 1027 } 1028 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) { 1029 xpt_print_path(ccb->ccb_h.path); 1030 isp_prt(isp, ISP_LOGERR, 1031 "wait for DISABLE LUN timed out"); 1032 goto out; 1033 } 1034 rstat = isp->isp_osinfo.rstatus[bus]; 1035 if (rstat != LUN_OK) { 1036 xpt_print_path(ccb->ccb_h.path); 1037 isp_prt(isp, ISP_LOGWARN, 1038 "DISABLE LUN returned 0x%x", rstat); 1039 goto out; 1040 } 1041 if (are_any_luns_enabled(isp, bus) == 0) { 1042 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1043 if (av) { 1044 isp_prt(isp, ISP_LOGWARN, 1045 "disable target mode on channel %d failed", 1046 bus); 1047 goto out; 1048 } 1049 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1050 xpt_print_path(ccb->ccb_h.path); 1051 isp_prt(isp, ISP_LOGINFO, 1052 "Target Mode disabled on channel %d", bus); 1053 } 1054 } 1055 1056 out: 1057 isp_vsema_rqe(isp, bus); 1058 1059 if (rstat != LUN_OK) { 1060 xpt_print_path(ccb->ccb_h.path); 1061 isp_prt(isp, ISP_LOGWARN, 1062 "lun %sable failed", (cel->enable) ? "en" : "dis"); 1063 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1064 rls_lun_statep(isp, tptr); 1065 if (cel->enable) 1066 destroy_lun_state(isp, tptr); 1067 } else { 1068 xpt_print_path(ccb->ccb_h.path); 1069 isp_prt(isp, ISP_LOGINFO, lfmt, 1070 (cel->enable) ? "en" : "dis", bus); 1071 rls_lun_statep(isp, tptr); 1072 if (cel->enable == 0) { 1073 destroy_lun_state(isp, tptr); 1074 } 1075 ccb->ccb_h.status = CAM_REQ_CMP; 1076 } 1077 } 1078 1079 static cam_status 1080 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb) 1081 { 1082 tstate_t *tptr; 1083 struct ccb_hdr_slist *lp; 1084 struct ccb_hdr *curelm; 1085 int found; 1086 union ccb *accb = ccb->cab.abort_ccb; 1087 1088 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 1089 if (IS_FC(isp) && (accb->ccb_h.target_id != 1090 ((fcparam *) isp->isp_param)->isp_loopid)) { 1091 return (CAM_PATH_INVALID); 1092 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 1093 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 1094 return (CAM_PATH_INVALID); 1095 } 1096 } 1097 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 1098 if (tptr == NULL) { 1099 return (CAM_PATH_INVALID); 1100 } 1101 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1102 lp = &tptr->atios; 1103 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 1104 lp = &tptr->inots; 1105 } else { 1106 rls_lun_statep(isp, tptr); 1107 return (CAM_UA_ABORT); 1108 } 1109 curelm = SLIST_FIRST(lp); 1110 found = 0; 1111 if (curelm == &accb->ccb_h) { 1112 found = 1; 1113 SLIST_REMOVE_HEAD(lp, sim_links.sle); 1114 } else { 1115 while(curelm != NULL) { 1116 struct ccb_hdr *nextelm; 1117 1118 nextelm = SLIST_NEXT(curelm, sim_links.sle); 1119 if (nextelm == &accb->ccb_h) { 1120 found = 1; 1121 SLIST_NEXT(curelm, sim_links.sle) = 1122 SLIST_NEXT(nextelm, sim_links.sle); 1123 break; 1124 } 1125 curelm = nextelm; 1126 } 1127 } 1128 rls_lun_statep(isp, tptr); 1129 if (found) { 1130 accb->ccb_h.status = CAM_REQ_ABORTED; 1131 return (CAM_REQ_CMP); 1132 } 1133 return(CAM_PATH_INVALID); 1134 } 1135 1136 static cam_status 1137 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb) 1138 { 1139 void *qe; 1140 struct ccb_scsiio *cso = &ccb->csio; 1141 u_int16_t *hp, save_handle; 1142 u_int16_t nxti, optr; 1143 u_int8_t local[QENTRY_LEN]; 1144 1145 1146 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1147 xpt_print_path(ccb->ccb_h.path); 1148 printf("Request Queue Overflow in isp_target_start_ctio\n"); 1149 return (CAM_RESRC_UNAVAIL); 1150 } 1151 bzero(local, QENTRY_LEN); 1152 1153 /* 1154 * We're either moving data or completing a command here. 1155 */ 1156 1157 if (IS_FC(isp)) { 1158 atio_private_data_t *atp; 1159 ct2_entry_t *cto = (ct2_entry_t *) local; 1160 1161 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1162 cto->ct_header.rqs_entry_count = 1; 1163 cto->ct_iid = cso->init_id; 1164 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 1165 cto->ct_lun = ccb->ccb_h.target_lun; 1166 } 1167 1168 atp = isp_get_atpd(isp, cso->tag_id); 1169 if (atp == NULL) { 1170 isp_prt(isp, ISP_LOGERR, 1171 "cannot find private data adjunct for tag %x", 1172 cso->tag_id); 1173 return (-1); 1174 } 1175 1176 cto->ct_rxid = cso->tag_id; 1177 if (cso->dxfer_len == 0) { 1178 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1179 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1180 cto->ct_flags |= CT2_SENDSTATUS; 1181 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1182 cto->ct_resid = 1183 atp->orig_datalen - atp->bytes_xfered; 1184 if (cto->ct_resid < 0) { 1185 cto->rsp.m1.ct_scsi_status |= 1186 CT2_DATA_OVER; 1187 } else if (cto->ct_resid > 0) { 1188 cto->rsp.m1.ct_scsi_status |= 1189 CT2_DATA_UNDER; 1190 } 1191 } 1192 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1193 int m = min(cso->sense_len, MAXRESPLEN); 1194 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m); 1195 cto->rsp.m1.ct_senselen = m; 1196 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1197 } 1198 } else { 1199 cto->ct_flags |= CT2_FLAG_MODE0; 1200 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1201 cto->ct_flags |= CT2_DATA_IN; 1202 } else { 1203 cto->ct_flags |= CT2_DATA_OUT; 1204 } 1205 cto->ct_reloff = atp->bytes_xfered; 1206 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1207 cto->ct_flags |= CT2_SENDSTATUS; 1208 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1209 cto->ct_resid = 1210 atp->orig_datalen - 1211 (atp->bytes_xfered + cso->dxfer_len); 1212 if (cto->ct_resid < 0) { 1213 cto->rsp.m0.ct_scsi_status |= 1214 CT2_DATA_OVER; 1215 } else if (cto->ct_resid > 0) { 1216 cto->rsp.m0.ct_scsi_status |= 1217 CT2_DATA_UNDER; 1218 } 1219 } else { 1220 atp->last_xframt = cso->dxfer_len; 1221 } 1222 /* 1223 * If we're sending data and status back together, 1224 * we can't also send back sense data as well. 1225 */ 1226 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1227 } 1228 1229 if (cto->ct_flags & CT2_SENDSTATUS) { 1230 isp_prt(isp, ISP_LOGTDEBUG0, 1231 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1232 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1233 cso->dxfer_len, cto->ct_resid); 1234 cto->ct_flags |= CT2_CCINCR; 1235 atp->state = ATPD_STATE_LAST_CTIO; 1236 } else 1237 atp->state = ATPD_STATE_CTIO; 1238 cto->ct_timeout = 10; 1239 hp = &cto->ct_syshandle; 1240 } else { 1241 ct_entry_t *cto = (ct_entry_t *) local; 1242 1243 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1244 cto->ct_header.rqs_entry_count = 1; 1245 cto->ct_iid = cso->init_id; 1246 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1247 cto->ct_tgt = ccb->ccb_h.target_id; 1248 cto->ct_lun = ccb->ccb_h.target_lun; 1249 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1250 if (AT_HAS_TAG(cso->tag_id)) { 1251 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id); 1252 cto->ct_flags |= CT_TQAE; 1253 } 1254 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1255 cto->ct_flags |= CT_NODISC; 1256 } 1257 if (cso->dxfer_len == 0) { 1258 cto->ct_flags |= CT_NO_DATA; 1259 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1260 cto->ct_flags |= CT_DATA_IN; 1261 } else { 1262 cto->ct_flags |= CT_DATA_OUT; 1263 } 1264 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1265 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1266 cto->ct_scsi_status = cso->scsi_status; 1267 cto->ct_resid = cso->resid; 1268 isp_prt(isp, ISP_LOGTDEBUG0, 1269 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1270 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1271 cso->tag_id); 1272 } 1273 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1274 cto->ct_timeout = 10; 1275 hp = &cto->ct_syshandle; 1276 } 1277 1278 if (isp_save_xs(isp, (XS_T *)ccb, hp)) { 1279 xpt_print_path(ccb->ccb_h.path); 1280 printf("No XFLIST pointers for isp_target_start_ctio\n"); 1281 return (CAM_RESRC_UNAVAIL); 1282 } 1283 1284 1285 /* 1286 * Call the dma setup routines for this entry (and any subsequent 1287 * CTIOs) if there's data to move, and then tell the f/w it's got 1288 * new things to play with. As with isp_start's usage of DMA setup, 1289 * any swizzling is done in the machine dependent layer. Because 1290 * of this, we put the request onto the queue area first in native 1291 * format. 1292 */ 1293 1294 save_handle = *hp; 1295 1296 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1297 case CMD_QUEUED: 1298 ISP_ADD_REQUEST(isp, nxti); 1299 return (CAM_REQ_INPROG); 1300 1301 case CMD_EAGAIN: 1302 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1303 isp_destroy_handle(isp, save_handle); 1304 return (CAM_RESRC_UNAVAIL); 1305 1306 default: 1307 isp_destroy_handle(isp, save_handle); 1308 return (XS_ERR(ccb)); 1309 } 1310 } 1311 1312 static void 1313 isp_refire_putback_atio(void *arg) 1314 { 1315 int s = splcam(); 1316 isp_target_putback_atio(arg); 1317 splx(s); 1318 } 1319 1320 static void 1321 isp_target_putback_atio(union ccb *ccb) 1322 { 1323 struct ispsoftc *isp; 1324 struct ccb_scsiio *cso; 1325 u_int16_t nxti, optr; 1326 void *qe; 1327 1328 isp = XS_ISP(ccb); 1329 1330 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1331 (void) timeout(isp_refire_putback_atio, ccb, 10); 1332 isp_prt(isp, ISP_LOGWARN, 1333 "isp_target_putback_atio: Request Queue Overflow"); 1334 return; 1335 } 1336 bzero(qe, QENTRY_LEN); 1337 cso = &ccb->csio; 1338 if (IS_FC(isp)) { 1339 at2_entry_t local, *at = &local; 1340 MEMZERO(at, sizeof (at2_entry_t)); 1341 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1342 at->at_header.rqs_entry_count = 1; 1343 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1344 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1345 } else { 1346 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1347 } 1348 at->at_status = CT_OK; 1349 at->at_rxid = cso->tag_id; 1350 at->at_iid = cso->ccb_h.target_id; 1351 isp_put_atio2(isp, at, qe); 1352 } else { 1353 at_entry_t local, *at = &local; 1354 MEMZERO(at, sizeof (at_entry_t)); 1355 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1356 at->at_header.rqs_entry_count = 1; 1357 at->at_iid = cso->init_id; 1358 at->at_iid |= XS_CHANNEL(ccb) << 7; 1359 at->at_tgt = cso->ccb_h.target_id; 1360 at->at_lun = cso->ccb_h.target_lun; 1361 at->at_status = CT_OK; 1362 at->at_tag_val = AT_GET_TAG(cso->tag_id); 1363 at->at_handle = AT_GET_HANDLE(cso->tag_id); 1364 isp_put_atio(isp, at, qe); 1365 } 1366 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); 1367 ISP_ADD_REQUEST(isp, nxti); 1368 isp_complete_ctio(ccb); 1369 } 1370 1371 static void 1372 isp_complete_ctio(union ccb *ccb) 1373 { 1374 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1375 ccb->ccb_h.status |= CAM_REQ_CMP; 1376 } 1377 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1378 xpt_done(ccb); 1379 } 1380 1381 /* 1382 * Handle ATIO stuff that the generic code can't. 1383 * This means handling CDBs. 1384 */ 1385 1386 static int 1387 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep) 1388 { 1389 tstate_t *tptr; 1390 int status, bus, iswildcard; 1391 struct ccb_accept_tio *atiop; 1392 1393 /* 1394 * The firmware status (except for the QLTM_SVALID bit) 1395 * indicates why this ATIO was sent to us. 1396 * 1397 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1398 * 1399 * If the DISCONNECTS DISABLED bit is set in the flags field, 1400 * we're still connected on the SCSI bus. 1401 */ 1402 status = aep->at_status; 1403 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 1404 /* 1405 * Bus Phase Sequence error. We should have sense data 1406 * suggested by the f/w. I'm not sure quite yet what 1407 * to do about this for CAM. 1408 */ 1409 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1410 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1411 return (0); 1412 } 1413 if ((status & ~QLTM_SVALID) != AT_CDB) { 1414 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1415 status); 1416 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1417 return (0); 1418 } 1419 1420 bus = GET_BUS_VAL(aep->at_iid); 1421 tptr = get_lun_statep(isp, bus, aep->at_lun); 1422 if (tptr == NULL) { 1423 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1424 iswildcard = 1; 1425 } else { 1426 iswildcard = 0; 1427 } 1428 1429 if (tptr == NULL) { 1430 /* 1431 * Because we can't autofeed sense data back with 1432 * a command for parallel SCSI, we can't give back 1433 * a CHECK CONDITION. We'll give back a BUSY status 1434 * instead. This works out okay because the only 1435 * time we should, in fact, get this, is in the 1436 * case that somebody configured us without the 1437 * blackhole driver, so they get what they deserve. 1438 */ 1439 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1440 return (0); 1441 } 1442 1443 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1444 if (atiop == NULL) { 1445 /* 1446 * Because we can't autofeed sense data back with 1447 * a command for parallel SCSI, we can't give back 1448 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1449 * instead. This works out okay because the only time we 1450 * should, in fact, get this, is in the case that we've 1451 * run out of ATIOS. 1452 */ 1453 xpt_print_path(tptr->owner); 1454 isp_prt(isp, ISP_LOGWARN, 1455 "no ATIOS for lun %d from initiator %d on channel %d", 1456 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1457 if (aep->at_flags & AT_TQAE) 1458 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1459 else 1460 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1461 rls_lun_statep(isp, tptr); 1462 return (0); 1463 } 1464 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1465 if (iswildcard) { 1466 atiop->ccb_h.target_id = aep->at_tgt; 1467 atiop->ccb_h.target_lun = aep->at_lun; 1468 } 1469 if (aep->at_flags & AT_NODISC) { 1470 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1471 } else { 1472 atiop->ccb_h.flags = 0; 1473 } 1474 1475 if (status & QLTM_SVALID) { 1476 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1477 atiop->sense_len = amt; 1478 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1479 } else { 1480 atiop->sense_len = 0; 1481 } 1482 1483 atiop->init_id = GET_IID_VAL(aep->at_iid); 1484 atiop->cdb_len = aep->at_cdblen; 1485 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1486 atiop->ccb_h.status = CAM_CDB_RECVD; 1487 /* 1488 * Construct a tag 'id' based upon tag value (which may be 0..255) 1489 * and the handle (which we have to preserve). 1490 */ 1491 AT_MAKE_TAGID(atiop->tag_id, aep); 1492 if (aep->at_flags & AT_TQAE) { 1493 atiop->tag_action = aep->at_tag_type; 1494 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1495 } 1496 xpt_done((union ccb*)atiop); 1497 isp_prt(isp, ISP_LOGTDEBUG0, 1498 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", 1499 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), 1500 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, 1501 aep->at_tag_type, (aep->at_flags & AT_NODISC)? 1502 "nondisc" : "disconnecting"); 1503 rls_lun_statep(isp, tptr); 1504 return (0); 1505 } 1506 1507 static int 1508 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep) 1509 { 1510 lun_id_t lun; 1511 tstate_t *tptr; 1512 struct ccb_accept_tio *atiop; 1513 atio_private_data_t *atp; 1514 1515 /* 1516 * The firmware status (except for the QLTM_SVALID bit) 1517 * indicates why this ATIO was sent to us. 1518 * 1519 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1520 */ 1521 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1522 isp_prt(isp, ISP_LOGWARN, 1523 "bogus atio (0x%x) leaked to platform", aep->at_status); 1524 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1525 return (0); 1526 } 1527 1528 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1529 lun = aep->at_scclun; 1530 } else { 1531 lun = aep->at_lun; 1532 } 1533 tptr = get_lun_statep(isp, 0, lun); 1534 if (tptr == NULL) { 1535 isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun); 1536 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1537 } 1538 1539 if (tptr == NULL) { 1540 /* 1541 * What we'd like to know is whether or not we have a listener 1542 * upstream that really hasn't configured yet. If we do, then 1543 * we can give a more sensible reply here. If not, then we can 1544 * reject this out of hand. 1545 * 1546 * Choices for what to send were 1547 * 1548 * Not Ready, Unit Not Self-Configured Yet 1549 * (0x2,0x3e,0x00) 1550 * 1551 * for the former and 1552 * 1553 * Illegal Request, Logical Unit Not Supported 1554 * (0x5,0x25,0x00) 1555 * 1556 * for the latter. 1557 * 1558 * We used to decide whether there was at least one listener 1559 * based upon whether the black hole driver was configured. 1560 * However, recent config(8) changes have made this hard to do 1561 * at this time. 1562 * 1563 */ 1564 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1565 return (0); 1566 } 1567 1568 atp = isp_get_atpd(isp, 0); 1569 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1570 if (atiop == NULL || atp == NULL) { 1571 /* 1572 * Because we can't autofeed sense data back with 1573 * a command for parallel SCSI, we can't give back 1574 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1575 * instead. This works out okay because the only time we 1576 * should, in fact, get this, is in the case that we've 1577 * run out of ATIOS. 1578 */ 1579 xpt_print_path(tptr->owner); 1580 isp_prt(isp, ISP_LOGWARN, 1581 "no %s for lun %d from initiator %d", 1582 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : 1583 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid); 1584 rls_lun_statep(isp, tptr); 1585 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1586 return (0); 1587 } 1588 atp->state = ATPD_STATE_ATIO; 1589 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1590 tptr->atio_count--; 1591 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d", 1592 lun, tptr->atio_count); 1593 1594 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1595 atiop->ccb_h.target_id = 1596 ((fcparam *)isp->isp_param)->isp_loopid; 1597 atiop->ccb_h.target_lun = lun; 1598 } 1599 /* 1600 * We don't get 'suggested' sense data as we do with SCSI cards. 1601 */ 1602 atiop->sense_len = 0; 1603 1604 atiop->init_id = aep->at_iid; 1605 atiop->cdb_len = ATIO2_CDBLEN; 1606 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1607 atiop->ccb_h.status = CAM_CDB_RECVD; 1608 atiop->tag_id = aep->at_rxid; 1609 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1610 case ATIO2_TC_ATTR_SIMPLEQ: 1611 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1612 break; 1613 case ATIO2_TC_ATTR_HEADOFQ: 1614 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1615 break; 1616 case ATIO2_TC_ATTR_ORDERED: 1617 atiop->tag_action = MSG_ORDERED_Q_TAG; 1618 break; 1619 case ATIO2_TC_ATTR_ACAQ: /* ?? */ 1620 case ATIO2_TC_ATTR_UNTAGGED: 1621 default: 1622 atiop->tag_action = 0; 1623 break; 1624 } 1625 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 1626 1627 atp->tag = atiop->tag_id; 1628 atp->lun = lun; 1629 atp->orig_datalen = aep->at_datalen; 1630 atp->last_xframt = 0; 1631 atp->bytes_xfered = 0; 1632 atp->state = ATPD_STATE_CAM; 1633 xpt_done((union ccb*)atiop); 1634 1635 isp_prt(isp, ISP_LOGTDEBUG0, 1636 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u", 1637 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid, 1638 lun, aep->at_taskflags, aep->at_datalen); 1639 rls_lun_statep(isp, tptr); 1640 return (0); 1641 } 1642 1643 static int 1644 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg) 1645 { 1646 union ccb *ccb; 1647 int sentstatus, ok, notify_cam, resid = 0; 1648 u_int16_t tval; 1649 1650 /* 1651 * CTIO and CTIO2 are close enough.... 1652 */ 1653 1654 ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle); 1655 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio")); 1656 isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle); 1657 1658 if (IS_FC(isp)) { 1659 ct2_entry_t *ct = arg; 1660 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid); 1661 if (atp == NULL) { 1662 isp_prt(isp, ISP_LOGERR, 1663 "cannot find adjunct for %x after I/O", 1664 ct->ct_rxid); 1665 return (0); 1666 } 1667 sentstatus = ct->ct_flags & CT2_SENDSTATUS; 1668 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1669 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { 1670 ccb->ccb_h.status |= CAM_SENT_SENSE; 1671 } 1672 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1673 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { 1674 resid = ct->ct_resid; 1675 atp->bytes_xfered += (atp->last_xframt - resid); 1676 atp->last_xframt = 0; 1677 } 1678 if (sentstatus || !ok) { 1679 atp->tag = 0; 1680 } 1681 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, 1682 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", 1683 ct->ct_rxid, ct->ct_status, ct->ct_flags, 1684 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, 1685 resid, sentstatus? "FIN" : "MID"); 1686 tval = ct->ct_rxid; 1687 1688 /* XXX: should really come after isp_complete_ctio */ 1689 atp->state = ATPD_STATE_PDON; 1690 } else { 1691 ct_entry_t *ct = arg; 1692 sentstatus = ct->ct_flags & CT_SENDSTATUS; 1693 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1694 /* 1695 * We *ought* to be able to get back to the original ATIO 1696 * here, but for some reason this gets lost. It's just as 1697 * well because it's squirrelled away as part of periph 1698 * private data. 1699 * 1700 * We can live without it as long as we continue to use 1701 * the auto-replenish feature for CTIOs. 1702 */ 1703 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1704 if (ct->ct_status & QLTM_SVALID) { 1705 char *sp = (char *)ct; 1706 sp += CTIO_SENSE_OFFSET; 1707 ccb->csio.sense_len = 1708 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); 1709 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); 1710 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1711 } 1712 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 1713 resid = ct->ct_resid; 1714 } 1715 isp_prt(isp, ISP_LOGTDEBUG0, 1716 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", 1717 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, 1718 ct->ct_status, ct->ct_flags, resid, 1719 sentstatus? "FIN" : "MID"); 1720 tval = ct->ct_fwhandle; 1721 } 1722 ccb->csio.resid += resid; 1723 1724 /* 1725 * We're here either because intermediate data transfers are done 1726 * and/or the final status CTIO (which may have joined with a 1727 * Data Transfer) is done. 1728 * 1729 * In any case, for this platform, the upper layers figure out 1730 * what to do next, so all we do here is collect status and 1731 * pass information along. Any DMA handles have already been 1732 * freed. 1733 */ 1734 if (notify_cam == 0) { 1735 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1736 return (0); 1737 } 1738 1739 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1740 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1741 1742 if (!ok) { 1743 isp_target_putback_atio(ccb); 1744 } else { 1745 isp_complete_ctio(ccb); 1746 1747 } 1748 return (0); 1749 } 1750 1751 static int 1752 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp) 1753 { 1754 return (0); /* XXXX */ 1755 } 1756 1757 static int 1758 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp) 1759 { 1760 1761 switch (inp->in_status) { 1762 case IN_PORT_LOGOUT: 1763 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1764 inp->in_iid); 1765 break; 1766 case IN_PORT_CHANGED: 1767 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1768 inp->in_iid); 1769 break; 1770 case IN_GLOBAL_LOGO: 1771 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1772 break; 1773 case IN_ABORT_TASK: 1774 { 1775 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1776 struct ccb_immed_notify *inot = NULL; 1777 1778 if (atp) { 1779 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1780 if (tptr) { 1781 inot = (struct ccb_immed_notify *) 1782 SLIST_FIRST(&tptr->inots); 1783 if (inot) { 1784 SLIST_REMOVE_HEAD(&tptr->inots, 1785 sim_links.sle); 1786 } 1787 } 1788 isp_prt(isp, ISP_LOGWARN, 1789 "abort task RX_ID %x IID %d state %d", 1790 inp->in_seqid, inp->in_iid, atp->state); 1791 } else { 1792 isp_prt(isp, ISP_LOGWARN, 1793 "abort task RX_ID %x from iid %d, state unknown", 1794 inp->in_seqid, inp->in_iid); 1795 } 1796 if (inot) { 1797 inot->initiator_id = inp->in_iid; 1798 inot->sense_len = 0; 1799 inot->message_args[0] = MSG_ABORT_TAG; 1800 inot->message_args[1] = inp->in_seqid & 0xff; 1801 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; 1802 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 1803 xpt_done((union ccb *)inot); 1804 } 1805 break; 1806 } 1807 default: 1808 break; 1809 } 1810 return (0); 1811 } 1812 #endif 1813 1814 static void 1815 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg) 1816 { 1817 struct cam_sim *sim; 1818 struct ispsoftc *isp; 1819 1820 sim = (struct cam_sim *)cbarg; 1821 isp = (struct ispsoftc *) cam_sim_softc(sim); 1822 switch (code) { 1823 case AC_LOST_DEVICE: 1824 if (IS_SCSI(isp)) { 1825 u_int16_t oflags, nflags; 1826 sdparam *sdp = isp->isp_param; 1827 int tgt; 1828 1829 tgt = xpt_path_target_id(path); 1830 if (tgt >= 0) { 1831 sdp += cam_sim_bus(sim); 1832 ISP_LOCK(isp); 1833 nflags = sdp->isp_devparam[tgt].nvrm_flags; 1834 #ifndef ISP_TARGET_MODE 1835 nflags &= DPARM_SAFE_DFLT; 1836 if (isp->isp_loaded_fw) { 1837 nflags |= DPARM_NARROW | DPARM_ASYNC; 1838 } 1839 #else 1840 nflags = DPARM_DEFAULT; 1841 #endif 1842 oflags = sdp->isp_devparam[tgt].goal_flags; 1843 sdp->isp_devparam[tgt].goal_flags = nflags; 1844 sdp->isp_devparam[tgt].dev_update = 1; 1845 isp->isp_update |= (1 << cam_sim_bus(sim)); 1846 (void) isp_control(isp, 1847 ISPCTL_UPDATE_PARAMS, NULL); 1848 sdp->isp_devparam[tgt].goal_flags = oflags; 1849 ISP_UNLOCK(isp); 1850 } 1851 } 1852 break; 1853 default: 1854 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); 1855 break; 1856 } 1857 } 1858 1859 static void 1860 isp_poll(struct cam_sim *sim) 1861 { 1862 struct ispsoftc *isp = cam_sim_softc(sim); 1863 u_int16_t isr, sema, mbox; 1864 1865 ISP_LOCK(isp); 1866 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 1867 isp_intr(isp, isr, sema, mbox); 1868 } 1869 ISP_UNLOCK(isp); 1870 } 1871 1872 1873 static void 1874 isp_watchdog(void *arg) 1875 { 1876 XS_T *xs = arg; 1877 struct ispsoftc *isp = XS_ISP(xs); 1878 u_int32_t handle; 1879 int iok; 1880 1881 /* 1882 * We've decided this command is dead. Make sure we're not trying 1883 * to kill a command that's already dead by getting it's handle and 1884 * and seeing whether it's still alive. 1885 */ 1886 ISP_LOCK(isp); 1887 iok = isp->isp_osinfo.intsok; 1888 isp->isp_osinfo.intsok = 0; 1889 handle = isp_find_handle(isp, xs); 1890 if (handle) { 1891 u_int16_t isr, sema, mbox; 1892 1893 if (XS_CMD_DONE_P(xs)) { 1894 isp_prt(isp, ISP_LOGDEBUG1, 1895 "watchdog found done cmd (handle 0x%x)", handle); 1896 ISP_UNLOCK(isp); 1897 return; 1898 } 1899 1900 if (XS_CMD_WDOG_P(xs)) { 1901 isp_prt(isp, ISP_LOGDEBUG2, 1902 "recursive watchdog (handle 0x%x)", handle); 1903 ISP_UNLOCK(isp); 1904 return; 1905 } 1906 1907 XS_CMD_S_WDOG(xs); 1908 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 1909 isp_intr(isp, isr, sema, mbox); 1910 } 1911 if (XS_CMD_DONE_P(xs)) { 1912 isp_prt(isp, ISP_LOGDEBUG2, 1913 "watchdog cleanup for handle 0x%x", handle); 1914 xpt_done((union ccb *) xs); 1915 } else if (XS_CMD_GRACE_P(xs)) { 1916 /* 1917 * Make sure the command is *really* dead before we 1918 * release the handle (and DMA resources) for reuse. 1919 */ 1920 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg); 1921 1922 /* 1923 * After this point, the comamnd is really dead. 1924 */ 1925 if (XS_XFRLEN(xs)) { 1926 ISP_DMAFREE(isp, xs, handle); 1927 } 1928 isp_destroy_handle(isp, handle); 1929 xpt_print_path(xs->ccb_h.path); 1930 isp_prt(isp, ISP_LOGWARN, 1931 "watchdog timeout for handle 0x%x", handle); 1932 XS_SETERR(xs, CAM_CMD_TIMEOUT); 1933 XS_CMD_C_WDOG(xs); 1934 isp_done(xs); 1935 } else { 1936 u_int16_t nxti, optr; 1937 ispreq_t local, *mp= &local, *qe; 1938 1939 XS_CMD_C_WDOG(xs); 1940 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 1941 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) { 1942 ISP_UNLOCK(isp); 1943 return; 1944 } 1945 XS_CMD_S_GRACE(xs); 1946 MEMZERO((void *) mp, sizeof (*mp)); 1947 mp->req_header.rqs_entry_count = 1; 1948 mp->req_header.rqs_entry_type = RQSTYPE_MARKER; 1949 mp->req_modifier = SYNC_ALL; 1950 mp->req_target = XS_CHANNEL(xs) << 7; 1951 isp_put_request(isp, mp, qe); 1952 ISP_ADD_REQUEST(isp, nxti); 1953 } 1954 } else { 1955 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command"); 1956 } 1957 isp->isp_osinfo.intsok = iok; 1958 ISP_UNLOCK(isp); 1959 } 1960 1961 static void 1962 isp_kthread(void *arg) 1963 { 1964 struct ispsoftc *isp = arg; 1965 1966 #ifdef ISP_SMPLOCK 1967 mtx_lock(&isp->isp_lock); 1968 #else 1969 mtx_lock(&Giant); 1970 #endif 1971 /* 1972 * The first loop is for our usage where we have yet to have 1973 * gotten good fibre channel state. 1974 */ 1975 for (;;) { 1976 int wasfrozen; 1977 1978 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state"); 1979 while (isp_fc_runstate(isp, 2 * 1000000) != 0) { 1980 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood"); 1981 if (FCPARAM(isp)->isp_fwstate != FW_READY || 1982 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) { 1983 if (FCPARAM(isp)->loop_seen_once == 0 || 1984 isp->isp_osinfo.ktmature == 0) { 1985 break; 1986 } 1987 } 1988 #ifdef ISP_SMPLOCK 1989 msleep(isp_kthread, &isp->isp_lock, 1990 PRIBIO, "isp_fcthrd", hz); 1991 #else 1992 (void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz); 1993 #endif 1994 } 1995 1996 /* 1997 * Even if we didn't get good loop state we may be 1998 * unfreezing the SIMQ so that we can kill off 1999 * commands (if we've never seen loop before, for example). 2000 */ 2001 isp->isp_osinfo.ktmature = 1; 2002 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 2003 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 2004 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 2005 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq"); 2006 ISPLOCK_2_CAMLOCK(isp); 2007 xpt_release_simq(isp->isp_sim, 1); 2008 CAMLOCK_2_ISPLOCK(isp); 2009 } 2010 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called"); 2011 #ifdef ISP_SMPLOCK 2012 cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock); 2013 #else 2014 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0); 2015 #endif 2016 } 2017 } 2018 2019 static void 2020 isp_action(struct cam_sim *sim, union ccb *ccb) 2021 { 2022 int bus, tgt, error; 2023 struct ispsoftc *isp; 2024 struct ccb_trans_settings *cts; 2025 2026 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2027 2028 isp = (struct ispsoftc *)cam_sim_softc(sim); 2029 ccb->ccb_h.sim_priv.entries[0].field = 0; 2030 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2031 if (isp->isp_state != ISP_RUNSTATE && 2032 ccb->ccb_h.func_code == XPT_SCSI_IO) { 2033 CAMLOCK_2_ISPLOCK(isp); 2034 isp_init(isp); 2035 if (isp->isp_state != ISP_INITSTATE) { 2036 ISP_UNLOCK(isp); 2037 /* 2038 * Lie. Say it was a selection timeout. 2039 */ 2040 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 2041 xpt_freeze_devq(ccb->ccb_h.path, 1); 2042 xpt_done(ccb); 2043 return; 2044 } 2045 isp->isp_state = ISP_RUNSTATE; 2046 ISPLOCK_2_CAMLOCK(isp); 2047 } 2048 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2049 2050 2051 switch (ccb->ccb_h.func_code) { 2052 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2053 /* 2054 * Do a couple of preliminary checks... 2055 */ 2056 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2057 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2058 ccb->ccb_h.status = CAM_REQ_INVALID; 2059 xpt_done(ccb); 2060 break; 2061 } 2062 } 2063 #ifdef DIAGNOSTIC 2064 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2065 ccb->ccb_h.status = CAM_PATH_INVALID; 2066 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2067 ccb->ccb_h.status = CAM_PATH_INVALID; 2068 } 2069 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2070 isp_prt(isp, ISP_LOGERR, 2071 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO", 2072 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 2073 xpt_done(ccb); 2074 break; 2075 } 2076 #endif 2077 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 2078 CAMLOCK_2_ISPLOCK(isp); 2079 error = isp_start((XS_T *) ccb); 2080 switch (error) { 2081 case CMD_QUEUED: 2082 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2083 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2084 u_int64_t ticks = (u_int64_t) hz; 2085 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) 2086 ticks = 60 * 1000 * ticks; 2087 else 2088 ticks = ccb->ccb_h.timeout * hz; 2089 ticks = ((ticks + 999) / 1000) + hz + hz; 2090 if (ticks >= 0x80000000) { 2091 isp_prt(isp, ISP_LOGERR, 2092 "timeout overflow"); 2093 ticks = 0x7fffffff; 2094 } 2095 ccb->ccb_h.timeout_ch = timeout(isp_watchdog, 2096 (caddr_t)ccb, (int)ticks); 2097 } else { 2098 callout_handle_init(&ccb->ccb_h.timeout_ch); 2099 } 2100 ISPLOCK_2_CAMLOCK(isp); 2101 break; 2102 case CMD_RQLATER: 2103 /* 2104 * This can only happen for Fibre Channel 2105 */ 2106 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 2107 if (FCPARAM(isp)->loop_seen_once == 0 && 2108 isp->isp_osinfo.ktmature) { 2109 ISPLOCK_2_CAMLOCK(isp); 2110 XS_SETERR(ccb, CAM_SEL_TIMEOUT); 2111 xpt_done(ccb); 2112 break; 2113 } 2114 #ifdef ISP_SMPLOCK 2115 cv_signal(&isp->isp_osinfo.kthread_cv); 2116 #else 2117 wakeup(&isp->isp_osinfo.kthread_cv); 2118 #endif 2119 isp_freeze_loopdown(isp, "isp_action(RQLATER)"); 2120 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2121 ISPLOCK_2_CAMLOCK(isp); 2122 xpt_done(ccb); 2123 break; 2124 case CMD_EAGAIN: 2125 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2126 ISPLOCK_2_CAMLOCK(isp); 2127 xpt_done(ccb); 2128 break; 2129 case CMD_COMPLETE: 2130 isp_done((struct ccb_scsiio *) ccb); 2131 ISPLOCK_2_CAMLOCK(isp); 2132 break; 2133 default: 2134 isp_prt(isp, ISP_LOGERR, 2135 "What's this? 0x%x at %d in file %s", 2136 error, __LINE__, __FILE__); 2137 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2138 xpt_done(ccb); 2139 ISPLOCK_2_CAMLOCK(isp); 2140 } 2141 break; 2142 2143 #ifdef ISP_TARGET_MODE 2144 case XPT_EN_LUN: /* Enable LUN as a target */ 2145 { 2146 int iok; 2147 CAMLOCK_2_ISPLOCK(isp); 2148 iok = isp->isp_osinfo.intsok; 2149 isp->isp_osinfo.intsok = 0; 2150 isp_en_lun(isp, ccb); 2151 isp->isp_osinfo.intsok = iok; 2152 ISPLOCK_2_CAMLOCK(isp); 2153 xpt_done(ccb); 2154 break; 2155 } 2156 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2157 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2158 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2159 { 2160 tstate_t *tptr = 2161 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2162 if (tptr == NULL) { 2163 ccb->ccb_h.status = CAM_LUN_INVALID; 2164 xpt_done(ccb); 2165 break; 2166 } 2167 ccb->ccb_h.sim_priv.entries[0].field = 0; 2168 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2169 ccb->ccb_h.flags = 0; 2170 2171 CAMLOCK_2_ISPLOCK(isp); 2172 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2173 /* 2174 * Note that the command itself may not be done- 2175 * it may not even have had the first CTIO sent. 2176 */ 2177 tptr->atio_count++; 2178 isp_prt(isp, ISP_LOGTDEBUG0, 2179 "Put FREE ATIO2, lun %d, count now %d", 2180 ccb->ccb_h.target_lun, tptr->atio_count); 2181 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2182 sim_links.sle); 2183 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2184 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2185 sim_links.sle); 2186 } else { 2187 ; 2188 } 2189 rls_lun_statep(isp, tptr); 2190 ccb->ccb_h.status = CAM_REQ_INPROG; 2191 ISPLOCK_2_CAMLOCK(isp); 2192 break; 2193 } 2194 case XPT_CONT_TARGET_IO: 2195 { 2196 CAMLOCK_2_ISPLOCK(isp); 2197 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb); 2198 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 2199 isp_prt(isp, ISP_LOGWARN, 2200 "XPT_CONT_TARGET_IO: status 0x%x", 2201 ccb->ccb_h.status); 2202 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2203 ISPLOCK_2_CAMLOCK(isp); 2204 xpt_done(ccb); 2205 } else { 2206 ISPLOCK_2_CAMLOCK(isp); 2207 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2208 } 2209 break; 2210 } 2211 #endif 2212 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2213 2214 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2215 tgt = ccb->ccb_h.target_id; 2216 tgt |= (bus << 16); 2217 2218 CAMLOCK_2_ISPLOCK(isp); 2219 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2220 ISPLOCK_2_CAMLOCK(isp); 2221 if (error) { 2222 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2223 } else { 2224 ccb->ccb_h.status = CAM_REQ_CMP; 2225 } 2226 xpt_done(ccb); 2227 break; 2228 case XPT_ABORT: /* Abort the specified CCB */ 2229 { 2230 union ccb *accb = ccb->cab.abort_ccb; 2231 CAMLOCK_2_ISPLOCK(isp); 2232 switch (accb->ccb_h.func_code) { 2233 #ifdef ISP_TARGET_MODE 2234 case XPT_ACCEPT_TARGET_IO: 2235 case XPT_IMMED_NOTIFY: 2236 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2237 break; 2238 case XPT_CONT_TARGET_IO: 2239 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2240 ccb->ccb_h.status = CAM_UA_ABORT; 2241 break; 2242 #endif 2243 case XPT_SCSI_IO: 2244 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2245 if (error) { 2246 ccb->ccb_h.status = CAM_UA_ABORT; 2247 } else { 2248 ccb->ccb_h.status = CAM_REQ_CMP; 2249 } 2250 break; 2251 default: 2252 ccb->ccb_h.status = CAM_REQ_INVALID; 2253 break; 2254 } 2255 ISPLOCK_2_CAMLOCK(isp); 2256 xpt_done(ccb); 2257 break; 2258 } 2259 #ifdef CAM_NEW_TRAN_CODE 2260 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2261 #else 2262 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2263 #endif 2264 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2265 cts = &ccb->cts; 2266 if (!IS_CURRENT_SETTINGS(cts)) { 2267 ccb->ccb_h.status = CAM_REQ_INVALID; 2268 xpt_done(ccb); 2269 break; 2270 } 2271 tgt = cts->ccb_h.target_id; 2272 CAMLOCK_2_ISPLOCK(isp); 2273 if (IS_SCSI(isp)) { 2274 #ifndef CAM_NEW_TRAN_CODE 2275 sdparam *sdp = isp->isp_param; 2276 u_int16_t *dptr; 2277 2278 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2279 2280 sdp += bus; 2281 /* 2282 * We always update (internally) from goal_flags 2283 * so any request to change settings just gets 2284 * vectored to that location. 2285 */ 2286 dptr = &sdp->isp_devparam[tgt].goal_flags; 2287 2288 /* 2289 * Note that these operations affect the 2290 * the goal flags (goal_flags)- not 2291 * the current state flags. Then we mark 2292 * things so that the next operation to 2293 * this HBA will cause the update to occur. 2294 */ 2295 if (cts->valid & CCB_TRANS_DISC_VALID) { 2296 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2297 *dptr |= DPARM_DISC; 2298 } else { 2299 *dptr &= ~DPARM_DISC; 2300 } 2301 } 2302 if (cts->valid & CCB_TRANS_TQ_VALID) { 2303 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2304 *dptr |= DPARM_TQING; 2305 } else { 2306 *dptr &= ~DPARM_TQING; 2307 } 2308 } 2309 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2310 switch (cts->bus_width) { 2311 case MSG_EXT_WDTR_BUS_16_BIT: 2312 *dptr |= DPARM_WIDE; 2313 break; 2314 default: 2315 *dptr &= ~DPARM_WIDE; 2316 } 2317 } 2318 /* 2319 * Any SYNC RATE of nonzero and SYNC_OFFSET 2320 * of nonzero will cause us to go to the 2321 * selected (from NVRAM) maximum value for 2322 * this device. At a later point, we'll 2323 * allow finer control. 2324 */ 2325 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2326 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2327 (cts->sync_offset > 0)) { 2328 *dptr |= DPARM_SYNC; 2329 } else { 2330 *dptr &= ~DPARM_SYNC; 2331 } 2332 *dptr |= DPARM_SAFE_DFLT; 2333 #else 2334 struct ccb_trans_settings_scsi *scsi = 2335 &cts->proto_specific.scsi; 2336 struct ccb_trans_settings_spi *spi = 2337 &cts->xport_specific.spi; 2338 sdparam *sdp = isp->isp_param; 2339 u_int16_t *dptr; 2340 2341 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2342 sdp += bus; 2343 /* 2344 * We always update (internally) from goal_flags 2345 * so any request to change settings just gets 2346 * vectored to that location. 2347 */ 2348 dptr = &sdp->isp_devparam[tgt].goal_flags; 2349 2350 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2351 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2352 *dptr |= DPARM_DISC; 2353 else 2354 *dptr &= ~DPARM_DISC; 2355 } 2356 2357 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2358 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2359 *dptr |= DPARM_TQING; 2360 else 2361 *dptr &= ~DPARM_TQING; 2362 } 2363 2364 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2365 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2366 *dptr |= DPARM_WIDE; 2367 else 2368 *dptr &= ~DPARM_WIDE; 2369 } 2370 2371 /* 2372 * XXX: FIX ME 2373 */ 2374 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2375 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2376 (spi->sync_period && spi->sync_offset)) { 2377 *dptr |= DPARM_SYNC; 2378 /* 2379 * XXX: CHECK FOR LEGALITY 2380 */ 2381 sdp->isp_devparam[tgt].goal_period = 2382 spi->sync_period; 2383 sdp->isp_devparam[tgt].goal_offset = 2384 spi->sync_offset; 2385 } else { 2386 *dptr &= ~DPARM_SYNC; 2387 } 2388 #endif 2389 isp_prt(isp, ISP_LOGDEBUG0, 2390 "SET bus %d targ %d to flags %x off %x per %x", 2391 bus, tgt, sdp->isp_devparam[tgt].goal_flags, 2392 sdp->isp_devparam[tgt].goal_offset, 2393 sdp->isp_devparam[tgt].goal_period); 2394 sdp->isp_devparam[tgt].dev_update = 1; 2395 isp->isp_update |= (1 << bus); 2396 } 2397 ISPLOCK_2_CAMLOCK(isp); 2398 ccb->ccb_h.status = CAM_REQ_CMP; 2399 xpt_done(ccb); 2400 break; 2401 case XPT_GET_TRAN_SETTINGS: 2402 cts = &ccb->cts; 2403 tgt = cts->ccb_h.target_id; 2404 CAMLOCK_2_ISPLOCK(isp); 2405 if (IS_FC(isp)) { 2406 #ifndef CAM_NEW_TRAN_CODE 2407 /* 2408 * a lot of normal SCSI things don't make sense. 2409 */ 2410 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2411 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2412 /* 2413 * How do you measure the width of a high 2414 * speed serial bus? Well, in bytes. 2415 * 2416 * Offset and period make no sense, though, so we set 2417 * (above) a 'base' transfer speed to be gigabit. 2418 */ 2419 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2420 #else 2421 fcparam *fcp = isp->isp_param; 2422 struct ccb_trans_settings_fc *fc = 2423 &cts->xport_specific.fc; 2424 2425 cts->protocol = PROTO_SCSI; 2426 cts->protocol_version = SCSI_REV_2; 2427 cts->transport = XPORT_FC; 2428 cts->transport_version = 0; 2429 2430 fc->valid = CTS_FC_VALID_SPEED; 2431 if (fcp->isp_gbspeed == 2) 2432 fc->bitrate = 200000; 2433 else 2434 fc->bitrate = 100000; 2435 if (tgt > 0 && tgt < MAX_FC_TARG) { 2436 struct lportdb *lp = &fcp->portdb[tgt]; 2437 fc->wwnn = lp->node_wwn; 2438 fc->wwpn = lp->port_wwn; 2439 fc->port = lp->portid; 2440 fc->valid |= CTS_FC_VALID_WWNN | 2441 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2442 } 2443 #endif 2444 } else { 2445 #ifdef CAM_NEW_TRAN_CODE 2446 struct ccb_trans_settings_scsi *scsi = 2447 &cts->proto_specific.scsi; 2448 struct ccb_trans_settings_spi *spi = 2449 &cts->xport_specific.spi; 2450 #endif 2451 sdparam *sdp = isp->isp_param; 2452 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2453 u_int16_t dval, pval, oval; 2454 2455 sdp += bus; 2456 2457 if (IS_CURRENT_SETTINGS(cts)) { 2458 sdp->isp_devparam[tgt].dev_refresh = 1; 2459 isp->isp_update |= (1 << bus); 2460 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2461 NULL); 2462 dval = sdp->isp_devparam[tgt].actv_flags; 2463 oval = sdp->isp_devparam[tgt].actv_offset; 2464 pval = sdp->isp_devparam[tgt].actv_period; 2465 } else { 2466 dval = sdp->isp_devparam[tgt].nvrm_flags; 2467 oval = sdp->isp_devparam[tgt].nvrm_offset; 2468 pval = sdp->isp_devparam[tgt].nvrm_period; 2469 } 2470 2471 #ifndef CAM_NEW_TRAN_CODE 2472 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2473 2474 if (dval & DPARM_DISC) { 2475 cts->flags |= CCB_TRANS_DISC_ENB; 2476 } 2477 if (dval & DPARM_TQING) { 2478 cts->flags |= CCB_TRANS_TAG_ENB; 2479 } 2480 if (dval & DPARM_WIDE) { 2481 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2482 } else { 2483 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2484 } 2485 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2486 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2487 2488 if ((dval & DPARM_SYNC) && oval != 0) { 2489 cts->sync_period = pval; 2490 cts->sync_offset = oval; 2491 cts->valid |= 2492 CCB_TRANS_SYNC_RATE_VALID | 2493 CCB_TRANS_SYNC_OFFSET_VALID; 2494 } 2495 #else 2496 cts->protocol = PROTO_SCSI; 2497 cts->protocol_version = SCSI_REV_2; 2498 cts->transport = XPORT_SPI; 2499 cts->transport_version = 2; 2500 2501 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 2502 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 2503 if (dval & DPARM_DISC) { 2504 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2505 } 2506 if (dval & DPARM_TQING) { 2507 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2508 } 2509 if ((dval & DPARM_SYNC) && oval && pval) { 2510 spi->sync_offset = oval; 2511 spi->sync_period = pval; 2512 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2513 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2514 } 2515 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2516 if (dval & DPARM_WIDE) { 2517 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2518 } else { 2519 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2520 } 2521 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2522 scsi->valid = CTS_SCSI_VALID_TQ; 2523 spi->valid |= CTS_SPI_VALID_DISC; 2524 } else { 2525 scsi->valid = 0; 2526 } 2527 #endif 2528 isp_prt(isp, ISP_LOGDEBUG0, 2529 "GET %s bus %d targ %d to flags %x off %x per %x", 2530 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 2531 bus, tgt, dval, oval, pval); 2532 } 2533 ISPLOCK_2_CAMLOCK(isp); 2534 ccb->ccb_h.status = CAM_REQ_CMP; 2535 xpt_done(ccb); 2536 break; 2537 2538 case XPT_CALC_GEOMETRY: 2539 { 2540 struct ccb_calc_geometry *ccg; 2541 2542 ccg = &ccb->ccg; 2543 if (ccg->block_size == 0) { 2544 isp_prt(isp, ISP_LOGERR, 2545 "%d.%d XPT_CALC_GEOMETRY block size 0?", 2546 ccg->ccb_h.target_id, ccg->ccb_h.target_lun); 2547 ccb->ccb_h.status = CAM_REQ_INVALID; 2548 xpt_done(ccb); 2549 break; 2550 } 2551 cam_calc_geometry(ccg, /*extended*/1); 2552 xpt_done(ccb); 2553 break; 2554 } 2555 case XPT_RESET_BUS: /* Reset the specified bus */ 2556 bus = cam_sim_bus(sim); 2557 CAMLOCK_2_ISPLOCK(isp); 2558 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2559 ISPLOCK_2_CAMLOCK(isp); 2560 if (error) 2561 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2562 else { 2563 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2564 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2565 else if (isp->isp_path != NULL) 2566 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2567 ccb->ccb_h.status = CAM_REQ_CMP; 2568 } 2569 xpt_done(ccb); 2570 break; 2571 2572 case XPT_TERM_IO: /* Terminate the I/O process */ 2573 ccb->ccb_h.status = CAM_REQ_INVALID; 2574 xpt_done(ccb); 2575 break; 2576 2577 case XPT_PATH_INQ: /* Path routing inquiry */ 2578 { 2579 struct ccb_pathinq *cpi = &ccb->cpi; 2580 2581 cpi->version_num = 1; 2582 #ifdef ISP_TARGET_MODE 2583 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2584 #else 2585 cpi->target_sprt = 0; 2586 #endif 2587 cpi->hba_eng_cnt = 0; 2588 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2589 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2590 cpi->bus_id = cam_sim_bus(sim); 2591 if (IS_FC(isp)) { 2592 cpi->hba_misc = PIM_NOBUSRESET; 2593 /* 2594 * Because our loop ID can shift from time to time, 2595 * make our initiator ID out of range of our bus. 2596 */ 2597 cpi->initiator_id = cpi->max_target + 1; 2598 2599 /* 2600 * Set base transfer capabilities for Fibre Channel. 2601 * Technically not correct because we don't know 2602 * what media we're running on top of- but we'll 2603 * look good if we always say 100MB/s. 2604 */ 2605 if (FCPARAM(isp)->isp_gbspeed == 2) 2606 cpi->base_transfer_speed = 200000; 2607 else 2608 cpi->base_transfer_speed = 100000; 2609 cpi->hba_inquiry = PI_TAG_ABLE; 2610 #ifdef CAM_NEW_TRAN_CODE 2611 cpi->transport = XPORT_FC; 2612 cpi->transport_version = 0; /* WHAT'S THIS FOR? */ 2613 #endif 2614 } else { 2615 sdparam *sdp = isp->isp_param; 2616 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2617 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2618 cpi->hba_misc = 0; 2619 cpi->initiator_id = sdp->isp_initiator_id; 2620 cpi->base_transfer_speed = 3300; 2621 #ifdef CAM_NEW_TRAN_CODE 2622 cpi->transport = XPORT_SPI; 2623 cpi->transport_version = 2; /* WHAT'S THIS FOR? */ 2624 #endif 2625 } 2626 #ifdef CAM_NEW_TRAN_CODE 2627 cpi->protocol = PROTO_SCSI; 2628 cpi->protocol_version = SCSI_REV_2; 2629 #endif 2630 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2631 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2632 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2633 cpi->unit_number = cam_sim_unit(sim); 2634 cpi->ccb_h.status = CAM_REQ_CMP; 2635 xpt_done(ccb); 2636 break; 2637 } 2638 default: 2639 ccb->ccb_h.status = CAM_REQ_INVALID; 2640 xpt_done(ccb); 2641 break; 2642 } 2643 } 2644 2645 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 2646 void 2647 isp_done(struct ccb_scsiio *sccb) 2648 { 2649 struct ispsoftc *isp = XS_ISP(sccb); 2650 2651 if (XS_NOERR(sccb)) 2652 XS_SETERR(sccb, CAM_REQ_CMP); 2653 2654 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2655 (sccb->scsi_status != SCSI_STATUS_OK)) { 2656 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2657 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 2658 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2659 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2660 } else { 2661 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2662 } 2663 } 2664 2665 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2666 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2667 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2668 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2669 xpt_freeze_devq(sccb->ccb_h.path, 1); 2670 isp_prt(isp, ISP_LOGDEBUG0, 2671 "freeze devq %d.%d cam sts %x scsi sts %x", 2672 sccb->ccb_h.target_id, sccb->ccb_h.target_lun, 2673 sccb->ccb_h.status, sccb->scsi_status); 2674 } 2675 } 2676 2677 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 2678 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2679 xpt_print_path(sccb->ccb_h.path); 2680 isp_prt(isp, ISP_LOGINFO, 2681 "cam completion status 0x%x", sccb->ccb_h.status); 2682 } 2683 2684 XS_CMD_S_DONE(sccb); 2685 if (XS_CMD_WDOG_P(sccb) == 0) { 2686 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 2687 if (XS_CMD_GRACE_P(sccb)) { 2688 isp_prt(isp, ISP_LOGDEBUG2, 2689 "finished command on borrowed time"); 2690 } 2691 XS_CMD_S_CLEAR(sccb); 2692 ISPLOCK_2_CAMLOCK(isp); 2693 xpt_done((union ccb *) sccb); 2694 CAMLOCK_2_ISPLOCK(isp); 2695 } 2696 } 2697 2698 int 2699 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg) 2700 { 2701 int bus, rv = 0; 2702 switch (cmd) { 2703 case ISPASYNC_NEW_TGT_PARAMS: 2704 { 2705 #ifdef CAM_NEW_TRAN_CODE 2706 struct ccb_trans_settings_scsi *scsi; 2707 struct ccb_trans_settings_spi *spi; 2708 #endif 2709 int flags, tgt; 2710 sdparam *sdp = isp->isp_param; 2711 struct ccb_trans_settings cts; 2712 struct cam_path *tmppath; 2713 2714 bzero(&cts, sizeof (struct ccb_trans_settings)); 2715 2716 tgt = *((int *)arg); 2717 bus = (tgt >> 16) & 0xffff; 2718 tgt &= 0xffff; 2719 sdp += bus; 2720 ISPLOCK_2_CAMLOCK(isp); 2721 if (xpt_create_path(&tmppath, NULL, 2722 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), 2723 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2724 CAMLOCK_2_ISPLOCK(isp); 2725 isp_prt(isp, ISP_LOGWARN, 2726 "isp_async cannot make temp path for %d.%d", 2727 tgt, bus); 2728 rv = -1; 2729 break; 2730 } 2731 CAMLOCK_2_ISPLOCK(isp); 2732 flags = sdp->isp_devparam[tgt].actv_flags; 2733 #ifdef CAM_NEW_TRAN_CODE 2734 cts.type = CTS_TYPE_CURRENT_SETTINGS; 2735 cts.protocol = PROTO_SCSI; 2736 cts.transport = XPORT_SPI; 2737 2738 scsi = &cts.proto_specific.scsi; 2739 spi = &cts.xport_specific.spi; 2740 2741 if (flags & DPARM_TQING) { 2742 scsi->valid |= CTS_SCSI_VALID_TQ; 2743 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2744 spi->flags |= CTS_SPI_FLAGS_TAG_ENB; 2745 } 2746 2747 if (flags & DPARM_DISC) { 2748 spi->valid |= CTS_SPI_VALID_DISC; 2749 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2750 } 2751 spi->flags |= CTS_SPI_VALID_BUS_WIDTH; 2752 if (flags & DPARM_WIDE) { 2753 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2754 } else { 2755 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2756 } 2757 if (flags & DPARM_SYNC) { 2758 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2759 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2760 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 2761 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 2762 } 2763 #else 2764 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 2765 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2766 if (flags & DPARM_DISC) { 2767 cts.flags |= CCB_TRANS_DISC_ENB; 2768 } 2769 if (flags & DPARM_TQING) { 2770 cts.flags |= CCB_TRANS_TAG_ENB; 2771 } 2772 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 2773 cts.bus_width = (flags & DPARM_WIDE)? 2774 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; 2775 cts.sync_period = sdp->isp_devparam[tgt].actv_period; 2776 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; 2777 if (flags & DPARM_SYNC) { 2778 cts.valid |= 2779 CCB_TRANS_SYNC_RATE_VALID | 2780 CCB_TRANS_SYNC_OFFSET_VALID; 2781 } 2782 #endif 2783 isp_prt(isp, ISP_LOGDEBUG2, 2784 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 2785 bus, tgt, sdp->isp_devparam[tgt].actv_period, 2786 sdp->isp_devparam[tgt].actv_offset, flags); 2787 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 2788 ISPLOCK_2_CAMLOCK(isp); 2789 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 2790 xpt_free_path(tmppath); 2791 CAMLOCK_2_ISPLOCK(isp); 2792 break; 2793 } 2794 case ISPASYNC_BUS_RESET: 2795 bus = *((int *)arg); 2796 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 2797 bus); 2798 if (bus > 0 && isp->isp_path2) { 2799 ISPLOCK_2_CAMLOCK(isp); 2800 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2801 CAMLOCK_2_ISPLOCK(isp); 2802 } else if (isp->isp_path) { 2803 ISPLOCK_2_CAMLOCK(isp); 2804 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2805 CAMLOCK_2_ISPLOCK(isp); 2806 } 2807 break; 2808 case ISPASYNC_LIP: 2809 if (isp->isp_path) { 2810 isp_freeze_loopdown(isp, "ISPASYNC_LIP"); 2811 } 2812 isp_prt(isp, ISP_LOGINFO, "LIP Received"); 2813 break; 2814 case ISPASYNC_LOOP_RESET: 2815 if (isp->isp_path) { 2816 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET"); 2817 } 2818 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received"); 2819 break; 2820 case ISPASYNC_LOOP_DOWN: 2821 if (isp->isp_path) { 2822 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN"); 2823 } 2824 isp_prt(isp, ISP_LOGINFO, "Loop DOWN"); 2825 break; 2826 case ISPASYNC_LOOP_UP: 2827 /* 2828 * Now we just note that Loop has come up. We don't 2829 * actually do anything because we're waiting for a 2830 * Change Notify before activating the FC cleanup 2831 * thread to look at the state of the loop again. 2832 */ 2833 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 2834 break; 2835 case ISPASYNC_PROMENADE: 2836 { 2837 struct cam_path *tmppath; 2838 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x " 2839 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x"; 2840 static const char *roles[4] = { 2841 "(none)", "Target", "Initiator", "Target/Initiator" 2842 }; 2843 fcparam *fcp = isp->isp_param; 2844 int tgt = *((int *) arg); 2845 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT); 2846 struct lportdb *lp = &fcp->portdb[tgt]; 2847 2848 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid, 2849 roles[lp->roles & 0x3], 2850 (lp->valid)? "Arrived" : "Departed", 2851 (u_int32_t) (lp->port_wwn >> 32), 2852 (u_int32_t) (lp->port_wwn & 0xffffffffLL), 2853 (u_int32_t) (lp->node_wwn >> 32), 2854 (u_int32_t) (lp->node_wwn & 0xffffffffLL)); 2855 2856 ISPLOCK_2_CAMLOCK(isp); 2857 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim), 2858 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2859 CAMLOCK_2_ISPLOCK(isp); 2860 break; 2861 } 2862 /* 2863 * Policy: only announce targets. 2864 */ 2865 if (lp->roles & is_tgt_mask) { 2866 if (lp->valid) { 2867 xpt_async(AC_FOUND_DEVICE, tmppath, NULL); 2868 } else { 2869 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2870 } 2871 } 2872 xpt_free_path(tmppath); 2873 CAMLOCK_2_ISPLOCK(isp); 2874 break; 2875 } 2876 case ISPASYNC_CHANGE_NOTIFY: 2877 if (arg == ISPASYNC_CHANGE_PDB) { 2878 isp_prt(isp, ISP_LOGINFO, 2879 "Port Database Changed"); 2880 } else if (arg == ISPASYNC_CHANGE_SNS) { 2881 isp_prt(isp, ISP_LOGINFO, 2882 "Name Server Database Changed"); 2883 } 2884 #ifdef ISP_SMPLOCK 2885 cv_signal(&isp->isp_osinfo.kthread_cv); 2886 #else 2887 wakeup(&isp->isp_osinfo.kthread_cv); 2888 #endif 2889 break; 2890 case ISPASYNC_FABRIC_DEV: 2891 { 2892 int target, base, lim; 2893 fcparam *fcp = isp->isp_param; 2894 struct lportdb *lp = NULL; 2895 struct lportdb *clp = (struct lportdb *) arg; 2896 char *pt; 2897 2898 switch (clp->port_type) { 2899 case 1: 2900 pt = " N_Port"; 2901 break; 2902 case 2: 2903 pt = " NL_Port"; 2904 break; 2905 case 3: 2906 pt = "F/NL_Port"; 2907 break; 2908 case 0x7f: 2909 pt = " Nx_Port"; 2910 break; 2911 case 0x81: 2912 pt = " F_port"; 2913 break; 2914 case 0x82: 2915 pt = " FL_Port"; 2916 break; 2917 case 0x84: 2918 pt = " E_port"; 2919 break; 2920 default: 2921 pt = " "; 2922 break; 2923 } 2924 2925 isp_prt(isp, ISP_LOGINFO, 2926 "%s Fabric Device @ PortID 0x%x", pt, clp->portid); 2927 2928 /* 2929 * If we don't have an initiator role we bail. 2930 * 2931 * We just use ISPASYNC_FABRIC_DEV for announcement purposes. 2932 */ 2933 2934 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) { 2935 break; 2936 } 2937 2938 /* 2939 * Is this entry for us? If so, we bail. 2940 */ 2941 2942 if (fcp->isp_portid == clp->portid) { 2943 break; 2944 } 2945 2946 /* 2947 * Else, the default policy is to find room for it in 2948 * our local port database. Later, when we execute 2949 * the call to isp_pdb_sync either this newly arrived 2950 * or already logged in device will be (re)announced. 2951 */ 2952 2953 if (fcp->isp_topo == TOPO_FL_PORT) 2954 base = FC_SNS_ID+1; 2955 else 2956 base = 0; 2957 2958 if (fcp->isp_topo == TOPO_N_PORT) 2959 lim = 1; 2960 else 2961 lim = MAX_FC_TARG; 2962 2963 /* 2964 * Is it already in our list? 2965 */ 2966 for (target = base; target < lim; target++) { 2967 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2968 continue; 2969 } 2970 lp = &fcp->portdb[target]; 2971 if (lp->port_wwn == clp->port_wwn && 2972 lp->node_wwn == clp->node_wwn) { 2973 lp->fabric_dev = 1; 2974 break; 2975 } 2976 } 2977 if (target < lim) { 2978 break; 2979 } 2980 for (target = base; target < lim; target++) { 2981 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 2982 continue; 2983 } 2984 lp = &fcp->portdb[target]; 2985 if (lp->port_wwn == 0) { 2986 break; 2987 } 2988 } 2989 if (target == lim) { 2990 isp_prt(isp, ISP_LOGWARN, 2991 "out of space for fabric devices"); 2992 break; 2993 } 2994 lp->port_type = clp->port_type; 2995 lp->fc4_type = clp->fc4_type; 2996 lp->node_wwn = clp->node_wwn; 2997 lp->port_wwn = clp->port_wwn; 2998 lp->portid = clp->portid; 2999 lp->fabric_dev = 1; 3000 break; 3001 } 3002 #ifdef ISP_TARGET_MODE 3003 case ISPASYNC_TARGET_MESSAGE: 3004 { 3005 tmd_msg_t *mp = arg; 3006 isp_prt(isp, ISP_LOGALL, 3007 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x", 3008 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt, 3009 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval, 3010 mp->nt_msg[0]); 3011 break; 3012 } 3013 case ISPASYNC_TARGET_EVENT: 3014 { 3015 tmd_event_t *ep = arg; 3016 isp_prt(isp, ISP_LOGALL, 3017 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event); 3018 break; 3019 } 3020 case ISPASYNC_TARGET_ACTION: 3021 switch (((isphdr_t *)arg)->rqs_entry_type) { 3022 default: 3023 isp_prt(isp, ISP_LOGWARN, 3024 "event 0x%x for unhandled target action", 3025 ((isphdr_t *)arg)->rqs_entry_type); 3026 break; 3027 case RQSTYPE_NOTIFY: 3028 if (IS_SCSI(isp)) { 3029 rv = isp_handle_platform_notify_scsi(isp, 3030 (in_entry_t *) arg); 3031 } else { 3032 rv = isp_handle_platform_notify_fc(isp, 3033 (in_fcentry_t *) arg); 3034 } 3035 break; 3036 case RQSTYPE_ATIO: 3037 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 3038 break; 3039 case RQSTYPE_ATIO2: 3040 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3041 break; 3042 case RQSTYPE_CTIO2: 3043 case RQSTYPE_CTIO: 3044 rv = isp_handle_platform_ctio(isp, arg); 3045 break; 3046 case RQSTYPE_ENABLE_LUN: 3047 case RQSTYPE_MODIFY_LUN: 3048 if (IS_DUALBUS(isp)) { 3049 bus = 3050 GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd); 3051 } else { 3052 bus = 0; 3053 } 3054 isp_cv_signal_rqe(isp, bus, 3055 ((lun_entry_t *)arg)->le_status); 3056 break; 3057 } 3058 break; 3059 #endif 3060 case ISPASYNC_FW_CRASH: 3061 { 3062 u_int16_t mbox1, mbox6; 3063 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3064 if (IS_DUALBUS(isp)) { 3065 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3066 } else { 3067 mbox6 = 0; 3068 } 3069 isp_prt(isp, ISP_LOGERR, 3070 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3071 mbox6, mbox1); 3072 #ifdef ISP_FW_CRASH_DUMP 3073 /* 3074 * XXX: really need a thread to do this right. 3075 */ 3076 if (IS_FC(isp)) { 3077 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3078 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3079 isp_freeze_loopdown(isp, "f/w crash"); 3080 isp_fw_dump(isp); 3081 } 3082 isp_reinit(isp); 3083 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3084 #endif 3085 break; 3086 } 3087 case ISPASYNC_UNHANDLED_RESPONSE: 3088 break; 3089 default: 3090 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3091 break; 3092 } 3093 return (rv); 3094 } 3095 3096 3097 /* 3098 * Locks are held before coming here. 3099 */ 3100 void 3101 isp_uninit(struct ispsoftc *isp) 3102 { 3103 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 3104 DISABLE_INTS(isp); 3105 } 3106 3107 void 3108 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...) 3109 { 3110 va_list ap; 3111 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3112 return; 3113 } 3114 printf("%s: ", device_get_nameunit(isp->isp_dev)); 3115 va_start(ap, fmt); 3116 vprintf(fmt, ap); 3117 va_end(ap); 3118 printf("\n"); 3119 } 3120