1 /*- 2 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 3 * 4 * Copyright (c) 1997-2006 by Matthew Jacob 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <dev/isp/isp_freebsd.h> 33 #include <sys/unistd.h> 34 #include <sys/kthread.h> 35 #include <machine/stdarg.h> /* for use by isp_prt below */ 36 #include <sys/conf.h> 37 #include <sys/module.h> 38 #include <sys/ioccom.h> 39 #include <dev/isp/isp_ioctl.h> 40 41 42 MODULE_VERSION(isp, 1); 43 MODULE_DEPEND(isp, cam, 1, 1, 1); 44 int isp_announced = 0; 45 ispfwfunc *isp_get_firmware_p = NULL; 46 47 static d_ioctl_t ispioctl; 48 static void isp_intr_enable(void *); 49 static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); 50 static void isp_poll(struct cam_sim *); 51 static timeout_t isp_watchdog; 52 static void isp_kthread(void *); 53 static void isp_action(struct cam_sim *, union ccb *); 54 55 56 static struct cdevsw isp_cdevsw = { 57 .d_version = D_VERSION, 58 .d_flags = D_NEEDGIANT, 59 .d_ioctl = ispioctl, 60 .d_name = "isp", 61 }; 62 63 static struct ispsoftc *isplist = NULL; 64 65 void 66 isp_attach(struct ispsoftc *isp) 67 { 68 int primary, secondary; 69 struct ccb_setasync csa; 70 struct cam_devq *devq; 71 struct cam_sim *sim; 72 struct cam_path *path; 73 74 /* 75 * Establish (in case of 12X0) which bus is the primary. 76 */ 77 78 primary = 0; 79 secondary = 1; 80 81 /* 82 * Create the device queue for our SIM(s). 83 */ 84 devq = cam_simq_alloc(isp->isp_maxcmds); 85 if (devq == NULL) { 86 return; 87 } 88 89 /* 90 * Construct our SIM entry. 
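	 * (A second SIM is constructed further below for dual-bus adapters.)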
91 */ 92 ISPLOCK_2_CAMLOCK(isp); 93 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 94 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 95 if (sim == NULL) { 96 cam_simq_free(devq); 97 CAMLOCK_2_ISPLOCK(isp); 98 return; 99 } 100 CAMLOCK_2_ISPLOCK(isp); 101 102 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 103 isp->isp_osinfo.ehook.ich_arg = isp; 104 ISPLOCK_2_CAMLOCK(isp); 105 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 106 cam_sim_free(sim, TRUE); 107 CAMLOCK_2_ISPLOCK(isp); 108 isp_prt(isp, ISP_LOGERR, 109 "could not establish interrupt enable hook"); 110 return; 111 } 112 113 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 114 cam_sim_free(sim, TRUE); 115 CAMLOCK_2_ISPLOCK(isp); 116 return; 117 } 118 119 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 120 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 121 xpt_bus_deregister(cam_sim_path(sim)); 122 cam_sim_free(sim, TRUE); 123 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 124 CAMLOCK_2_ISPLOCK(isp); 125 return; 126 } 127 128 xpt_setup_ccb(&csa.ccb_h, path, 5); 129 csa.ccb_h.func_code = XPT_SASYNC_CB; 130 csa.event_enable = AC_LOST_DEVICE; 131 csa.callback = isp_cam_async; 132 csa.callback_arg = sim; 133 xpt_action((union ccb *)&csa); 134 CAMLOCK_2_ISPLOCK(isp); 135 isp->isp_sim = sim; 136 isp->isp_path = path; 137 /* 138 * Create a kernel thread for fibre channel instances. We 139 * don't have dual channel FC cards. 140 */ 141 if (IS_FC(isp)) { 142 ISPLOCK_2_CAMLOCK(isp); 143 /* XXX: LOCK VIOLATION */ 144 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); 145 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 146 RFHIGHPID, 0, "%s: fc_thrd", 147 device_get_nameunit(isp->isp_dev))) { 148 xpt_bus_deregister(cam_sim_path(sim)); 149 cam_sim_free(sim, TRUE); 150 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 151 CAMLOCK_2_ISPLOCK(isp); 152 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 153 return; 154 } 155 CAMLOCK_2_ISPLOCK(isp); 156 } 157 158 159 /* 160 * If we have a second channel, construct SIM entry for that. 
161 */ 162 if (IS_DUALBUS(isp)) { 163 ISPLOCK_2_CAMLOCK(isp); 164 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 165 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 166 if (sim == NULL) { 167 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 168 xpt_free_path(isp->isp_path); 169 cam_simq_free(devq); 170 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 171 return; 172 } 173 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 174 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 175 xpt_free_path(isp->isp_path); 176 cam_sim_free(sim, TRUE); 177 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 178 CAMLOCK_2_ISPLOCK(isp); 179 return; 180 } 181 182 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 183 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 184 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 185 xpt_free_path(isp->isp_path); 186 xpt_bus_deregister(cam_sim_path(sim)); 187 cam_sim_free(sim, TRUE); 188 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 189 CAMLOCK_2_ISPLOCK(isp); 190 return; 191 } 192 193 xpt_setup_ccb(&csa.ccb_h, path, 5); 194 csa.ccb_h.func_code = XPT_SASYNC_CB; 195 csa.event_enable = AC_LOST_DEVICE; 196 csa.callback = isp_cam_async; 197 csa.callback_arg = sim; 198 xpt_action((union ccb *)&csa); 199 CAMLOCK_2_ISPLOCK(isp); 200 isp->isp_sim2 = sim; 201 isp->isp_path2 = path; 202 } 203 204 /* 205 * Create device nodes 206 */ 207 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 208 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 209 210 if (isp->isp_role != ISP_ROLE_NONE) { 211 isp->isp_state = ISP_RUNSTATE; 212 ENABLE_INTS(isp); 213 } 214 if (isplist == NULL) { 215 isplist = isp; 216 } else { 217 struct ispsoftc *tmp = isplist; 218 while (tmp->isp_osinfo.next) { 219 tmp = tmp->isp_osinfo.next; 220 } 221 tmp->isp_osinfo.next = isp; 222 } 223 224 } 225 226 static __inline void 227 isp_freeze_loopdown(struct ispsoftc *isp, char *msg) 228 { 229 if (isp->isp_osinfo.simqfrozen == 0) { 230 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 231 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 232 ISPLOCK_2_CAMLOCK(isp); 233 xpt_freeze_simq(isp->isp_sim, 1); 234 CAMLOCK_2_ISPLOCK(isp); 235 } else { 236 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 237 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 238 } 239 } 240 241 static int 242 ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td) 243 { 244 struct ispsoftc *isp; 245 int nr, retval = ENOTTY; 246 247 isp = isplist; 248 while (isp) { 249 if (minor(dev) == device_get_unit(isp->isp_dev)) { 250 break; 251 } 252 isp = isp->isp_osinfo.next; 253 } 254 if (isp == NULL) 255 return (ENXIO); 256 257 switch (c) { 258 #ifdef ISP_FW_CRASH_DUMP 259 case ISP_GET_FW_CRASH_DUMP: 260 { 261 uint16_t *ptr = FCPARAM(isp)->isp_dump_data; 262 size_t sz; 263 264 retval = 0; 265 if (IS_2200(isp)) 266 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 267 else 268 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 269 ISP_LOCK(isp); 270 if (ptr && *ptr) { 271 void *uaddr = *((void **) addr); 272 if (copyout(ptr, uaddr, sz)) { 273 retval = EFAULT; 274 } else { 275 *ptr = 0; 276 } 277 } else { 278 retval = ENXIO; 279 } 280 ISP_UNLOCK(isp); 281 break; 282 } 283 284 case ISP_FORCE_CRASH_DUMP: 285 ISP_LOCK(isp); 286 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)"); 287 isp_fw_dump(isp); 288 isp_reinit(isp); 289 ISP_UNLOCK(isp); 290 retval = 0; 291 break; 292 #endif 293 case ISP_SDBLEV: 294 { 295 int olddblev = isp->isp_dblev; 296 isp->isp_dblev = 
*(int *)addr; 297 *(int *)addr = olddblev; 298 retval = 0; 299 break; 300 } 301 case ISP_GETROLE: 302 *(int *)addr = isp->isp_role; 303 retval = 0; 304 break; 305 case ISP_SETROLE: 306 nr = *(int *)addr; 307 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 308 retval = EINVAL; 309 break; 310 } 311 *(int *)addr = isp->isp_role; 312 isp->isp_role = nr; 313 /* FALLTHROUGH */ 314 case ISP_RESETHBA: 315 ISP_LOCK(isp); 316 isp_reinit(isp); 317 ISP_UNLOCK(isp); 318 retval = 0; 319 break; 320 case ISP_RESCAN: 321 if (IS_FC(isp)) { 322 ISP_LOCK(isp); 323 if (isp_fc_runstate(isp, 5 * 1000000)) { 324 retval = EIO; 325 } else { 326 retval = 0; 327 } 328 ISP_UNLOCK(isp); 329 } 330 break; 331 case ISP_FC_LIP: 332 if (IS_FC(isp)) { 333 ISP_LOCK(isp); 334 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 335 retval = EIO; 336 } else { 337 retval = 0; 338 } 339 ISP_UNLOCK(isp); 340 } 341 break; 342 case ISP_FC_GETDINFO: 343 { 344 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 345 struct lportdb *lp; 346 347 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 348 retval = EINVAL; 349 break; 350 } 351 ISP_LOCK(isp); 352 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 353 if (lp->valid) { 354 ifc->role = lp->roles; 355 ifc->loopid = lp->loopid; 356 ifc->portid = lp->portid; 357 ifc->node_wwn = lp->node_wwn; 358 ifc->port_wwn = lp->port_wwn; 359 retval = 0; 360 } else { 361 retval = ENODEV; 362 } 363 ISP_UNLOCK(isp); 364 break; 365 } 366 case ISP_GET_STATS: 367 { 368 isp_stats_t *sp = (isp_stats_t *) addr; 369 370 MEMZERO(sp, sizeof (*sp)); 371 sp->isp_stat_version = ISP_STATS_VERSION; 372 sp->isp_type = isp->isp_type; 373 sp->isp_revision = isp->isp_revision; 374 ISP_LOCK(isp); 375 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 376 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 377 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 378 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 379 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 380 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 381 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 382 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 383 ISP_UNLOCK(isp); 384 retval = 0; 385 break; 386 } 387 case ISP_CLR_STATS: 388 ISP_LOCK(isp); 389 isp->isp_intcnt = 0; 390 isp->isp_intbogus = 0; 391 isp->isp_intmboxc = 0; 392 isp->isp_intoasync = 0; 393 isp->isp_rsltccmplt = 0; 394 isp->isp_fphccmplt = 0; 395 isp->isp_rscchiwater = 0; 396 isp->isp_fpcchiwater = 0; 397 ISP_UNLOCK(isp); 398 retval = 0; 399 break; 400 case ISP_FC_GETHINFO: 401 { 402 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 403 MEMZERO(hba, sizeof (*hba)); 404 ISP_LOCK(isp); 405 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 406 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 407 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 408 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 409 hba->fc_scsi_supported = 1; 410 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 411 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 412 hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn; 413 hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn; 414 hba->active_node_wwn = ISP_NODEWWN(isp); 415 hba->active_port_wwn = ISP_PORTWWN(isp); 416 ISP_UNLOCK(isp); 417 retval = 0; 418 break; 419 } 420 case ISP_GET_FC_PARAM: 421 { 422 struct isp_fc_param *f = (struct isp_fc_param *) addr; 423 424 if (!IS_FC(isp)) { 425 retval = EINVAL; 426 break; 427 } 428 f->parameter = 0; 429 if (strcmp(f->param_name, "framelength") == 0) { 430 f->parameter = FCPARAM(isp)->isp_maxfrmlen; 431 retval = 0; 432 break; 433 } 434 if 
(strcmp(f->param_name, "exec_throttle") == 0) {
			f->parameter = FCPARAM(isp)->isp_execthrottle;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
				f->parameter = 1;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			f->parameter = FCPARAM(isp)->isp_loopid;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	case ISP_SET_FC_PARAM:
	{
		struct isp_fc_param *f = (struct isp_fc_param *) addr;
		uint32_t param = f->parameter;

		if (!IS_FC(isp)) {
			retval = EINVAL;
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			if (param != 512 && param != 1024 && param != 2048) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_maxfrmlen = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			if (param < 16 || param > 255) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_execthrottle = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (param != 0 && param != 1) {
				retval = EINVAL;
				break;
			}
			if (param) {
				FCPARAM(isp)->isp_fwoptions |=
				    ICBOPT_FULL_DUPLEX;
			} else {
				FCPARAM(isp)->isp_fwoptions &=
				    ~ICBOPT_FULL_DUPLEX;
			}
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			if (param < 0 || param > 125) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_loopid = param;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	case ISP_TSK_MGMT:
	{
		int needmarker;
		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
		uint16_t loopid;
		mbreg_t mbs;

		if (IS_SCSI(isp)) {
			retval = EINVAL;
			break;
		}

		memset(&mbs, 0, sizeof (mbs));
		needmarker = retval = 0;
		loopid = fct->loopid;
		if (IS_2KLOGIN(isp) == 0) {
			loopid <<= 8;
		}
		switch (fct->action) {
		case CLEAR_ACA:
			mbs.param[0] = MBOX_CLEAR_ACA;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			break;
		case TARGET_RESET:
			mbs.param[0] = MBOX_TARGET_RESET;
			mbs.param[1] = loopid;
			needmarker = 1;
			break;
		case LUN_RESET:
			mbs.param[0] = MBOX_LUN_RESET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case CLEAR_TASK_SET:
			mbs.param[0] = MBOX_CLEAR_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case ABORT_TASK_SET:
			mbs.param[0] = MBOX_ABORT_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		default:
			retval = EINVAL;
			break;
		}
		if (retval == 0) {
			ISP_LOCK(isp);
			if (needmarker) {
				isp->isp_sendmarker |= 1;
			}
			retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
			ISP_UNLOCK(isp);
			if (retval)
				retval = EIO;
		}
		break;
	}
	default:
		break;
	}
	return (retval);
}

static void
isp_intr_enable(void *arg)
{
	struct ispsoftc *isp = arg;
	if (isp->isp_role != ISP_ROLE_NONE) {
		ENABLE_INTS(isp);
#if 0
		isp->isp_osinfo.intsok = 1;
#endif
	}
	/* Release our hook so that the boot can continue.
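	 * (The hook was established in isp_attach via config_intrhook_establish.)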
*/ 588 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 589 } 590 591 /* 592 * Put the target mode functions here, because some are inlines 593 */ 594 595 #ifdef ISP_TARGET_MODE 596 597 static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t); 598 static __inline int are_any_luns_enabled(struct ispsoftc *, int); 599 static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t); 600 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *); 601 static __inline atio_private_data_t *isp_get_atpd(struct ispsoftc *, int); 602 static cam_status 603 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **); 604 static void destroy_lun_state(struct ispsoftc *, tstate_t *); 605 static int isp_en_lun(struct ispsoftc *, union ccb *); 606 static void isp_ledone(struct ispsoftc *, lun_entry_t *); 607 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *); 608 static timeout_t isp_refire_putback_atio; 609 static void isp_complete_ctio(union ccb *); 610 static void isp_target_putback_atio(union ccb *); 611 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *); 612 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *); 613 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *); 614 static int isp_handle_platform_ctio(struct ispsoftc *, void *); 615 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *); 616 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *); 617 618 static __inline int 619 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun) 620 { 621 tstate_t *tptr; 622 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 623 if (tptr == NULL) { 624 return (0); 625 } 626 do { 627 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { 628 return (1); 629 } 630 } while ((tptr = tptr->next) != NULL); 631 return (0); 632 } 633 634 static __inline int 635 are_any_luns_enabled(struct ispsoftc *isp, int port) 636 { 637 int lo, hi; 638 if (IS_DUALBUS(isp)) { 639 lo = (port * (LUN_HASH_SIZE >> 1)); 640 hi = lo + (LUN_HASH_SIZE >> 1); 641 } else { 642 lo = 0; 643 hi = LUN_HASH_SIZE; 644 } 645 for (lo = 0; lo < hi; lo++) { 646 if (isp->isp_osinfo.lun_hash[lo]) { 647 return (1); 648 } 649 } 650 return (0); 651 } 652 653 static __inline tstate_t * 654 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun) 655 { 656 tstate_t *tptr = NULL; 657 658 if (lun == CAM_LUN_WILDCARD) { 659 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 660 tptr = &isp->isp_osinfo.tsdflt[bus]; 661 tptr->hold++; 662 return (tptr); 663 } 664 return (NULL); 665 } else { 666 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 667 if (tptr == NULL) { 668 return (NULL); 669 } 670 } 671 672 do { 673 if (tptr->lun == lun && tptr->bus == bus) { 674 tptr->hold++; 675 return (tptr); 676 } 677 } while ((tptr = tptr->next) != NULL); 678 return (tptr); 679 } 680 681 static __inline void 682 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr) 683 { 684 if (tptr->hold) 685 tptr->hold--; 686 } 687 688 static __inline atio_private_data_t * 689 isp_get_atpd(struct ispsoftc *isp, int tag) 690 { 691 atio_private_data_t *atp; 692 for (atp = isp->isp_osinfo.atpdp; 693 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 694 if (atp->tag == tag) 695 return (atp); 696 } 697 return (NULL); 698 } 699 700 static cam_status 701 create_lun_state(struct ispsoftc *isp, int bus, 702 struct cam_path *path, tstate_t **rslt) 703 { 704 cam_status status; 705 lun_id_t lun; 706 int 
hfx; 707 tstate_t *tptr, *new; 708 709 lun = xpt_path_lun_id(path); 710 if (lun < 0) { 711 return (CAM_LUN_INVALID); 712 } 713 if (is_lun_enabled(isp, bus, lun)) { 714 return (CAM_LUN_ALRDY_ENA); 715 } 716 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 717 if (new == NULL) { 718 return (CAM_RESRC_UNAVAIL); 719 } 720 721 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 722 xpt_path_target_id(path), xpt_path_lun_id(path)); 723 if (status != CAM_REQ_CMP) { 724 free(new, M_DEVBUF); 725 return (status); 726 } 727 new->bus = bus; 728 new->lun = lun; 729 SLIST_INIT(&new->atios); 730 SLIST_INIT(&new->inots); 731 new->hold = 1; 732 733 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 734 tptr = isp->isp_osinfo.lun_hash[hfx]; 735 if (tptr == NULL) { 736 isp->isp_osinfo.lun_hash[hfx] = new; 737 } else { 738 while (tptr->next) 739 tptr = tptr->next; 740 tptr->next = new; 741 } 742 *rslt = new; 743 return (CAM_REQ_CMP); 744 } 745 746 static __inline void 747 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr) 748 { 749 int hfx; 750 tstate_t *lw, *pw; 751 752 if (tptr->hold) { 753 return; 754 } 755 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 756 pw = isp->isp_osinfo.lun_hash[hfx]; 757 if (pw == NULL) { 758 return; 759 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 760 isp->isp_osinfo.lun_hash[hfx] = pw->next; 761 } else { 762 lw = pw; 763 pw = lw->next; 764 while (pw) { 765 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 766 lw->next = pw->next; 767 break; 768 } 769 lw = pw; 770 pw = pw->next; 771 } 772 if (pw == NULL) { 773 return; 774 } 775 } 776 free(tptr, M_DEVBUF); 777 } 778 779 /* 780 * Enable luns. 781 */ 782 static int 783 isp_en_lun(struct ispsoftc *isp, union ccb *ccb) 784 { 785 struct ccb_en_lun *cel = &ccb->cel; 786 tstate_t *tptr; 787 uint32_t seq; 788 int bus, cmd, av, wildcard, tm_on; 789 lun_id_t lun; 790 target_id_t tgt; 791 792 bus = XS_CHANNEL(ccb); 793 if (bus > 1) { 794 xpt_print_path(ccb->ccb_h.path); 795 printf("illegal bus %d\n", bus); 796 ccb->ccb_h.status = CAM_PATH_INVALID; 797 return (-1); 798 } 799 tgt = ccb->ccb_h.target_id; 800 lun = ccb->ccb_h.target_lun; 801 802 isp_prt(isp, ISP_LOGTDEBUG0, 803 "isp_en_lun: %sabling lun 0x%x on channel %d", 804 cel->enable? "en" : "dis", lun, bus); 805 806 807 if ((lun != CAM_LUN_WILDCARD) && 808 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 809 ccb->ccb_h.status = CAM_LUN_INVALID; 810 return (-1); 811 } 812 813 if (IS_SCSI(isp)) { 814 sdparam *sdp = isp->isp_param; 815 sdp += bus; 816 if (tgt != CAM_TARGET_WILDCARD && 817 tgt != sdp->isp_initiator_id) { 818 ccb->ccb_h.status = CAM_TID_INVALID; 819 return (-1); 820 } 821 } else { 822 /* 823 * There's really no point in doing this yet w/o multi-tid 824 * capability. Even then, it's problematic. 825 */ 826 #if 0 827 if (tgt != CAM_TARGET_WILDCARD && 828 tgt != FCPARAM(isp)->isp_iid) { 829 ccb->ccb_h.status = CAM_TID_INVALID; 830 return (-1); 831 } 832 #endif 833 /* 834 * This is as a good a place as any to check f/w capabilities. 835 */ 836 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) { 837 isp_prt(isp, ISP_LOGERR, 838 "firmware does not support target mode"); 839 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 840 return (-1); 841 } 842 /* 843 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 844 * XXX: dorks with our already fragile enable/disable code. 
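		 * XXX: (non-SCCLUN firmware is simply rejected just below)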
845 */ 846 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 847 isp_prt(isp, ISP_LOGERR, 848 "firmware not SCCLUN capable"); 849 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 850 return (-1); 851 } 852 } 853 854 if (tgt == CAM_TARGET_WILDCARD) { 855 if (lun == CAM_LUN_WILDCARD) { 856 wildcard = 1; 857 } else { 858 ccb->ccb_h.status = CAM_LUN_INVALID; 859 return (-1); 860 } 861 } else { 862 wildcard = 0; 863 } 864 865 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0; 866 867 /* 868 * Next check to see whether this is a target/lun wildcard action. 869 * 870 * If so, we know that we can accept commands for luns that haven't 871 * been enabled yet and send them upstream. Otherwise, we have to 872 * handle them locally (if we see them at all). 873 */ 874 875 if (wildcard) { 876 tptr = &isp->isp_osinfo.tsdflt[bus]; 877 if (cel->enable) { 878 if (tm_on) { 879 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 880 return (-1); 881 } 882 ccb->ccb_h.status = 883 xpt_create_path(&tptr->owner, NULL, 884 xpt_path_path_id(ccb->ccb_h.path), 885 xpt_path_target_id(ccb->ccb_h.path), 886 xpt_path_lun_id(ccb->ccb_h.path)); 887 if (ccb->ccb_h.status != CAM_REQ_CMP) { 888 return (-1); 889 } 890 SLIST_INIT(&tptr->atios); 891 SLIST_INIT(&tptr->inots); 892 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 893 } else { 894 if (tm_on == 0) { 895 ccb->ccb_h.status = CAM_REQ_CMP; 896 return (-1); 897 } 898 if (tptr->hold) { 899 ccb->ccb_h.status = CAM_SCSI_BUSY; 900 return (-1); 901 } 902 xpt_free_path(tptr->owner); 903 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 904 } 905 } 906 907 /* 908 * Now check to see whether this bus needs to be 909 * enabled/disabled with respect to target mode. 910 */ 911 av = bus << 31; 912 if (cel->enable && tm_on == 0) { 913 av |= ENABLE_TARGET_FLAG; 914 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 915 if (av) { 916 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 917 if (wildcard) { 918 isp->isp_osinfo.tmflags[bus] &= 919 ~TM_WILDCARD_ENABLED; 920 xpt_free_path(tptr->owner); 921 } 922 return (-1); 923 } 924 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 925 isp_prt(isp, ISP_LOGINFO, 926 "Target Mode enabled on channel %d", bus); 927 } else if (cel->enable == 0 && tm_on && wildcard) { 928 if (are_any_luns_enabled(isp, bus)) { 929 ccb->ccb_h.status = CAM_SCSI_BUSY; 930 return (-1); 931 } 932 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 933 if (av) { 934 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 935 return (-1); 936 } 937 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 938 isp_prt(isp, ISP_LOGINFO, 939 "Target Mode disabled on channel %d", bus); 940 } 941 942 if (wildcard) { 943 ccb->ccb_h.status = CAM_REQ_CMP; 944 return (-1); 945 } 946 947 /* 948 * Find an empty slot 949 */ 950 for (seq = 0; seq < NLEACT; seq++) { 951 if (isp->isp_osinfo.leact[seq] == 0) { 952 break; 953 } 954 } 955 if (seq >= NLEACT) { 956 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 957 return (-1); 958 959 } 960 isp->isp_osinfo.leact[seq] = ccb; 961 962 if (cel->enable) { 963 ccb->ccb_h.status = 964 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 965 if (ccb->ccb_h.status != CAM_REQ_CMP) { 966 isp->isp_osinfo.leact[seq] = 0; 967 return (-1); 968 } 969 } else { 970 tptr = get_lun_statep(isp, bus, lun); 971 if (tptr == NULL) { 972 ccb->ccb_h.status = CAM_LUN_INVALID; 973 return (-1); 974 } 975 } 976 977 if (cel->enable) { 978 int c, n, ulun = lun; 979 980 cmd = RQSTYPE_ENABLE_LUN; 981 c = DFLT_CMND_CNT; 982 n = DFLT_INOT_CNT; 983 if (IS_FC(isp) && lun != 0) { 984 cmd = RQSTYPE_MODIFY_LUN; 985 n = 0; 
986 /* 987 * For SCC firmware, we only deal with setting 988 * (enabling or modifying) lun 0. 989 */ 990 ulun = 0; 991 } 992 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 993 rls_lun_statep(isp, tptr); 994 ccb->ccb_h.status = CAM_REQ_INPROG; 995 return (seq); 996 } 997 } else { 998 int c, n, ulun = lun; 999 1000 cmd = -RQSTYPE_MODIFY_LUN; 1001 c = DFLT_CMND_CNT; 1002 n = DFLT_INOT_CNT; 1003 if (IS_FC(isp) && lun != 0) { 1004 n = 0; 1005 /* 1006 * For SCC firmware, we only deal with setting 1007 * (enabling or modifying) lun 0. 1008 */ 1009 ulun = 0; 1010 } 1011 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1012 rls_lun_statep(isp, tptr); 1013 ccb->ccb_h.status = CAM_REQ_INPROG; 1014 return (seq); 1015 } 1016 } 1017 rls_lun_statep(isp, tptr); 1018 xpt_print_path(ccb->ccb_h.path); 1019 printf("isp_lun_cmd failed\n"); 1020 isp->isp_osinfo.leact[seq] = 0; 1021 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1022 return (-1); 1023 } 1024 1025 static void 1026 isp_ledone(struct ispsoftc *isp, lun_entry_t *lep) 1027 { 1028 const char lfmt[] = "lun %d now %sabled for target mode on channel %d"; 1029 union ccb *ccb; 1030 uint32_t seq; 1031 tstate_t *tptr; 1032 int av; 1033 struct ccb_en_lun *cel; 1034 1035 seq = lep->le_reserved - 1; 1036 if (seq >= NLEACT) { 1037 isp_prt(isp, ISP_LOGERR, 1038 "seq out of range (%u) in isp_ledone", seq); 1039 return; 1040 } 1041 ccb = isp->isp_osinfo.leact[seq]; 1042 if (ccb == 0) { 1043 isp_prt(isp, ISP_LOGERR, 1044 "no ccb for seq %u in isp_ledone", seq); 1045 return; 1046 } 1047 cel = &ccb->cel; 1048 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); 1049 if (tptr == NULL) { 1050 xpt_print_path(ccb->ccb_h.path); 1051 printf("null tptr in isp_ledone\n"); 1052 isp->isp_osinfo.leact[seq] = 0; 1053 return; 1054 } 1055 1056 if (lep->le_status != LUN_OK) { 1057 xpt_print_path(ccb->ccb_h.path); 1058 printf("ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status); 1059 err: 1060 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1061 xpt_print_path(ccb->ccb_h.path); 1062 rls_lun_statep(isp, tptr); 1063 isp->isp_osinfo.leact[seq] = 0; 1064 ISPLOCK_2_CAMLOCK(isp); 1065 xpt_done(ccb); 1066 CAMLOCK_2_ISPLOCK(isp); 1067 return; 1068 } else { 1069 isp_prt(isp, ISP_LOGTDEBUG0, 1070 "isp_ledone: ENABLE/MODIFY done okay"); 1071 } 1072 1073 1074 if (cel->enable) { 1075 ccb->ccb_h.status = CAM_REQ_CMP; 1076 isp_prt(isp, /* ISP_LOGINFO */ ISP_LOGALL, lfmt, 1077 XS_LUN(ccb), "en", XS_CHANNEL(ccb)); 1078 rls_lun_statep(isp, tptr); 1079 isp->isp_osinfo.leact[seq] = 0; 1080 ISPLOCK_2_CAMLOCK(isp); 1081 xpt_done(ccb); 1082 CAMLOCK_2_ISPLOCK(isp); 1083 return; 1084 } 1085 1086 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) { 1087 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb), 1088 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) { 1089 xpt_print_path(ccb->ccb_h.path); 1090 printf("isp_ledone: isp_lun_cmd failed\n"); 1091 goto err; 1092 } 1093 rls_lun_statep(isp, tptr); 1094 return; 1095 } 1096 1097 isp_prt(isp, ISP_LOGINFO, lfmt, XS_LUN(ccb), "dis", XS_CHANNEL(ccb)); 1098 rls_lun_statep(isp, tptr); 1099 destroy_lun_state(isp, tptr); 1100 ccb->ccb_h.status = CAM_REQ_CMP; 1101 isp->isp_osinfo.leact[seq] = 0; 1102 ISPLOCK_2_CAMLOCK(isp); 1103 xpt_done(ccb); 1104 CAMLOCK_2_ISPLOCK(isp); 1105 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) { 1106 int bus = XS_CHANNEL(ccb); 1107 av = bus << 31; 1108 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1109 if (av) { 1110 isp_prt(isp, ISP_LOGWARN, 1111 "disable target mode on channel %d failed", bus); 1112 } else { 
1113 isp_prt(isp, ISP_LOGINFO, 1114 "Target Mode disabled on channel %d", bus); 1115 } 1116 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1117 } 1118 } 1119 1120 1121 static cam_status 1122 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb) 1123 { 1124 tstate_t *tptr; 1125 struct ccb_hdr_slist *lp; 1126 struct ccb_hdr *curelm; 1127 int found, *ctr; 1128 union ccb *accb = ccb->cab.abort_ccb; 1129 1130 isp_prt(isp, ISP_LOGTDEBUG0, "aborting ccb %p", accb); 1131 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 1132 int badpath = 0; 1133 if (IS_FC(isp) && (accb->ccb_h.target_id != 1134 ((fcparam *) isp->isp_param)->isp_loopid)) { 1135 badpath = 1; 1136 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 1137 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 1138 badpath = 1; 1139 } 1140 if (badpath) { 1141 /* 1142 * Being restrictive about target ids is really about 1143 * making sure we're aborting for the right multi-tid 1144 * path. This doesn't really make much sense at present. 1145 */ 1146 #if 0 1147 return (CAM_PATH_INVALID); 1148 #endif 1149 } 1150 } 1151 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 1152 if (tptr == NULL) { 1153 isp_prt(isp, ISP_LOGTDEBUG0, 1154 "isp_abort_tgt_ccb: can't get statep"); 1155 return (CAM_PATH_INVALID); 1156 } 1157 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1158 lp = &tptr->atios; 1159 ctr = &tptr->atio_count; 1160 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 1161 lp = &tptr->inots; 1162 ctr = &tptr->inot_count; 1163 } else { 1164 rls_lun_statep(isp, tptr); 1165 isp_prt(isp, ISP_LOGTDEBUG0, 1166 "isp_abort_tgt_ccb: bad func %d\n", accb->ccb_h.func_code); 1167 return (CAM_UA_ABORT); 1168 } 1169 curelm = SLIST_FIRST(lp); 1170 found = 0; 1171 if (curelm == &accb->ccb_h) { 1172 found = 1; 1173 SLIST_REMOVE_HEAD(lp, sim_links.sle); 1174 } else { 1175 while(curelm != NULL) { 1176 struct ccb_hdr *nextelm; 1177 1178 nextelm = SLIST_NEXT(curelm, sim_links.sle); 1179 if (nextelm == &accb->ccb_h) { 1180 found = 1; 1181 SLIST_NEXT(curelm, sim_links.sle) = 1182 SLIST_NEXT(nextelm, sim_links.sle); 1183 break; 1184 } 1185 curelm = nextelm; 1186 } 1187 } 1188 rls_lun_statep(isp, tptr); 1189 if (found) { 1190 (*ctr)--; 1191 accb->ccb_h.status = CAM_REQ_ABORTED; 1192 xpt_done(accb); 1193 return (CAM_REQ_CMP); 1194 } 1195 isp_prt(isp, ISP_LOGTDEBUG0, 1196 "isp_abort_tgt_ccb: CCB %p not found\n", ccb); 1197 return (CAM_PATH_INVALID); 1198 } 1199 1200 static cam_status 1201 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb) 1202 { 1203 void *qe; 1204 struct ccb_scsiio *cso = &ccb->csio; 1205 uint16_t *hp, save_handle; 1206 uint16_t nxti, optr; 1207 uint8_t local[QENTRY_LEN]; 1208 1209 1210 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1211 xpt_print_path(ccb->ccb_h.path); 1212 printf("Request Queue Overflow in isp_target_start_ctio\n"); 1213 return (CAM_RESRC_UNAVAIL); 1214 } 1215 bzero(local, QENTRY_LEN); 1216 1217 /* 1218 * We're either moving data or completing a command here. 
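	 * Fibre Channel cards get a CTIO2 entry; parallel SCSI cards get a CTIO.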
1219 */ 1220 1221 if (IS_FC(isp)) { 1222 atio_private_data_t *atp; 1223 ct2_entry_t *cto = (ct2_entry_t *) local; 1224 1225 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1226 cto->ct_header.rqs_entry_count = 1; 1227 cto->ct_iid = cso->init_id; 1228 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 1229 cto->ct_lun = ccb->ccb_h.target_lun; 1230 } 1231 1232 atp = isp_get_atpd(isp, cso->tag_id); 1233 if (atp == NULL) { 1234 isp_prt(isp, ISP_LOGERR, 1235 "cannot find private data adjunct for tag %x", 1236 cso->tag_id); 1237 return (-1); 1238 } 1239 1240 cto->ct_rxid = cso->tag_id; 1241 if (cso->dxfer_len == 0) { 1242 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1243 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1244 cto->ct_flags |= CT2_SENDSTATUS; 1245 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1246 cto->ct_resid = 1247 atp->orig_datalen - atp->bytes_xfered; 1248 if (cto->ct_resid < 0) { 1249 cto->rsp.m1.ct_scsi_status |= 1250 CT2_DATA_OVER; 1251 } else if (cto->ct_resid > 0) { 1252 cto->rsp.m1.ct_scsi_status |= 1253 CT2_DATA_UNDER; 1254 } 1255 } 1256 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1257 int m = min(cso->sense_len, MAXRESPLEN); 1258 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m); 1259 cto->rsp.m1.ct_senselen = m; 1260 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1261 } 1262 } else { 1263 cto->ct_flags |= CT2_FLAG_MODE0; 1264 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1265 cto->ct_flags |= CT2_DATA_IN; 1266 } else { 1267 cto->ct_flags |= CT2_DATA_OUT; 1268 } 1269 cto->ct_reloff = atp->bytes_xfered; 1270 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1271 cto->ct_flags |= CT2_SENDSTATUS; 1272 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1273 cto->ct_resid = 1274 atp->orig_datalen - 1275 (atp->bytes_xfered + cso->dxfer_len); 1276 if (cto->ct_resid < 0) { 1277 cto->rsp.m0.ct_scsi_status |= 1278 CT2_DATA_OVER; 1279 } else if (cto->ct_resid > 0) { 1280 cto->rsp.m0.ct_scsi_status |= 1281 CT2_DATA_UNDER; 1282 } 1283 } else { 1284 atp->last_xframt = cso->dxfer_len; 1285 } 1286 /* 1287 * If we're sending data and status back together, 1288 * we can't also send back sense data as well. 
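			 * The CAM_SEND_SENSE flag is cleared just below to reflect that.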
1289 */ 1290 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1291 } 1292 1293 if (cto->ct_flags & CT2_SENDSTATUS) { 1294 isp_prt(isp, ISP_LOGTDEBUG0, 1295 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1296 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1297 cso->dxfer_len, cto->ct_resid); 1298 cto->ct_flags |= CT2_CCINCR; 1299 atp->state = ATPD_STATE_LAST_CTIO; 1300 } else 1301 atp->state = ATPD_STATE_CTIO; 1302 cto->ct_timeout = 10; 1303 hp = &cto->ct_syshandle; 1304 } else { 1305 ct_entry_t *cto = (ct_entry_t *) local; 1306 1307 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1308 cto->ct_header.rqs_entry_count = 1; 1309 cto->ct_iid = cso->init_id; 1310 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1311 cto->ct_tgt = ccb->ccb_h.target_id; 1312 cto->ct_lun = ccb->ccb_h.target_lun; 1313 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1314 if (AT_HAS_TAG(cso->tag_id)) { 1315 cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id); 1316 cto->ct_flags |= CT_TQAE; 1317 } 1318 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1319 cto->ct_flags |= CT_NODISC; 1320 } 1321 if (cso->dxfer_len == 0) { 1322 cto->ct_flags |= CT_NO_DATA; 1323 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1324 cto->ct_flags |= CT_DATA_IN; 1325 } else { 1326 cto->ct_flags |= CT_DATA_OUT; 1327 } 1328 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1329 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1330 cto->ct_scsi_status = cso->scsi_status; 1331 cto->ct_resid = cso->resid; 1332 isp_prt(isp, ISP_LOGTDEBUG0, 1333 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1334 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1335 cso->tag_id); 1336 } 1337 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1338 cto->ct_timeout = 10; 1339 hp = &cto->ct_syshandle; 1340 } 1341 1342 if (isp_save_xs_tgt(isp, ccb, hp)) { 1343 xpt_print_path(ccb->ccb_h.path); 1344 printf("No XFLIST pointers for isp_target_start_ctio\n"); 1345 return (CAM_RESRC_UNAVAIL); 1346 } 1347 1348 1349 /* 1350 * Call the dma setup routines for this entry (and any subsequent 1351 * CTIOs) if there's data to move, and then tell the f/w it's got 1352 * new things to play with. As with isp_start's usage of DMA setup, 1353 * any swizzling is done in the machine dependent layer. Because 1354 * of this, we put the request onto the queue area first in native 1355 * format. 
	 */

	save_handle = *hp;

	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
	case CMD_QUEUED:
		ISP_ADD_REQUEST(isp, nxti);
		return (CAM_REQ_INPROG);

	case CMD_EAGAIN:
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		isp_destroy_tgt_handle(isp, save_handle);
		return (CAM_RESRC_UNAVAIL);

	default:
		isp_destroy_tgt_handle(isp, save_handle);
		return (XS_ERR(ccb));
	}
}

static void
isp_refire_putback_atio(void *arg)
{
	int s = splcam();
	isp_target_putback_atio(arg);
	splx(s);
}

static void
isp_target_putback_atio(union ccb *ccb)
{
	struct ispsoftc *isp;
	struct ccb_scsiio *cso;
	uint16_t nxti, optr;
	void *qe;

	isp = XS_ISP(ccb);

	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		(void) timeout(isp_refire_putback_atio, ccb, 10);
		isp_prt(isp, ISP_LOGWARN,
		    "isp_target_putback_atio: Request Queue Overflow");
		return;
	}
	bzero(qe, QENTRY_LEN);
	cso = &ccb->csio;
	if (IS_FC(isp)) {
		at2_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at2_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = cso->tag_id;
		at->at_iid = cso->ccb_h.target_id;
		isp_put_atio2(isp, at, qe);
	} else {
		at_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = cso->init_id;
		at->at_iid |= XS_CHANNEL(ccb) << 7;
		at->at_tgt = cso->ccb_h.target_id;
		at->at_lun = cso->ccb_h.target_lun;
		at->at_status = CT_OK;
		at->at_tag_val = AT_GET_TAG(cso->tag_id);
		at->at_handle = AT_GET_HANDLE(cso->tag_id);
		isp_put_atio(isp, at, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, nxti);
	isp_complete_ctio(ccb);
}

static void
isp_complete_ctio(union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}

/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

static int
isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status, bus, iswildcard;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
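		 * For now the command is just completed with BUSY status.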
1472 */ 1473 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1474 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1475 return (0); 1476 } 1477 if ((status & ~QLTM_SVALID) != AT_CDB) { 1478 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1479 status); 1480 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1481 return (0); 1482 } 1483 1484 bus = GET_BUS_VAL(aep->at_iid); 1485 tptr = get_lun_statep(isp, bus, aep->at_lun); 1486 if (tptr == NULL) { 1487 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1488 if (tptr == NULL) { 1489 isp_endcmd(isp, aep, 1490 SCSI_STATUS_CHECK_COND | ECMD_SVALID | 1491 (0x5 << 12) | (0x25 << 16), 0); 1492 return (0); 1493 } 1494 iswildcard = 1; 1495 } else { 1496 iswildcard = 0; 1497 } 1498 1499 if (tptr == NULL) { 1500 /* 1501 * Because we can't autofeed sense data back with 1502 * a command for parallel SCSI, we can't give back 1503 * a CHECK CONDITION. We'll give back a BUSY status 1504 * instead. This works out okay because the only 1505 * time we should, in fact, get this, is in the 1506 * case that somebody configured us without the 1507 * blackhole driver, so they get what they deserve. 1508 */ 1509 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1510 return (0); 1511 } 1512 1513 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1514 if (atiop == NULL) { 1515 /* 1516 * Because we can't autofeed sense data back with 1517 * a command for parallel SCSI, we can't give back 1518 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1519 * instead. This works out okay because the only time we 1520 * should, in fact, get this, is in the case that we've 1521 * run out of ATIOS. 1522 */ 1523 xpt_print_path(tptr->owner); 1524 isp_prt(isp, ISP_LOGWARN, 1525 "no ATIOS for lun %d from initiator %d on channel %d", 1526 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1527 if (aep->at_flags & AT_TQAE) 1528 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1529 else 1530 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1531 rls_lun_statep(isp, tptr); 1532 return (0); 1533 } 1534 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1535 tptr->atio_count--; 1536 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1537 aep->at_lun, tptr->atio_count); 1538 if (iswildcard) { 1539 atiop->ccb_h.target_id = aep->at_tgt; 1540 atiop->ccb_h.target_lun = aep->at_lun; 1541 } 1542 if (aep->at_flags & AT_NODISC) { 1543 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1544 } else { 1545 atiop->ccb_h.flags = 0; 1546 } 1547 1548 if (status & QLTM_SVALID) { 1549 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1550 atiop->sense_len = amt; 1551 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1552 } else { 1553 atiop->sense_len = 0; 1554 } 1555 1556 atiop->init_id = GET_IID_VAL(aep->at_iid); 1557 atiop->cdb_len = aep->at_cdblen; 1558 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1559 atiop->ccb_h.status = CAM_CDB_RECVD; 1560 /* 1561 * Construct a tag 'id' based upon tag value (which may be 0..255) 1562 * and the handle (which we have to preserve). 
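	 * AT_MAKE_TAGID below builds that combined identifier.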
	 */
	AT_MAKE_TAGID(atiop->tag_id, device_get_unit(isp->isp_dev), aep);
	if (aep->at_flags & AT_TQAE) {
		atiop->tag_action = aep->at_tag_type;
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
	    "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;
	atio_private_data_t *atp;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN,
		    "bogus atio (0x%x) leaked to platform", aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, 0, lun);
	if (tptr == NULL) {
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "[0x%x] no state pointer for lun %d", aep->at_rxid, lun);
		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
		if (tptr == NULL) {
			isp_endcmd(isp, aep,
			    SCSI_STATUS_CHECK_COND | ECMD_SVALID |
			    (0x5 << 12) | (0x25 << 16), 0);
			return (0);
		}
	}

	atp = isp_get_atpd(isp, 0);
	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL || atp == NULL) {

		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print_path(tptr->owner);
		isp_prt(isp, ISP_LOGWARN,
		    "no %s for lun %d from initiator %d",
		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		return (0);
	}
	atp->state = ATPD_STATE_ATIO;
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	tptr->atio_count--;
	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
	    lun, tptr->atio_count);

	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
		atiop->ccb_h.target_id =
		    ((fcparam *)isp->isp_param)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	/*
	 * We don't get 'suggested' sense data as we do with SCSI cards.
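	 * so the sense length is simply set to zero here.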
	 */
	atiop->sense_len = 0;

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
	case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;

	atp->tag = atiop->tag_id;
	atp->lun = lun;
	atp->orig_datalen = aep->at_datalen;
	atp->last_xframt = 0;
	atp->bytes_xfered = 0;
	atp->state = ATPD_STATE_CAM;
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done((union ccb*)atiop);

	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;
	uint16_t tval;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find adjunct for %x after I/O",
			    ct->ct_rxid);
			return (0);
		}
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			resid = ct->ct_resid;
			atp->bytes_xfered += (atp->last_xframt - resid);
			atp->last_xframt = 0;
		}
		if (sentstatus || !ok) {
			atp->tag = 0;
		}
		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    resid, sentstatus? "FIN" : "MID");
		tval = ct->ct_rxid;

		/* XXX: should really come after isp_complete_ctio */
		atp->state = ATPD_STATE_PDON;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
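		 * The low bit of rqs_seqno below tells us whether CAM is to be notified.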
1755 */ 1756 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1757 if (ct->ct_status & QLTM_SVALID) { 1758 char *sp = (char *)ct; 1759 sp += CTIO_SENSE_OFFSET; 1760 ccb->csio.sense_len = 1761 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); 1762 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); 1763 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1764 } 1765 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 1766 resid = ct->ct_resid; 1767 } 1768 isp_prt(isp, ISP_LOGTDEBUG0, 1769 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", 1770 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, 1771 ct->ct_status, ct->ct_flags, resid, 1772 sentstatus? "FIN" : "MID"); 1773 tval = ct->ct_fwhandle; 1774 } 1775 ccb->csio.resid += resid; 1776 1777 /* 1778 * We're here either because intermediate data transfers are done 1779 * and/or the final status CTIO (which may have joined with a 1780 * Data Transfer) is done. 1781 * 1782 * In any case, for this platform, the upper layers figure out 1783 * what to do next, so all we do here is collect status and 1784 * pass information along. Any DMA handles have already been 1785 * freed. 1786 */ 1787 if (notify_cam == 0) { 1788 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1789 return (0); 1790 } 1791 1792 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1793 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1794 1795 if (!ok) { 1796 isp_target_putback_atio(ccb); 1797 } else { 1798 isp_complete_ctio(ccb); 1799 1800 } 1801 return (0); 1802 } 1803 1804 static int 1805 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp) 1806 { 1807 return (0); /* XXXX */ 1808 } 1809 1810 static int 1811 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp) 1812 { 1813 1814 switch (inp->in_status) { 1815 case IN_PORT_LOGOUT: 1816 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1817 inp->in_iid); 1818 break; 1819 case IN_PORT_CHANGED: 1820 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1821 inp->in_iid); 1822 break; 1823 case IN_GLOBAL_LOGO: 1824 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1825 break; 1826 case IN_ABORT_TASK: 1827 { 1828 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1829 struct ccb_immed_notify *inot = NULL; 1830 1831 if (atp) { 1832 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1833 if (tptr) { 1834 inot = (struct ccb_immed_notify *) 1835 SLIST_FIRST(&tptr->inots); 1836 if (inot) { 1837 tptr->inot_count--; 1838 SLIST_REMOVE_HEAD(&tptr->inots, 1839 sim_links.sle); 1840 isp_prt(isp, ISP_LOGTDEBUG0, 1841 "Take FREE INOT count now %d", 1842 tptr->inot_count); 1843 } 1844 } 1845 isp_prt(isp, ISP_LOGWARN, 1846 "abort task RX_ID %x IID %d state %d", 1847 inp->in_seqid, inp->in_iid, atp->state); 1848 } else { 1849 isp_prt(isp, ISP_LOGWARN, 1850 "abort task RX_ID %x from iid %d, state unknown", 1851 inp->in_seqid, inp->in_iid); 1852 } 1853 if (inot) { 1854 inot->initiator_id = inp->in_iid; 1855 inot->sense_len = 0; 1856 inot->message_args[0] = MSG_ABORT_TAG; 1857 inot->message_args[1] = inp->in_seqid & 0xff; 1858 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; 1859 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 1860 xpt_done((union ccb *)inot); 1861 } 1862 break; 1863 } 1864 default: 1865 break; 1866 } 1867 return (0); 1868 } 1869 #endif 1870 1871 static void 1872 isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) 1873 { 1874 struct cam_sim *sim; 1875 struct ispsoftc *isp; 1876 1877 sim = (struct cam_sim *)cbarg; 1878 isp = (struct ispsoftc *) 
	    cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			uint16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int tgt;

			tgt = xpt_path_target_id(path);
			if (tgt >= 0) {
				sdp += cam_sim_bus(sim);
				ISP_LOCK(isp);
				nflags = sdp->isp_devparam[tgt].nvrm_flags;
#ifndef ISP_TARGET_MODE
				nflags &= DPARM_SAFE_DFLT;
				if (isp->isp_loaded_fw) {
					nflags |= DPARM_NARROW | DPARM_ASYNC;
				}
#else
				nflags = DPARM_DEFAULT;
#endif
				oflags = sdp->isp_devparam[tgt].goal_flags;
				sdp->isp_devparam[tgt].goal_flags = nflags;
				sdp->isp_devparam[tgt].dev_update = 1;
				isp->isp_update |= (1 << cam_sim_bus(sim));
				(void) isp_control(isp,
				    ISPCTL_UPDATE_PARAMS, NULL);
				sdp->isp_devparam[tgt].goal_flags = oflags;
				ISP_UNLOCK(isp);
			}
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}

static void
isp_poll(struct cam_sim *sim)
{
	struct ispsoftc *isp = cam_sim_softc(sim);
	uint16_t isr, sema, mbox;

	ISP_LOCK(isp);
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}


static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	uint32_t handle;
	int iok;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	iok = isp->isp_osinfo.intsok;
	isp->isp_osinfo.intsok = 0;
	handle = isp_find_handle(isp, xs);
	if (handle) {
		uint16_t isr, sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup for handle 0x%x", handle);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
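			 * Free any DMA resources and the handle before completing it.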
1981 */ 1982 if (XS_XFRLEN(xs)) { 1983 ISP_DMAFREE(isp, xs, handle); 1984 } 1985 isp_destroy_handle(isp, handle); 1986 xpt_print_path(xs->ccb_h.path); 1987 isp_prt(isp, ISP_LOGWARN, 1988 "watchdog timeout for handle 0x%x", handle); 1989 XS_SETERR(xs, CAM_CMD_TIMEOUT); 1990 XS_CMD_C_WDOG(xs); 1991 isp_done(xs); 1992 } else { 1993 uint16_t nxti, optr; 1994 ispreq_t local, *mp= &local, *qe; 1995 1996 XS_CMD_C_WDOG(xs); 1997 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 1998 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) { 1999 ISP_UNLOCK(isp); 2000 return; 2001 } 2002 XS_CMD_S_GRACE(xs); 2003 MEMZERO((void *) mp, sizeof (*mp)); 2004 mp->req_header.rqs_entry_count = 1; 2005 mp->req_header.rqs_entry_type = RQSTYPE_MARKER; 2006 mp->req_modifier = SYNC_ALL; 2007 mp->req_target = XS_CHANNEL(xs) << 7; 2008 isp_put_request(isp, mp, qe); 2009 ISP_ADD_REQUEST(isp, nxti); 2010 } 2011 } else { 2012 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command"); 2013 } 2014 isp->isp_osinfo.intsok = iok; 2015 ISP_UNLOCK(isp); 2016 } 2017 2018 static void 2019 isp_kthread(void *arg) 2020 { 2021 struct ispsoftc *isp = arg; 2022 2023 #ifdef ISP_SMPLOCK 2024 mtx_lock(&isp->isp_lock); 2025 #else 2026 mtx_lock(&Giant); 2027 #endif 2028 /* 2029 * The first loop is for our usage where we have yet to have 2030 * gotten good fibre channel state. 2031 */ 2032 for (;;) { 2033 int wasfrozen; 2034 2035 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state"); 2036 while (isp_fc_runstate(isp, 2 * 1000000) != 0) { 2037 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood"); 2038 if (FCPARAM(isp)->isp_fwstate != FW_READY || 2039 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) { 2040 if (FCPARAM(isp)->loop_seen_once == 0 || 2041 isp->isp_osinfo.ktmature == 0) { 2042 break; 2043 } 2044 } 2045 #ifdef ISP_SMPLOCK 2046 msleep(isp_kthread, &isp->isp_lock, 2047 PRIBIO, "isp_fcthrd", hz); 2048 #else 2049 (void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz); 2050 #endif 2051 } 2052 2053 /* 2054 * Even if we didn't get good loop state we may be 2055 * unfreezing the SIMQ so that we can kill off 2056 * commands (if we've never seen loop before, for example). 2057 */ 2058 isp->isp_osinfo.ktmature = 1; 2059 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 2060 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 2061 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 2062 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq"); 2063 ISPLOCK_2_CAMLOCK(isp); 2064 xpt_release_simq(isp->isp_sim, 1); 2065 CAMLOCK_2_ISPLOCK(isp); 2066 } 2067 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called"); 2068 #ifdef ISP_SMPLOCK 2069 cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock); 2070 #else 2071 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0); 2072 #endif 2073 } 2074 } 2075 2076 static void 2077 isp_action(struct cam_sim *sim, union ccb *ccb) 2078 { 2079 int bus, tgt, error; 2080 struct ispsoftc *isp; 2081 struct ccb_trans_settings *cts; 2082 2083 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2084 2085 isp = (struct ispsoftc *)cam_sim_softc(sim); 2086 ccb->ccb_h.sim_priv.entries[0].field = 0; 2087 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2088 if (isp->isp_state != ISP_RUNSTATE && 2089 ccb->ccb_h.func_code == XPT_SCSI_IO) { 2090 CAMLOCK_2_ISPLOCK(isp); 2091 isp_init(isp); 2092 if (isp->isp_state != ISP_INITSTATE) { 2093 ISP_UNLOCK(isp); 2094 /* 2095 * Lie. Say it was a selection timeout. 
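			 * (the chip could not be brought back up, so fail the request)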
2096 */ 2097 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 2098 xpt_freeze_devq(ccb->ccb_h.path, 1); 2099 xpt_done(ccb); 2100 return; 2101 } 2102 isp->isp_state = ISP_RUNSTATE; 2103 ISPLOCK_2_CAMLOCK(isp); 2104 } 2105 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2106 2107 2108 switch (ccb->ccb_h.func_code) { 2109 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2110 /* 2111 * Do a couple of preliminary checks... 2112 */ 2113 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2114 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2115 ccb->ccb_h.status = CAM_REQ_INVALID; 2116 xpt_done(ccb); 2117 break; 2118 } 2119 } 2120 #ifdef DIAGNOSTIC 2121 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2122 ccb->ccb_h.status = CAM_PATH_INVALID; 2123 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2124 ccb->ccb_h.status = CAM_PATH_INVALID; 2125 } 2126 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2127 isp_prt(isp, ISP_LOGERR, 2128 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO", 2129 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 2130 xpt_done(ccb); 2131 break; 2132 } 2133 #endif 2134 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 2135 CAMLOCK_2_ISPLOCK(isp); 2136 error = isp_start((XS_T *) ccb); 2137 switch (error) { 2138 case CMD_QUEUED: 2139 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2140 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2141 uint64_t ticks = (uint64_t) hz; 2142 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) 2143 ticks = 60 * 1000 * ticks; 2144 else 2145 ticks = ccb->ccb_h.timeout * hz; 2146 ticks = ((ticks + 999) / 1000) + hz + hz; 2147 if (ticks >= 0x80000000) { 2148 isp_prt(isp, ISP_LOGERR, 2149 "timeout overflow"); 2150 ticks = 0x7fffffff; 2151 } 2152 ccb->ccb_h.timeout_ch = timeout(isp_watchdog, 2153 (caddr_t)ccb, (int)ticks); 2154 } else { 2155 callout_handle_init(&ccb->ccb_h.timeout_ch); 2156 } 2157 ISPLOCK_2_CAMLOCK(isp); 2158 break; 2159 case CMD_RQLATER: 2160 /* 2161 * This can only happen for Fibre Channel 2162 */ 2163 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 2164 if (FCPARAM(isp)->loop_seen_once == 0 && 2165 isp->isp_osinfo.ktmature) { 2166 ISPLOCK_2_CAMLOCK(isp); 2167 XS_SETERR(ccb, CAM_SEL_TIMEOUT); 2168 xpt_done(ccb); 2169 break; 2170 } 2171 #ifdef ISP_SMPLOCK 2172 cv_signal(&isp->isp_osinfo.kthread_cv); 2173 #else 2174 wakeup(&isp->isp_osinfo.kthread_cv); 2175 #endif 2176 isp_freeze_loopdown(isp, "isp_action(RQLATER)"); 2177 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2178 ISPLOCK_2_CAMLOCK(isp); 2179 xpt_done(ccb); 2180 break; 2181 case CMD_EAGAIN: 2182 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2183 ISPLOCK_2_CAMLOCK(isp); 2184 xpt_done(ccb); 2185 break; 2186 case CMD_COMPLETE: 2187 isp_done((struct ccb_scsiio *) ccb); 2188 ISPLOCK_2_CAMLOCK(isp); 2189 break; 2190 default: 2191 isp_prt(isp, ISP_LOGERR, 2192 "What's this? 
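
/*
 * A note on the watchdog above: the first time it fires for a command that
 * is still outstanding it re-arms itself for one more second, marks the
 * command with the GRACE bit and queues a SYNC_ALL marker so the firmware
 * flushes any response it still owes us.  Only if the command remains
 * outstanding when the watchdog fires again is it aborted and completed
 * with CAM_CMD_TIMEOUT.
 *
 * The sketch below (kept out of compilation) shows the arm/disarm idiom
 * used with this watchdog elsewhere in this file; 'ccb' and 'ticks' stand
 * for the command's CCB and the callout interval computed in isp_action()
 * for XPT_SCSI_IO.
 */
#if 0
	/* arm: one callout per command, handle stored in the CCB header */
	ccb->ccb_h.timeout_ch = timeout(isp_watchdog, (caddr_t)ccb, (int)ticks);
	/* disarm: isp_done() cancels the callout when the command completes */
	untimeout(isp_watchdog, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
#endif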

static void
isp_kthread(void *arg)
{
	struct ispsoftc *isp = arg;

#ifdef	ISP_SMPLOCK
	mtx_lock(&isp->isp_lock);
#else
	mtx_lock(&Giant);
#endif
	/*
	 * The first loop is for our usage where we have yet to get
	 * good fibre channel state.
	 */
	for (;;) {
		int wasfrozen;

		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
				if (FCPARAM(isp)->loop_seen_once == 0 ||
				    isp->isp_osinfo.ktmature == 0) {
					break;
				}
			}
#ifdef	ISP_SMPLOCK
			msleep(isp_kthread, &isp->isp_lock,
			    PRIBIO, "isp_fcthrd", hz);
#else
			(void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
#endif
		}

		/*
		 * Even if we didn't get good loop state we may be
		 * unfreezing the SIMQ so that we can kill off
		 * commands (if we've never seen loop before, for example).
		 */
		isp->isp_osinfo.ktmature = 1;
		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
			ISPLOCK_2_CAMLOCK(isp);
			xpt_release_simq(isp->isp_sim, 1);
			CAMLOCK_2_ISPLOCK(isp);
		}
		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
#ifdef	ISP_SMPLOCK
		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
#else
		(void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
#endif
	}
}
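
/*
 * The kthread above sleeps on isp_osinfo.kthread_cv and is kicked whenever
 * loop state may have changed.  An illustrative sketch (not compiled) of
 * the wakeup idiom used elsewhere in this file (the CMD_RQLATER handling
 * in isp_action() and ISPASYNC_CHANGE_NOTIFY in isp_async()); 'isp' is the
 * usual softc pointer in scope at those call sites:
 */
#if 0
#ifdef	ISP_SMPLOCK
	cv_signal(&isp->isp_osinfo.kthread_cv);
#else
	wakeup(&isp->isp_osinfo.kthread_cv);
#endif
#endif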
"cannot abort CTIOs yet"); 2313 ccb->ccb_h.status = CAM_UA_ABORT; 2314 break; 2315 #endif 2316 case XPT_SCSI_IO: 2317 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2318 if (error) { 2319 ccb->ccb_h.status = CAM_UA_ABORT; 2320 } else { 2321 ccb->ccb_h.status = CAM_REQ_CMP; 2322 } 2323 break; 2324 default: 2325 ccb->ccb_h.status = CAM_REQ_INVALID; 2326 break; 2327 } 2328 ISPLOCK_2_CAMLOCK(isp); 2329 xpt_done(ccb); 2330 break; 2331 } 2332 #ifdef CAM_NEW_TRAN_CODE 2333 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2334 #else 2335 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2336 #endif 2337 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2338 cts = &ccb->cts; 2339 if (!IS_CURRENT_SETTINGS(cts)) { 2340 ccb->ccb_h.status = CAM_REQ_INVALID; 2341 xpt_done(ccb); 2342 break; 2343 } 2344 tgt = cts->ccb_h.target_id; 2345 CAMLOCK_2_ISPLOCK(isp); 2346 if (IS_SCSI(isp)) { 2347 #ifndef CAM_NEW_TRAN_CODE 2348 sdparam *sdp = isp->isp_param; 2349 uint16_t *dptr; 2350 2351 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2352 2353 sdp += bus; 2354 /* 2355 * We always update (internally) from goal_flags 2356 * so any request to change settings just gets 2357 * vectored to that location. 2358 */ 2359 dptr = &sdp->isp_devparam[tgt].goal_flags; 2360 2361 /* 2362 * Note that these operations affect the 2363 * the goal flags (goal_flags)- not 2364 * the current state flags. Then we mark 2365 * things so that the next operation to 2366 * this HBA will cause the update to occur. 2367 */ 2368 if (cts->valid & CCB_TRANS_DISC_VALID) { 2369 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2370 *dptr |= DPARM_DISC; 2371 } else { 2372 *dptr &= ~DPARM_DISC; 2373 } 2374 } 2375 if (cts->valid & CCB_TRANS_TQ_VALID) { 2376 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2377 *dptr |= DPARM_TQING; 2378 } else { 2379 *dptr &= ~DPARM_TQING; 2380 } 2381 } 2382 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2383 switch (cts->bus_width) { 2384 case MSG_EXT_WDTR_BUS_16_BIT: 2385 *dptr |= DPARM_WIDE; 2386 break; 2387 default: 2388 *dptr &= ~DPARM_WIDE; 2389 } 2390 } 2391 /* 2392 * Any SYNC RATE of nonzero and SYNC_OFFSET 2393 * of nonzero will cause us to go to the 2394 * selected (from NVRAM) maximum value for 2395 * this device. At a later point, we'll 2396 * allow finer control. 2397 */ 2398 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2399 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2400 (cts->sync_offset > 0)) { 2401 *dptr |= DPARM_SYNC; 2402 } else { 2403 *dptr &= ~DPARM_SYNC; 2404 } 2405 *dptr |= DPARM_SAFE_DFLT; 2406 #else 2407 struct ccb_trans_settings_scsi *scsi = 2408 &cts->proto_specific.scsi; 2409 struct ccb_trans_settings_spi *spi = 2410 &cts->xport_specific.spi; 2411 sdparam *sdp = isp->isp_param; 2412 uint16_t *dptr; 2413 2414 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2415 sdp += bus; 2416 /* 2417 * We always update (internally) from goal_flags 2418 * so any request to change settings just gets 2419 * vectored to that location. 
2420 */ 2421 dptr = &sdp->isp_devparam[tgt].goal_flags; 2422 2423 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2424 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2425 *dptr |= DPARM_DISC; 2426 else 2427 *dptr &= ~DPARM_DISC; 2428 } 2429 2430 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2431 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2432 *dptr |= DPARM_TQING; 2433 else 2434 *dptr &= ~DPARM_TQING; 2435 } 2436 2437 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2438 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2439 *dptr |= DPARM_WIDE; 2440 else 2441 *dptr &= ~DPARM_WIDE; 2442 } 2443 2444 /* 2445 * XXX: FIX ME 2446 */ 2447 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2448 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2449 (spi->sync_period && spi->sync_offset)) { 2450 *dptr |= DPARM_SYNC; 2451 /* 2452 * XXX: CHECK FOR LEGALITY 2453 */ 2454 sdp->isp_devparam[tgt].goal_period = 2455 spi->sync_period; 2456 sdp->isp_devparam[tgt].goal_offset = 2457 spi->sync_offset; 2458 } else { 2459 *dptr &= ~DPARM_SYNC; 2460 } 2461 #endif 2462 isp_prt(isp, ISP_LOGDEBUG0, 2463 "SET bus %d targ %d to flags %x off %x per %x", 2464 bus, tgt, sdp->isp_devparam[tgt].goal_flags, 2465 sdp->isp_devparam[tgt].goal_offset, 2466 sdp->isp_devparam[tgt].goal_period); 2467 sdp->isp_devparam[tgt].dev_update = 1; 2468 isp->isp_update |= (1 << bus); 2469 } 2470 ISPLOCK_2_CAMLOCK(isp); 2471 ccb->ccb_h.status = CAM_REQ_CMP; 2472 xpt_done(ccb); 2473 break; 2474 case XPT_GET_TRAN_SETTINGS: 2475 cts = &ccb->cts; 2476 tgt = cts->ccb_h.target_id; 2477 CAMLOCK_2_ISPLOCK(isp); 2478 if (IS_FC(isp)) { 2479 #ifndef CAM_NEW_TRAN_CODE 2480 /* 2481 * a lot of normal SCSI things don't make sense. 2482 */ 2483 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2484 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2485 /* 2486 * How do you measure the width of a high 2487 * speed serial bus? Well, in bytes. 2488 * 2489 * Offset and period make no sense, though, so we set 2490 * (above) a 'base' transfer speed to be gigabit. 
2491 */ 2492 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2493 #else 2494 fcparam *fcp = isp->isp_param; 2495 struct ccb_trans_settings_fc *fc = 2496 &cts->xport_specific.fc; 2497 2498 cts->protocol = PROTO_SCSI; 2499 cts->protocol_version = SCSI_REV_2; 2500 cts->transport = XPORT_FC; 2501 cts->transport_version = 0; 2502 2503 fc->valid = CTS_FC_VALID_SPEED; 2504 if (fcp->isp_gbspeed == 2) 2505 fc->bitrate = 200000; 2506 else 2507 fc->bitrate = 100000; 2508 if (tgt > 0 && tgt < MAX_FC_TARG) { 2509 struct lportdb *lp = &fcp->portdb[tgt]; 2510 fc->wwnn = lp->node_wwn; 2511 fc->wwpn = lp->port_wwn; 2512 fc->port = lp->portid; 2513 fc->valid |= CTS_FC_VALID_WWNN | 2514 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2515 } 2516 #endif 2517 } else { 2518 #ifdef CAM_NEW_TRAN_CODE 2519 struct ccb_trans_settings_scsi *scsi = 2520 &cts->proto_specific.scsi; 2521 struct ccb_trans_settings_spi *spi = 2522 &cts->xport_specific.spi; 2523 #endif 2524 sdparam *sdp = isp->isp_param; 2525 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2526 uint16_t dval, pval, oval; 2527 2528 sdp += bus; 2529 2530 if (IS_CURRENT_SETTINGS(cts)) { 2531 sdp->isp_devparam[tgt].dev_refresh = 1; 2532 isp->isp_update |= (1 << bus); 2533 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2534 NULL); 2535 dval = sdp->isp_devparam[tgt].actv_flags; 2536 oval = sdp->isp_devparam[tgt].actv_offset; 2537 pval = sdp->isp_devparam[tgt].actv_period; 2538 } else { 2539 dval = sdp->isp_devparam[tgt].nvrm_flags; 2540 oval = sdp->isp_devparam[tgt].nvrm_offset; 2541 pval = sdp->isp_devparam[tgt].nvrm_period; 2542 } 2543 2544 #ifndef CAM_NEW_TRAN_CODE 2545 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2546 2547 if (dval & DPARM_DISC) { 2548 cts->flags |= CCB_TRANS_DISC_ENB; 2549 } 2550 if (dval & DPARM_TQING) { 2551 cts->flags |= CCB_TRANS_TAG_ENB; 2552 } 2553 if (dval & DPARM_WIDE) { 2554 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2555 } else { 2556 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2557 } 2558 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2559 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2560 2561 if ((dval & DPARM_SYNC) && oval != 0) { 2562 cts->sync_period = pval; 2563 cts->sync_offset = oval; 2564 cts->valid |= 2565 CCB_TRANS_SYNC_RATE_VALID | 2566 CCB_TRANS_SYNC_OFFSET_VALID; 2567 } 2568 #else 2569 cts->protocol = PROTO_SCSI; 2570 cts->protocol_version = SCSI_REV_2; 2571 cts->transport = XPORT_SPI; 2572 cts->transport_version = 2; 2573 2574 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 2575 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 2576 if (dval & DPARM_DISC) { 2577 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2578 } 2579 if (dval & DPARM_TQING) { 2580 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2581 } 2582 if ((dval & DPARM_SYNC) && oval && pval) { 2583 spi->sync_offset = oval; 2584 spi->sync_period = pval; 2585 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2586 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2587 } 2588 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2589 if (dval & DPARM_WIDE) { 2590 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2591 } else { 2592 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2593 } 2594 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2595 scsi->valid = CTS_SCSI_VALID_TQ; 2596 spi->valid |= CTS_SPI_VALID_DISC; 2597 } else { 2598 scsi->valid = 0; 2599 } 2600 #endif 2601 isp_prt(isp, ISP_LOGDEBUG0, 2602 "GET %s bus %d targ %d to flags %x off %x per %x", 2603 IS_CURRENT_SETTINGS(cts)? 
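
/*
 * The CMD_QUEUED path in isp_action() above converts the CAM timeout, which
 * is expressed in milliseconds, into callout ticks and then adds two seconds
 * of slack so the watchdog does not race a command that is about to complete.
 * A worked example, assuming hz = 1000:
 *
 *	ccb_h.timeout = 30000 ms  ->  30000 * hz = 30,000,000
 *	(30,000,000 + 999) / 1000 = 30,000 ticks  ->  + 2 * hz = 32,000 ticks
 *
 * i.e. a 30 second CAM timeout arms the watchdog 32 seconds out, and
 * CAM_TIME_DEFAULT is treated as 60 seconds plus the same 2 second pad.
 */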
"ACTIVE" : "NVRAM", 2604 bus, tgt, dval, oval, pval); 2605 } 2606 ISPLOCK_2_CAMLOCK(isp); 2607 ccb->ccb_h.status = CAM_REQ_CMP; 2608 xpt_done(ccb); 2609 break; 2610 2611 case XPT_CALC_GEOMETRY: 2612 { 2613 struct ccb_calc_geometry *ccg; 2614 2615 ccg = &ccb->ccg; 2616 if (ccg->block_size == 0) { 2617 isp_prt(isp, ISP_LOGERR, 2618 "%d.%d XPT_CALC_GEOMETRY block size 0?", 2619 ccg->ccb_h.target_id, ccg->ccb_h.target_lun); 2620 ccb->ccb_h.status = CAM_REQ_INVALID; 2621 xpt_done(ccb); 2622 break; 2623 } 2624 cam_calc_geometry(ccg, /*extended*/1); 2625 xpt_done(ccb); 2626 break; 2627 } 2628 case XPT_RESET_BUS: /* Reset the specified bus */ 2629 bus = cam_sim_bus(sim); 2630 CAMLOCK_2_ISPLOCK(isp); 2631 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2632 ISPLOCK_2_CAMLOCK(isp); 2633 if (error) 2634 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2635 else { 2636 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2637 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2638 else if (isp->isp_path != NULL) 2639 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2640 ccb->ccb_h.status = CAM_REQ_CMP; 2641 } 2642 xpt_done(ccb); 2643 break; 2644 2645 case XPT_TERM_IO: /* Terminate the I/O process */ 2646 ccb->ccb_h.status = CAM_REQ_INVALID; 2647 xpt_done(ccb); 2648 break; 2649 2650 case XPT_PATH_INQ: /* Path routing inquiry */ 2651 { 2652 struct ccb_pathinq *cpi = &ccb->cpi; 2653 2654 cpi->version_num = 1; 2655 #ifdef ISP_TARGET_MODE 2656 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2657 #else 2658 cpi->target_sprt = 0; 2659 #endif 2660 cpi->hba_eng_cnt = 0; 2661 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2662 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2663 cpi->bus_id = cam_sim_bus(sim); 2664 if (IS_FC(isp)) { 2665 cpi->hba_misc = PIM_NOBUSRESET; 2666 /* 2667 * Because our loop ID can shift from time to time, 2668 * make our initiator ID out of range of our bus. 2669 */ 2670 cpi->initiator_id = cpi->max_target + 1; 2671 2672 /* 2673 * Set base transfer capabilities for Fibre Channel. 2674 * Technically not correct because we don't know 2675 * what media we're running on top of- but we'll 2676 * look good if we always say 100MB/s. 2677 */ 2678 if (FCPARAM(isp)->isp_gbspeed == 2) 2679 cpi->base_transfer_speed = 200000; 2680 else 2681 cpi->base_transfer_speed = 100000; 2682 cpi->hba_inquiry = PI_TAG_ABLE; 2683 #ifdef CAM_NEW_TRAN_CODE 2684 cpi->transport = XPORT_FC; 2685 cpi->transport_version = 0; /* WHAT'S THIS FOR? */ 2686 #endif 2687 } else { 2688 sdparam *sdp = isp->isp_param; 2689 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2690 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2691 cpi->hba_misc = 0; 2692 cpi->initiator_id = sdp->isp_initiator_id; 2693 cpi->base_transfer_speed = 3300; 2694 #ifdef CAM_NEW_TRAN_CODE 2695 cpi->transport = XPORT_SPI; 2696 cpi->transport_version = 2; /* WHAT'S THIS FOR? 

int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, rv = 0;
	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
	{
#ifdef	CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
#endif
		int flags, tgt;
		sdparam *sdp = isp->isp_param;
		struct ccb_trans_settings cts;
		struct cam_path *tmppath;

		bzero(&cts, sizeof (struct ccb_trans_settings));

		tgt = *((int *)arg);
		bus = (tgt >> 16) & 0xffff;
		tgt &= 0xffff;
		sdp += bus;
		ISPLOCK_2_CAMLOCK(isp);
		if (xpt_create_path(&tmppath, NULL,
		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			CAMLOCK_2_ISPLOCK(isp);
			isp_prt(isp, ISP_LOGWARN,
			    "isp_async cannot make temp path for %d.%d",
			    tgt, bus);
			rv = -1;
			break;
		}
		CAMLOCK_2_ISPLOCK(isp);
		flags = sdp->isp_devparam[tgt].actv_flags;
#ifdef	CAM_NEW_TRAN_CODE
		cts.type = CTS_TYPE_CURRENT_SETTINGS;
		cts.protocol = PROTO_SCSI;
		cts.transport = XPORT_SPI;

		scsi = &cts.proto_specific.scsi;
		spi = &cts.xport_specific.spi;

		if (flags & DPARM_TQING) {
			scsi->valid |= CTS_SCSI_VALID_TQ;
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
		}

		if (flags & DPARM_DISC) {
			spi->valid |= CTS_SPI_VALID_DISC;
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
		}
		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
		if (flags & DPARM_WIDE) {
			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
		} else {
			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		}
		if (flags & DPARM_SYNC) {
			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
		}
#else
		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
		if (flags & DPARM_DISC) {
			cts.flags |= CCB_TRANS_DISC_ENB;
		}
		if (flags & DPARM_TQING) {
			cts.flags |= CCB_TRANS_TAG_ENB;
		}
		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
		cts.bus_width = (flags & DPARM_WIDE)?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
		if (flags & DPARM_SYNC) {
			cts.valid |=
			    CCB_TRANS_SYNC_RATE_VALID |
			    CCB_TRANS_SYNC_OFFSET_VALID;
		}
#endif
		isp_prt(isp, ISP_LOGDEBUG2,
		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
		    sdp->isp_devparam[tgt].actv_offset, flags);
		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
		ISPLOCK_2_CAMLOCK(isp);
		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
		xpt_free_path(tmppath);
		CAMLOCK_2_ISPLOCK(isp);
		break;
	}
	case ISPASYNC_BUS_RESET:
		bus = *((int *)arg);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
		    bus);
		if (bus > 0 && isp->isp_path2) {
			ISPLOCK_2_CAMLOCK(isp);
			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
			CAMLOCK_2_ISPLOCK(isp);
		} else if (isp->isp_path) {
			ISPLOCK_2_CAMLOCK(isp);
			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
			CAMLOCK_2_ISPLOCK(isp);
		}
		break;
	case ISPASYNC_LIP:
		if (isp->isp_path) {
			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
		}
		isp_prt(isp, ISP_LOGINFO, "LIP Received");
		break;
	case ISPASYNC_LOOP_RESET:
		if (isp->isp_path) {
			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
		}
		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
		break;
	case ISPASYNC_LOOP_DOWN:
		if (isp->isp_path) {
			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
		}
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		/*
		 * Now we just note that Loop has come up. We don't
		 * actually do anything because we're waiting for a
		 * Change Notify before activating the FC cleanup
		 * thread to look at the state of the loop again.
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
	{
		struct cam_path *tmppath;
		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
		static const char *roles[4] = {
		    "(none)", "Target", "Initiator", "Target/Initiator"
		};
		fcparam *fcp = isp->isp_param;
		int tgt = *((int *) arg);
		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
		struct lportdb *lp = &fcp->portdb[tgt];

		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
		    roles[lp->roles & 0x3],
		    (lp->valid)? "Arrived" : "Departed",
		    (uint32_t) (lp->port_wwn >> 32),
		    (uint32_t) (lp->port_wwn & 0xffffffffLL),
		    (uint32_t) (lp->node_wwn >> 32),
		    (uint32_t) (lp->node_wwn & 0xffffffffLL));

		ISPLOCK_2_CAMLOCK(isp);
		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			CAMLOCK_2_ISPLOCK(isp);
			break;
		}
		/*
		 * Policy: only announce targets.
		 */
		if (lp->roles & is_tgt_mask) {
			if (lp->valid) {
				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
			} else {
				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
			}
		}
		xpt_free_path(tmppath);
		CAMLOCK_2_ISPLOCK(isp);
		break;
	}
	case ISPASYNC_CHANGE_NOTIFY:
		if (arg == ISPASYNC_CHANGE_PDB) {
			isp_prt(isp, ISP_LOGINFO,
			    "Port Database Changed");
		} else if (arg == ISPASYNC_CHANGE_SNS) {
			isp_prt(isp, ISP_LOGINFO,
			    "Name Server Database Changed");
		}
#ifdef	ISP_SMPLOCK
		cv_signal(&isp->isp_osinfo.kthread_cv);
#else
		wakeup(&isp->isp_osinfo.kthread_cv);
#endif
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		int target, base, lim;
		fcparam *fcp = isp->isp_param;
		struct lportdb *lp = NULL;
		struct lportdb *clp = (struct lportdb *) arg;
		char *pt;

		switch (clp->port_type) {
		case 1:
			pt = " N_Port";
			break;
		case 2:
			pt = " NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = " Nx_Port";
			break;
		case 0x81:
			pt = " F_port";
			break;
		case 0x82:
			pt = " FL_Port";
			break;
		case 0x84:
			pt = " E_port";
			break;
		default:
			pt = " ";
			break;
		}

		isp_prt(isp, ISP_LOGINFO,
		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);

		/*
		 * If we don't have an initiator role we bail.
		 *
		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
		 */

		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
			break;
		}

		/*
		 * Is this entry for us? If so, we bail.
		 */

		if (fcp->isp_portid == clp->portid) {
			break;
		}

		/*
		 * Else, the default policy is to find room for it in
		 * our local port database. Later, when we execute
		 * the call to isp_pdb_sync either this newly arrived
		 * or already logged in device will be (re)announced.
		 */

		if (fcp->isp_topo == TOPO_FL_PORT)
			base = FC_SNS_ID+1;
		else
			base = 0;

		if (fcp->isp_topo == TOPO_N_PORT)
			lim = 1;
		else
			lim = MAX_FC_TARG;

		/*
		 * Is it already in our list?
		 */
		for (target = base; target < lim; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == clp->port_wwn &&
			    lp->node_wwn == clp->node_wwn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < lim) {
			break;
		}
		for (target = base; target < lim; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == lim) {
			isp_prt(isp, ISP_LOGWARN,
			    "out of space for fabric devices");
			break;
		}
		lp->port_type = clp->port_type;
		lp->fc4_type = clp->fc4_type;
		lp->node_wwn = clp->node_wwn;
		lp->port_wwn = clp->port_wwn;
		lp->portid = clp->portid;
		lp->fabric_dev = 1;
		break;
	}
#ifdef	ISP_TARGET_MODE
	case ISPASYNC_TARGET_NOTIFY:
	{
		tmd_notify_t *nt = arg;
		isp_prt(isp, ISP_LOGALL,
		    "target notify code 0x%x", nt->nt_ncode);
		break;
	}
	case ISPASYNC_TARGET_ACTION:
		switch (((isphdr_t *)arg)->rqs_entry_type) {
		default:
			isp_prt(isp, ISP_LOGWARN,
			    "event 0x%x for unhandled target action",
			    ((isphdr_t *)arg)->rqs_entry_type);
			break;
		case RQSTYPE_NOTIFY:
			if (IS_SCSI(isp)) {
				rv = isp_handle_platform_notify_scsi(isp,
				    (in_entry_t *) arg);
			} else {
				rv = isp_handle_platform_notify_fc(isp,
				    (in_fcentry_t *) arg);
			}
			break;
		case RQSTYPE_ATIO:
			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
			break;
		case RQSTYPE_ATIO2:
			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
			break;
		case RQSTYPE_CTIO2:
		case RQSTYPE_CTIO:
			rv = isp_handle_platform_ctio(isp, arg);
			break;
		case RQSTYPE_ENABLE_LUN:
		case RQSTYPE_MODIFY_LUN:
			isp_ledone(isp, (lun_entry_t *) arg);
			break;
		}
		break;
#endif
	case ISPASYNC_FW_CRASH:
	{
		uint16_t mbox1, mbox6;
		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			mbox6 = ISP_READ(isp, OUTMAILBOX6);
		} else {
			mbox6 = 0;
		}
		isp_prt(isp, ISP_LOGERR,
		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
		    mbox6, mbox1);
#ifdef	ISP_FW_CRASH_DUMP
		/*
		 * XXX: really need a thread to do this right.
		 */
		if (IS_FC(isp)) {
			FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
			FCPARAM(isp)->isp_loopstate = LOOP_NIL;
			isp_freeze_loopdown(isp, "f/w crash");
			isp_fw_dump(isp);
		}
		isp_reinit(isp);
		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
#endif
		break;
	}
	case ISPASYNC_UNHANDLED_RESPONSE:
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
		break;
	}
	return (rv);
}
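
/*
 * isp_async() is the platform half of the core driver's event upcall: the
 * common code reports events such as bus resets, LIPs and port database
 * changes by calling isp_async(isp, cmd, arg), with the meaning of 'arg'
 * depending on the event.  An illustrative sketch (not compiled) of what
 * such an upcall looks like from the caller's side, using ISPASYNC_BUS_RESET
 * as the example; 'isp' is the usual softc pointer in scope at the caller:
 */
#if 0
	int bus = 0;		/* bus number being reported */
	(void) isp_async(isp, ISPASYNC_BUS_RESET, &bus);
#endif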


/*
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
	DISABLE_INTS(isp);
}

void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", device_get_nameunit(isp->isp_dev));
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}
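
/*
 * isp_prt() is the driver's printf: a message is emitted only when its level
 * is ISP_LOGALL or when (level & isp->isp_dblev) is nonzero, and the output
 * is prefixed with the unit name and terminated with a newline.  An
 * illustrative usage sketch (not compiled); 'gbspeed' is just a placeholder
 * value for the example:
 */
#if 0
	isp_prt(isp, ISP_LOGINFO, "link came up at %d Gb", gbspeed);
	isp_prt(isp, ISP_LOGDEBUG0, "debug: state %x", isp->isp_state);
#endif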