1 /*- 2 * Copyright (c) 1997-2006 by Matthew Jacob 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 /* 28 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 #include <dev/isp/isp_freebsd.h> 33 #include <sys/unistd.h> 34 #include <sys/kthread.h> 35 #include <machine/stdarg.h> /* for use by isp_prt below */ 36 #include <sys/conf.h> 37 #include <sys/module.h> 38 #include <sys/ioccom.h> 39 #include <dev/isp/isp_ioctl.h> 40 #if __FreeBSD_version >= 500000 41 #include <sys/sysctl.h> 42 #else 43 #include <sys/devicestat.h> 44 #endif 45 #include <cam/cam_periph.h> 46 #include <cam/cam_xpt_periph.h> 47 48 #if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025 49 #define CAM_NEW_TRAN_CODE 1 50 #endif 51 52 53 MODULE_VERSION(isp, 1); 54 MODULE_DEPEND(isp, cam, 1, 1, 1); 55 int isp_announced = 0; 56 int isp_fabric_hysteresis = 5; 57 int isp_loop_down_limit = 300; /* default loop down limit */ 58 int isp_change_is_bad = 0; /* "changed" devices are bad */ 59 int isp_quickboot_time = 15; /* don't wait more than N secs for loop up */ 60 int isp_gone_device_time = 30; /* grace time before reporting device lost */ 61 static const char *roles[4] = { 62 "(none)", "Target", "Initiator", "Target/Initiator" 63 }; 64 static const char prom3[] = 65 "PortID 0x%06x Departed from Target %u because of %s"; 66 67 static void isp_freeze_loopdown(ispsoftc_t *, char *); 68 static d_ioctl_t ispioctl; 69 static void isp_intr_enable(void *); 70 static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); 71 static void isp_poll(struct cam_sim *); 72 static timeout_t isp_watchdog; 73 static timeout_t isp_ldt; 74 static void isp_kthread(void *); 75 static void isp_action(struct cam_sim *, union ccb *); 76 77 #if __FreeBSD_version < 700000 78 ispfwfunc *isp_get_firmware_p = NULL; 79 #endif 80 81 #if __FreeBSD_version < 500000 82 #define ISP_CDEV_MAJOR 248 83 static struct cdevsw isp_cdevsw = { 84 /* open */ nullopen, 85 /* close */ nullclose, 86 /* read */ noread, 87 /* write */ nowrite, 88 /* ioctl */ ispioctl, 89 /* poll */ nopoll, 90 /* mmap */ nommap, 91 /* strategy */ nostrategy, 92 /* name */ "isp", 93 /* maj */ ISP_CDEV_MAJOR, 94 /* dump */ 
nodump, 95 /* psize */ nopsize, 96 /* flags */ D_TAPE, 97 }; 98 #define isp_sysctl_update(x) do { ; } while (0) 99 #else 100 static struct cdevsw isp_cdevsw = { 101 .d_version = D_VERSION, 102 .d_flags = D_NEEDGIANT, 103 .d_ioctl = ispioctl, 104 .d_name = "isp", 105 }; 106 static void isp_sysctl_update(ispsoftc_t *); 107 #endif 108 109 static ispsoftc_t *isplist = NULL; 110 111 void 112 isp_attach(ispsoftc_t *isp) 113 { 114 int primary, secondary; 115 struct ccb_setasync csa; 116 struct cam_devq *devq; 117 struct cam_sim *sim; 118 struct cam_path *path; 119 120 /* 121 * Establish (in case of 12X0) which bus is the primary. 122 */ 123 124 primary = 0; 125 secondary = 1; 126 127 /* 128 * Create the device queue for our SIM(s). 129 */ 130 devq = cam_simq_alloc(isp->isp_maxcmds); 131 if (devq == NULL) { 132 return; 133 } 134 135 /* 136 * Construct our SIM entry. 137 */ 138 ISPLOCK_2_CAMLOCK(isp); 139 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 140 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 141 if (sim == NULL) { 142 cam_simq_free(devq); 143 CAMLOCK_2_ISPLOCK(isp); 144 return; 145 } 146 CAMLOCK_2_ISPLOCK(isp); 147 148 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 149 isp->isp_osinfo.ehook.ich_arg = isp; 150 ISPLOCK_2_CAMLOCK(isp); 151 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 152 cam_sim_free(sim, TRUE); 153 CAMLOCK_2_ISPLOCK(isp); 154 isp_prt(isp, ISP_LOGERR, 155 "could not establish interrupt enable hook"); 156 return; 157 } 158 159 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 160 cam_sim_free(sim, TRUE); 161 CAMLOCK_2_ISPLOCK(isp); 162 return; 163 } 164 165 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 166 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 167 xpt_bus_deregister(cam_sim_path(sim)); 168 cam_sim_free(sim, TRUE); 169 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 170 CAMLOCK_2_ISPLOCK(isp); 171 return; 172 } 173 174 xpt_setup_ccb(&csa.ccb_h, path, 5); 175 csa.ccb_h.func_code = XPT_SASYNC_CB; 176 csa.event_enable = AC_LOST_DEVICE; 177 csa.callback = isp_cam_async; 178 csa.callback_arg = sim; 179 xpt_action((union ccb *)&csa); 180 CAMLOCK_2_ISPLOCK(isp); 181 isp->isp_sim = sim; 182 isp->isp_path = path; 183 /* 184 * Create a kernel thread for fibre channel instances. We 185 * don't have dual channel FC cards. 186 */ 187 if (IS_FC(isp)) { 188 ISPLOCK_2_CAMLOCK(isp); 189 #if __FreeBSD_version >= 500000 190 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); 191 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 192 RFHIGHPID, 0, "%s: fc_thrd", 193 device_get_nameunit(isp->isp_dev))) 194 #else 195 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 196 "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) 197 #endif 198 { 199 xpt_bus_deregister(cam_sim_path(sim)); 200 cam_sim_free(sim, TRUE); 201 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 202 CAMLOCK_2_ISPLOCK(isp); 203 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 204 return; 205 } 206 CAMLOCK_2_ISPLOCK(isp); 207 /* 208 * We start by being "loop down" if we have an initiator role 209 */ 210 if (isp->isp_role & ISP_ROLE_INITIATOR) { 211 isp_freeze_loopdown(isp, "isp_attach"); 212 isp->isp_osinfo.ldt = 213 timeout(isp_ldt, isp, isp_quickboot_time * hz); 214 isp->isp_osinfo.ldt_running = 1; 215 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 216 "Starting Initial Loop Down Timer"); 217 } 218 } 219 220 221 /* 222 * If we have a second channel, construct SIM entry for that. 
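 * It shares the device queue (and hence isp_maxcmds) allocated above,
 * and any failure below unwinds the first bus's registration as well.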
223 */ 224 if (IS_DUALBUS(isp)) { 225 ISPLOCK_2_CAMLOCK(isp); 226 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 227 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 228 if (sim == NULL) { 229 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 230 xpt_free_path(isp->isp_path); 231 cam_simq_free(devq); 232 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 233 return; 234 } 235 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 236 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 237 xpt_free_path(isp->isp_path); 238 cam_sim_free(sim, TRUE); 239 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 240 CAMLOCK_2_ISPLOCK(isp); 241 return; 242 } 243 244 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 245 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 246 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 247 xpt_free_path(isp->isp_path); 248 xpt_bus_deregister(cam_sim_path(sim)); 249 cam_sim_free(sim, TRUE); 250 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 251 CAMLOCK_2_ISPLOCK(isp); 252 return; 253 } 254 255 xpt_setup_ccb(&csa.ccb_h, path, 5); 256 csa.ccb_h.func_code = XPT_SASYNC_CB; 257 csa.event_enable = AC_LOST_DEVICE; 258 csa.callback = isp_cam_async; 259 csa.callback_arg = sim; 260 xpt_action((union ccb *)&csa); 261 CAMLOCK_2_ISPLOCK(isp); 262 isp->isp_sim2 = sim; 263 isp->isp_path2 = path; 264 } 265 266 /* 267 * Create device nodes 268 */ 269 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 270 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 271 272 if (isp->isp_role != ISP_ROLE_NONE) { 273 isp->isp_state = ISP_RUNSTATE; 274 ISP_ENABLE_INTS(isp); 275 } 276 if (isplist == NULL) { 277 isplist = isp; 278 } else { 279 ispsoftc_t *tmp = isplist; 280 while (tmp->isp_osinfo.next) { 281 tmp = tmp->isp_osinfo.next; 282 } 283 tmp->isp_osinfo.next = isp; 284 } 285 isp_sysctl_update(isp); 286 } 287 288 static void 289 isp_freeze_loopdown(ispsoftc_t *isp, char *msg) 290 { 291 if (isp->isp_osinfo.simqfrozen == 0) { 292 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 293 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 294 ISPLOCK_2_CAMLOCK(isp); 295 xpt_freeze_simq(isp->isp_sim, 1); 296 CAMLOCK_2_ISPLOCK(isp); 297 } else { 298 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 299 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 300 } 301 } 302 303 304 #if __FreeBSD_version < 500000 305 #define _DEV dev_t 306 #define _IOP struct proc 307 #else 308 #define _IOP struct thread 309 #define _DEV struct cdev * 310 #endif 311 312 static int 313 ispioctl(_DEV dev, u_long c, caddr_t addr, int flags, _IOP *td) 314 { 315 ispsoftc_t *isp; 316 int nr, retval = ENOTTY; 317 318 isp = isplist; 319 while (isp) { 320 if (minor(dev) == device_get_unit(isp->isp_dev)) { 321 break; 322 } 323 isp = isp->isp_osinfo.next; 324 } 325 if (isp == NULL) 326 return (ENXIO); 327 328 switch (c) { 329 #ifdef ISP_FW_CRASH_DUMP 330 case ISP_GET_FW_CRASH_DUMP: 331 if (IS_FC(isp)) { 332 uint16_t *ptr = FCPARAM(isp)->isp_dump_data; 333 size_t sz; 334 335 retval = 0; 336 if (IS_2200(isp)) { 337 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 338 } else { 339 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 340 } 341 ISP_LOCK(isp); 342 if (ptr && *ptr) { 343 void *uaddr = *((void **) addr); 344 if (copyout(ptr, uaddr, sz)) { 345 retval = EFAULT; 346 } else { 347 *ptr = 0; 348 } 349 } else { 350 retval = ENXIO; 351 } 352 ISP_UNLOCK(isp); 353 } 354 break; 355 case ISP_FORCE_CRASH_DUMP: 356 if (IS_FC(isp)) { 357 ISP_LOCK(isp); 358 isp_freeze_loopdown(isp, 359 
"ispioctl(ISP_FORCE_CRASH_DUMP)"); 360 isp_fw_dump(isp); 361 isp_reinit(isp); 362 ISP_UNLOCK(isp); 363 retval = 0; 364 } 365 break; 366 #endif 367 case ISP_SDBLEV: 368 { 369 int olddblev = isp->isp_dblev; 370 isp->isp_dblev = *(int *)addr; 371 *(int *)addr = olddblev; 372 retval = 0; 373 break; 374 } 375 case ISP_GETROLE: 376 *(int *)addr = isp->isp_role; 377 retval = 0; 378 break; 379 case ISP_SETROLE: 380 nr = *(int *)addr; 381 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 382 retval = EINVAL; 383 break; 384 } 385 /* 386 * XXX: Current 387 */ 388 if (nr == ISP_ROLE_BOTH) { 389 isp_prt(isp, ISP_LOGERR, "dual roles not supported"); 390 retval = EINVAL; 391 break; 392 } 393 *(int *)addr = isp->isp_role; 394 isp->isp_role = nr; 395 /* FALLTHROUGH */ 396 case ISP_RESETHBA: 397 ISP_LOCK(isp); 398 isp_reinit(isp); 399 ISP_UNLOCK(isp); 400 retval = 0; 401 break; 402 case ISP_RESCAN: 403 if (IS_FC(isp)) { 404 ISP_LOCK(isp); 405 if (isp_fc_runstate(isp, 5 * 1000000)) { 406 retval = EIO; 407 } else { 408 retval = 0; 409 } 410 ISP_UNLOCK(isp); 411 } 412 break; 413 case ISP_FC_LIP: 414 if (IS_FC(isp)) { 415 ISP_LOCK(isp); 416 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 417 retval = EIO; 418 } else { 419 retval = 0; 420 } 421 ISP_UNLOCK(isp); 422 } 423 break; 424 case ISP_FC_GETDINFO: 425 { 426 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 427 fcportdb_t *lp; 428 429 if (IS_SCSI(isp)) { 430 break; 431 } 432 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 433 retval = EINVAL; 434 break; 435 } 436 ISP_LOCK(isp); 437 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 438 if (lp->state == FC_PORTDB_STATE_VALID) { 439 ifc->role = lp->roles; 440 ifc->loopid = lp->handle; 441 ifc->portid = lp->portid; 442 ifc->node_wwn = lp->node_wwn; 443 ifc->port_wwn = lp->port_wwn; 444 retval = 0; 445 } else { 446 retval = ENODEV; 447 } 448 ISP_UNLOCK(isp); 449 break; 450 } 451 case ISP_GET_STATS: 452 { 453 isp_stats_t *sp = (isp_stats_t *) addr; 454 455 MEMZERO(sp, sizeof (*sp)); 456 sp->isp_stat_version = ISP_STATS_VERSION; 457 sp->isp_type = isp->isp_type; 458 sp->isp_revision = isp->isp_revision; 459 ISP_LOCK(isp); 460 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 461 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 462 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 463 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 464 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 465 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 466 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 467 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 468 ISP_UNLOCK(isp); 469 retval = 0; 470 break; 471 } 472 case ISP_CLR_STATS: 473 ISP_LOCK(isp); 474 isp->isp_intcnt = 0; 475 isp->isp_intbogus = 0; 476 isp->isp_intmboxc = 0; 477 isp->isp_intoasync = 0; 478 isp->isp_rsltccmplt = 0; 479 isp->isp_fphccmplt = 0; 480 isp->isp_rscchiwater = 0; 481 isp->isp_fpcchiwater = 0; 482 ISP_UNLOCK(isp); 483 retval = 0; 484 break; 485 case ISP_FC_GETHINFO: 486 { 487 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 488 MEMZERO(hba, sizeof (*hba)); 489 490 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 491 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 492 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 493 if (IS_FC(isp)) { 494 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 495 hba->fc_scsi_supported = 1; 496 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 497 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 498 hba->nvram_node_wwn = FCPARAM(isp)->isp_wwnn_nvram; 499 hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram; 500 
hba->active_node_wwn = ISP_NODEWWN(isp); 501 hba->active_port_wwn = ISP_PORTWWN(isp); 502 } 503 retval = 0; 504 break; 505 } 506 case ISP_GET_FC_PARAM: 507 { 508 struct isp_fc_param *f = (struct isp_fc_param *) addr; 509 510 if (IS_SCSI(isp)) { 511 break; 512 } 513 f->parameter = 0; 514 if (strcmp(f->param_name, "framelength") == 0) { 515 f->parameter = FCPARAM(isp)->isp_maxfrmlen; 516 retval = 0; 517 break; 518 } 519 if (strcmp(f->param_name, "exec_throttle") == 0) { 520 f->parameter = FCPARAM(isp)->isp_execthrottle; 521 retval = 0; 522 break; 523 } 524 if (strcmp(f->param_name, "fullduplex") == 0) { 525 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX) 526 f->parameter = 1; 527 retval = 0; 528 break; 529 } 530 if (strcmp(f->param_name, "loopid") == 0) { 531 f->parameter = FCPARAM(isp)->isp_loopid; 532 retval = 0; 533 break; 534 } 535 retval = EINVAL; 536 break; 537 } 538 case ISP_SET_FC_PARAM: 539 { 540 struct isp_fc_param *f = (struct isp_fc_param *) addr; 541 uint32_t param = f->parameter; 542 543 if (IS_SCSI(isp)) { 544 break; 545 } 546 f->parameter = 0; 547 if (strcmp(f->param_name, "framelength") == 0) { 548 if (param != 512 && param != 1024 && param != 2048) { 549 retval = EINVAL; 550 break; 551 } 552 FCPARAM(isp)->isp_maxfrmlen = param; 553 retval = 0; 554 break; 555 } 556 if (strcmp(f->param_name, "exec_throttle") == 0) { 557 if (param < 16 || param > 255) { 558 retval = EINVAL; 559 break; 560 } 561 FCPARAM(isp)->isp_execthrottle = param; 562 retval = 0; 563 break; 564 } 565 if (strcmp(f->param_name, "fullduplex") == 0) { 566 if (param != 0 && param != 1) { 567 retval = EINVAL; 568 break; 569 } 570 if (param) { 571 FCPARAM(isp)->isp_fwoptions |= 572 ICBOPT_FULL_DUPLEX; 573 } else { 574 FCPARAM(isp)->isp_fwoptions &= 575 ~ICBOPT_FULL_DUPLEX; 576 } 577 retval = 0; 578 break; 579 } 580 if (strcmp(f->param_name, "loopid") == 0) { 581 if (param < 0 || param > 125) { 582 retval = EINVAL; 583 break; 584 } 585 FCPARAM(isp)->isp_loopid = param; 586 retval = 0; 587 break; 588 } 589 retval = EINVAL; 590 break; 591 } 592 case ISP_TSK_MGMT: 593 { 594 int needmarker; 595 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; 596 uint16_t loopid; 597 mbreg_t mbs; 598 599 if (IS_SCSI(isp)) { 600 break; 601 } 602 603 memset(&mbs, 0, sizeof (mbs)); 604 needmarker = retval = 0; 605 loopid = fct->loopid; 606 if (FCPARAM(isp)->isp_2klogin == 0) { 607 loopid <<= 8; 608 } 609 switch (fct->action) { 610 case IPT_CLEAR_ACA: 611 mbs.param[0] = MBOX_CLEAR_ACA; 612 mbs.param[1] = loopid; 613 mbs.param[2] = fct->lun; 614 break; 615 case IPT_TARGET_RESET: 616 mbs.param[0] = MBOX_TARGET_RESET; 617 mbs.param[1] = loopid; 618 needmarker = 1; 619 break; 620 case IPT_LUN_RESET: 621 mbs.param[0] = MBOX_LUN_RESET; 622 mbs.param[1] = loopid; 623 mbs.param[2] = fct->lun; 624 needmarker = 1; 625 break; 626 case IPT_CLEAR_TASK_SET: 627 mbs.param[0] = MBOX_CLEAR_TASK_SET; 628 mbs.param[1] = loopid; 629 mbs.param[2] = fct->lun; 630 needmarker = 1; 631 break; 632 case IPT_ABORT_TASK_SET: 633 mbs.param[0] = MBOX_ABORT_TASK_SET; 634 mbs.param[1] = loopid; 635 mbs.param[2] = fct->lun; 636 needmarker = 1; 637 break; 638 default: 639 retval = EINVAL; 640 break; 641 } 642 if (retval == 0) { 643 ISP_LOCK(isp); 644 if (needmarker) { 645 isp->isp_sendmarker |= 1; 646 } 647 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); 648 ISP_UNLOCK(isp); 649 if (retval) 650 retval = EIO; 651 } 652 break; 653 } 654 default: 655 break; 656 } 657 return (retval); 658 } 659 660 #if __FreeBSD_version >= 500000 661 static void 662
isp_sysctl_update(ispsoftc_t *isp) 663 { 664 struct sysctl_ctx_list *ctx = 665 device_get_sysctl_ctx(isp->isp_osinfo.dev); 666 struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); 667 668 if (IS_SCSI(isp)) { 669 return; 670 } 671 672 snprintf(isp->isp_osinfo.sysctl_info.fc.wwnn, 673 sizeof (isp->isp_osinfo.sysctl_info.fc.wwnn), "0x%08x%08x", 674 (uint32_t) (ISP_NODEWWN(isp) >> 32), (uint32_t) ISP_NODEWWN(isp)); 675 676 snprintf(isp->isp_osinfo.sysctl_info.fc.wwpn, 677 sizeof (isp->isp_osinfo.sysctl_info.fc.wwpn), "0x%08x%08x", 678 (uint32_t) (ISP_PORTWWN(isp) >> 32), (uint32_t) ISP_PORTWWN(isp)); 679 680 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 681 "wwnn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwnn, 0, 682 "World Wide Node Name"); 683 684 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 685 "wwpn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwpn, 0, 686 "World Wide Port Name"); 687 688 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 689 "loop_down_limit", 690 CTLFLAG_RW, &isp->isp_osinfo.loop_down_limit, 0, 691 "How long to wait for loop to come back up"); 692 693 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 694 "gone_device_time", 695 CTLFLAG_RW, &isp->isp_osinfo.gone_device_time, 0, 696 "How long to wait for a device to reappear"); 697 } 698 #endif 699 700 static void 701 isp_intr_enable(void *arg) 702 { 703 ispsoftc_t *isp = arg; 704 if (isp->isp_role != ISP_ROLE_NONE) { 705 ISP_ENABLE_INTS(isp); 706 } 707 /* Release our hook so that the boot can continue. */ 708 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 709 } 710 711 /* 712 * Put the target mode functions here, because some are inlines 713 */ 714 715 #ifdef ISP_TARGET_MODE 716 717 static __inline int is_lun_enabled(ispsoftc_t *, int, lun_id_t); 718 static __inline int are_any_luns_enabled(ispsoftc_t *, int); 719 static __inline tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); 720 static __inline void rls_lun_statep(ispsoftc_t *, tstate_t *); 721 static __inline atio_private_data_t *isp_get_atpd(ispsoftc_t *, int); 722 static cam_status 723 create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); 724 static void destroy_lun_state(ispsoftc_t *, tstate_t *); 725 static int isp_en_lun(ispsoftc_t *, union ccb *); 726 static void isp_ledone(ispsoftc_t *, lun_entry_t *); 727 static cam_status isp_abort_tgt_ccb(ispsoftc_t *, union ccb *); 728 static timeout_t isp_refire_putback_atio; 729 static void isp_complete_ctio(union ccb *); 730 static void isp_target_putback_atio(union ccb *); 731 static void isp_target_start_ctio(ispsoftc_t *, union ccb *); 732 static int isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); 733 static int isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); 734 static int isp_handle_platform_ctio(ispsoftc_t *, void *); 735 static int isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *); 736 static int isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *); 737 738 static __inline int 739 is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun) 740 { 741 tstate_t *tptr; 742 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 743 if (tptr == NULL) { 744 return (0); 745 } 746 do { 747 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { 748 return (1); 749 } 750 } while ((tptr = tptr->next) != NULL); 751 return (0); 752 } 753 754 static __inline int 755 are_any_luns_enabled(ispsoftc_t *isp, int port) 756 { 757 int lo, hi; 758 if (IS_DUALBUS(isp)) { 759 lo = (port * (LUN_HASH_SIZE >> 1)); 760 hi = lo + 
(LUN_HASH_SIZE >> 1); 761 } else { 762 lo = 0; 763 hi = LUN_HASH_SIZE; 764 } 765 for (; lo < hi; lo++) { 766 if (isp->isp_osinfo.lun_hash[lo]) { 767 return (1); 768 } 769 } 770 return (0); 771 } 772 773 static __inline tstate_t * 774 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) 775 { 776 tstate_t *tptr = NULL; 777 778 if (lun == CAM_LUN_WILDCARD) { 779 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 780 tptr = &isp->isp_osinfo.tsdflt[bus]; 781 tptr->hold++; 782 return (tptr); 783 } 784 return (NULL); 785 } else { 786 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 787 if (tptr == NULL) { 788 return (NULL); 789 } 790 } 791 792 do { 793 if (tptr->lun == lun && tptr->bus == bus) { 794 tptr->hold++; 795 return (tptr); 796 } 797 } while ((tptr = tptr->next) != NULL); 798 return (tptr); 799 } 800 801 static __inline void 802 rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr) 803 { 804 if (tptr->hold) 805 tptr->hold--; 806 } 807 808 static __inline atio_private_data_t * 809 isp_get_atpd(ispsoftc_t *isp, int tag) 810 { 811 atio_private_data_t *atp; 812 for (atp = isp->isp_osinfo.atpdp; 813 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 814 if (atp->tag == tag) 815 return (atp); 816 } 817 return (NULL); 818 } 819 820 static cam_status 821 create_lun_state(ispsoftc_t *isp, int bus, 822 struct cam_path *path, tstate_t **rslt) 823 { 824 cam_status status; 825 lun_id_t lun; 826 int hfx; 827 tstate_t *tptr, *new; 828 829 lun = xpt_path_lun_id(path); 830 if (lun < 0) { 831 return (CAM_LUN_INVALID); 832 } 833 if (is_lun_enabled(isp, bus, lun)) { 834 return (CAM_LUN_ALRDY_ENA); 835 } 836 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 837 if (new == NULL) { 838 return (CAM_RESRC_UNAVAIL); 839 } 840 841 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 842 xpt_path_target_id(path), xpt_path_lun_id(path)); 843 if (status != CAM_REQ_CMP) { 844 free(new, M_DEVBUF); 845 return (status); 846 } 847 new->bus = bus; 848 new->lun = lun; 849 SLIST_INIT(&new->atios); 850 SLIST_INIT(&new->inots); 851 new->hold = 1; 852 853 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 854 tptr = isp->isp_osinfo.lun_hash[hfx]; 855 if (tptr == NULL) { 856 isp->isp_osinfo.lun_hash[hfx] = new; 857 } else { 858 while (tptr->next) 859 tptr = tptr->next; 860 tptr->next = new; 861 } 862 *rslt = new; 863 return (CAM_REQ_CMP); 864 } 865 866 static __inline void 867 destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr) 868 { 869 int hfx; 870 tstate_t *lw, *pw; 871 872 if (tptr->hold) { 873 return; 874 } 875 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 876 pw = isp->isp_osinfo.lun_hash[hfx]; 877 if (pw == NULL) { 878 return; 879 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 880 isp->isp_osinfo.lun_hash[hfx] = pw->next; 881 } else { 882 lw = pw; 883 pw = lw->next; 884 while (pw) { 885 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 886 lw->next = pw->next; 887 break; 888 } 889 lw = pw; 890 pw = pw->next; 891 } 892 if (pw == NULL) { 893 return; 894 } 895 } 896 free(tptr, M_DEVBUF); 897 } 898 899 /* 900 * Enable luns.
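 * The enable request may name a specific target/lun or the wildcard.
 * A wildcard enable only sets up the default (wildcard) lun state for
 * the bus; a specific enable allocates a tstate_t, links it into the
 * lun hash, reserves a slot in leact[], and issues an ENABLE LUN (or,
 * for SCC firmware and non-zero luns, MODIFY LUN) request to the
 * firmware. The request completes asynchronously via isp_ledone().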
901 */ 902 static int 903 isp_en_lun(ispsoftc_t *isp, union ccb *ccb) 904 { 905 struct ccb_en_lun *cel = &ccb->cel; 906 tstate_t *tptr; 907 uint32_t seq; 908 int bus, cmd, av, wildcard, tm_on; 909 lun_id_t lun; 910 target_id_t tgt; 911 912 bus = XS_CHANNEL(ccb); 913 if (bus > 1) { 914 xpt_print(ccb->ccb_h.path, "illegal bus %d\n", bus); 915 ccb->ccb_h.status = CAM_PATH_INVALID; 916 return (-1); 917 } 918 tgt = ccb->ccb_h.target_id; 919 lun = ccb->ccb_h.target_lun; 920 921 if (isp->isp_dblev & ISP_LOGTDEBUG0) { 922 xpt_print(ccb->ccb_h.path, "%sabling lun 0x%x on channel %d\n", 923 cel->enable? "en" : "dis", lun, bus); 924 } 925 926 if ((lun != CAM_LUN_WILDCARD) && 927 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 928 ccb->ccb_h.status = CAM_LUN_INVALID; 929 return (-1); 930 } 931 932 if (IS_SCSI(isp)) { 933 sdparam *sdp = isp->isp_param; 934 sdp += bus; 935 if (tgt != CAM_TARGET_WILDCARD && 936 tgt != sdp->isp_initiator_id) { 937 ccb->ccb_h.status = CAM_TID_INVALID; 938 return (-1); 939 } 940 } else { 941 /* 942 * There's really no point in doing this yet w/o multi-tid 943 * capability. Even then, it's problematic. 944 */ 945 #if 0 946 if (tgt != CAM_TARGET_WILDCARD && 947 tgt != FCPARAM(isp)->isp_iid) { 948 ccb->ccb_h.status = CAM_TID_INVALID; 949 return (-1); 950 } 951 #endif 952 /* 953 * This is as a good a place as any to check f/w capabilities. 954 */ 955 if (FCPARAM(isp)->isp_tmode == 0) { 956 xpt_print(ccb->ccb_h.path, 957 "firmware does not support target mode\n"); 958 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 959 return (-1); 960 } 961 /* 962 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 963 * XXX: dork with our already fragile enable/disable code. 964 */ 965 if (FCPARAM(isp)->isp_sccfw == 0) { 966 xpt_print(ccb->ccb_h.path, 967 "firmware not SCCLUN capable\n"); 968 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 969 return (-1); 970 } 971 } 972 973 if (tgt == CAM_TARGET_WILDCARD) { 974 if (lun == CAM_LUN_WILDCARD) { 975 wildcard = 1; 976 } else { 977 ccb->ccb_h.status = CAM_LUN_INVALID; 978 return (-1); 979 } 980 } else { 981 wildcard = 0; 982 } 983 984 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0; 985 986 /* 987 * Next check to see whether this is a target/lun wildcard action. 988 * 989 * If so, we know that we can accept commands for luns that haven't 990 * been enabled yet and send them upstream. Otherwise, we have to 991 * handle them locally (if we see them at all). 992 */ 993 994 if (wildcard) { 995 tptr = &isp->isp_osinfo.tsdflt[bus]; 996 if (cel->enable) { 997 if (tm_on) { 998 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 999 return (-1); 1000 } 1001 ccb->ccb_h.status = 1002 xpt_create_path(&tptr->owner, NULL, 1003 xpt_path_path_id(ccb->ccb_h.path), 1004 xpt_path_target_id(ccb->ccb_h.path), 1005 xpt_path_lun_id(ccb->ccb_h.path)); 1006 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1007 return (-1); 1008 } 1009 SLIST_INIT(&tptr->atios); 1010 SLIST_INIT(&tptr->inots); 1011 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 1012 } else { 1013 if (tm_on == 0) { 1014 ccb->ccb_h.status = CAM_REQ_CMP; 1015 return (-1); 1016 } 1017 if (tptr->hold) { 1018 ccb->ccb_h.status = CAM_SCSI_BUSY; 1019 return (-1); 1020 } 1021 xpt_free_path(tptr->owner); 1022 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 1023 } 1024 } 1025 1026 /* 1027 * Now check to see whether this bus needs to be 1028 * enabled/disabled with respect to target mode. 
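 * The argument passed to ISPCTL_TOGGLE_TMODE packs the bus number
 * into the top bit, with ENABLE_TARGET_FLAG or'd in when target mode
 * is being turned on rather than off.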
1029 */ 1030 av = bus << 31; 1031 if (cel->enable && tm_on == 0) { 1032 av |= ENABLE_TARGET_FLAG; 1033 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1034 if (av) { 1035 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1036 if (wildcard) { 1037 isp->isp_osinfo.tmflags[bus] &= 1038 ~TM_WILDCARD_ENABLED; 1039 xpt_free_path(tptr->owner); 1040 } 1041 return (-1); 1042 } 1043 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 1044 xpt_print(ccb->ccb_h.path, "Target Mode Enabled\n"); 1045 } else if (cel->enable == 0 && tm_on && wildcard) { 1046 if (are_any_luns_enabled(isp, bus)) { 1047 ccb->ccb_h.status = CAM_SCSI_BUSY; 1048 return (-1); 1049 } 1050 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1051 if (av) { 1052 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1053 return (-1); 1054 } 1055 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1056 xpt_print(ccb->ccb_h.path, "Target Mode Disabled\n"); 1057 } 1058 1059 if (wildcard) { 1060 ccb->ccb_h.status = CAM_REQ_CMP; 1061 return (-1); 1062 } 1063 1064 /* 1065 * Find an empty slot 1066 */ 1067 for (seq = 0; seq < NLEACT; seq++) { 1068 if (isp->isp_osinfo.leact[seq] == 0) { 1069 break; 1070 } 1071 } 1072 if (seq >= NLEACT) { 1073 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1074 return (-1); 1075 1076 } 1077 isp->isp_osinfo.leact[seq] = ccb; 1078 1079 if (cel->enable) { 1080 ccb->ccb_h.status = 1081 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 1082 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1083 isp->isp_osinfo.leact[seq] = 0; 1084 return (-1); 1085 } 1086 } else { 1087 tptr = get_lun_statep(isp, bus, lun); 1088 if (tptr == NULL) { 1089 ccb->ccb_h.status = CAM_LUN_INVALID; 1090 return (-1); 1091 } 1092 } 1093 1094 if (cel->enable) { 1095 int c, n, ulun = lun; 1096 1097 cmd = RQSTYPE_ENABLE_LUN; 1098 c = DFLT_CMND_CNT; 1099 n = DFLT_INOT_CNT; 1100 if (IS_FC(isp) && lun != 0) { 1101 cmd = RQSTYPE_MODIFY_LUN; 1102 n = 0; 1103 /* 1104 * For SCC firmware, we only deal with setting 1105 * (enabling or modifying) lun 0. 1106 */ 1107 ulun = 0; 1108 } 1109 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1110 rls_lun_statep(isp, tptr); 1111 ccb->ccb_h.status = CAM_REQ_INPROG; 1112 return (seq); 1113 } 1114 } else { 1115 int c, n, ulun = lun; 1116 1117 cmd = -RQSTYPE_MODIFY_LUN; 1118 c = DFLT_CMND_CNT; 1119 n = DFLT_INOT_CNT; 1120 if (IS_FC(isp) && lun != 0) { 1121 n = 0; 1122 /* 1123 * For SCC firmware, we only deal with setting 1124 * (enabling or modifying) lun 0. 
1125 */ 1126 ulun = 0; 1127 } 1128 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1129 rls_lun_statep(isp, tptr); 1130 ccb->ccb_h.status = CAM_REQ_INPROG; 1131 return (seq); 1132 } 1133 } 1134 rls_lun_statep(isp, tptr); 1135 xpt_print(ccb->ccb_h.path, "isp_lun_cmd failed\n"); 1136 isp->isp_osinfo.leact[seq] = 0; 1137 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1138 return (-1); 1139 } 1140 1141 static void 1142 isp_ledone(ispsoftc_t *isp, lun_entry_t *lep) 1143 { 1144 const char lfmt[] = "now %sabled for target mode\n"; 1145 union ccb *ccb; 1146 uint32_t seq; 1147 tstate_t *tptr; 1148 int av; 1149 struct ccb_en_lun *cel; 1150 1151 seq = lep->le_reserved - 1; 1152 if (seq >= NLEACT) { 1153 isp_prt(isp, ISP_LOGERR, 1154 "seq out of range (%u) in isp_ledone", seq); 1155 return; 1156 } 1157 ccb = isp->isp_osinfo.leact[seq]; 1158 if (ccb == 0) { 1159 isp_prt(isp, ISP_LOGERR, 1160 "no ccb for seq %u in isp_ledone", seq); 1161 return; 1162 } 1163 cel = &ccb->cel; 1164 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); 1165 if (tptr == NULL) { 1166 xpt_print(ccb->ccb_h.path, "null tptr in isp_ledone\n"); 1167 isp->isp_osinfo.leact[seq] = 0; 1168 return; 1169 } 1170 1171 if (lep->le_status != LUN_OK) { 1172 xpt_print(ccb->ccb_h.path, 1173 "ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status); 1174 err: 1175 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1176 rls_lun_statep(isp, tptr); 1177 isp->isp_osinfo.leact[seq] = 0; 1178 ISPLOCK_2_CAMLOCK(isp); 1179 xpt_done(ccb); 1180 CAMLOCK_2_ISPLOCK(isp); 1181 return; 1182 } else { 1183 isp_prt(isp, ISP_LOGTDEBUG0, 1184 "isp_ledone: ENABLE/MODIFY done okay"); 1185 } 1186 1187 1188 if (cel->enable) { 1189 ccb->ccb_h.status = CAM_REQ_CMP; 1190 xpt_print(ccb->ccb_h.path, lfmt, "en"); 1191 rls_lun_statep(isp, tptr); 1192 isp->isp_osinfo.leact[seq] = 0; 1193 ISPLOCK_2_CAMLOCK(isp); 1194 xpt_done(ccb); 1195 CAMLOCK_2_ISPLOCK(isp); 1196 return; 1197 } 1198 1199 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) { 1200 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb), 1201 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) { 1202 xpt_print(ccb->ccb_h.path, 1203 "isp_ledone: isp_lun_cmd failed\n"); 1204 goto err; 1205 } 1206 rls_lun_statep(isp, tptr); 1207 return; 1208 } 1209 1210 xpt_print(ccb->ccb_h.path, lfmt, "dis"); 1211 rls_lun_statep(isp, tptr); 1212 destroy_lun_state(isp, tptr); 1213 ccb->ccb_h.status = CAM_REQ_CMP; 1214 isp->isp_osinfo.leact[seq] = 0; 1215 ISPLOCK_2_CAMLOCK(isp); 1216 xpt_done(ccb); 1217 CAMLOCK_2_ISPLOCK(isp); 1218 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) { 1219 int bus = XS_CHANNEL(ccb); 1220 av = bus << 31; 1221 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1222 if (av) { 1223 isp_prt(isp, ISP_LOGWARN, 1224 "disable target mode on channel %d failed", bus); 1225 } 1226 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1227 } 1228 } 1229 1230 1231 static cam_status 1232 isp_abort_tgt_ccb(ispsoftc_t *isp, union ccb *ccb) 1233 { 1234 tstate_t *tptr; 1235 struct ccb_hdr_slist *lp; 1236 struct ccb_hdr *curelm; 1237 int found, *ctr; 1238 union ccb *accb = ccb->cab.abort_ccb; 1239 1240 xpt_print(ccb->ccb_h.path, "aborting ccb %p\n", accb); 1241 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 1242 int badpath = 0; 1243 if (IS_FC(isp) && (accb->ccb_h.target_id != 1244 ((fcparam *) isp->isp_param)->isp_loopid)) { 1245 badpath = 1; 1246 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 1247 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 1248 badpath = 1; 1249 } 1250 if (badpath) { 1251 /* 1252 * Being restrictive 
about target ids is really about 1253 * making sure we're aborting for the right multi-tid 1254 * path. This doesn't really make much sense at present. 1255 */ 1256 #if 0 1257 return (CAM_PATH_INVALID); 1258 #endif 1259 } 1260 } 1261 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 1262 if (tptr == NULL) { 1263 xpt_print(ccb->ccb_h.path, "can't get statep\n"); 1264 return (CAM_PATH_INVALID); 1265 } 1266 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1267 lp = &tptr->atios; 1268 ctr = &tptr->atio_count; 1269 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 1270 lp = &tptr->inots; 1271 ctr = &tptr->inot_count; 1272 } else { 1273 rls_lun_statep(isp, tptr); 1274 xpt_print(ccb->ccb_h.path, "bad function code %d\n", 1275 accb->ccb_h.func_code); 1276 return (CAM_UA_ABORT); 1277 } 1278 curelm = SLIST_FIRST(lp); 1279 found = 0; 1280 if (curelm == &accb->ccb_h) { 1281 found = 1; 1282 SLIST_REMOVE_HEAD(lp, sim_links.sle); 1283 } else { 1284 while(curelm != NULL) { 1285 struct ccb_hdr *nextelm; 1286 1287 nextelm = SLIST_NEXT(curelm, sim_links.sle); 1288 if (nextelm == &accb->ccb_h) { 1289 found = 1; 1290 SLIST_NEXT(curelm, sim_links.sle) = 1291 SLIST_NEXT(nextelm, sim_links.sle); 1292 break; 1293 } 1294 curelm = nextelm; 1295 } 1296 } 1297 rls_lun_statep(isp, tptr); 1298 if (found) { 1299 (*ctr)--; 1300 accb->ccb_h.status = CAM_REQ_ABORTED; 1301 xpt_done(accb); 1302 return (CAM_REQ_CMP); 1303 } 1304 xpt_print(ccb->ccb_h.path, "ccb %p not found\n", accb); 1305 return (CAM_PATH_INVALID); 1306 } 1307 1308 static void 1309 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb) 1310 { 1311 void *qe; 1312 struct ccb_scsiio *cso = &ccb->csio; 1313 uint32_t nxti, optr, handle; 1314 uint8_t local[QENTRY_LEN]; 1315 1316 1317 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1318 xpt_print(ccb->ccb_h.path, 1319 "Request Queue Overflow in isp_target_start_ctio\n"); 1320 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1321 goto out; 1322 } 1323 memset(local, 0, QENTRY_LEN); 1324 1325 /* 1326 * We're either moving data or completing a command here. 
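 * For Fibre Channel this builds a CTIO2: mode 1 when there is no data
 * to move (status and, possibly, sense only), or mode 0 when data is
 * being transferred (with status optionally appended). The residual
 * is computed from the original ATIO2 data length and the running
 * byte count kept in the atio_private_data_t adjunct.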
1327 */ 1328 1329 if (IS_FC(isp)) { 1330 atio_private_data_t *atp; 1331 ct2_entry_t *cto = (ct2_entry_t *) local; 1332 1333 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1334 cto->ct_header.rqs_entry_count = 1; 1335 if (FCPARAM(isp)->isp_2klogin) { 1336 ((ct2e_entry_t *)cto)->ct_iid = cso->init_id; 1337 } else { 1338 cto->ct_iid = cso->init_id; 1339 if (FCPARAM(isp)->isp_sccfw == 0) { 1340 cto->ct_lun = ccb->ccb_h.target_lun; 1341 } 1342 } 1343 1344 atp = isp_get_atpd(isp, cso->tag_id); 1345 if (atp == NULL) { 1346 xpt_print(ccb->ccb_h.path, 1347 "cannot find private data adjunct for tag %x\n", 1348 cso->tag_id); 1349 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 1350 goto out; 1351 } 1352 1353 cto->ct_rxid = cso->tag_id; 1354 if (cso->dxfer_len == 0) { 1355 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1356 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1357 cto->ct_flags |= CT2_SENDSTATUS; 1358 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1359 cto->ct_resid = 1360 atp->orig_datalen - atp->bytes_xfered; 1361 if (cto->ct_resid < 0) { 1362 cto->rsp.m1.ct_scsi_status |= 1363 CT2_DATA_OVER; 1364 } else if (cto->ct_resid > 0) { 1365 cto->rsp.m1.ct_scsi_status |= 1366 CT2_DATA_UNDER; 1367 } 1368 } 1369 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1370 int m = min(cso->sense_len, MAXRESPLEN); 1371 memcpy(cto->rsp.m1.ct_resp, 1372 &cso->sense_data, m); 1373 cto->rsp.m1.ct_senselen = m; 1374 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1375 } 1376 } else { 1377 cto->ct_flags |= CT2_FLAG_MODE0; 1378 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1379 cto->ct_flags |= CT2_DATA_IN; 1380 } else { 1381 cto->ct_flags |= CT2_DATA_OUT; 1382 } 1383 cto->ct_reloff = atp->bytes_xfered; 1384 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1385 cto->ct_flags |= CT2_SENDSTATUS; 1386 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1387 cto->ct_resid = 1388 atp->orig_datalen - 1389 (atp->bytes_xfered + cso->dxfer_len); 1390 if (cto->ct_resid < 0) { 1391 cto->rsp.m0.ct_scsi_status |= 1392 CT2_DATA_OVER; 1393 } else if (cto->ct_resid > 0) { 1394 cto->rsp.m0.ct_scsi_status |= 1395 CT2_DATA_UNDER; 1396 } 1397 } else { 1398 atp->last_xframt = cso->dxfer_len; 1399 } 1400 /* 1401 * If we're sending data and status back together, 1402 * we can't also send back sense data as well. 
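 * That is why CAM_SEND_SENSE is cleared below: a mode 0 CTIO2 has no
 * room for sense bytes, so any sense the caller attached is dropped.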
1403 */ 1404 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1405 } 1406 1407 if (cto->ct_flags & CT2_SENDSTATUS) { 1408 isp_prt(isp, ISP_LOGTDEBUG0, 1409 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1410 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1411 cso->dxfer_len, cto->ct_resid); 1412 cto->ct_flags |= CT2_CCINCR; 1413 atp->state = ATPD_STATE_LAST_CTIO; 1414 } else { 1415 atp->state = ATPD_STATE_CTIO; 1416 } 1417 cto->ct_timeout = 10; 1418 } else { 1419 ct_entry_t *cto = (ct_entry_t *) local; 1420 1421 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1422 cto->ct_header.rqs_entry_count = 1; 1423 cto->ct_iid = cso->init_id; 1424 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1425 cto->ct_tgt = ccb->ccb_h.target_id; 1426 cto->ct_lun = ccb->ccb_h.target_lun; 1427 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1428 if (AT_HAS_TAG(cso->tag_id)) { 1429 cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id); 1430 cto->ct_flags |= CT_TQAE; 1431 } 1432 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1433 cto->ct_flags |= CT_NODISC; 1434 } 1435 if (cso->dxfer_len == 0) { 1436 cto->ct_flags |= CT_NO_DATA; 1437 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1438 cto->ct_flags |= CT_DATA_IN; 1439 } else { 1440 cto->ct_flags |= CT_DATA_OUT; 1441 } 1442 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1443 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1444 cto->ct_scsi_status = cso->scsi_status; 1445 cto->ct_resid = cso->resid; 1446 isp_prt(isp, ISP_LOGTDEBUG0, 1447 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1448 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1449 cso->tag_id); 1450 } 1451 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1452 cto->ct_timeout = 10; 1453 } 1454 1455 if (isp_save_xs_tgt(isp, ccb, &handle)) { 1456 xpt_print(ccb->ccb_h.path, 1457 "No XFLIST pointers for isp_target_start_ctio\n"); 1458 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1459 goto out; 1460 } 1461 1462 1463 /* 1464 * Call the dma setup routines for this entry (and any subsequent 1465 * CTIOs) if there's data to move, and then tell the f/w it's got 1466 * new things to play with. As with isp_start's usage of DMA setup, 1467 * any swizzling is done in the machine dependent layer. Because 1468 * of this, we put the request onto the queue area first in native 1469 * format. 
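 * The syshandle stored in the CTIO below is what lets
 * isp_handle_platform_ctio() find this CCB again when the firmware
 * completes the exchange; a CMD_EAGAIN result from DMA setup marks
 * the CCB for requeue, while other failures fall through to
 * completion below.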
1470 */ 1471 1472 if (IS_FC(isp)) { 1473 ct2_entry_t *cto = (ct2_entry_t *) local; 1474 cto->ct_syshandle = handle; 1475 } else { 1476 ct_entry_t *cto = (ct_entry_t *) local; 1477 cto->ct_syshandle = handle; 1478 } 1479 1480 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1481 case CMD_QUEUED: 1482 ISP_ADD_REQUEST(isp, nxti); 1483 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1484 return; 1485 1486 case CMD_EAGAIN: 1487 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1488 break; 1489 1490 default: 1491 break; 1492 } 1493 isp_destroy_tgt_handle(isp, handle); 1494 1495 out: 1496 ISPLOCK_2_CAMLOCK(isp); 1497 xpt_done(ccb); 1498 CAMLOCK_2_ISPLOCK(isp); 1499 } 1500 1501 static void 1502 isp_refire_putback_atio(void *arg) 1503 { 1504 int s = splcam(); 1505 isp_target_putback_atio(arg); 1506 splx(s); 1507 } 1508 1509 static void 1510 isp_target_putback_atio(union ccb *ccb) 1511 { 1512 ispsoftc_t *isp; 1513 struct ccb_scsiio *cso; 1514 uint32_t nxti, optr; 1515 void *qe; 1516 1517 isp = XS_ISP(ccb); 1518 1519 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1520 xpt_print(ccb->ccb_h.path, 1521 "isp_target_putback_atio: Request Queue Overflow\n"); 1522 (void) timeout(isp_refire_putback_atio, ccb, 10); 1523 return; 1524 } 1525 memset(qe, 0, QENTRY_LEN); 1526 cso = &ccb->csio; 1527 if (IS_FC(isp)) { 1528 at2_entry_t local, *at = &local; 1529 MEMZERO(at, sizeof (at2_entry_t)); 1530 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1531 at->at_header.rqs_entry_count = 1; 1532 if (FCPARAM(isp)->isp_sccfw) { 1533 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1534 } else { 1535 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1536 } 1537 at->at_status = CT_OK; 1538 at->at_rxid = cso->tag_id; 1539 at->at_iid = cso->ccb_h.target_id; 1540 isp_put_atio2(isp, at, qe); 1541 } else { 1542 at_entry_t local, *at = &local; 1543 MEMZERO(at, sizeof (at_entry_t)); 1544 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1545 at->at_header.rqs_entry_count = 1; 1546 at->at_iid = cso->init_id; 1547 at->at_iid |= XS_CHANNEL(ccb) << 7; 1548 at->at_tgt = cso->ccb_h.target_id; 1549 at->at_lun = cso->ccb_h.target_lun; 1550 at->at_status = CT_OK; 1551 at->at_tag_val = AT_GET_TAG(cso->tag_id); 1552 at->at_handle = AT_GET_HANDLE(cso->tag_id); 1553 isp_put_atio(isp, at, qe); 1554 } 1555 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); 1556 ISP_ADD_REQUEST(isp, nxti); 1557 isp_complete_ctio(ccb); 1558 } 1559 1560 static void 1561 isp_complete_ctio(union ccb *ccb) 1562 { 1563 ISPLOCK_2_CAMLOCK(isp); 1564 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1565 ccb->ccb_h.status |= CAM_REQ_CMP; 1566 } 1567 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1568 xpt_done(ccb); 1569 CAMLOCK_2_ISPLOCK(isp); 1570 } 1571 1572 /* 1573 * Handle ATIO stuff that the generic code can't. 1574 * This means handling CDBs. 1575 */ 1576 1577 static int 1578 isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep) 1579 { 1580 tstate_t *tptr; 1581 int status, bus, iswildcard; 1582 struct ccb_accept_tio *atiop; 1583 1584 /* 1585 * The firmware status (except for the QLTM_SVALID bit) 1586 * indicates why this ATIO was sent to us. 1587 * 1588 * If QLTM_SVALID is set, the firmware has recommended Sense Data. 1589 * 1590 * If the DISCONNECTS DISABLED bit is set in the flags field, 1591 * we're still connected on the SCSI bus. 1592 */ 1593 status = aep->at_status; 1594 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 1595 /* 1596 * Bus Phase Sequence error. We should have sense data 1597 * suggested by the f/w.
I'm not sure quite yet what 1598 * to do about this for CAM. 1599 */ 1600 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1601 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1602 return (0); 1603 } 1604 if ((status & ~QLTM_SVALID) != AT_CDB) { 1605 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1606 status); 1607 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1608 return (0); 1609 } 1610 1611 bus = GET_BUS_VAL(aep->at_iid); 1612 tptr = get_lun_statep(isp, bus, aep->at_lun); 1613 if (tptr == NULL) { 1614 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1615 if (tptr == NULL) { 1616 /* 1617 * Because we can't autofeed sense data back with 1618 * a command for parallel SCSI, we can't give back 1619 * a CHECK CONDITION. We'll give back a BUSY status 1620 * instead. This works out okay because the only 1621 * time we should, in fact, get this, is in the 1622 * case that somebody configured us without the 1623 * blackhole driver, so they get what they deserve. 1624 */ 1625 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1626 return (0); 1627 } 1628 iswildcard = 1; 1629 } else { 1630 iswildcard = 0; 1631 } 1632 1633 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1634 if (atiop == NULL) { 1635 /* 1636 * Because we can't autofeed sense data back with 1637 * a command for parallel SCSI, we can't give back 1638 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1639 * instead. This works out okay because the only time we 1640 * should, in fact, get this, is in the case that we've 1641 * run out of ATIOS. 1642 */ 1643 xpt_print(tptr->owner, 1644 "no ATIOS for lun %d from initiator %d on channel %d\n", 1645 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1646 if (aep->at_flags & AT_TQAE) 1647 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1648 else 1649 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1650 rls_lun_statep(isp, tptr); 1651 return (0); 1652 } 1653 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1654 tptr->atio_count--; 1655 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1656 aep->at_lun, tptr->atio_count); 1657 if (iswildcard) { 1658 atiop->ccb_h.target_id = aep->at_tgt; 1659 atiop->ccb_h.target_lun = aep->at_lun; 1660 } 1661 if (aep->at_flags & AT_NODISC) { 1662 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1663 } else { 1664 atiop->ccb_h.flags = 0; 1665 } 1666 1667 if (status & QLTM_SVALID) { 1668 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1669 atiop->sense_len = amt; 1670 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1671 } else { 1672 atiop->sense_len = 0; 1673 } 1674 1675 atiop->init_id = GET_IID_VAL(aep->at_iid); 1676 atiop->cdb_len = aep->at_cdblen; 1677 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1678 atiop->ccb_h.status = CAM_CDB_RECVD; 1679 /* 1680 * Construct a tag 'id' based upon tag value (which may be 0..255) 1681 * and the handle (which we have to preserve). 1682 */ 1683 AT_MAKE_TAGID(atiop->tag_id, bus, device_get_unit(isp->isp_dev), aep); 1684 if (aep->at_flags & AT_TQAE) { 1685 atiop->tag_action = aep->at_tag_type; 1686 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1687 } 1688 xpt_done((union ccb*)atiop); 1689 isp_prt(isp, ISP_LOGTDEBUG0, 1690 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", 1691 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), 1692 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, 1693 aep->at_tag_type, (aep->at_flags & AT_NODISC)? 
1694 "nondisc" : "disconnecting"); 1695 rls_lun_statep(isp, tptr); 1696 return (0); 1697 } 1698 1699 static int 1700 isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) 1701 { 1702 lun_id_t lun; 1703 tstate_t *tptr; 1704 struct ccb_accept_tio *atiop; 1705 atio_private_data_t *atp; 1706 1707 /* 1708 * The firmware status (except for the QLTM_SVALID bit) 1709 * indicates why this ATIO was sent to us. 1710 * 1711 * If QLTM_SVALID is set, the firmware has recommended Sense Data. 1712 */ 1713 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1714 isp_prt(isp, ISP_LOGWARN, 1715 "bogus atio (0x%x) leaked to platform", aep->at_status); 1716 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1717 return (0); 1718 } 1719 1720 if (FCPARAM(isp)->isp_sccfw) { 1721 lun = aep->at_scclun; 1722 } else { 1723 lun = aep->at_lun; 1724 } 1725 tptr = get_lun_statep(isp, 0, lun); 1726 if (tptr == NULL) { 1727 isp_prt(isp, ISP_LOGTDEBUG0, 1728 "[0x%x] no state pointer for lun %d", aep->at_rxid, lun); 1729 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1730 if (tptr == NULL) { 1731 isp_endcmd(isp, aep, 1732 SCSI_STATUS_CHECK_COND | ECMD_SVALID | 1733 (0x5 << 12) | (0x25 << 16), 0); 1734 return (0); 1735 } 1736 } 1737 1738 atp = isp_get_atpd(isp, 0); 1739 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1740 if (atiop == NULL || atp == NULL) { 1741 1742 /* 1743 * Because we can't autofeed sense data back with 1744 * a command for parallel SCSI, we can't give back 1745 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1746 * instead. This works out okay because the only time we 1747 * should, in fact, get this, is in the case that we've 1748 * run out of ATIOS. 1749 */ 1750 xpt_print(tptr->owner, 1751 "no %s for lun %d from initiator %d\n", 1752 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : 1753 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid); 1754 rls_lun_statep(isp, tptr); 1755 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1756 return (0); 1757 } 1758 atp->state = ATPD_STATE_ATIO; 1759 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1760 tptr->atio_count--; 1761 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1762 lun, tptr->atio_count); 1763 1764 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1765 atiop->ccb_h.target_id = FCPARAM(isp)->isp_loopid; 1766 atiop->ccb_h.target_lun = lun; 1767 } 1768 /* 1769 * We don't get 'suggested' sense data as we do with SCSI cards. 1770 */ 1771 atiop->sense_len = 0; 1772 1773 atiop->init_id = aep->at_iid; 1774 atiop->cdb_len = ATIO2_CDBLEN; 1775 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1776 atiop->ccb_h.status = CAM_CDB_RECVD; 1777 atiop->tag_id = aep->at_rxid; 1778 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1779 case ATIO2_TC_ATTR_SIMPLEQ: 1780 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1781 break; 1782 case ATIO2_TC_ATTR_HEADOFQ: 1783 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1784 break; 1785 case ATIO2_TC_ATTR_ORDERED: 1786 atiop->tag_action = MSG_ORDERED_Q_TAG; 1787 break; 1788 case ATIO2_TC_ATTR_ACAQ: /* ??
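 * ACA (auto contingent allegiance) queuing is not mapped to a CAM
 * tag action here; it is handled the same as an untagged command.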
*/ 1789 case ATIO2_TC_ATTR_UNTAGGED: 1790 default: 1791 atiop->tag_action = 0; 1792 break; 1793 } 1794 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 1795 1796 atp->tag = atiop->tag_id; 1797 atp->lun = lun; 1798 atp->orig_datalen = aep->at_datalen; 1799 atp->last_xframt = 0; 1800 atp->bytes_xfered = 0; 1801 atp->state = ATPD_STATE_CAM; 1802 ISPLOCK_2_CAMLOCK(isp); 1803 xpt_done((union ccb*)atiop); 1804 1805 isp_prt(isp, ISP_LOGTDEBUG0, 1806 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u", 1807 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid, 1808 lun, aep->at_taskflags, aep->at_datalen); 1809 rls_lun_statep(isp, tptr); 1810 return (0); 1811 } 1812 1813 static int 1814 isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) 1815 { 1816 union ccb *ccb; 1817 int sentstatus, ok, notify_cam, resid = 0; 1818 uint16_t tval; 1819 1820 /* 1821 * CTIO and CTIO2 are close enough.... 1822 */ 1823 1824 ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle); 1825 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio")); 1826 isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle); 1827 1828 if (IS_FC(isp)) { 1829 ct2_entry_t *ct = arg; 1830 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid); 1831 if (atp == NULL) { 1832 isp_prt(isp, ISP_LOGERR, 1833 "cannot find adjunct for %x after I/O", 1834 ct->ct_rxid); 1835 return (0); 1836 } 1837 sentstatus = ct->ct_flags & CT2_SENDSTATUS; 1838 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1839 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { 1840 ccb->ccb_h.status |= CAM_SENT_SENSE; 1841 } 1842 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1843 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { 1844 resid = ct->ct_resid; 1845 atp->bytes_xfered += (atp->last_xframt - resid); 1846 atp->last_xframt = 0; 1847 } 1848 if (sentstatus || !ok) { 1849 atp->tag = 0; 1850 } 1851 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, 1852 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", 1853 ct->ct_rxid, ct->ct_status, ct->ct_flags, 1854 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, 1855 resid, sentstatus? "FIN" : "MID"); 1856 tval = ct->ct_rxid; 1857 1858 /* XXX: should really come after isp_complete_ctio */ 1859 atp->state = ATPD_STATE_PDON; 1860 } else { 1861 ct_entry_t *ct = arg; 1862 sentstatus = ct->ct_flags & CT_SENDSTATUS; 1863 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1864 /* 1865 * We *ought* to be able to get back to the original ATIO 1866 * here, but for some reason this gets lost. It's just as 1867 * well because it's squirrelled away as part of periph 1868 * private data. 1869 * 1870 * We can live without it as long as we continue to use 1871 * the auto-replenish feature for CTIOs. 1872 */ 1873 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1874 if (ct->ct_status & QLTM_SVALID) { 1875 char *sp = (char *)ct; 1876 sp += CTIO_SENSE_OFFSET; 1877 ccb->csio.sense_len = 1878 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); 1879 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); 1880 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1881 } 1882 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 1883 resid = ct->ct_resid; 1884 } 1885 isp_prt(isp, ISP_LOGTDEBUG0, 1886 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", 1887 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, 1888 ct->ct_status, ct->ct_flags, resid, 1889 sentstatus?
"FIN" : "MID"); 1890 tval = ct->ct_fwhandle; 1891 } 1892 ccb->csio.resid += resid; 1893 1894 /* 1895 * We're here either because intermediate data transfers are done 1896 * and/or the final status CTIO (which may have joined with a 1897 * Data Transfer) is done. 1898 * 1899 * In any case, for this platform, the upper layers figure out 1900 * what to do next, so all we do here is collect status and 1901 * pass information along. Any DMA handles have already been 1902 * freed. 1903 */ 1904 if (notify_cam == 0) { 1905 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1906 return (0); 1907 } 1908 1909 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1910 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1911 1912 if (!ok) { 1913 isp_target_putback_atio(ccb); 1914 } else { 1915 isp_complete_ctio(ccb); 1916 1917 } 1918 return (0); 1919 } 1920 1921 static int 1922 isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inp) 1923 { 1924 return (0); /* XXXX */ 1925 } 1926 1927 static int 1928 isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp) 1929 { 1930 1931 switch (inp->in_status) { 1932 case IN_PORT_LOGOUT: 1933 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1934 inp->in_iid); 1935 break; 1936 case IN_PORT_CHANGED: 1937 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1938 inp->in_iid); 1939 break; 1940 case IN_GLOBAL_LOGO: 1941 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1942 break; 1943 case IN_ABORT_TASK: 1944 { 1945 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1946 struct ccb_immed_notify *inot = NULL; 1947 1948 if (atp) { 1949 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1950 if (tptr) { 1951 inot = (struct ccb_immed_notify *) 1952 SLIST_FIRST(&tptr->inots); 1953 if (inot) { 1954 tptr->inot_count--; 1955 SLIST_REMOVE_HEAD(&tptr->inots, 1956 sim_links.sle); 1957 isp_prt(isp, ISP_LOGTDEBUG0, 1958 "Take FREE INOT count now %d", 1959 tptr->inot_count); 1960 } 1961 } 1962 isp_prt(isp, ISP_LOGWARN, 1963 "abort task RX_ID %x IID %d state %d", 1964 inp->in_seqid, inp->in_iid, atp->state); 1965 } else { 1966 isp_prt(isp, ISP_LOGWARN, 1967 "abort task RX_ID %x from iid %d, state unknown", 1968 inp->in_seqid, inp->in_iid); 1969 } 1970 if (inot) { 1971 inot->initiator_id = inp->in_iid; 1972 inot->sense_len = 0; 1973 inot->message_args[0] = MSG_ABORT_TAG; 1974 inot->message_args[1] = inp->in_seqid & 0xff; 1975 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; 1976 inot->ccb_h.status = CAM_MESSAGE_RECV; 1977 xpt_done((union ccb *)inot); 1978 } 1979 break; 1980 } 1981 default: 1982 break; 1983 } 1984 return (0); 1985 } 1986 #endif 1987 1988 static void 1989 isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) 1990 { 1991 struct cam_sim *sim; 1992 ispsoftc_t *isp; 1993 1994 sim = (struct cam_sim *)cbarg; 1995 isp = (ispsoftc_t *) cam_sim_softc(sim); 1996 switch (code) { 1997 case AC_LOST_DEVICE: 1998 if (IS_SCSI(isp)) { 1999 uint16_t oflags, nflags; 2000 sdparam *sdp = isp->isp_param; 2001 int tgt; 2002 2003 tgt = xpt_path_target_id(path); 2004 if (tgt >= 0) { 2005 sdp += cam_sim_bus(sim); 2006 ISP_LOCK(isp); 2007 nflags = sdp->isp_devparam[tgt].nvrm_flags; 2008 #ifndef ISP_TARGET_MODE 2009 nflags &= DPARM_SAFE_DFLT; 2010 if (isp->isp_loaded_fw) { 2011 nflags |= DPARM_NARROW | DPARM_ASYNC; 2012 } 2013 #else 2014 nflags = DPARM_DEFAULT; 2015 #endif 2016 oflags = sdp->isp_devparam[tgt].goal_flags; 2017 sdp->isp_devparam[tgt].goal_flags = nflags; 2018 sdp->isp_devparam[tgt].dev_update = 1; 2019 isp->isp_update |= (1 << 
cam_sim_bus(sim));
2020 (void) isp_control(isp,
2021 ISPCTL_UPDATE_PARAMS, NULL);
2022 sdp->isp_devparam[tgt].goal_flags = oflags;
2023 ISP_UNLOCK(isp);
2024 }
2025 }
2026 break;
2027 default:
2028 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
2029 break;
2030 }
2031 }
2032
2033 static void
2034 isp_poll(struct cam_sim *sim)
2035 {
2036 ispsoftc_t *isp = cam_sim_softc(sim);
2037 uint32_t isr;
2038 uint16_t sema, mbox;
2039
2040 ISP_LOCK(isp);
2041 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
2042 isp_intr(isp, isr, sema, mbox);
2043 }
2044 ISP_UNLOCK(isp);
2045 }
2046
2047
2048 static int isp_watchdog_work(ispsoftc_t *, XS_T *);
2049
2050 static int
2051 isp_watchdog_work(ispsoftc_t *isp, XS_T *xs)
2052 {
2053 uint32_t handle;
2054
2055 /*
2056 * We've decided this command is dead. Make sure we're not trying
2057 * to kill a command that's already dead by getting its handle
2058 * and seeing whether it's still alive.
2059 */
2060 ISP_LOCK(isp);
2061 handle = isp_find_handle(isp, xs);
2062 if (handle) {
2063 uint32_t isr;
2064 uint16_t sema, mbox;
2065
2066 if (XS_CMD_DONE_P(xs)) {
2067 isp_prt(isp, ISP_LOGDEBUG1,
2068 "watchdog found done cmd (handle 0x%x)", handle);
2069 ISP_UNLOCK(isp);
2070 return (1);
2071 }
2072
2073 if (XS_CMD_WDOG_P(xs)) {
2074 isp_prt(isp, ISP_LOGDEBUG2,
2075 "recursive watchdog (handle 0x%x)", handle);
2076 ISP_UNLOCK(isp);
2077 return (1);
2078 }
2079
2080 XS_CMD_S_WDOG(xs);
2081 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
2082 isp_intr(isp, isr, sema, mbox);
2083 }
2084 if (XS_CMD_DONE_P(xs)) {
2085 isp_prt(isp, ISP_LOGDEBUG2,
2086 "watchdog cleanup for handle 0x%x", handle);
2087 ISPLOCK_2_CAMLOCK(isp);
2088 xpt_done((union ccb *) xs);
2089 CAMLOCK_2_ISPLOCK(isp);
2090 } else if (XS_CMD_GRACE_P(xs)) {
2091 /*
2092 * Make sure the command is *really* dead before we
2093 * release the handle (and DMA resources) for reuse.
2094 */
2095 (void) isp_control(isp, ISPCTL_ABORT_CMD, xs);
2096
2097 /*
2098 * After this point, the command is really dead.
2099 */
2100 if (XS_XFRLEN(xs)) {
2101 ISP_DMAFREE(isp, xs, handle);
2102 }
2103 isp_destroy_handle(isp, handle);
2104 xpt_print(xs->ccb_h.path,
2105 "watchdog timeout for handle 0x%x\n", handle);
2106 XS_SETERR(xs, CAM_CMD_TIMEOUT);
2107 XS_CMD_C_WDOG(xs);
2108 ISPLOCK_2_CAMLOCK(isp);
2109 isp_done(xs);
2110 CAMLOCK_2_ISPLOCK(isp);
2111 } else {
2112 XS_CMD_C_WDOG(xs);
2113 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
2114 XS_CMD_S_GRACE(xs);
2115 isp->isp_sendmarker |= 1 << XS_CHANNEL(xs);
2116 }
2117 ISP_UNLOCK(isp);
2118 return (1);
2119 }
2120 ISP_UNLOCK(isp);
2121 return (0);
2122 }
2123
2124 static void
2125 isp_watchdog(void *arg)
2126 {
2127 ispsoftc_t *isp;
2128 XS_T *xs = arg;
2129 for (isp = isplist; isp != NULL; isp = isp->isp_osinfo.next) {
2130 if (isp_watchdog_work(isp, xs)) {
2131 break;
2132 }
2133 }
2134 if (isp == NULL) {
2135 printf("isp_watchdog: nobody had %p active\n", arg);
2136 }
2137 }
2138
2139
2140 #if __FreeBSD_version >= 600000
2141 static void
2142 isp_make_here(ispsoftc_t *isp, int tgt)
2143 {
2144 union ccb *ccb;
2145 ISPLOCK_2_CAMLOCK(isp);
2146 /*
2147 * Allocate a CCB, create a wildcard path for this bus,
2148 * and schedule a rescan.
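 * Note that the CCB is freed here only on failure; on success it is
 * handed off, along with its newly created path, to xpt_rescan().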
2149 */
2150 ccb = xpt_alloc_ccb_nowait();
2151 if (ccb == NULL) {
2152 isp_prt(isp, ISP_LOGWARN, "unable to alloc CCB for rescan");
2153 CAMLOCK_2_ISPLOCK(isp);
2154 return;
2155 }
2156 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2157 cam_sim_path(isp->isp_sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2158 CAMLOCK_2_ISPLOCK(isp);
2159 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan");
2160 xpt_free_ccb(ccb);
2161 return;
2162 }
2163 xpt_rescan(ccb);
2164 CAMLOCK_2_ISPLOCK(isp);
2165 }
2166
2167 static void
2168 isp_make_gone(ispsoftc_t *isp, int tgt)
2169 {
2170 struct cam_path *tp;
2171 ISPLOCK_2_CAMLOCK(isp);
2172 if (xpt_create_path(&tp, NULL, cam_sim_path(isp->isp_sim), tgt,
2173 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2174 xpt_async(AC_LOST_DEVICE, tp, NULL);
2175 xpt_free_path(tp);
2176 }
2177 CAMLOCK_2_ISPLOCK(isp);
2178 }
2179 #else
2180 #define isp_make_here(isp, tgt) do { ; } while (0)
2181 #define isp_make_gone(isp, tgt) do { ; } while (0)
2182 #endif
2183
2184
2185 /*
2186 * Gone Device Timer Function- when we have decided that a device has gone
2187 * away, we wait a specific period of time prior to telling the OS it has
2188 * gone away.
2189 *
2190 * This timer function fires once a second and then scans the port database
2191 * for devices that are marked dead but still have a virtual target assigned.
2192 * We decrement a counter for that port database entry, and when it hits zero,
2193 * we tell the OS the device has gone away.
2194 */
2195 static void
2196 isp_gdt(void *arg)
2197 {
2198 ispsoftc_t *isp = arg;
2199 fcportdb_t *lp;
2200 int dbidx, tgt, more_to_do = 0;
2201
2202 isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
2203 ISP_LOCK(isp);
2204 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2205 lp = &FCPARAM(isp)->portdb[dbidx];
2206
2207 if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
2208 continue;
2209 }
2210 if (lp->ini_map_idx == 0) {
2211 continue;
2212 }
2213 if (lp->new_reserved == 0) {
2214 continue;
2215 }
2216 lp->new_reserved -= 1;
2217 if (lp->new_reserved != 0) {
2218 more_to_do++;
2219 continue;
2220 }
2221 tgt = lp->ini_map_idx - 1;
2222 FCPARAM(isp)->isp_ini_map[tgt] = 0;
2223 lp->ini_map_idx = 0;
2224 lp->state = FC_PORTDB_STATE_NIL;
2225 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
2226 "Gone Device Timeout");
2227 isp_make_gone(isp, tgt);
2228 }
2229 if (more_to_do) {
2230 isp->isp_osinfo.gdt = timeout(isp_gdt, isp, hz);
2231 } else {
2232 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
2233 "stopping Gone Device Timer");
2234 isp->isp_osinfo.gdt_running = 0;
2235 }
2236 ISP_UNLOCK(isp);
2237 }
2238
2239 /*
2240 * Loop Down Timer Function- when the loop goes down, a timer is started,
2241 * and after it expires we come here and take all probational devices that
2242 * the OS knows about and tell the OS that they've gone away.
2243 *
2244 * We don't clear the devices out of our port database because, when the loop
2245 * comes back up, we have to do some actual cleanup with the chip at that
2246 * point (implicit PLOGO, e.g., to get the chip's port database state right).
2247 */
2248 static void
2249 isp_ldt(void *arg)
2250 {
2251 ispsoftc_t *isp = arg;
2252 fcportdb_t *lp;
2253 int dbidx, tgt;
2254
2255 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
2256 ISP_LOCK(isp);
2257
2258 /*
2259 * Notify the OS of all targets that we now consider departed.
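 * (That is, port database entries still in PROBATIONAL state that have
 * a target id assigned via a nonzero ini_map_idx; each one is unmapped
 * and reported to CAM as a lost device through isp_make_gone().)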
2260 */ 2261 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2262 lp = &FCPARAM(isp)->portdb[dbidx]; 2263 2264 if (lp->state != FC_PORTDB_STATE_PROBATIONAL) { 2265 continue; 2266 } 2267 if (lp->ini_map_idx == 0) { 2268 continue; 2269 } 2270 2271 /* 2272 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST! 2273 */ 2274 2275 /* 2276 * Mark that we've announced that this device is gone.... 2277 */ 2278 lp->reserved = 1; 2279 2280 /* 2281 * but *don't* change the state of the entry. Just clear 2282 * any target id stuff and announce to CAM that the 2283 * device is gone. This way any necessary PLOGO stuff 2284 * will happen when loop comes back up. 2285 */ 2286 2287 tgt = lp->ini_map_idx - 1; 2288 FCPARAM(isp)->isp_ini_map[tgt] = 0; 2289 lp->ini_map_idx = 0; 2290 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, 2291 "Loop Down Timeout"); 2292 isp_make_gone(isp, tgt); 2293 } 2294 2295 /* 2296 * The loop down timer has expired. Wake up the kthread 2297 * to notice that fact (or make it false). 2298 */ 2299 isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1; 2300 #if __FreeBSD_version < 500000 2301 wakeup(&isp->isp_osinfo.kproc); 2302 #else 2303 #ifdef ISP_SMPLOCK 2304 cv_signal(&isp->isp_osinfo.kthread_cv); 2305 #else 2306 wakeup(&isp->isp_osinfo.kthread_cv); 2307 #endif 2308 #endif 2309 ISP_UNLOCK(isp); 2310 } 2311 2312 static void 2313 isp_kthread(void *arg) 2314 { 2315 ispsoftc_t *isp = arg; 2316 int slp = 0; 2317 #if __FreeBSD_version < 500000 2318 int s; 2319 2320 s = splcam(); 2321 #else 2322 #ifdef ISP_SMPLOCK 2323 mtx_lock(&isp->isp_lock); 2324 #else 2325 mtx_lock(&Giant); 2326 #endif 2327 #endif 2328 /* 2329 * The first loop is for our usage where we have yet to have 2330 * gotten good fibre channel state. 2331 */ 2332 for (;;) { 2333 int wasfrozen, lb, lim; 2334 2335 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2336 "isp_kthread: checking FC state"); 2337 isp->isp_osinfo.mbox_sleep_ok = 1; 2338 lb = isp_fc_runstate(isp, 250000); 2339 isp->isp_osinfo.mbox_sleep_ok = 0; 2340 if (lb) { 2341 /* 2342 * Increment loop down time by the last sleep interval 2343 */ 2344 isp->isp_osinfo.loop_down_time += slp; 2345 2346 if (lb < 0) { 2347 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2348 "kthread: FC loop not up (down count %d)", 2349 isp->isp_osinfo.loop_down_time); 2350 } else { 2351 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2352 "kthread: FC got to %d (down count %d)", 2353 lb, isp->isp_osinfo.loop_down_time); 2354 } 2355 2356 2357 /* 2358 * If we've never seen loop up and we've waited longer 2359 * than quickboot time, or we've seen loop up but we've 2360 * waited longer than loop_down_limit, give up and go 2361 * to sleep until loop comes up. 
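 * Otherwise the sleep interval chosen below backs off as loop_down_time
 * grows: 1 second while under 10 seconds of down time, then 5, 10, 20,
 * and finally 30 seconds.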
2362 */ 2363 if (FCPARAM(isp)->loop_seen_once == 0) { 2364 lim = isp_quickboot_time; 2365 } else { 2366 lim = isp->isp_osinfo.loop_down_limit; 2367 } 2368 if (isp->isp_osinfo.loop_down_time >= lim) { 2369 isp_freeze_loopdown(isp, "loop limit hit"); 2370 slp = 0; 2371 } else if (isp->isp_osinfo.loop_down_time < 10) { 2372 slp = 1; 2373 } else if (isp->isp_osinfo.loop_down_time < 30) { 2374 slp = 5; 2375 } else if (isp->isp_osinfo.loop_down_time < 60) { 2376 slp = 10; 2377 } else if (isp->isp_osinfo.loop_down_time < 120) { 2378 slp = 20; 2379 } else { 2380 slp = 30; 2381 } 2382 2383 } else { 2384 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2385 "isp_kthread: FC state OK"); 2386 isp->isp_osinfo.loop_down_time = 0; 2387 slp = 0; 2388 } 2389 2390 /* 2391 * If we'd frozen the simq, unfreeze it now so that CAM 2392 * can start sending us commands. If the FC state isn't 2393 * okay yet, they'll hit that in isp_start which will 2394 * freeze the queue again. 2395 */ 2396 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 2397 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 2398 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 2399 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2400 "isp_kthread: releasing simq"); 2401 ISPLOCK_2_CAMLOCK(isp); 2402 xpt_release_simq(isp->isp_sim, 1); 2403 CAMLOCK_2_ISPLOCK(isp); 2404 } 2405 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2406 "isp_kthread: sleep time %d", slp); 2407 #if __FreeBSD_version < 500000 2408 tsleep(&isp->isp_osinfo.kproc, PRIBIO, "ispf", 2409 slp * hz); 2410 #else 2411 #ifdef ISP_SMPLOCK 2412 cv_timed_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock, 2413 slp * hz); 2414 #else 2415 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "ispf", 2416 slp * hz); 2417 #endif 2418 #endif 2419 /* 2420 * If slp is zero, we're waking up for the first time after 2421 * things have been okay. In this case, we set a deferral state 2422 * for all commands and delay hysteresis seconds before starting 2423 * the FC state evaluation. This gives the loop/fabric a chance 2424 * to settle. 2425 */ 2426 if (slp == 0 && isp->isp_osinfo.hysteresis) { 2427 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2428 "isp_kthread: sleep hysteresis tick time %d", 2429 isp->isp_osinfo.hysteresis * hz); 2430 (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT", 2431 (isp->isp_osinfo.hysteresis * hz)); 2432 } 2433 } 2434 } 2435 2436 static void 2437 isp_action(struct cam_sim *sim, union ccb *ccb) 2438 { 2439 int bus, tgt, error, lim; 2440 ispsoftc_t *isp; 2441 struct ccb_trans_settings *cts; 2442 2443 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2444 2445 isp = (ispsoftc_t *)cam_sim_softc(sim); 2446 ccb->ccb_h.sim_priv.entries[0].field = 0; 2447 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2448 if (isp->isp_state != ISP_RUNSTATE && 2449 ccb->ccb_h.func_code == XPT_SCSI_IO) { 2450 CAMLOCK_2_ISPLOCK(isp); 2451 isp_init(isp); 2452 if (isp->isp_state != ISP_INITSTATE) { 2453 ISP_UNLOCK(isp); 2454 /* 2455 * Lie. Say it was a selection timeout. 2456 */ 2457 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 2458 xpt_freeze_devq(ccb->ccb_h.path, 1); 2459 xpt_done(ccb); 2460 return; 2461 } 2462 isp->isp_state = ISP_RUNSTATE; 2463 ISPLOCK_2_CAMLOCK(isp); 2464 } 2465 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2466 2467 2468 switch (ccb->ccb_h.func_code) { 2469 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2470 /* 2471 * Do a couple of preliminary checks... 
2472 */ 2473 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2474 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2475 ccb->ccb_h.status = CAM_REQ_INVALID; 2476 xpt_done(ccb); 2477 break; 2478 } 2479 } 2480 #ifdef DIAGNOSTIC 2481 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2482 xpt_print(ccb->ccb_h.path, "invalid target\n"); 2483 ccb->ccb_h.status = CAM_PATH_INVALID; 2484 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2485 xpt_print(ccb->ccb_h.path, "invalid lun\n"); 2486 ccb->ccb_h.status = CAM_PATH_INVALID; 2487 } 2488 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2489 xpt_done(ccb); 2490 break; 2491 } 2492 #endif 2493 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 2494 CAMLOCK_2_ISPLOCK(isp); 2495 error = isp_start((XS_T *) ccb); 2496 switch (error) { 2497 case CMD_QUEUED: 2498 XS_CMD_S_CLEAR(ccb); 2499 ISPLOCK_2_CAMLOCK(isp); 2500 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2501 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2502 int ms = ccb->ccb_h.timeout; 2503 if (ms == CAM_TIME_DEFAULT) { 2504 ms = 60*1000; 2505 } 2506 ccb->ccb_h.timeout_ch = 2507 timeout(isp_watchdog, ccb, isp_mstohz(ms)); 2508 } else { 2509 callout_handle_init(&ccb->ccb_h.timeout_ch); 2510 } 2511 break; 2512 case CMD_RQLATER: 2513 /* 2514 * This can only happen for Fibre Channel 2515 */ 2516 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 2517 2518 /* 2519 * Handle initial and subsequent loop down cases 2520 */ 2521 if (FCPARAM(isp)->loop_seen_once == 0) { 2522 lim = isp_quickboot_time; 2523 } else { 2524 lim = isp->isp_osinfo.loop_down_limit; 2525 } 2526 if (isp->isp_osinfo.loop_down_time >= lim) { 2527 isp_prt(isp, ISP_LOGDEBUG0, 2528 "%d.%d downtime (%d) > lim (%d)", 2529 XS_TGT(ccb), XS_LUN(ccb), 2530 isp->isp_osinfo.loop_down_time, lim); 2531 ccb->ccb_h.status = 2532 CAM_SEL_TIMEOUT|CAM_DEV_QFRZN; 2533 xpt_freeze_devq(ccb->ccb_h.path, 1); 2534 ISPLOCK_2_CAMLOCK(isp); 2535 xpt_done(ccb); 2536 break; 2537 } 2538 isp_prt(isp, ISP_LOGDEBUG0, 2539 "%d.%d retry later", XS_TGT(ccb), XS_LUN(ccb)); 2540 /* 2541 * Otherwise, retry in a while. 2542 */ 2543 ISPLOCK_2_CAMLOCK(isp); 2544 cam_freeze_devq(ccb->ccb_h.path); 2545 cam_release_devq(ccb->ccb_h.path, 2546 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); 2547 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2548 xpt_done(ccb); 2549 break; 2550 case CMD_EAGAIN: 2551 ISPLOCK_2_CAMLOCK(isp); 2552 cam_freeze_devq(ccb->ccb_h.path); 2553 cam_release_devq(ccb->ccb_h.path, 2554 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); 2555 xpt_done(ccb); 2556 break; 2557 case CMD_COMPLETE: 2558 isp_done((struct ccb_scsiio *) ccb); 2559 ISPLOCK_2_CAMLOCK(isp); 2560 break; 2561 default: 2562 ISPLOCK_2_CAMLOCK(isp); 2563 isp_prt(isp, ISP_LOGERR, 2564 "What's this? 
0x%x at %d in file %s", 2565 error, __LINE__, __FILE__); 2566 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2567 xpt_done(ccb); 2568 } 2569 break; 2570 2571 #ifdef ISP_TARGET_MODE 2572 case XPT_EN_LUN: /* Enable LUN as a target */ 2573 { 2574 int seq, i; 2575 CAMLOCK_2_ISPLOCK(isp); 2576 seq = isp_en_lun(isp, ccb); 2577 if (seq < 0) { 2578 ISPLOCK_2_CAMLOCK(isp); 2579 xpt_done(ccb); 2580 break; 2581 } 2582 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) { 2583 uint32_t isr; 2584 uint16_t sema, mbox; 2585 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2586 isp_intr(isp, isr, sema, mbox); 2587 } 2588 DELAY(1000); 2589 } 2590 ISPLOCK_2_CAMLOCK(isp); 2591 break; 2592 } 2593 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2594 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2595 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2596 { 2597 tstate_t *tptr = 2598 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2599 if (tptr == NULL) { 2600 ccb->ccb_h.status = CAM_LUN_INVALID; 2601 xpt_done(ccb); 2602 break; 2603 } 2604 ccb->ccb_h.sim_priv.entries[0].field = 0; 2605 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2606 ccb->ccb_h.flags = 0; 2607 2608 CAMLOCK_2_ISPLOCK(isp); 2609 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2610 /* 2611 * Note that the command itself may not be done- 2612 * it may not even have had the first CTIO sent. 2613 */ 2614 tptr->atio_count++; 2615 isp_prt(isp, ISP_LOGTDEBUG0, 2616 "Put FREE ATIO, lun %d, count now %d", 2617 ccb->ccb_h.target_lun, tptr->atio_count); 2618 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2619 sim_links.sle); 2620 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2621 tptr->inot_count++; 2622 isp_prt(isp, ISP_LOGTDEBUG0, 2623 "Put FREE INOT, lun %d, count now %d", 2624 ccb->ccb_h.target_lun, tptr->inot_count); 2625 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2626 sim_links.sle); 2627 } else { 2628 isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");; 2629 } 2630 rls_lun_statep(isp, tptr); 2631 ccb->ccb_h.status = CAM_REQ_INPROG; 2632 ISPLOCK_2_CAMLOCK(isp); 2633 break; 2634 } 2635 case XPT_CONT_TARGET_IO: 2636 { 2637 CAMLOCK_2_ISPLOCK(isp); 2638 isp_target_start_ctio(isp, ccb); 2639 ISPLOCK_2_CAMLOCK(isp); 2640 break; 2641 } 2642 #endif 2643 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2644 2645 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2646 tgt = ccb->ccb_h.target_id; 2647 tgt |= (bus << 16); 2648 2649 CAMLOCK_2_ISPLOCK(isp); 2650 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2651 ISPLOCK_2_CAMLOCK(isp); 2652 if (error) { 2653 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2654 } else { 2655 ccb->ccb_h.status = CAM_REQ_CMP; 2656 } 2657 xpt_done(ccb); 2658 break; 2659 case XPT_ABORT: /* Abort the specified CCB */ 2660 { 2661 union ccb *accb = ccb->cab.abort_ccb; 2662 CAMLOCK_2_ISPLOCK(isp); 2663 switch (accb->ccb_h.func_code) { 2664 #ifdef ISP_TARGET_MODE 2665 case XPT_ACCEPT_TARGET_IO: 2666 case XPT_IMMED_NOTIFY: 2667 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2668 break; 2669 case XPT_CONT_TARGET_IO: 2670 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2671 ccb->ccb_h.status = CAM_UA_ABORT; 2672 break; 2673 #endif 2674 case XPT_SCSI_IO: 2675 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2676 if (error) { 2677 ccb->ccb_h.status = CAM_UA_ABORT; 2678 } else { 2679 ccb->ccb_h.status = CAM_REQ_CMP; 2680 } 2681 break; 2682 default: 2683 ccb->ccb_h.status = CAM_REQ_INVALID; 2684 break; 2685 } 2686 ISPLOCK_2_CAMLOCK(isp); 2687 xpt_done(ccb); 2688 break; 2689 } 2690 #ifdef CAM_NEW_TRAN_CODE 2691 
#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2692 #else 2693 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2694 #endif 2695 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2696 cts = &ccb->cts; 2697 if (!IS_CURRENT_SETTINGS(cts)) { 2698 ccb->ccb_h.status = CAM_REQ_INVALID; 2699 xpt_done(ccb); 2700 break; 2701 } 2702 tgt = cts->ccb_h.target_id; 2703 CAMLOCK_2_ISPLOCK(isp); 2704 if (IS_SCSI(isp)) { 2705 #ifndef CAM_NEW_TRAN_CODE 2706 sdparam *sdp = isp->isp_param; 2707 uint16_t *dptr; 2708 2709 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2710 2711 sdp += bus; 2712 /* 2713 * We always update (internally) from goal_flags 2714 * so any request to change settings just gets 2715 * vectored to that location. 2716 */ 2717 dptr = &sdp->isp_devparam[tgt].goal_flags; 2718 2719 /* 2720 * Note that these operations affect the 2721 * the goal flags (goal_flags)- not 2722 * the current state flags. Then we mark 2723 * things so that the next operation to 2724 * this HBA will cause the update to occur. 2725 */ 2726 if (cts->valid & CCB_TRANS_DISC_VALID) { 2727 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2728 *dptr |= DPARM_DISC; 2729 } else { 2730 *dptr &= ~DPARM_DISC; 2731 } 2732 } 2733 if (cts->valid & CCB_TRANS_TQ_VALID) { 2734 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2735 *dptr |= DPARM_TQING; 2736 } else { 2737 *dptr &= ~DPARM_TQING; 2738 } 2739 } 2740 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2741 switch (cts->bus_width) { 2742 case MSG_EXT_WDTR_BUS_16_BIT: 2743 *dptr |= DPARM_WIDE; 2744 break; 2745 default: 2746 *dptr &= ~DPARM_WIDE; 2747 } 2748 } 2749 /* 2750 * Any SYNC RATE of nonzero and SYNC_OFFSET 2751 * of nonzero will cause us to go to the 2752 * selected (from NVRAM) maximum value for 2753 * this device. At a later point, we'll 2754 * allow finer control. 2755 */ 2756 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2757 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2758 (cts->sync_offset > 0)) { 2759 *dptr |= DPARM_SYNC; 2760 } else { 2761 *dptr &= ~DPARM_SYNC; 2762 } 2763 *dptr |= DPARM_SAFE_DFLT; 2764 #else 2765 struct ccb_trans_settings_scsi *scsi = 2766 &cts->proto_specific.scsi; 2767 struct ccb_trans_settings_spi *spi = 2768 &cts->xport_specific.spi; 2769 sdparam *sdp = isp->isp_param; 2770 uint16_t *dptr; 2771 2772 if (spi->valid == 0 && scsi->valid == 0) { 2773 ISPLOCK_2_CAMLOCK(isp); 2774 ccb->ccb_h.status = CAM_REQ_CMP; 2775 xpt_done(ccb); 2776 break; 2777 } 2778 2779 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2780 sdp += bus; 2781 /* 2782 * We always update (internally) from goal_flags 2783 * so any request to change settings just gets 2784 * vectored to that location. 
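 * (dev_update and the per-bus isp_update bit are set below so that the
 * next parameter update cycle pushes these goal values out to the chip.)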
2785 */ 2786 dptr = &sdp->isp_devparam[tgt].goal_flags; 2787 2788 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2789 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2790 *dptr |= DPARM_DISC; 2791 else 2792 *dptr &= ~DPARM_DISC; 2793 } 2794 2795 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2796 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2797 *dptr |= DPARM_TQING; 2798 else 2799 *dptr &= ~DPARM_TQING; 2800 } 2801 2802 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2803 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2804 *dptr |= DPARM_WIDE; 2805 else 2806 *dptr &= ~DPARM_WIDE; 2807 } 2808 2809 /* 2810 * XXX: FIX ME 2811 */ 2812 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2813 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2814 (spi->sync_period && spi->sync_offset)) { 2815 *dptr |= DPARM_SYNC; 2816 /* 2817 * XXX: CHECK FOR LEGALITY 2818 */ 2819 sdp->isp_devparam[tgt].goal_period = 2820 spi->sync_period; 2821 sdp->isp_devparam[tgt].goal_offset = 2822 spi->sync_offset; 2823 } else { 2824 *dptr &= ~DPARM_SYNC; 2825 } 2826 #endif 2827 isp_prt(isp, ISP_LOGDEBUG0, 2828 "SET (%d.%d.%d) to flags %x off %x per %x", 2829 bus, tgt, cts->ccb_h.target_lun, 2830 sdp->isp_devparam[tgt].goal_flags, 2831 sdp->isp_devparam[tgt].goal_offset, 2832 sdp->isp_devparam[tgt].goal_period); 2833 sdp->isp_devparam[tgt].dev_update = 1; 2834 isp->isp_update |= (1 << bus); 2835 } 2836 ISPLOCK_2_CAMLOCK(isp); 2837 ccb->ccb_h.status = CAM_REQ_CMP; 2838 xpt_done(ccb); 2839 break; 2840 case XPT_GET_TRAN_SETTINGS: 2841 cts = &ccb->cts; 2842 tgt = cts->ccb_h.target_id; 2843 CAMLOCK_2_ISPLOCK(isp); 2844 if (IS_FC(isp)) { 2845 #ifndef CAM_NEW_TRAN_CODE 2846 /* 2847 * a lot of normal SCSI things don't make sense. 2848 */ 2849 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2850 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2851 /* 2852 * How do you measure the width of a high 2853 * speed serial bus? Well, in bytes. 2854 * 2855 * Offset and period make no sense, though, so we set 2856 * (above) a 'base' transfer speed to be gigabit. 
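 * (So only disconnect and tagged queueing are flagged as valid here;
 * the sync rate and offset fields are simply not reported.)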
2857 */ 2858 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2859 #else 2860 fcparam *fcp = isp->isp_param; 2861 struct ccb_trans_settings_scsi *scsi = 2862 &cts->proto_specific.scsi; 2863 struct ccb_trans_settings_fc *fc = 2864 &cts->xport_specific.fc; 2865 2866 cts->protocol = PROTO_SCSI; 2867 cts->protocol_version = SCSI_REV_2; 2868 cts->transport = XPORT_FC; 2869 cts->transport_version = 0; 2870 2871 scsi->valid = CTS_SCSI_VALID_TQ; 2872 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 2873 fc->valid = CTS_FC_VALID_SPEED; 2874 if (fcp->isp_gbspeed == 2) { 2875 fc->bitrate = 200000; 2876 } else { 2877 fc->bitrate = 100000; 2878 } 2879 if (tgt > 0 && tgt < MAX_FC_TARG) { 2880 fcportdb_t *lp = &fcp->portdb[tgt]; 2881 fc->wwnn = lp->node_wwn; 2882 fc->wwpn = lp->port_wwn; 2883 fc->port = lp->portid; 2884 fc->valid |= CTS_FC_VALID_WWNN | 2885 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2886 } 2887 #endif 2888 } else { 2889 #ifdef CAM_NEW_TRAN_CODE 2890 struct ccb_trans_settings_scsi *scsi = 2891 &cts->proto_specific.scsi; 2892 struct ccb_trans_settings_spi *spi = 2893 &cts->xport_specific.spi; 2894 #endif 2895 sdparam *sdp = isp->isp_param; 2896 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2897 uint16_t dval, pval, oval; 2898 2899 sdp += bus; 2900 2901 if (IS_CURRENT_SETTINGS(cts)) { 2902 sdp->isp_devparam[tgt].dev_refresh = 1; 2903 isp->isp_update |= (1 << bus); 2904 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2905 NULL); 2906 dval = sdp->isp_devparam[tgt].actv_flags; 2907 oval = sdp->isp_devparam[tgt].actv_offset; 2908 pval = sdp->isp_devparam[tgt].actv_period; 2909 } else { 2910 dval = sdp->isp_devparam[tgt].nvrm_flags; 2911 oval = sdp->isp_devparam[tgt].nvrm_offset; 2912 pval = sdp->isp_devparam[tgt].nvrm_period; 2913 } 2914 2915 #ifndef CAM_NEW_TRAN_CODE 2916 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2917 2918 if (dval & DPARM_DISC) { 2919 cts->flags |= CCB_TRANS_DISC_ENB; 2920 } 2921 if (dval & DPARM_TQING) { 2922 cts->flags |= CCB_TRANS_TAG_ENB; 2923 } 2924 if (dval & DPARM_WIDE) { 2925 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2926 } else { 2927 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2928 } 2929 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2930 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2931 2932 if ((dval & DPARM_SYNC) && oval != 0) { 2933 cts->sync_period = pval; 2934 cts->sync_offset = oval; 2935 cts->valid |= 2936 CCB_TRANS_SYNC_RATE_VALID | 2937 CCB_TRANS_SYNC_OFFSET_VALID; 2938 } 2939 #else 2940 cts->protocol = PROTO_SCSI; 2941 cts->protocol_version = SCSI_REV_2; 2942 cts->transport = XPORT_SPI; 2943 cts->transport_version = 2; 2944 2945 spi->valid = 0; 2946 scsi->valid = 0; 2947 spi->flags = 0; 2948 scsi->flags = 0; 2949 if (dval & DPARM_DISC) { 2950 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2951 } 2952 if ((dval & DPARM_SYNC) && oval && pval) { 2953 spi->sync_offset = oval; 2954 spi->sync_period = pval; 2955 } else { 2956 spi->sync_offset = 0; 2957 spi->sync_period = 0; 2958 } 2959 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2960 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2961 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2962 if (dval & DPARM_WIDE) { 2963 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2964 } else { 2965 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2966 } 2967 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2968 scsi->valid = CTS_SCSI_VALID_TQ; 2969 if (dval & DPARM_TQING) { 2970 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2971 } 2972 spi->valid |= CTS_SPI_VALID_DISC; 2973 } 2974 #endif 2975 isp_prt(isp, ISP_LOGDEBUG0, 2976 "GET %s (%d.%d.%d) to flags %x off %x per %x", 2977 
IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 2978 bus, tgt, cts->ccb_h.target_lun, dval, oval, pval); 2979 } 2980 ISPLOCK_2_CAMLOCK(isp); 2981 ccb->ccb_h.status = CAM_REQ_CMP; 2982 xpt_done(ccb); 2983 break; 2984 2985 case XPT_CALC_GEOMETRY: 2986 #if __FreeBSD_version < 500000 2987 { 2988 struct ccb_calc_geometry *ccg; 2989 u_int32_t secs_per_cylinder; 2990 u_int32_t size_mb; 2991 2992 ccg = &ccb->ccg; 2993 if (ccg->block_size == 0) { 2994 ccb->ccb_h.status = CAM_REQ_INVALID; 2995 xpt_done(ccb); 2996 break; 2997 } 2998 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2999 if (size_mb > 1024) { 3000 ccg->heads = 255; 3001 ccg->secs_per_track = 63; 3002 } else { 3003 ccg->heads = 64; 3004 ccg->secs_per_track = 32; 3005 } 3006 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3007 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3008 ccb->ccb_h.status = CAM_REQ_CMP; 3009 xpt_done(ccb); 3010 break; 3011 } 3012 #else 3013 { 3014 cam_calc_geometry(&ccb->ccg, /*extended*/1); 3015 xpt_done(ccb); 3016 break; 3017 } 3018 #endif 3019 case XPT_RESET_BUS: /* Reset the specified bus */ 3020 bus = cam_sim_bus(sim); 3021 CAMLOCK_2_ISPLOCK(isp); 3022 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 3023 ISPLOCK_2_CAMLOCK(isp); 3024 if (error) 3025 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 3026 else { 3027 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 3028 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 3029 else if (isp->isp_path != NULL) 3030 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 3031 ccb->ccb_h.status = CAM_REQ_CMP; 3032 } 3033 xpt_done(ccb); 3034 break; 3035 3036 case XPT_TERM_IO: /* Terminate the I/O process */ 3037 ccb->ccb_h.status = CAM_REQ_INVALID; 3038 xpt_done(ccb); 3039 break; 3040 3041 case XPT_PATH_INQ: /* Path routing inquiry */ 3042 { 3043 struct ccb_pathinq *cpi = &ccb->cpi; 3044 3045 cpi->version_num = 1; 3046 #ifdef ISP_TARGET_MODE 3047 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3048 #else 3049 cpi->target_sprt = 0; 3050 #endif 3051 cpi->hba_eng_cnt = 0; 3052 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 3053 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 3054 cpi->bus_id = cam_sim_bus(sim); 3055 if (IS_FC(isp)) { 3056 cpi->hba_misc = PIM_NOBUSRESET; 3057 /* 3058 * Because our loop ID can shift from time to time, 3059 * make our initiator ID out of range of our bus. 3060 */ 3061 cpi->initiator_id = cpi->max_target + 1; 3062 3063 /* 3064 * Set base transfer capabilities for Fibre Channel. 3065 * Technically not correct because we don't know 3066 * what media we're running on top of- but we'll 3067 * look good if we always say 100MB/s. 
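 * (base_transfer_speed is given in KB/s, so the 100000 below is roughly
 * 100MB/s for 1Gb FC and 200000 is roughly 200MB/s for 2Gb FC.)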
3068 */ 3069 if (FCPARAM(isp)->isp_gbspeed == 2) 3070 cpi->base_transfer_speed = 200000; 3071 else 3072 cpi->base_transfer_speed = 100000; 3073 cpi->hba_inquiry = PI_TAG_ABLE; 3074 #ifdef CAM_NEW_TRAN_CODE 3075 cpi->transport = XPORT_FC; 3076 cpi->transport_version = 0; 3077 #endif 3078 } else { 3079 sdparam *sdp = isp->isp_param; 3080 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 3081 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3082 cpi->hba_misc = 0; 3083 cpi->initiator_id = sdp->isp_initiator_id; 3084 cpi->base_transfer_speed = 3300; 3085 #ifdef CAM_NEW_TRAN_CODE 3086 cpi->transport = XPORT_SPI; 3087 cpi->transport_version = 2; 3088 #endif 3089 } 3090 #ifdef CAM_NEW_TRAN_CODE 3091 cpi->protocol = PROTO_SCSI; 3092 cpi->protocol_version = SCSI_REV_2; 3093 #endif 3094 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3095 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 3096 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3097 cpi->unit_number = cam_sim_unit(sim); 3098 cpi->ccb_h.status = CAM_REQ_CMP; 3099 xpt_done(ccb); 3100 break; 3101 } 3102 default: 3103 ccb->ccb_h.status = CAM_REQ_INVALID; 3104 xpt_done(ccb); 3105 break; 3106 } 3107 } 3108 3109 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 3110 3111 void 3112 isp_done(struct ccb_scsiio *sccb) 3113 { 3114 ispsoftc_t *isp = XS_ISP(sccb); 3115 3116 if (XS_NOERR(sccb)) 3117 XS_SETERR(sccb, CAM_REQ_CMP); 3118 3119 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 3120 (sccb->scsi_status != SCSI_STATUS_OK)) { 3121 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 3122 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 3123 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 3124 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 3125 } else { 3126 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 3127 } 3128 } 3129 3130 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3131 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3132 isp_prt(isp, ISP_LOGDEBUG0, 3133 "target %d lun %d CAM status 0x%x SCSI status 0x%x", 3134 XS_TGT(sccb), XS_LUN(sccb), sccb->ccb_h.status, 3135 sccb->scsi_status); 3136 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 3137 sccb->ccb_h.status |= CAM_DEV_QFRZN; 3138 xpt_freeze_devq(sccb->ccb_h.path, 1); 3139 } 3140 } 3141 3142 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 3143 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3144 xpt_print(sccb->ccb_h.path, 3145 "cam completion status 0x%x\n", sccb->ccb_h.status); 3146 } 3147 3148 XS_CMD_S_DONE(sccb); 3149 if (XS_CMD_WDOG_P(sccb) == 0) { 3150 untimeout(isp_watchdog, sccb, sccb->ccb_h.timeout_ch); 3151 if (XS_CMD_GRACE_P(sccb)) { 3152 isp_prt(isp, ISP_LOGDEBUG2, 3153 "finished command on borrowed time"); 3154 } 3155 XS_CMD_S_CLEAR(sccb); 3156 ISPLOCK_2_CAMLOCK(isp); 3157 xpt_done((union ccb *) sccb); 3158 CAMLOCK_2_ISPLOCK(isp); 3159 } 3160 } 3161 3162 int 3163 isp_async(ispsoftc_t *isp, ispasync_t cmd, void *arg) 3164 { 3165 int bus, rv = 0; 3166 static const char prom[] = 3167 "PortID 0x%06x handle 0x%x role %s %s\n" 3168 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3169 static const char prom2[] = 3170 "PortID 0x%06x handle 0x%x role %s %s tgt %u\n" 3171 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3172 char *msg = NULL; 3173 target_id_t tgt; 3174 fcportdb_t *lp; 3175 struct cam_path *tmppath; 3176 3177 switch (cmd) { 3178 case ISPASYNC_NEW_TGT_PARAMS: 3179 { 3180 #ifdef CAM_NEW_TRAN_CODE 3181 struct ccb_trans_settings_scsi *scsi; 3182 struct ccb_trans_settings_spi *spi; 3183 #endif 3184 int flags, tgt; 3185 sdparam *sdp = isp->isp_param; 3186 struct 
ccb_trans_settings cts;
3187
3188 memset(&cts, 0, sizeof (struct ccb_trans_settings));
3189
3190 tgt = *((int *)arg);
3191 bus = (tgt >> 16) & 0xffff;
3192 tgt &= 0xffff;
3193 sdp += bus;
3194 ISPLOCK_2_CAMLOCK(isp);
3195 if (xpt_create_path(&tmppath, NULL,
3196 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
3197 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3198 CAMLOCK_2_ISPLOCK(isp);
3199 isp_prt(isp, ISP_LOGWARN,
3200 "isp_async cannot make temp path for %d.%d",
3201 tgt, bus);
3202 rv = -1;
3203 break;
3204 }
3205 CAMLOCK_2_ISPLOCK(isp);
3206 flags = sdp->isp_devparam[tgt].actv_flags;
3207 #ifdef CAM_NEW_TRAN_CODE
3208 cts.type = CTS_TYPE_CURRENT_SETTINGS;
3209 cts.protocol = PROTO_SCSI;
3210 cts.transport = XPORT_SPI;
3211
3212 scsi = &cts.proto_specific.scsi;
3213 spi = &cts.xport_specific.spi;
3214
3215 if (flags & DPARM_TQING) {
3216 scsi->valid |= CTS_SCSI_VALID_TQ;
3217 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3218 }
3219
3220 if (flags & DPARM_DISC) {
3221 spi->valid |= CTS_SPI_VALID_DISC;
3222 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3223 }
3224 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3225 if (flags & DPARM_WIDE) {
3226 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3227 } else {
3228 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3229 }
3230 if (flags & DPARM_SYNC) {
3231 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3232 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3233 spi->sync_period = sdp->isp_devparam[tgt].actv_period;
3234 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
3235 }
3236 #else
3237 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
3238 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3239 if (flags & DPARM_DISC) {
3240 cts.flags |= CCB_TRANS_DISC_ENB;
3241 }
3242 if (flags & DPARM_TQING) {
3243 cts.flags |= CCB_TRANS_TAG_ENB;
3244 }
3245 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
3246 cts.bus_width = (flags & DPARM_WIDE)?
3247 MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
3248 cts.sync_period = sdp->isp_devparam[tgt].actv_period;
3249 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
3250 if (flags & DPARM_SYNC) {
3251 cts.valid |=
3252 CCB_TRANS_SYNC_RATE_VALID |
3253 CCB_TRANS_SYNC_OFFSET_VALID;
3254 }
3255 #endif
3256 isp_prt(isp, ISP_LOGDEBUG2,
3257 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
3258 bus, tgt, sdp->isp_devparam[tgt].actv_period,
3259 sdp->isp_devparam[tgt].actv_offset, flags);
3260 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
3261 ISPLOCK_2_CAMLOCK(isp);
3262 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
3263 xpt_free_path(tmppath);
3264 CAMLOCK_2_ISPLOCK(isp);
3265 break;
3266 }
3267 case ISPASYNC_BUS_RESET:
3268 bus = *((int *)arg);
3269 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
3270 bus);
3271 if (bus > 0 && isp->isp_path2) {
3272 ISPLOCK_2_CAMLOCK(isp);
3273 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
3274 CAMLOCK_2_ISPLOCK(isp);
3275 } else if (isp->isp_path) {
3276 ISPLOCK_2_CAMLOCK(isp);
3277 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
3278 CAMLOCK_2_ISPLOCK(isp);
3279 }
3280 break;
3281 case ISPASYNC_LIP:
3282 if (msg == NULL) {
3283 msg = "LIP Received";
3284 }
3285 /* FALLTHROUGH */
3286 case ISPASYNC_LOOP_RESET:
3287 if (msg == NULL) {
3288 msg = "LOOP Reset";
3289 }
3290 /* FALLTHROUGH */
3291 case ISPASYNC_LOOP_DOWN:
3292 if (msg == NULL) {
3293 msg = "LOOP Down";
3294 }
3295 if (isp->isp_path) {
3296 isp_freeze_loopdown(isp, msg);
3297 }
3298 if (isp->isp_osinfo.ldt_running == 0) {
3299 isp->isp_osinfo.ldt = timeout(isp_ldt, isp,
3300 isp->isp_osinfo.loop_down_limit * hz);
3301 isp->isp_osinfo.ldt_running = 1;
3302 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
3303 "starting Loop Down Timer");
3304 }
3305 isp_prt(isp, ISP_LOGINFO, msg);
3306 break;
3307 case ISPASYNC_LOOP_UP:
3308 /*
3309 * Now we just note that Loop has come up. We don't
3310 * actually do anything because we're waiting for a
3311 * Change Notify before activating the FC cleanup
3312 * thread to look at the state of the loop again.
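 * (The ISPASYNC_CHANGE_NOTIFY handler below wakes the FC kthread,
 * which then re-evaluates loop and fabric state via isp_fc_runstate.)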
3313 */ 3314 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 3315 break; 3316 case ISPASYNC_DEV_ARRIVED: 3317 lp = arg; 3318 lp->reserved = 0; 3319 if ((isp->isp_role & ISP_ROLE_INITIATOR) && 3320 (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) { 3321 int dbidx = lp - FCPARAM(isp)->portdb; 3322 int i; 3323 3324 for (i = 0; i < MAX_FC_TARG; i++) { 3325 if (i >= FL_ID && i <= SNS_ID) { 3326 continue; 3327 } 3328 if (FCPARAM(isp)->isp_ini_map[i] == 0) { 3329 break; 3330 } 3331 } 3332 if (i < MAX_FC_TARG) { 3333 FCPARAM(isp)->isp_ini_map[i] = dbidx + 1; 3334 lp->ini_map_idx = i + 1; 3335 } else { 3336 isp_prt(isp, ISP_LOGWARN, "out of target ids"); 3337 isp_dump_portdb(isp); 3338 } 3339 } 3340 if (lp->ini_map_idx) { 3341 tgt = lp->ini_map_idx - 1; 3342 isp_prt(isp, ISP_LOGCONFIG, prom2, 3343 lp->portid, lp->handle, 3344 roles[lp->roles], "arrived at", tgt, 3345 (uint32_t) (lp->node_wwn >> 32), 3346 (uint32_t) lp->node_wwn, 3347 (uint32_t) (lp->port_wwn >> 32), 3348 (uint32_t) lp->port_wwn); 3349 isp_make_here(isp, tgt); 3350 } else { 3351 isp_prt(isp, ISP_LOGCONFIG, prom, 3352 lp->portid, lp->handle, 3353 roles[lp->roles], "arrived", 3354 (uint32_t) (lp->node_wwn >> 32), 3355 (uint32_t) lp->node_wwn, 3356 (uint32_t) (lp->port_wwn >> 32), 3357 (uint32_t) lp->port_wwn); 3358 } 3359 break; 3360 case ISPASYNC_DEV_CHANGED: 3361 lp = arg; 3362 if (isp_change_is_bad) { 3363 lp->state = FC_PORTDB_STATE_NIL; 3364 if (lp->ini_map_idx) { 3365 tgt = lp->ini_map_idx - 1; 3366 FCPARAM(isp)->isp_ini_map[tgt] = 0; 3367 lp->ini_map_idx = 0; 3368 isp_prt(isp, ISP_LOGCONFIG, prom3, 3369 lp->portid, tgt, "change is bad"); 3370 isp_make_gone(isp, tgt); 3371 } else { 3372 isp_prt(isp, ISP_LOGCONFIG, prom, 3373 lp->portid, lp->handle, 3374 roles[lp->roles], 3375 "changed and departed", 3376 (uint32_t) (lp->node_wwn >> 32), 3377 (uint32_t) lp->node_wwn, 3378 (uint32_t) (lp->port_wwn >> 32), 3379 (uint32_t) lp->port_wwn); 3380 } 3381 } else { 3382 lp->portid = lp->new_portid; 3383 lp->roles = lp->new_roles; 3384 if (lp->ini_map_idx) { 3385 int t = lp->ini_map_idx - 1; 3386 FCPARAM(isp)->isp_ini_map[t] = 3387 (lp - FCPARAM(isp)->portdb) + 1; 3388 tgt = lp->ini_map_idx - 1; 3389 isp_prt(isp, ISP_LOGCONFIG, prom2, 3390 lp->portid, lp->handle, 3391 roles[lp->roles], "changed at", tgt, 3392 (uint32_t) (lp->node_wwn >> 32), 3393 (uint32_t) lp->node_wwn, 3394 (uint32_t) (lp->port_wwn >> 32), 3395 (uint32_t) lp->port_wwn); 3396 } else { 3397 isp_prt(isp, ISP_LOGCONFIG, prom, 3398 lp->portid, lp->handle, 3399 roles[lp->roles], "changed", 3400 (uint32_t) (lp->node_wwn >> 32), 3401 (uint32_t) lp->node_wwn, 3402 (uint32_t) (lp->port_wwn >> 32), 3403 (uint32_t) lp->port_wwn); 3404 } 3405 } 3406 break; 3407 case ISPASYNC_DEV_STAYED: 3408 lp = arg; 3409 if (lp->ini_map_idx) { 3410 tgt = lp->ini_map_idx - 1; 3411 isp_prt(isp, ISP_LOGCONFIG, prom2, 3412 lp->portid, lp->handle, 3413 roles[lp->roles], "stayed at", tgt, 3414 (uint32_t) (lp->node_wwn >> 32), 3415 (uint32_t) lp->node_wwn, 3416 (uint32_t) (lp->port_wwn >> 32), 3417 (uint32_t) lp->port_wwn); 3418 } else { 3419 isp_prt(isp, ISP_LOGCONFIG, prom, 3420 lp->portid, lp->handle, 3421 roles[lp->roles], "stayed", 3422 (uint32_t) (lp->node_wwn >> 32), 3423 (uint32_t) lp->node_wwn, 3424 (uint32_t) (lp->port_wwn >> 32), 3425 (uint32_t) lp->port_wwn); 3426 } 3427 break; 3428 case ISPASYNC_DEV_GONE: 3429 lp = arg; 3430 /* 3431 * If this has a virtual target and we haven't marked it 3432 * that we're going to have isp_gdt tell the OS it's gone, 3433 * set the isp_gdt timer running on it. 
3434 * 3435 * If it isn't marked that isp_gdt is going to get rid of it, 3436 * announce that it's gone. 3437 */ 3438 if (lp->ini_map_idx && lp->reserved == 0) { 3439 lp->reserved = 1; 3440 lp->new_reserved = isp->isp_osinfo.gone_device_time; 3441 lp->state = FC_PORTDB_STATE_ZOMBIE; 3442 if (isp->isp_osinfo.gdt_running == 0) { 3443 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3444 "starting Gone Device Timer"); 3445 isp->isp_osinfo.gdt = timeout(isp_gdt, isp, hz); 3446 isp->isp_osinfo.gdt_running = 1; 3447 } 3448 tgt = lp->ini_map_idx - 1; 3449 isp_prt(isp, ISP_LOGCONFIG, prom2, 3450 lp->portid, lp->handle, 3451 roles[lp->roles], "gone zombie at", tgt, 3452 (uint32_t) (lp->node_wwn >> 32), 3453 (uint32_t) lp->node_wwn, 3454 (uint32_t) (lp->port_wwn >> 32), 3455 (uint32_t) lp->port_wwn); 3456 } else if (lp->reserved == 0) { 3457 isp_prt(isp, ISP_LOGCONFIG, prom, 3458 lp->portid, lp->handle, 3459 roles[lp->roles], "departed", 3460 (uint32_t) (lp->node_wwn >> 32), 3461 (uint32_t) lp->node_wwn, 3462 (uint32_t) (lp->port_wwn >> 32), 3463 (uint32_t) lp->port_wwn); 3464 } 3465 break; 3466 case ISPASYNC_CHANGE_NOTIFY: 3467 { 3468 char *msg; 3469 if (arg == ISPASYNC_CHANGE_PDB) { 3470 msg = "Port Database Changed"; 3471 } else if (arg == ISPASYNC_CHANGE_SNS) { 3472 msg = "Name Server Database Changed"; 3473 } else { 3474 msg = "Other Change Notify"; 3475 } 3476 /* 3477 * If the loop down timer is running, cancel it. 3478 */ 3479 if (isp->isp_osinfo.ldt_running) { 3480 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3481 "Stopping Loop Down Timer"); 3482 isp->isp_osinfo.ldt_running = 0; 3483 untimeout(isp_ldt, isp, isp->isp_osinfo.ldt); 3484 callout_handle_init(&isp->isp_osinfo.ldt); 3485 } 3486 isp_prt(isp, ISP_LOGINFO, msg); 3487 isp_freeze_loopdown(isp, msg); 3488 #if __FreeBSD_version < 500000 3489 wakeup(&isp->isp_osinfo.kproc); 3490 #else 3491 #ifdef ISP_SMPLOCK 3492 cv_signal(&isp->isp_osinfo.kthread_cv); 3493 #else 3494 wakeup(&isp->isp_osinfo.kthread_cv); 3495 #endif 3496 #endif 3497 break; 3498 } 3499 #ifdef ISP_TARGET_MODE 3500 case ISPASYNC_TARGET_NOTIFY: 3501 { 3502 tmd_notify_t *nt = arg; 3503 isp_prt(isp, ISP_LOGALL, 3504 "target notify code 0x%x", nt->nt_ncode); 3505 break; 3506 } 3507 case ISPASYNC_TARGET_ACTION: 3508 switch (((isphdr_t *)arg)->rqs_entry_type) { 3509 default: 3510 isp_prt(isp, ISP_LOGWARN, 3511 "event 0x%x for unhandled target action", 3512 ((isphdr_t *)arg)->rqs_entry_type); 3513 break; 3514 case RQSTYPE_NOTIFY: 3515 if (IS_SCSI(isp)) { 3516 rv = isp_handle_platform_notify_scsi(isp, 3517 (in_entry_t *) arg); 3518 } else { 3519 rv = isp_handle_platform_notify_fc(isp, 3520 (in_fcentry_t *) arg); 3521 } 3522 break; 3523 case RQSTYPE_ATIO: 3524 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 3525 break; 3526 case RQSTYPE_ATIO2: 3527 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3528 break; 3529 case RQSTYPE_CTIO3: 3530 case RQSTYPE_CTIO2: 3531 case RQSTYPE_CTIO: 3532 rv = isp_handle_platform_ctio(isp, arg); 3533 break; 3534 case RQSTYPE_ENABLE_LUN: 3535 case RQSTYPE_MODIFY_LUN: 3536 isp_ledone(isp, (lun_entry_t *) arg); 3537 break; 3538 } 3539 break; 3540 #endif 3541 case ISPASYNC_FW_CRASH: 3542 { 3543 uint16_t mbox1, mbox6; 3544 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3545 if (IS_DUALBUS(isp)) { 3546 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3547 } else { 3548 mbox6 = 0; 3549 } 3550 isp_prt(isp, ISP_LOGERR, 3551 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3552 mbox6, mbox1); 3553 #ifdef ISP_FW_CRASH_DUMP 3554 /* 3555 * XXX: really need a thread to do this 
right. 3556 */ 3557 if (IS_FC(isp)) { 3558 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3559 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3560 isp_freeze_loopdown(isp, "f/w crash"); 3561 isp_fw_dump(isp); 3562 } 3563 isp_reinit(isp); 3564 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3565 #endif 3566 break; 3567 } 3568 case ISPASYNC_UNHANDLED_RESPONSE: 3569 break; 3570 default: 3571 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3572 break; 3573 } 3574 return (rv); 3575 } 3576 3577 3578 /* 3579 * Locks are held before coming here. 3580 */ 3581 void 3582 isp_uninit(ispsoftc_t *isp) 3583 { 3584 if (IS_24XX(isp)) { 3585 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); 3586 } else { 3587 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 3588 } 3589 ISP_DISABLE_INTS(isp); 3590 } 3591 3592 void 3593 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 3594 { 3595 va_list ap; 3596 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3597 return; 3598 } 3599 printf("%s: ", device_get_nameunit(isp->isp_dev)); 3600 va_start(ap, fmt); 3601 vprintf(fmt, ap); 3602 va_end(ap); 3603 printf("\n"); 3604 } 3605 3606 uint64_t 3607 isp_nanotime_sub(struct timespec *b, struct timespec *a) 3608 { 3609 uint64_t elapsed; 3610 struct timespec x = *b; 3611 timespecsub(&x, a); 3612 elapsed = GET_NANOSEC(&x); 3613 if (elapsed == 0) 3614 elapsed++; 3615 return (elapsed); 3616 } 3617 3618 int 3619 isp_mbox_acquire(ispsoftc_t *isp) 3620 { 3621 if (isp->isp_osinfo.mboxbsy) { 3622 return (1); 3623 } else { 3624 isp->isp_osinfo.mboxcmd_done = 0; 3625 isp->isp_osinfo.mboxbsy = 1; 3626 return (0); 3627 } 3628 } 3629 3630 void 3631 isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) 3632 { 3633 unsigned int usecs = mbp->timeout; 3634 unsigned int max, olim, ilim; 3635 3636 if (usecs == 0) { 3637 usecs = MBCMD_DEFAULT_TIMEOUT; 3638 } 3639 max = isp->isp_mbxwrk0 + 1; 3640 3641 if (isp->isp_osinfo.mbox_sleep_ok) { 3642 unsigned int ms = (usecs + 999) / 1000; 3643 3644 isp->isp_osinfo.mbox_sleep_ok = 0; 3645 isp->isp_osinfo.mbox_sleeping = 1; 3646 for (olim = 0; olim < max; olim++) { 3647 #if __FreeBSD_version < 500000 || !defined(ISP_SMPLOCK) 3648 tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", 3649 isp_mstohz(ms)); 3650 #else 3651 msleep(&isp->isp_mbxworkp, &isp->isp_mtx, PRIBIO, 3652 "ispmbx_sleep", isp_mstohz(ms)); 3653 #endif 3654 if (isp->isp_osinfo.mboxcmd_done) { 3655 break; 3656 } 3657 } 3658 isp->isp_osinfo.mbox_sleep_ok = 1; 3659 isp->isp_osinfo.mbox_sleeping = 0; 3660 } else { 3661 for (olim = 0; olim < max; olim++) { 3662 for (ilim = 0; ilim < usecs; ilim += 100) { 3663 uint32_t isr; 3664 uint16_t sema, mbox; 3665 if (isp->isp_osinfo.mboxcmd_done) { 3666 break; 3667 } 3668 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 3669 isp_intr(isp, isr, sema, mbox); 3670 if (isp->isp_osinfo.mboxcmd_done) { 3671 break; 3672 } 3673 } 3674 USEC_DELAY(100); 3675 } 3676 if (isp->isp_osinfo.mboxcmd_done) { 3677 break; 3678 } 3679 } 3680 } 3681 if (isp->isp_osinfo.mboxcmd_done == 0) { 3682 isp_prt(isp, ISP_LOGWARN, 3683 "%s Mailbox Command (0x%x) Timeout (%uus)", 3684 isp->isp_osinfo.mbox_sleep_ok? 
"Interrupting" : "Polled", 3685 isp->isp_lastmbxcmd, usecs); 3686 mbp->param[0] = MBOX_TIMEOUT; 3687 isp->isp_osinfo.mboxcmd_done = 1; 3688 } 3689 } 3690 3691 void 3692 isp_mbox_notify_done(ispsoftc_t *isp) 3693 { 3694 if (isp->isp_osinfo.mbox_sleeping) { 3695 wakeup(&isp->isp_mbxworkp); 3696 } 3697 isp->isp_osinfo.mboxcmd_done = 1; 3698 } 3699 3700 void 3701 isp_mbox_release(ispsoftc_t *isp) 3702 { 3703 isp->isp_osinfo.mboxbsy = 0; 3704 } 3705 3706 int 3707 isp_mstohz(int ms) 3708 { 3709 int hz; 3710 struct timeval t; 3711 t.tv_sec = ms / 1000; 3712 t.tv_usec = (ms % 1000) * 1000; 3713 hz = tvtohz(&t); 3714 if (hz < 0) { 3715 hz = 0x7fffffff; 3716 } 3717 if (hz == 0) { 3718 hz = 1; 3719 } 3720 return (hz); 3721 } 3722