1 /*- 2 * Copyright (c) 1997-2006 by Matthew Jacob 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 /* 28 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 #include <dev/isp/isp_freebsd.h> 33 #include <sys/unistd.h> 34 #include <sys/kthread.h> 35 #include <machine/stdarg.h> /* for use by isp_prt below */ 36 #include <sys/conf.h> 37 #include <sys/module.h> 38 #include <sys/ioccom.h> 39 #include <dev/isp/isp_ioctl.h> 40 #if __FreeBSD_version >= 500000 41 #include <sys/sysctl.h> 42 #else 43 #include <sys/devicestat.h> 44 #endif 45 #include <cam/cam_periph.h> 46 #include <cam/cam_xpt_periph.h> 47 48 #if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025 49 #define CAM_NEW_TRAN_CODE 1 50 #endif 51 52 53 MODULE_VERSION(isp, 1); 54 MODULE_DEPEND(isp, cam, 1, 1, 1); 55 int isp_announced = 0; 56 int isp_fabric_hysteresis = 5; 57 int isp_loop_down_limit = 300; /* default loop down limit */ 58 int isp_change_is_bad = 0; /* "changed" devices are bad */ 59 int isp_quickboot_time = 15; /* don't wait more than N secs for loop up */ 60 int isp_gone_device_time = 30; /* grace time before reporting device lost */ 61 static const char *roles[4] = { 62 "(none)", "Target", "Initiator", "Target/Initiator" 63 }; 64 static const char prom3[] = 65 "PortID 0x%06x Departed from Target %u because of %s"; 66 67 static void isp_freeze_loopdown(ispsoftc_t *, char *); 68 static d_ioctl_t ispioctl; 69 static void isp_intr_enable(void *); 70 static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); 71 static void isp_poll(struct cam_sim *); 72 static timeout_t isp_watchdog; 73 static timeout_t isp_ldt; 74 static void isp_kthread(void *); 75 static void isp_action(struct cam_sim *, union ccb *); 76 77 #if __FreeBSD_version < 700000 78 ispfwfunc *isp_get_firmware_p = NULL; 79 #endif 80 81 #if __FreeBSD_version < 500000 82 #define ISP_CDEV_MAJOR 248 83 static struct cdevsw isp_cdevsw = { 84 /* open */ nullopen, 85 /* close */ nullclose, 86 /* read */ noread, 87 /* write */ nowrite, 88 /* ioctl */ ispioctl, 89 /* poll */ nopoll, 90 /* mmap */ nommap, 91 /* strategy */ nostrategy, 92 /* name */ "isp", 93 /* maj */ ISP_CDEV_MAJOR, 94 /* dump */ 
nodump, 95 /* psize */ nopsize, 96 /* flags */ D_TAPE, 97 }; 98 #define isp_sysctl_update(x) do { ; } while (0) 99 #else 100 static struct cdevsw isp_cdevsw = { 101 .d_version = D_VERSION, 102 .d_flags = D_NEEDGIANT, 103 .d_ioctl = ispioctl, 104 .d_name = "isp", 105 }; 106 static void isp_sysctl_update(ispsoftc_t *); 107 #endif 108 109 static ispsoftc_t *isplist = NULL; 110 111 void 112 isp_attach(ispsoftc_t *isp) 113 { 114 int primary, secondary; 115 struct ccb_setasync csa; 116 struct cam_devq *devq; 117 struct cam_sim *sim; 118 struct cam_path *path; 119 120 /* 121 * Establish (in case of 12X0) which bus is the primary. 122 */ 123 124 primary = 0; 125 secondary = 1; 126 127 /* 128 * Create the device queue for our SIM(s). 129 */ 130 devq = cam_simq_alloc(isp->isp_maxcmds); 131 if (devq == NULL) { 132 return; 133 } 134 135 /* 136 * Construct our SIM entry. 137 */ 138 ISPLOCK_2_CAMLOCK(isp); 139 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 140 device_get_unit(isp->isp_dev), &Giant, 1, isp->isp_maxcmds, devq); 141 if (sim == NULL) { 142 cam_simq_free(devq); 143 CAMLOCK_2_ISPLOCK(isp); 144 return; 145 } 146 CAMLOCK_2_ISPLOCK(isp); 147 148 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 149 isp->isp_osinfo.ehook.ich_arg = isp; 150 ISPLOCK_2_CAMLOCK(isp); 151 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 152 cam_sim_free(sim, TRUE); 153 CAMLOCK_2_ISPLOCK(isp); 154 isp_prt(isp, ISP_LOGERR, 155 "could not establish interrupt enable hook"); 156 return; 157 } 158 159 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 160 cam_sim_free(sim, TRUE); 161 CAMLOCK_2_ISPLOCK(isp); 162 return; 163 } 164 165 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 166 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 167 xpt_bus_deregister(cam_sim_path(sim)); 168 cam_sim_free(sim, TRUE); 169 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 170 CAMLOCK_2_ISPLOCK(isp); 171 return; 172 } 173 174 xpt_setup_ccb(&csa.ccb_h, path, 5); 175 csa.ccb_h.func_code = XPT_SASYNC_CB; 176 csa.event_enable = AC_LOST_DEVICE; 177 csa.callback = isp_cam_async; 178 csa.callback_arg = sim; 179 xpt_action((union ccb *)&csa); 180 CAMLOCK_2_ISPLOCK(isp); 181 isp->isp_sim = sim; 182 isp->isp_path = path; 183 /* 184 * Create a kernel thread for fibre channel instances. We 185 * don't have dual channel FC cards. 186 */ 187 if (IS_FC(isp)) { 188 ISPLOCK_2_CAMLOCK(isp); 189 #if __FreeBSD_version >= 500000 190 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); 191 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 192 RFHIGHPID, 0, "%s: fc_thrd", 193 device_get_nameunit(isp->isp_dev))) 194 #else 195 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 196 "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) 197 #endif 198 { 199 xpt_bus_deregister(cam_sim_path(sim)); 200 cam_sim_free(sim, TRUE); 201 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 202 CAMLOCK_2_ISPLOCK(isp); 203 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 204 return; 205 } 206 CAMLOCK_2_ISPLOCK(isp); 207 /* 208 * We start by being "loop down" if we have an initiator role 209 */ 210 if (isp->isp_role & ISP_ROLE_INITIATOR) { 211 isp_freeze_loopdown(isp, "isp_attach"); 212 isp->isp_osinfo.ldt = 213 timeout(isp_ldt, isp, isp_quickboot_time * hz); 214 isp->isp_osinfo.ldt_running = 1; 215 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 216 "Starting Initial Loop Down Timer"); 217 } 218 } 219 220 221 /* 222 * If we have a second channel, construct SIM entry for that. 
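	 * Only the dual-bus (12X0-family) SCSI adapters take this path; as
	 * noted above, there are no dual-channel FC cards handled here. The
	 * second SIM shares the device queue allocated for the first one.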
223 */ 224 if (IS_DUALBUS(isp)) { 225 ISPLOCK_2_CAMLOCK(isp); 226 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 227 device_get_unit(isp->isp_dev), &Giant, 1, 228 isp->isp_maxcmds, devq); 229 if (sim == NULL) { 230 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 231 xpt_free_path(isp->isp_path); 232 cam_simq_free(devq); 233 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 234 return; 235 } 236 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 237 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 238 xpt_free_path(isp->isp_path); 239 cam_sim_free(sim, TRUE); 240 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 241 CAMLOCK_2_ISPLOCK(isp); 242 return; 243 } 244 245 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 246 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 247 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 248 xpt_free_path(isp->isp_path); 249 xpt_bus_deregister(cam_sim_path(sim)); 250 cam_sim_free(sim, TRUE); 251 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 252 CAMLOCK_2_ISPLOCK(isp); 253 return; 254 } 255 256 xpt_setup_ccb(&csa.ccb_h, path, 5); 257 csa.ccb_h.func_code = XPT_SASYNC_CB; 258 csa.event_enable = AC_LOST_DEVICE; 259 csa.callback = isp_cam_async; 260 csa.callback_arg = sim; 261 xpt_action((union ccb *)&csa); 262 CAMLOCK_2_ISPLOCK(isp); 263 isp->isp_sim2 = sim; 264 isp->isp_path2 = path; 265 } 266 267 /* 268 * Create device nodes 269 */ 270 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 271 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 272 273 if (isp->isp_role != ISP_ROLE_NONE) { 274 isp->isp_state = ISP_RUNSTATE; 275 ISP_ENABLE_INTS(isp); 276 } 277 if (isplist == NULL) { 278 isplist = isp; 279 } else { 280 ispsoftc_t *tmp = isplist; 281 while (tmp->isp_osinfo.next) { 282 tmp = tmp->isp_osinfo.next; 283 } 284 tmp->isp_osinfo.next = isp; 285 } 286 isp_sysctl_update(isp); 287 } 288 289 static void 290 isp_freeze_loopdown(ispsoftc_t *isp, char *msg) 291 { 292 if (isp->isp_osinfo.simqfrozen == 0) { 293 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 294 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 295 ISPLOCK_2_CAMLOCK(isp); 296 xpt_freeze_simq(isp->isp_sim, 1); 297 CAMLOCK_2_ISPLOCK(isp); 298 } else { 299 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 300 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 301 } 302 } 303 304 305 #if __FreeBSD_version < 500000 306 #define _DEV dev_t 307 #define _IOP struct proc 308 #else 309 #define _IOP struct thread 310 #define _DEV struct cdev * 311 #endif 312 313 static int 314 ispioctl(_DEV dev, u_long c, caddr_t addr, int flags, _IOP *td) 315 { 316 ispsoftc_t *isp; 317 int nr, retval = ENOTTY; 318 319 isp = isplist; 320 while (isp) { 321 if (minor(dev) == device_get_unit(isp->isp_dev)) { 322 break; 323 } 324 isp = isp->isp_osinfo.next; 325 } 326 if (isp == NULL) 327 return (ENXIO); 328 329 switch (c) { 330 #ifdef ISP_FW_CRASH_DUMP 331 case ISP_GET_FW_CRASH_DUMP: 332 if (IS_FC(isp)) { 333 uint16_t *ptr = FCPARAM(isp)->isp_dump_data; 334 size_t sz; 335 336 retval = 0; 337 if (IS_2200(isp)) { 338 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 339 } else { 340 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 341 } 342 ISP_LOCK(isp); 343 if (ptr && *ptr) { 344 void *uaddr = *((void **) addr); 345 if (copyout(ptr, uaddr, sz)) { 346 retval = EFAULT; 347 } else { 348 *ptr = 0; 349 } 350 } else { 351 retval = ENXIO; 352 } 353 ISP_UNLOCK(isp); 354 } 355 break; 356 case ISP_FORCE_CRASH_DUMP: 357 if (IS_FC(isp)) { 358 ISP_LOCK(isp); 359 isp_freeze_loopdown(isp, 
360 "ispioctl(ISP_FORCE_CRASH_DUMP)"); 361 isp_fw_dump(isp); 362 isp_reinit(isp); 363 ISP_UNLOCK(isp); 364 retval = 0; 365 } 366 break; 367 #endif 368 case ISP_SDBLEV: 369 { 370 int olddblev = isp->isp_dblev; 371 isp->isp_dblev = *(int *)addr; 372 *(int *)addr = olddblev; 373 retval = 0; 374 break; 375 } 376 case ISP_GETROLE: 377 *(int *)addr = isp->isp_role; 378 retval = 0; 379 break; 380 case ISP_SETROLE: 381 nr = *(int *)addr; 382 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 383 retval = EINVAL; 384 break; 385 } 386 /* 387 * XXX: Current 388 */ 389 if (nr == ISP_ROLE_BOTH) { 390 isp_prt(isp, ISP_LOGERR, "dual roles not supported"); 391 retval = EINVAL; 392 break; 393 } 394 *(int *)addr = isp->isp_role; 395 isp->isp_role = nr; 396 /* FALLTHROUGH */ 397 case ISP_RESETHBA: 398 ISP_LOCK(isp); 399 isp_reinit(isp); 400 ISP_UNLOCK(isp); 401 retval = 0; 402 break; 403 case ISP_RESCAN: 404 if (IS_FC(isp)) { 405 ISP_LOCK(isp); 406 if (isp_fc_runstate(isp, 5 * 1000000)) { 407 retval = EIO; 408 } else { 409 retval = 0; 410 } 411 ISP_UNLOCK(isp); 412 } 413 break; 414 case ISP_FC_LIP: 415 if (IS_FC(isp)) { 416 ISP_LOCK(isp); 417 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 418 retval = EIO; 419 } else { 420 retval = 0; 421 } 422 ISP_UNLOCK(isp); 423 } 424 break; 425 case ISP_FC_GETDINFO: 426 { 427 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 428 fcportdb_t *lp; 429 430 if (IS_SCSI(isp)) { 431 break; 432 } 433 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 434 retval = EINVAL; 435 break; 436 } 437 ISP_LOCK(isp); 438 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 439 if (lp->state == FC_PORTDB_STATE_VALID) { 440 ifc->role = lp->roles; 441 ifc->loopid = lp->handle; 442 ifc->portid = lp->portid; 443 ifc->node_wwn = lp->node_wwn; 444 ifc->port_wwn = lp->port_wwn; 445 retval = 0; 446 } else { 447 retval = ENODEV; 448 } 449 ISP_UNLOCK(isp); 450 break; 451 } 452 case ISP_GET_STATS: 453 { 454 isp_stats_t *sp = (isp_stats_t *) addr; 455 456 MEMZERO(sp, sizeof (*sp)); 457 sp->isp_stat_version = ISP_STATS_VERSION; 458 sp->isp_type = isp->isp_type; 459 sp->isp_revision = isp->isp_revision; 460 ISP_LOCK(isp); 461 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 462 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 463 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 464 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 465 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 466 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 467 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 468 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 469 ISP_UNLOCK(isp); 470 retval = 0; 471 break; 472 } 473 case ISP_CLR_STATS: 474 ISP_LOCK(isp); 475 isp->isp_intcnt = 0; 476 isp->isp_intbogus = 0; 477 isp->isp_intmboxc = 0; 478 isp->isp_intoasync = 0; 479 isp->isp_rsltccmplt = 0; 480 isp->isp_fphccmplt = 0; 481 isp->isp_rscchiwater = 0; 482 isp->isp_fpcchiwater = 0; 483 ISP_UNLOCK(isp); 484 retval = 0; 485 break; 486 case ISP_FC_GETHINFO: 487 { 488 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 489 MEMZERO(hba, sizeof (*hba)); 490 491 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 492 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 493 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 494 if (IS_FC(isp)) { 495 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 496 hba->fc_scsi_supported = 1; 497 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 498 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 499 hba->nvram_node_wwn = FCPARAM(isp)->isp_wwnn_nvram; 500 hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram; 
			hba->active_node_wwn = ISP_NODEWWN(isp);
			hba->active_port_wwn = ISP_PORTWWN(isp);
		}
		retval = 0;
		break;
	}
	case ISP_GET_FC_PARAM:
	{
		struct isp_fc_param *f = (struct isp_fc_param *) addr;

		if (IS_SCSI(isp)) {
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			f->parameter = FCPARAM(isp)->isp_execthrottle;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
				f->parameter = 1;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			f->parameter = FCPARAM(isp)->isp_loopid;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	case ISP_SET_FC_PARAM:
	{
		struct isp_fc_param *f = (struct isp_fc_param *) addr;
		uint32_t param = f->parameter;

		if (IS_SCSI(isp)) {
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			if (param != 512 && param != 1024 && param != 2048) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_maxfrmlen = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			if (param < 16 || param > 255) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_execthrottle = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (param != 0 && param != 1) {
				retval = EINVAL;
				break;
			}
			if (param) {
				FCPARAM(isp)->isp_fwoptions |=
				    ICBOPT_FULL_DUPLEX;
			} else {
				FCPARAM(isp)->isp_fwoptions &=
				    ~ICBOPT_FULL_DUPLEX;
			}
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			if (param < 0 || param > 125) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_loopid = param;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	case ISP_TSK_MGMT:
	{
		int needmarker;
		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
		uint16_t loopid;
		mbreg_t mbs;

		if (IS_SCSI(isp)) {
			break;
		}

		memset(&mbs, 0, sizeof (mbs));
		needmarker = retval = 0;
		loopid = fct->loopid;
		if (FCPARAM(isp)->isp_2klogin == 0) {
			loopid <<= 8;
		}
		switch (fct->action) {
		case IPT_CLEAR_ACA:
			mbs.param[0] = MBOX_CLEAR_ACA;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			break;
		case IPT_TARGET_RESET:
			mbs.param[0] = MBOX_TARGET_RESET;
			mbs.param[1] = loopid;
			needmarker = 1;
			break;
		case IPT_LUN_RESET:
			mbs.param[0] = MBOX_LUN_RESET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case IPT_CLEAR_TASK_SET:
			mbs.param[0] = MBOX_CLEAR_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case IPT_ABORT_TASK_SET:
			mbs.param[0] = MBOX_ABORT_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		default:
			retval = EINVAL;
			break;
		}
		if (retval == 0) {
			ISP_LOCK(isp);
			if (needmarker) {
				isp->isp_sendmarker |= 1;
			}
			retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
			ISP_UNLOCK(isp);
			if (retval)
				retval = EIO;
		}
		break;
	}
	default:
		break;
	}
	return (retval);
}

#if __FreeBSD_version >= 500000
static void
isp_sysctl_update(ispsoftc_t *isp) 664 { 665 struct sysctl_ctx_list *ctx = 666 device_get_sysctl_ctx(isp->isp_osinfo.dev); 667 struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); 668 669 if (IS_SCSI(isp)) { 670 return; 671 } 672 673 snprintf(isp->isp_osinfo.sysctl_info.fc.wwnn, 674 sizeof (isp->isp_osinfo.sysctl_info.fc.wwnn), "0x%08x%08x", 675 (uint32_t) (ISP_NODEWWN(isp) >> 32), (uint32_t) ISP_NODEWWN(isp)); 676 677 snprintf(isp->isp_osinfo.sysctl_info.fc.wwpn, 678 sizeof (isp->isp_osinfo.sysctl_info.fc.wwpn), "0x%08x%08x", 679 (uint32_t) (ISP_PORTWWN(isp) >> 32), (uint32_t) ISP_PORTWWN(isp)); 680 681 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 682 "wwnn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwnn, 0, 683 "World Wide Node Name"); 684 685 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 686 "wwpn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwpn, 0, 687 "World Wide Port Name"); 688 689 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 690 "loop_down_limit", 691 CTLFLAG_RW, &isp->isp_osinfo.loop_down_limit, 0, 692 "How long to wait for loop to come back up"); 693 694 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 695 "gone_device_time", 696 CTLFLAG_RW, &isp->isp_osinfo.gone_device_time, 0, 697 "How long to wait for a device to reappear"); 698 } 699 #endif 700 701 static void 702 isp_intr_enable(void *arg) 703 { 704 ispsoftc_t *isp = arg; 705 if (isp->isp_role != ISP_ROLE_NONE) { 706 ISP_ENABLE_INTS(isp); 707 } 708 /* Release our hook so that the boot can continue. */ 709 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 710 } 711 712 /* 713 * Put the target mode functions here, because some are inlines 714 */ 715 716 #ifdef ISP_TARGET_MODE 717 718 static __inline int is_lun_enabled(ispsoftc_t *, int, lun_id_t); 719 static __inline int are_any_luns_enabled(ispsoftc_t *, int); 720 static __inline tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); 721 static __inline void rls_lun_statep(ispsoftc_t *, tstate_t *); 722 static __inline atio_private_data_t *isp_get_atpd(ispsoftc_t *, int); 723 static cam_status 724 create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); 725 static void destroy_lun_state(ispsoftc_t *, tstate_t *); 726 static int isp_en_lun(ispsoftc_t *, union ccb *); 727 static void isp_ledone(ispsoftc_t *, lun_entry_t *); 728 static cam_status isp_abort_tgt_ccb(ispsoftc_t *, union ccb *); 729 static timeout_t isp_refire_putback_atio; 730 static void isp_complete_ctio(union ccb *); 731 static void isp_target_putback_atio(union ccb *); 732 static void isp_target_start_ctio(ispsoftc_t *, union ccb *); 733 static int isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); 734 static int isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); 735 static int isp_handle_platform_ctio(ispsoftc_t *, void *); 736 static int isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *); 737 static int isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *); 738 739 static __inline int 740 is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun) 741 { 742 tstate_t *tptr; 743 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 744 if (tptr == NULL) { 745 return (0); 746 } 747 do { 748 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { 749 return (1); 750 } 751 } while ((tptr = tptr->next) != NULL); 752 return (0); 753 } 754 755 static __inline int 756 are_any_luns_enabled(ispsoftc_t *isp, int port) 757 { 758 int lo, hi; 759 if (IS_DUALBUS(isp)) { 760 lo = (port * (LUN_HASH_SIZE >> 1)); 761 hi = lo + 
(LUN_HASH_SIZE >> 1); 762 } else { 763 lo = 0; 764 hi = LUN_HASH_SIZE; 765 } 766 for (lo = 0; lo < hi; lo++) { 767 if (isp->isp_osinfo.lun_hash[lo]) { 768 return (1); 769 } 770 } 771 return (0); 772 } 773 774 static __inline tstate_t * 775 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) 776 { 777 tstate_t *tptr = NULL; 778 779 if (lun == CAM_LUN_WILDCARD) { 780 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 781 tptr = &isp->isp_osinfo.tsdflt[bus]; 782 tptr->hold++; 783 return (tptr); 784 } 785 return (NULL); 786 } else { 787 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 788 if (tptr == NULL) { 789 return (NULL); 790 } 791 } 792 793 do { 794 if (tptr->lun == lun && tptr->bus == bus) { 795 tptr->hold++; 796 return (tptr); 797 } 798 } while ((tptr = tptr->next) != NULL); 799 return (tptr); 800 } 801 802 static __inline void 803 rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr) 804 { 805 if (tptr->hold) 806 tptr->hold--; 807 } 808 809 static __inline atio_private_data_t * 810 isp_get_atpd(ispsoftc_t *isp, int tag) 811 { 812 atio_private_data_t *atp; 813 for (atp = isp->isp_osinfo.atpdp; 814 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 815 if (atp->tag == tag) 816 return (atp); 817 } 818 return (NULL); 819 } 820 821 static cam_status 822 create_lun_state(ispsoftc_t *isp, int bus, 823 struct cam_path *path, tstate_t **rslt) 824 { 825 cam_status status; 826 lun_id_t lun; 827 int hfx; 828 tstate_t *tptr, *new; 829 830 lun = xpt_path_lun_id(path); 831 if (lun < 0) { 832 return (CAM_LUN_INVALID); 833 } 834 if (is_lun_enabled(isp, bus, lun)) { 835 return (CAM_LUN_ALRDY_ENA); 836 } 837 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 838 if (new == NULL) { 839 return (CAM_RESRC_UNAVAIL); 840 } 841 842 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 843 xpt_path_target_id(path), xpt_path_lun_id(path)); 844 if (status != CAM_REQ_CMP) { 845 free(new, M_DEVBUF); 846 return (status); 847 } 848 new->bus = bus; 849 new->lun = lun; 850 SLIST_INIT(&new->atios); 851 SLIST_INIT(&new->inots); 852 new->hold = 1; 853 854 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 855 tptr = isp->isp_osinfo.lun_hash[hfx]; 856 if (tptr == NULL) { 857 isp->isp_osinfo.lun_hash[hfx] = new; 858 } else { 859 while (tptr->next) 860 tptr = tptr->next; 861 tptr->next = new; 862 } 863 *rslt = new; 864 return (CAM_REQ_CMP); 865 } 866 867 static __inline void 868 destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr) 869 { 870 int hfx; 871 tstate_t *lw, *pw; 872 873 if (tptr->hold) { 874 return; 875 } 876 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 877 pw = isp->isp_osinfo.lun_hash[hfx]; 878 if (pw == NULL) { 879 return; 880 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 881 isp->isp_osinfo.lun_hash[hfx] = pw->next; 882 } else { 883 lw = pw; 884 pw = lw->next; 885 while (pw) { 886 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 887 lw->next = pw->next; 888 break; 889 } 890 lw = pw; 891 pw = pw->next; 892 } 893 if (pw == NULL) { 894 return; 895 } 896 } 897 free(tptr, M_DEVBUF); 898 } 899 900 /* 901 * Enable luns. 
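 *
 * A target mode peripheral driver reaches this code by handing an
 * XPT_EN_LUN CCB (struct ccb_en_lun, with cel->enable set or clear) to
 * xpt_action(), which the SIM dispatches here. As a note on the flow
 * below: isp_en_lun() either sets a final status in the CCB and returns
 * -1, or returns a slot index once an ENABLE/MODIFY LUN command has been
 * queued to the firmware with the CCB left in CAM_REQ_INPROG; in that
 * case the CCB is parked in isp_osinfo.leact[] and completed later by
 * isp_ledone(), which finds it again through the le_reserved field
 * (slot index plus one).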
902 */ 903 static int 904 isp_en_lun(ispsoftc_t *isp, union ccb *ccb) 905 { 906 struct ccb_en_lun *cel = &ccb->cel; 907 tstate_t *tptr; 908 uint32_t seq; 909 int bus, cmd, av, wildcard, tm_on; 910 lun_id_t lun; 911 target_id_t tgt; 912 913 bus = XS_CHANNEL(ccb); 914 if (bus > 1) { 915 xpt_print(ccb->ccb_h.path, "illegal bus %d\n", bus); 916 ccb->ccb_h.status = CAM_PATH_INVALID; 917 return (-1); 918 } 919 tgt = ccb->ccb_h.target_id; 920 lun = ccb->ccb_h.target_lun; 921 922 if (isp->isp_dblev & ISP_LOGTDEBUG0) { 923 xpt_print(ccb->ccb_h.path, "%sabling lun 0x%x on channel %d\n", 924 cel->enable? "en" : "dis", lun, bus); 925 } 926 927 if ((lun != CAM_LUN_WILDCARD) && 928 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 929 ccb->ccb_h.status = CAM_LUN_INVALID; 930 return (-1); 931 } 932 933 if (IS_SCSI(isp)) { 934 sdparam *sdp = isp->isp_param; 935 sdp += bus; 936 if (tgt != CAM_TARGET_WILDCARD && 937 tgt != sdp->isp_initiator_id) { 938 ccb->ccb_h.status = CAM_TID_INVALID; 939 return (-1); 940 } 941 } else { 942 /* 943 * There's really no point in doing this yet w/o multi-tid 944 * capability. Even then, it's problematic. 945 */ 946 #if 0 947 if (tgt != CAM_TARGET_WILDCARD && 948 tgt != FCPARAM(isp)->isp_iid) { 949 ccb->ccb_h.status = CAM_TID_INVALID; 950 return (-1); 951 } 952 #endif 953 /* 954 * This is as a good a place as any to check f/w capabilities. 955 */ 956 if (FCPARAM(isp)->isp_tmode == 0) { 957 xpt_print(ccb->ccb_h.path, 958 "firmware does not support target mode\n"); 959 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 960 return (-1); 961 } 962 /* 963 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 964 * XXX: dork with our already fragile enable/disable code. 965 */ 966 if (FCPARAM(isp)->isp_sccfw == 0) { 967 xpt_print(ccb->ccb_h.path, 968 "firmware not SCCLUN capable\n"); 969 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 970 return (-1); 971 } 972 } 973 974 if (tgt == CAM_TARGET_WILDCARD) { 975 if (lun == CAM_LUN_WILDCARD) { 976 wildcard = 1; 977 } else { 978 ccb->ccb_h.status = CAM_LUN_INVALID; 979 return (-1); 980 } 981 } else { 982 wildcard = 0; 983 } 984 985 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0; 986 987 /* 988 * Next check to see whether this is a target/lun wildcard action. 989 * 990 * If so, we know that we can accept commands for luns that haven't 991 * been enabled yet and send them upstream. Otherwise, we have to 992 * handle them locally (if we see them at all). 993 */ 994 995 if (wildcard) { 996 tptr = &isp->isp_osinfo.tsdflt[bus]; 997 if (cel->enable) { 998 if (tm_on) { 999 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 1000 return (-1); 1001 } 1002 ccb->ccb_h.status = 1003 xpt_create_path(&tptr->owner, NULL, 1004 xpt_path_path_id(ccb->ccb_h.path), 1005 xpt_path_target_id(ccb->ccb_h.path), 1006 xpt_path_lun_id(ccb->ccb_h.path)); 1007 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1008 return (-1); 1009 } 1010 SLIST_INIT(&tptr->atios); 1011 SLIST_INIT(&tptr->inots); 1012 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 1013 } else { 1014 if (tm_on == 0) { 1015 ccb->ccb_h.status = CAM_REQ_CMP; 1016 return (-1); 1017 } 1018 if (tptr->hold) { 1019 ccb->ccb_h.status = CAM_SCSI_BUSY; 1020 return (-1); 1021 } 1022 xpt_free_path(tptr->owner); 1023 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 1024 } 1025 } 1026 1027 /* 1028 * Now check to see whether this bus needs to be 1029 * enabled/disabled with respect to target mode. 
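	 *
	 * The word handed to ISPCTL_TOGGLE_TMODE below packs the bus number
	 * into its top bit (bus << 31) and, when enabling, also carries
	 * ENABLE_TARGET_FLAG. A non-zero return from isp_control() means the
	 * toggle failed; in that case any wildcard path created above is
	 * torn down again and the CCB is failed with CAM_FUNC_NOTAVAIL.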
1030 */ 1031 av = bus << 31; 1032 if (cel->enable && tm_on == 0) { 1033 av |= ENABLE_TARGET_FLAG; 1034 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1035 if (av) { 1036 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1037 if (wildcard) { 1038 isp->isp_osinfo.tmflags[bus] &= 1039 ~TM_WILDCARD_ENABLED; 1040 xpt_free_path(tptr->owner); 1041 } 1042 return (-1); 1043 } 1044 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 1045 xpt_print(ccb->ccb_h.path, "Target Mode Enabled\n"); 1046 } else if (cel->enable == 0 && tm_on && wildcard) { 1047 if (are_any_luns_enabled(isp, bus)) { 1048 ccb->ccb_h.status = CAM_SCSI_BUSY; 1049 return (-1); 1050 } 1051 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1052 if (av) { 1053 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1054 return (-1); 1055 } 1056 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1057 xpt_print(ccb->ccb_h.path, "Target Mode Disabled\n"); 1058 } 1059 1060 if (wildcard) { 1061 ccb->ccb_h.status = CAM_REQ_CMP; 1062 return (-1); 1063 } 1064 1065 /* 1066 * Find an empty slot 1067 */ 1068 for (seq = 0; seq < NLEACT; seq++) { 1069 if (isp->isp_osinfo.leact[seq] == 0) { 1070 break; 1071 } 1072 } 1073 if (seq >= NLEACT) { 1074 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1075 return (-1); 1076 1077 } 1078 isp->isp_osinfo.leact[seq] = ccb; 1079 1080 if (cel->enable) { 1081 ccb->ccb_h.status = 1082 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 1083 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1084 isp->isp_osinfo.leact[seq] = 0; 1085 return (-1); 1086 } 1087 } else { 1088 tptr = get_lun_statep(isp, bus, lun); 1089 if (tptr == NULL) { 1090 ccb->ccb_h.status = CAM_LUN_INVALID; 1091 return (-1); 1092 } 1093 } 1094 1095 if (cel->enable) { 1096 int c, n, ulun = lun; 1097 1098 cmd = RQSTYPE_ENABLE_LUN; 1099 c = DFLT_CMND_CNT; 1100 n = DFLT_INOT_CNT; 1101 if (IS_FC(isp) && lun != 0) { 1102 cmd = RQSTYPE_MODIFY_LUN; 1103 n = 0; 1104 /* 1105 * For SCC firmware, we only deal with setting 1106 * (enabling or modifying) lun 0. 1107 */ 1108 ulun = 0; 1109 } 1110 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1111 rls_lun_statep(isp, tptr); 1112 ccb->ccb_h.status = CAM_REQ_INPROG; 1113 return (seq); 1114 } 1115 } else { 1116 int c, n, ulun = lun; 1117 1118 cmd = -RQSTYPE_MODIFY_LUN; 1119 c = DFLT_CMND_CNT; 1120 n = DFLT_INOT_CNT; 1121 if (IS_FC(isp) && lun != 0) { 1122 n = 0; 1123 /* 1124 * For SCC firmware, we only deal with setting 1125 * (enabling or modifying) lun 0. 
1126 */ 1127 ulun = 0; 1128 } 1129 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1130 rls_lun_statep(isp, tptr); 1131 ccb->ccb_h.status = CAM_REQ_INPROG; 1132 return (seq); 1133 } 1134 } 1135 rls_lun_statep(isp, tptr); 1136 xpt_print(ccb->ccb_h.path, "isp_lun_cmd failed\n"); 1137 isp->isp_osinfo.leact[seq] = 0; 1138 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1139 return (-1); 1140 } 1141 1142 static void 1143 isp_ledone(ispsoftc_t *isp, lun_entry_t *lep) 1144 { 1145 const char lfmt[] = "now %sabled for target mode\n"; 1146 union ccb *ccb; 1147 uint32_t seq; 1148 tstate_t *tptr; 1149 int av; 1150 struct ccb_en_lun *cel; 1151 1152 seq = lep->le_reserved - 1; 1153 if (seq >= NLEACT) { 1154 isp_prt(isp, ISP_LOGERR, 1155 "seq out of range (%u) in isp_ledone", seq); 1156 return; 1157 } 1158 ccb = isp->isp_osinfo.leact[seq]; 1159 if (ccb == 0) { 1160 isp_prt(isp, ISP_LOGERR, 1161 "no ccb for seq %u in isp_ledone", seq); 1162 return; 1163 } 1164 cel = &ccb->cel; 1165 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); 1166 if (tptr == NULL) { 1167 xpt_print(ccb->ccb_h.path, "null tptr in isp_ledone\n"); 1168 isp->isp_osinfo.leact[seq] = 0; 1169 return; 1170 } 1171 1172 if (lep->le_status != LUN_OK) { 1173 xpt_print(ccb->ccb_h.path, 1174 "ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status); 1175 err: 1176 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1177 rls_lun_statep(isp, tptr); 1178 isp->isp_osinfo.leact[seq] = 0; 1179 ISPLOCK_2_CAMLOCK(isp); 1180 xpt_done(ccb); 1181 CAMLOCK_2_ISPLOCK(isp); 1182 return; 1183 } else { 1184 isp_prt(isp, ISP_LOGTDEBUG0, 1185 "isp_ledone: ENABLE/MODIFY done okay"); 1186 } 1187 1188 1189 if (cel->enable) { 1190 ccb->ccb_h.status = CAM_REQ_CMP; 1191 xpt_print(ccb->ccb_h.path, lfmt, "en"); 1192 rls_lun_statep(isp, tptr); 1193 isp->isp_osinfo.leact[seq] = 0; 1194 ISPLOCK_2_CAMLOCK(isp); 1195 xpt_done(ccb); 1196 CAMLOCK_2_ISPLOCK(isp); 1197 return; 1198 } 1199 1200 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) { 1201 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb), 1202 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) { 1203 xpt_print(ccb->ccb_h.path, 1204 "isp_ledone: isp_lun_cmd failed\n"); 1205 goto err; 1206 } 1207 rls_lun_statep(isp, tptr); 1208 return; 1209 } 1210 1211 xpt_print(ccb->ccb_h.path, lfmt, "dis"); 1212 rls_lun_statep(isp, tptr); 1213 destroy_lun_state(isp, tptr); 1214 ccb->ccb_h.status = CAM_REQ_CMP; 1215 isp->isp_osinfo.leact[seq] = 0; 1216 ISPLOCK_2_CAMLOCK(isp); 1217 xpt_done(ccb); 1218 CAMLOCK_2_ISPLOCK(isp); 1219 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) { 1220 int bus = XS_CHANNEL(ccb); 1221 av = bus << 31; 1222 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1223 if (av) { 1224 isp_prt(isp, ISP_LOGWARN, 1225 "disable target mode on channel %d failed", bus); 1226 } 1227 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1228 } 1229 } 1230 1231 1232 static cam_status 1233 isp_abort_tgt_ccb(ispsoftc_t *isp, union ccb *ccb) 1234 { 1235 tstate_t *tptr; 1236 struct ccb_hdr_slist *lp; 1237 struct ccb_hdr *curelm; 1238 int found, *ctr; 1239 union ccb *accb = ccb->cab.abort_ccb; 1240 1241 xpt_print(ccb->ccb_h.path, "aborting ccb %p\n", accb); 1242 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 1243 int badpath = 0; 1244 if (IS_FC(isp) && (accb->ccb_h.target_id != 1245 ((fcparam *) isp->isp_param)->isp_loopid)) { 1246 badpath = 1; 1247 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 1248 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 1249 badpath = 1; 1250 } 1251 if (badpath) { 1252 /* 1253 * Being restrictive 
about target ids is really about 1254 * making sure we're aborting for the right multi-tid 1255 * path. This doesn't really make much sense at present. 1256 */ 1257 #if 0 1258 return (CAM_PATH_INVALID); 1259 #endif 1260 } 1261 } 1262 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 1263 if (tptr == NULL) { 1264 xpt_print(ccb->ccb_h.path, "can't get statep\n"); 1265 return (CAM_PATH_INVALID); 1266 } 1267 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1268 lp = &tptr->atios; 1269 ctr = &tptr->atio_count; 1270 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 1271 lp = &tptr->inots; 1272 ctr = &tptr->inot_count; 1273 } else { 1274 rls_lun_statep(isp, tptr); 1275 xpt_print(ccb->ccb_h.path, "bad function code %d\n", 1276 accb->ccb_h.func_code); 1277 return (CAM_UA_ABORT); 1278 } 1279 curelm = SLIST_FIRST(lp); 1280 found = 0; 1281 if (curelm == &accb->ccb_h) { 1282 found = 1; 1283 SLIST_REMOVE_HEAD(lp, sim_links.sle); 1284 } else { 1285 while(curelm != NULL) { 1286 struct ccb_hdr *nextelm; 1287 1288 nextelm = SLIST_NEXT(curelm, sim_links.sle); 1289 if (nextelm == &accb->ccb_h) { 1290 found = 1; 1291 SLIST_NEXT(curelm, sim_links.sle) = 1292 SLIST_NEXT(nextelm, sim_links.sle); 1293 break; 1294 } 1295 curelm = nextelm; 1296 } 1297 } 1298 rls_lun_statep(isp, tptr); 1299 if (found) { 1300 (*ctr)--; 1301 accb->ccb_h.status = CAM_REQ_ABORTED; 1302 xpt_done(accb); 1303 return (CAM_REQ_CMP); 1304 } 1305 xpt_print(ccb->ccb_h.path, "ccb %p not found\n", accb); 1306 return (CAM_PATH_INVALID); 1307 } 1308 1309 static void 1310 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb) 1311 { 1312 void *qe; 1313 struct ccb_scsiio *cso = &ccb->csio; 1314 uint32_t nxti, optr, handle; 1315 uint8_t local[QENTRY_LEN]; 1316 1317 1318 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1319 xpt_print(ccb->ccb_h.path, 1320 "Request Queue Overflow in isp_target_start_ctio\n"); 1321 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1322 goto out; 1323 } 1324 memset(local, 0, QENTRY_LEN); 1325 1326 /* 1327 * We're either moving data or completing a command here. 
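	 *
	 * For Fibre Channel the CTIO2 built below takes one of two forms:
	 * with nothing left to move (dxfer_len == 0) it is a mode 1 entry
	 * carrying status and possibly sense, while otherwise it is a mode 0
	 * entry with CT2_DATA_IN or CT2_DATA_OUT set and a relative offset
	 * equal to what has already been transferred for this command.
	 * Residuals are computed against the data length advertised in the
	 * original ATIO: for example, if the ATIO advertised 4096 bytes and
	 * only 2048 were ever moved, the final status CTIO2 carries
	 * ct_resid == 2048 and the SCSI status is flagged CT2_DATA_UNDER.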
1328 */ 1329 1330 if (IS_FC(isp)) { 1331 atio_private_data_t *atp; 1332 ct2_entry_t *cto = (ct2_entry_t *) local; 1333 1334 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1335 cto->ct_header.rqs_entry_count = 1; 1336 if (FCPARAM(isp)->isp_2klogin) { 1337 ((ct2e_entry_t *)cto)->ct_iid = cso->init_id; 1338 } else { 1339 cto->ct_iid = cso->init_id; 1340 if (FCPARAM(isp)->isp_sccfw == 0) { 1341 cto->ct_lun = ccb->ccb_h.target_lun; 1342 } 1343 } 1344 1345 atp = isp_get_atpd(isp, cso->tag_id); 1346 if (atp == NULL) { 1347 xpt_print(ccb->ccb_h.path, 1348 "cannot find private data adjunct for tag %x\n", 1349 cso->tag_id); 1350 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 1351 goto out; 1352 } 1353 1354 cto->ct_rxid = cso->tag_id; 1355 if (cso->dxfer_len == 0) { 1356 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1357 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1358 cto->ct_flags |= CT2_SENDSTATUS; 1359 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1360 cto->ct_resid = 1361 atp->orig_datalen - atp->bytes_xfered; 1362 if (cto->ct_resid < 0) { 1363 cto->rsp.m1.ct_scsi_status |= 1364 CT2_DATA_OVER; 1365 } else if (cto->ct_resid > 0) { 1366 cto->rsp.m1.ct_scsi_status |= 1367 CT2_DATA_UNDER; 1368 } 1369 } 1370 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1371 int m = min(cso->sense_len, MAXRESPLEN); 1372 memcpy(cto->rsp.m1.ct_resp, 1373 &cso->sense_data, m); 1374 cto->rsp.m1.ct_senselen = m; 1375 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1376 } 1377 } else { 1378 cto->ct_flags |= CT2_FLAG_MODE0; 1379 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1380 cto->ct_flags |= CT2_DATA_IN; 1381 } else { 1382 cto->ct_flags |= CT2_DATA_OUT; 1383 } 1384 cto->ct_reloff = atp->bytes_xfered; 1385 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1386 cto->ct_flags |= CT2_SENDSTATUS; 1387 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1388 cto->ct_resid = 1389 atp->orig_datalen - 1390 (atp->bytes_xfered + cso->dxfer_len); 1391 if (cto->ct_resid < 0) { 1392 cto->rsp.m0.ct_scsi_status |= 1393 CT2_DATA_OVER; 1394 } else if (cto->ct_resid > 0) { 1395 cto->rsp.m0.ct_scsi_status |= 1396 CT2_DATA_UNDER; 1397 } 1398 } else { 1399 atp->last_xframt = cso->dxfer_len; 1400 } 1401 /* 1402 * If we're sending data and status back together, 1403 * we can't also send back sense data as well. 
1404 */ 1405 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1406 } 1407 1408 if (cto->ct_flags & CT2_SENDSTATUS) { 1409 isp_prt(isp, ISP_LOGTDEBUG0, 1410 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1411 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1412 cso->dxfer_len, cto->ct_resid); 1413 cto->ct_flags |= CT2_CCINCR; 1414 atp->state = ATPD_STATE_LAST_CTIO; 1415 } else { 1416 atp->state = ATPD_STATE_CTIO; 1417 } 1418 cto->ct_timeout = 10; 1419 } else { 1420 ct_entry_t *cto = (ct_entry_t *) local; 1421 1422 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1423 cto->ct_header.rqs_entry_count = 1; 1424 cto->ct_iid = cso->init_id; 1425 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1426 cto->ct_tgt = ccb->ccb_h.target_id; 1427 cto->ct_lun = ccb->ccb_h.target_lun; 1428 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1429 if (AT_HAS_TAG(cso->tag_id)) { 1430 cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id); 1431 cto->ct_flags |= CT_TQAE; 1432 } 1433 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1434 cto->ct_flags |= CT_NODISC; 1435 } 1436 if (cso->dxfer_len == 0) { 1437 cto->ct_flags |= CT_NO_DATA; 1438 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1439 cto->ct_flags |= CT_DATA_IN; 1440 } else { 1441 cto->ct_flags |= CT_DATA_OUT; 1442 } 1443 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1444 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1445 cto->ct_scsi_status = cso->scsi_status; 1446 cto->ct_resid = cso->resid; 1447 isp_prt(isp, ISP_LOGTDEBUG0, 1448 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1449 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1450 cso->tag_id); 1451 } 1452 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1453 cto->ct_timeout = 10; 1454 } 1455 1456 if (isp_save_xs_tgt(isp, ccb, &handle)) { 1457 xpt_print(ccb->ccb_h.path, 1458 "No XFLIST pointers for isp_target_start_ctio\n"); 1459 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1460 goto out; 1461 } 1462 1463 1464 /* 1465 * Call the dma setup routines for this entry (and any subsequent 1466 * CTIOs) if there's data to move, and then tell the f/w it's got 1467 * new things to play with. As with isp_start's usage of DMA setup, 1468 * any swizzling is done in the machine dependent layer. Because 1469 * of this, we put the request onto the queue area first in native 1470 * format. 
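	 *
	 * The return from ISP_DMASETUP() drives what happens next: CMD_QUEUED
	 * means the entry is on the request queue, so the queue index is
	 * bumped and the CCB is marked CAM_SIM_QUEUED; CMD_EAGAIN asks CAM to
	 * requeue the request; anything else falls through, the target handle
	 * is released, and the CCB is completed with whatever error was set.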
1471 */ 1472 1473 if (IS_FC(isp)) { 1474 ct2_entry_t *cto = (ct2_entry_t *) local; 1475 cto->ct_syshandle = handle; 1476 } else { 1477 ct_entry_t *cto = (ct_entry_t *) local; 1478 cto->ct_syshandle = handle; 1479 } 1480 1481 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1482 case CMD_QUEUED: 1483 ISP_ADD_REQUEST(isp, nxti); 1484 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1485 return; 1486 1487 case CMD_EAGAIN: 1488 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1489 break; 1490 1491 default: 1492 break; 1493 } 1494 isp_destroy_tgt_handle(isp, handle); 1495 1496 out: 1497 ISPLOCK_2_CAMLOCK(isp); 1498 xpt_done(ccb); 1499 CAMLOCK_2_ISPLOCK(isp); 1500 } 1501 1502 static void 1503 isp_refire_putback_atio(void *arg) 1504 { 1505 int s = splcam(); 1506 isp_target_putback_atio(arg); 1507 splx(s); 1508 } 1509 1510 static void 1511 isp_target_putback_atio(union ccb *ccb) 1512 { 1513 ispsoftc_t *isp; 1514 struct ccb_scsiio *cso; 1515 uint32_t nxti, optr; 1516 void *qe; 1517 1518 isp = XS_ISP(ccb); 1519 1520 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1521 xpt_print(ccb->ccb_h.path, 1522 "isp_target_putback_atio: Request Queue Overflow\n"); 1523 (void) timeout(isp_refire_putback_atio, ccb, 10); 1524 return; 1525 } 1526 memset(qe, 0, QENTRY_LEN); 1527 cso = &ccb->csio; 1528 if (IS_FC(isp)) { 1529 at2_entry_t local, *at = &local; 1530 MEMZERO(at, sizeof (at2_entry_t)); 1531 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1532 at->at_header.rqs_entry_count = 1; 1533 if (FCPARAM(isp)->isp_sccfw) { 1534 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1535 } else { 1536 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1537 } 1538 at->at_status = CT_OK; 1539 at->at_rxid = cso->tag_id; 1540 at->at_iid = cso->ccb_h.target_id; 1541 isp_put_atio2(isp, at, qe); 1542 } else { 1543 at_entry_t local, *at = &local; 1544 MEMZERO(at, sizeof (at_entry_t)); 1545 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1546 at->at_header.rqs_entry_count = 1; 1547 at->at_iid = cso->init_id; 1548 at->at_iid |= XS_CHANNEL(ccb) << 7; 1549 at->at_tgt = cso->ccb_h.target_id; 1550 at->at_lun = cso->ccb_h.target_lun; 1551 at->at_status = CT_OK; 1552 at->at_tag_val = AT_GET_TAG(cso->tag_id); 1553 at->at_handle = AT_GET_HANDLE(cso->tag_id); 1554 isp_put_atio(isp, at, qe); 1555 } 1556 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); 1557 ISP_ADD_REQUEST(isp, nxti); 1558 isp_complete_ctio(ccb); 1559 } 1560 1561 static void 1562 isp_complete_ctio(union ccb *ccb) 1563 { 1564 ISPLOCK_2_CAMLOCK(isp); 1565 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1566 ccb->ccb_h.status |= CAM_REQ_CMP; 1567 } 1568 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1569 xpt_done(ccb); 1570 CAMLOCK_2_ISPLOCK(isp); 1571 } 1572 1573 /* 1574 * Handle ATIO stuff that the generic code can't. 1575 * This means handling CDBs. 1576 */ 1577 1578 static int 1579 isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep) 1580 { 1581 tstate_t *tptr; 1582 int status, bus, iswildcard; 1583 struct ccb_accept_tio *atiop; 1584 1585 /* 1586 * The firmware status (except for the QLTM_SVALID bit) 1587 * indicates why this ATIO was sent to us. 1588 * 1589 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1590 * 1591 * If the DISCONNECTS DISABLED bit is set in the flags field, 1592 * we're still connected on the SCSI bus. 1593 */ 1594 status = aep->at_status; 1595 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 1596 /* 1597 * Bus Phase Sequence error. We should have sense data 1598 * suggested by the f/w. 
I'm not sure quite yet what 1599 * to do about this for CAM. 1600 */ 1601 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1602 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1603 return (0); 1604 } 1605 if ((status & ~QLTM_SVALID) != AT_CDB) { 1606 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1607 status); 1608 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1609 return (0); 1610 } 1611 1612 bus = GET_BUS_VAL(aep->at_iid); 1613 tptr = get_lun_statep(isp, bus, aep->at_lun); 1614 if (tptr == NULL) { 1615 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1616 if (tptr == NULL) { 1617 /* 1618 * Because we can't autofeed sense data back with 1619 * a command for parallel SCSI, we can't give back 1620 * a CHECK CONDITION. We'll give back a BUSY status 1621 * instead. This works out okay because the only 1622 * time we should, in fact, get this, is in the 1623 * case that somebody configured us without the 1624 * blackhole driver, so they get what they deserve. 1625 */ 1626 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1627 return (0); 1628 } 1629 iswildcard = 1; 1630 } else { 1631 iswildcard = 0; 1632 } 1633 1634 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1635 if (atiop == NULL) { 1636 /* 1637 * Because we can't autofeed sense data back with 1638 * a command for parallel SCSI, we can't give back 1639 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1640 * instead. This works out okay because the only time we 1641 * should, in fact, get this, is in the case that we've 1642 * run out of ATIOS. 1643 */ 1644 xpt_print(tptr->owner, 1645 "no ATIOS for lun %d from initiator %d on channel %d\n", 1646 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1647 if (aep->at_flags & AT_TQAE) 1648 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1649 else 1650 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1651 rls_lun_statep(isp, tptr); 1652 return (0); 1653 } 1654 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1655 tptr->atio_count--; 1656 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1657 aep->at_lun, tptr->atio_count); 1658 if (iswildcard) { 1659 atiop->ccb_h.target_id = aep->at_tgt; 1660 atiop->ccb_h.target_lun = aep->at_lun; 1661 } 1662 if (aep->at_flags & AT_NODISC) { 1663 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1664 } else { 1665 atiop->ccb_h.flags = 0; 1666 } 1667 1668 if (status & QLTM_SVALID) { 1669 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1670 atiop->sense_len = amt; 1671 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1672 } else { 1673 atiop->sense_len = 0; 1674 } 1675 1676 atiop->init_id = GET_IID_VAL(aep->at_iid); 1677 atiop->cdb_len = aep->at_cdblen; 1678 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1679 atiop->ccb_h.status = CAM_CDB_RECVD; 1680 /* 1681 * Construct a tag 'id' based upon tag value (which may be 0..255) 1682 * and the handle (which we have to preserve). 1683 */ 1684 AT_MAKE_TAGID(atiop->tag_id, bus, device_get_unit(isp->isp_dev), aep); 1685 if (aep->at_flags & AT_TQAE) { 1686 atiop->tag_action = aep->at_tag_type; 1687 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1688 } 1689 xpt_done((union ccb*)atiop); 1690 isp_prt(isp, ISP_LOGTDEBUG0, 1691 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", 1692 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), 1693 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, 1694 aep->at_tag_type, (aep->at_flags & AT_NODISC)? 
1695 "nondisc" : "disconnecting"); 1696 rls_lun_statep(isp, tptr); 1697 return (0); 1698 } 1699 1700 static int 1701 isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) 1702 { 1703 lun_id_t lun; 1704 tstate_t *tptr; 1705 struct ccb_accept_tio *atiop; 1706 atio_private_data_t *atp; 1707 1708 /* 1709 * The firmware status (except for the QLTM_SVALID bit) 1710 * indicates why this ATIO was sent to us. 1711 * 1712 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1713 */ 1714 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1715 isp_prt(isp, ISP_LOGWARN, 1716 "bogus atio (0x%x) leaked to platform", aep->at_status); 1717 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1718 return (0); 1719 } 1720 1721 if (FCPARAM(isp)->isp_sccfw) { 1722 lun = aep->at_scclun; 1723 } else { 1724 lun = aep->at_lun; 1725 } 1726 tptr = get_lun_statep(isp, 0, lun); 1727 if (tptr == NULL) { 1728 isp_prt(isp, ISP_LOGTDEBUG0, 1729 "[0x%x] no state pointer for lun %d", aep->at_rxid, lun); 1730 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1731 if (tptr == NULL) { 1732 isp_endcmd(isp, aep, 1733 SCSI_STATUS_CHECK_COND | ECMD_SVALID | 1734 (0x5 << 12) | (0x25 << 16), 0); 1735 return (0); 1736 } 1737 } 1738 1739 atp = isp_get_atpd(isp, 0); 1740 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1741 if (atiop == NULL || atp == NULL) { 1742 1743 /* 1744 * Because we can't autofeed sense data back with 1745 * a command for parallel SCSI, we can't give back 1746 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1747 * instead. This works out okay because the only time we 1748 * should, in fact, get this, is in the case that we've 1749 * run out of ATIOS. 1750 */ 1751 xpt_print(tptr->owner, 1752 "no %s for lun %d from initiator %d\n", 1753 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : 1754 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid); 1755 rls_lun_statep(isp, tptr); 1756 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1757 return (0); 1758 } 1759 atp->state = ATPD_STATE_ATIO; 1760 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1761 tptr->atio_count--; 1762 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1763 lun, tptr->atio_count); 1764 1765 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1766 atiop->ccb_h.target_id = FCPARAM(isp)->isp_loopid; 1767 atiop->ccb_h.target_lun = lun; 1768 } 1769 /* 1770 * We don't get 'suggested' sense data as we do with SCSI cards. 1771 */ 1772 atiop->sense_len = 0; 1773 1774 atiop->init_id = aep->at_iid; 1775 atiop->cdb_len = ATIO2_CDBLEN; 1776 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1777 atiop->ccb_h.status = CAM_CDB_RECVD; 1778 atiop->tag_id = aep->at_rxid; 1779 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1780 case ATIO2_TC_ATTR_SIMPLEQ: 1781 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1782 break; 1783 case ATIO2_TC_ATTR_HEADOFQ: 1784 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1785 break; 1786 case ATIO2_TC_ATTR_ORDERED: 1787 atiop->tag_action = MSG_ORDERED_Q_TAG; 1788 break; 1789 case ATIO2_TC_ATTR_ACAQ: /* ?? 
	 */
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;

	atp->tag = atiop->tag_id;
	atp->lun = lun;
	atp->orig_datalen = aep->at_datalen;
	atp->last_xframt = 0;
	atp->bytes_xfered = 0;
	atp->state = ATPD_STATE_CAM;
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done((union ccb*)atiop);

	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_ctio(ispsoftc_t *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;
	uint16_t tval;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find adjunct for %x after I/O",
			    ct->ct_rxid);
			return (0);
		}
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			resid = ct->ct_resid;
			atp->bytes_xfered += (atp->last_xframt - resid);
			atp->last_xframt = 0;
		}
		if (sentstatus || !ok) {
			atp->tag = 0;
		}
		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    resid, sentstatus? "FIN" : "MID");
		tval = ct->ct_rxid;

		/* XXX: should really come after isp_complete_ctio */
		atp->state = ATPD_STATE_PDON;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
		 */
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if (ct->ct_status & QLTM_SVALID) {
			char *sp = (char *)ct;
			sp += CTIO_SENSE_OFFSET;
			ccb->csio.sense_len =
			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
			resid = ct->ct_resid;
		}
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
		    ct->ct_status, ct->ct_flags, resid,
		    sentstatus?
"FIN" : "MID"); 1891 tval = ct->ct_fwhandle; 1892 } 1893 ccb->csio.resid += resid; 1894 1895 /* 1896 * We're here either because intermediate data transfers are done 1897 * and/or the final status CTIO (which may have joined with a 1898 * Data Transfer) is done. 1899 * 1900 * In any case, for this platform, the upper layers figure out 1901 * what to do next, so all we do here is collect status and 1902 * pass information along. Any DMA handles have already been 1903 * freed. 1904 */ 1905 if (notify_cam == 0) { 1906 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1907 return (0); 1908 } 1909 1910 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1911 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1912 1913 if (!ok) { 1914 isp_target_putback_atio(ccb); 1915 } else { 1916 isp_complete_ctio(ccb); 1917 1918 } 1919 return (0); 1920 } 1921 1922 static int 1923 isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inp) 1924 { 1925 return (0); /* XXXX */ 1926 } 1927 1928 static int 1929 isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp) 1930 { 1931 1932 switch (inp->in_status) { 1933 case IN_PORT_LOGOUT: 1934 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1935 inp->in_iid); 1936 break; 1937 case IN_PORT_CHANGED: 1938 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1939 inp->in_iid); 1940 break; 1941 case IN_GLOBAL_LOGO: 1942 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1943 break; 1944 case IN_ABORT_TASK: 1945 { 1946 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1947 struct ccb_immed_notify *inot = NULL; 1948 1949 if (atp) { 1950 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1951 if (tptr) { 1952 inot = (struct ccb_immed_notify *) 1953 SLIST_FIRST(&tptr->inots); 1954 if (inot) { 1955 tptr->inot_count--; 1956 SLIST_REMOVE_HEAD(&tptr->inots, 1957 sim_links.sle); 1958 isp_prt(isp, ISP_LOGTDEBUG0, 1959 "Take FREE INOT count now %d", 1960 tptr->inot_count); 1961 } 1962 } 1963 isp_prt(isp, ISP_LOGWARN, 1964 "abort task RX_ID %x IID %d state %d", 1965 inp->in_seqid, inp->in_iid, atp->state); 1966 } else { 1967 isp_prt(isp, ISP_LOGWARN, 1968 "abort task RX_ID %x from iid %d, state unknown", 1969 inp->in_seqid, inp->in_iid); 1970 } 1971 if (inot) { 1972 inot->initiator_id = inp->in_iid; 1973 inot->sense_len = 0; 1974 inot->message_args[0] = MSG_ABORT_TAG; 1975 inot->message_args[1] = inp->in_seqid & 0xff; 1976 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; 1977 inot->ccb_h.status = CAM_MESSAGE_RECV; 1978 xpt_done((union ccb *)inot); 1979 } 1980 break; 1981 } 1982 default: 1983 break; 1984 } 1985 return (0); 1986 } 1987 #endif 1988 1989 static void 1990 isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) 1991 { 1992 struct cam_sim *sim; 1993 ispsoftc_t *isp; 1994 1995 sim = (struct cam_sim *)cbarg; 1996 isp = (ispsoftc_t *) cam_sim_softc(sim); 1997 switch (code) { 1998 case AC_LOST_DEVICE: 1999 if (IS_SCSI(isp)) { 2000 uint16_t oflags, nflags; 2001 sdparam *sdp = isp->isp_param; 2002 int tgt; 2003 2004 tgt = xpt_path_target_id(path); 2005 if (tgt >= 0) { 2006 sdp += cam_sim_bus(sim); 2007 ISP_LOCK(isp); 2008 nflags = sdp->isp_devparam[tgt].nvrm_flags; 2009 #ifndef ISP_TARGET_MODE 2010 nflags &= DPARM_SAFE_DFLT; 2011 if (isp->isp_loaded_fw) { 2012 nflags |= DPARM_NARROW | DPARM_ASYNC; 2013 } 2014 #else 2015 nflags = DPARM_DEFAULT; 2016 #endif 2017 oflags = sdp->isp_devparam[tgt].goal_flags; 2018 sdp->isp_devparam[tgt].goal_flags = nflags; 2019 sdp->isp_devparam[tgt].dev_update = 1; 2020 isp->isp_update |= (1 << 
cam_sim_bus(sim)); 2021 (void) isp_control(isp, 2022 ISPCTL_UPDATE_PARAMS, NULL); 2023 sdp->isp_devparam[tgt].goal_flags = oflags; 2024 ISP_UNLOCK(isp); 2025 } 2026 } 2027 break; 2028 default: 2029 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); 2030 break; 2031 } 2032 } 2033 2034 static void 2035 isp_poll(struct cam_sim *sim) 2036 { 2037 ispsoftc_t *isp = cam_sim_softc(sim); 2038 uint32_t isr; 2039 uint16_t sema, mbox; 2040 2041 ISP_LOCK(isp); 2042 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2043 isp_intr(isp, isr, sema, mbox); 2044 } 2045 ISP_UNLOCK(isp); 2046 } 2047 2048 2049 static int isp_watchdog_work(ispsoftc_t *, XS_T *); 2050 2051 static int 2052 isp_watchdog_work(ispsoftc_t *isp, XS_T *xs) 2053 { 2054 uint32_t handle; 2055 2056 /* 2057 * We've decided this command is dead. Make sure we're not trying 2058 * to kill a command that's already dead by getting its handle and 2059 * seeing whether it's still alive. 2060 */ 2061 ISP_LOCK(isp); 2062 handle = isp_find_handle(isp, xs); 2063 if (handle) { 2064 uint32_t isr; 2065 uint16_t sema, mbox; 2066 2067 if (XS_CMD_DONE_P(xs)) { 2068 isp_prt(isp, ISP_LOGDEBUG1, 2069 "watchdog found done cmd (handle 0x%x)", handle); 2070 ISP_UNLOCK(isp); 2071 return (1); 2072 } 2073 2074 if (XS_CMD_WDOG_P(xs)) { 2075 isp_prt(isp, ISP_LOGDEBUG2, 2076 "recursive watchdog (handle 0x%x)", handle); 2077 ISP_UNLOCK(isp); 2078 return (1); 2079 } 2080 2081 XS_CMD_S_WDOG(xs); 2082 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2083 isp_intr(isp, isr, sema, mbox); 2084 } 2085 if (XS_CMD_DONE_P(xs)) { 2086 isp_prt(isp, ISP_LOGDEBUG2, 2087 "watchdog cleanup for handle 0x%x", handle); 2088 ISPLOCK_2_CAMLOCK(isp); 2089 xpt_done((union ccb *) xs); 2090 CAMLOCK_2_ISPLOCK(isp); 2091 } else if (XS_CMD_GRACE_P(xs)) { 2092 /* 2093 * Make sure the command is *really* dead before we 2094 * release the handle (and DMA resources) for reuse. 2095 */ 2096 (void) isp_control(isp, ISPCTL_ABORT_CMD, xs); 2097 2098 /* 2099 * After this point, the command is really dead. 2100 */ 2101 if (XS_XFRLEN(xs)) { 2102 ISP_DMAFREE(isp, xs, handle); 2103 } 2104 isp_destroy_handle(isp, handle); 2105 xpt_print(xs->ccb_h.path, 2106 "watchdog timeout for handle 0x%x\n", handle); 2107 XS_SETERR(xs, CAM_CMD_TIMEOUT); 2108 XS_CMD_C_WDOG(xs); 2109 ISPLOCK_2_CAMLOCK(isp); 2110 isp_done(xs); 2111 CAMLOCK_2_ISPLOCK(isp); 2112 } else { 2113 XS_CMD_C_WDOG(xs); 2114 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 2115 XS_CMD_S_GRACE(xs); 2116 isp->isp_sendmarker |= 1 << XS_CHANNEL(xs); 2117 } 2118 ISP_UNLOCK(isp); 2119 return (1); 2120 } 2121 ISP_UNLOCK(isp); 2122 return (0); 2123 } 2124 2125 static void 2126 isp_watchdog(void *arg) 2127 { 2128 ispsoftc_t *isp; 2129 XS_T *xs = arg; 2130 for (isp = isplist; isp != NULL; isp = isp->isp_osinfo.next) { 2131 if (isp_watchdog_work(isp, xs)) { 2132 break; 2133 } 2134 } 2135 if (isp == NULL) { 2136 printf("isp_watchdog: nobody had %p active\n", arg); 2137 } 2138 } 2139 2140 2141 #if __FreeBSD_version >= 600000 2142 static void 2143 isp_make_here(ispsoftc_t *isp, int tgt) 2144 { 2145 union ccb *ccb; 2146 ISPLOCK_2_CAMLOCK(isp); 2147 /* 2148 * Allocate a CCB, create a wildcard path for this bus, 2149 * and schedule a rescan.
2150 */ 2151 ccb = xpt_alloc_ccb_nowait(); 2152 if (ccb == NULL) { 2153 isp_prt(isp, ISP_LOGWARN, "unable to alloc CCB for rescan"); 2154 CAMLOCK_2_ISPLOCK(isp); 2155 return; 2156 } 2157 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, 2158 cam_sim_path(isp->isp_sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2159 CAMLOCK_2_ISPLOCK(isp); 2160 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); 2161 xpt_free_ccb(ccb); 2162 return; 2163 } 2164 xpt_rescan(ccb); 2165 CAMLOCK_2_ISPLOCK(isp); 2166 } 2167 2168 static void 2169 isp_make_gone(ispsoftc_t *isp, int tgt) 2170 { 2171 struct cam_path *tp; 2172 ISPLOCK_2_CAMLOCK(isp); 2173 if (xpt_create_path(&tp, NULL, cam_sim_path(isp->isp_sim), tgt, 2174 CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 2175 xpt_async(AC_LOST_DEVICE, tp, NULL); 2176 xpt_free_path(tp); 2177 } 2178 CAMLOCK_2_ISPLOCK(isp); 2179 } 2180 #else 2181 #define isp_make_here(isp, tgt) do { ; } while (0) 2182 #define isp_make_gone(isp, tgt) do { ; } while (0) 2183 #endif 2184 2185 2186 /* 2187 * Gone Device Timer Function- when we have decided that a device has gone 2188 * away, we wait a specific period of time prior to telling the OS it has 2189 * gone away. 2190 * 2191 * This timer function fires once a second and then scans the port database 2192 * for devices that are marked dead but still have a virtual target assigned. 2193 * We decrement a counter for that port database entry, and when it hits zero, 2194 * we tell the OS the device has gone away. 2195 */ 2196 static void 2197 isp_gdt(void *arg) 2198 { 2199 ispsoftc_t *isp = arg; 2200 fcportdb_t *lp; 2201 int dbidx, tgt, more_to_do = 0; 2202 2203 isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired"); 2204 ISP_LOCK(isp); 2205 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2206 lp = &FCPARAM(isp)->portdb[dbidx]; 2207 2208 if (lp->state != FC_PORTDB_STATE_ZOMBIE) { 2209 continue; 2210 } 2211 if (lp->ini_map_idx == 0) { 2212 continue; 2213 } 2214 if (lp->new_reserved == 0) { 2215 continue; 2216 } 2217 lp->new_reserved -= 1; 2218 if (lp->new_reserved != 0) { 2219 more_to_do++; 2220 continue; 2221 } 2222 tgt = lp->ini_map_idx - 1; 2223 FCPARAM(isp)->isp_ini_map[tgt] = 0; 2224 lp->ini_map_idx = 0; 2225 lp->state = FC_PORTDB_STATE_NIL; 2226 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, 2227 "Gone Device Timeout"); 2228 isp_make_gone(isp, tgt); 2229 } 2230 if (more_to_do) { 2231 isp->isp_osinfo.gdt = timeout(isp_gdt, isp, hz); 2232 } else { 2233 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2234 "stopping Gone Device Timer"); 2235 isp->isp_osinfo.gdt_running = 0; 2236 } 2237 ISP_UNLOCK(isp); 2238 } 2239 2240 /* 2241 * Loop Down Timer Function- when the loop goes down, a timer is started, 2242 * and after it expires we come here and take all probational devices that 2243 * the OS knows about and tell the OS that they've gone away. 2244 * 2245 * We don't clear the devices out of our port database because, when the loop 2246 * comes back up, we have to do some actual cleanup with the chip at that 2247 * point (implicit PLOGO, e.g., to get the chip's port database state right). 2248 */ 2249 static void 2250 isp_ldt(void *arg) 2251 { 2252 ispsoftc_t *isp = arg; 2253 fcportdb_t *lp; 2254 int dbidx, tgt; 2255 2256 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired"); 2257 ISP_LOCK(isp); 2258 2259 /* 2260 * Notify the OS of all targets that we now consider to have departed.
2261 */ 2262 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2263 lp = &FCPARAM(isp)->portdb[dbidx]; 2264 2265 if (lp->state != FC_PORTDB_STATE_PROBATIONAL) { 2266 continue; 2267 } 2268 if (lp->ini_map_idx == 0) { 2269 continue; 2270 } 2271 2272 /* 2273 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST! 2274 */ 2275 2276 /* 2277 * Mark that we've announced that this device is gone.... 2278 */ 2279 lp->reserved = 1; 2280 2281 /* 2282 * but *don't* change the state of the entry. Just clear 2283 * any target id stuff and announce to CAM that the 2284 * device is gone. This way any necessary PLOGO stuff 2285 * will happen when loop comes back up. 2286 */ 2287 2288 tgt = lp->ini_map_idx - 1; 2289 FCPARAM(isp)->isp_ini_map[tgt] = 0; 2290 lp->ini_map_idx = 0; 2291 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, 2292 "Loop Down Timeout"); 2293 isp_make_gone(isp, tgt); 2294 } 2295 2296 /* 2297 * The loop down timer has expired. Wake up the kthread 2298 * to notice that fact (or make it false). 2299 */ 2300 isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1; 2301 #if __FreeBSD_version < 500000 2302 wakeup(&isp->isp_osinfo.kproc); 2303 #else 2304 #ifdef ISP_SMPLOCK 2305 cv_signal(&isp->isp_osinfo.kthread_cv); 2306 #else 2307 wakeup(&isp->isp_osinfo.kthread_cv); 2308 #endif 2309 #endif 2310 ISP_UNLOCK(isp); 2311 } 2312 2313 static void 2314 isp_kthread(void *arg) 2315 { 2316 ispsoftc_t *isp = arg; 2317 int slp = 0; 2318 #if __FreeBSD_version < 500000 2319 int s; 2320 2321 s = splcam(); 2322 #else 2323 #ifdef ISP_SMPLOCK 2324 mtx_lock(&isp->isp_lock); 2325 #else 2326 mtx_lock(&Giant); 2327 #endif 2328 #endif 2329 /* 2330 * The first loop is for our usage where we have yet to have 2331 * gotten good fibre channel state. 2332 */ 2333 for (;;) { 2334 int wasfrozen, lb, lim; 2335 2336 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2337 "isp_kthread: checking FC state"); 2338 isp->isp_osinfo.mbox_sleep_ok = 1; 2339 lb = isp_fc_runstate(isp, 250000); 2340 isp->isp_osinfo.mbox_sleep_ok = 0; 2341 if (lb) { 2342 /* 2343 * Increment loop down time by the last sleep interval 2344 */ 2345 isp->isp_osinfo.loop_down_time += slp; 2346 2347 if (lb < 0) { 2348 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2349 "kthread: FC loop not up (down count %d)", 2350 isp->isp_osinfo.loop_down_time); 2351 } else { 2352 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2353 "kthread: FC got to %d (down count %d)", 2354 lb, isp->isp_osinfo.loop_down_time); 2355 } 2356 2357 2358 /* 2359 * If we've never seen loop up and we've waited longer 2360 * than quickboot time, or we've seen loop up but we've 2361 * waited longer than loop_down_limit, give up and go 2362 * to sleep until loop comes up. 
2363 */ 2364 if (FCPARAM(isp)->loop_seen_once == 0) { 2365 lim = isp_quickboot_time; 2366 } else { 2367 lim = isp->isp_osinfo.loop_down_limit; 2368 } 2369 if (isp->isp_osinfo.loop_down_time >= lim) { 2370 isp_freeze_loopdown(isp, "loop limit hit"); 2371 slp = 0; 2372 } else if (isp->isp_osinfo.loop_down_time < 10) { 2373 slp = 1; 2374 } else if (isp->isp_osinfo.loop_down_time < 30) { 2375 slp = 5; 2376 } else if (isp->isp_osinfo.loop_down_time < 60) { 2377 slp = 10; 2378 } else if (isp->isp_osinfo.loop_down_time < 120) { 2379 slp = 20; 2380 } else { 2381 slp = 30; 2382 } 2383 2384 } else { 2385 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2386 "isp_kthread: FC state OK"); 2387 isp->isp_osinfo.loop_down_time = 0; 2388 slp = 0; 2389 } 2390 2391 /* 2392 * If we'd frozen the simq, unfreeze it now so that CAM 2393 * can start sending us commands. If the FC state isn't 2394 * okay yet, they'll hit that in isp_start which will 2395 * freeze the queue again. 2396 */ 2397 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 2398 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 2399 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 2400 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2401 "isp_kthread: releasing simq"); 2402 ISPLOCK_2_CAMLOCK(isp); 2403 xpt_release_simq(isp->isp_sim, 1); 2404 CAMLOCK_2_ISPLOCK(isp); 2405 } 2406 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2407 "isp_kthread: sleep time %d", slp); 2408 #if __FreeBSD_version < 500000 2409 tsleep(&isp->isp_osinfo.kproc, PRIBIO, "ispf", 2410 slp * hz); 2411 #else 2412 #ifdef ISP_SMPLOCK 2413 cv_timed_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock, 2414 slp * hz); 2415 #else 2416 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "ispf", 2417 slp * hz); 2418 #endif 2419 #endif 2420 /* 2421 * If slp is zero, we're waking up for the first time after 2422 * things have been okay. In this case, we set a deferral state 2423 * for all commands and delay hysteresis seconds before starting 2424 * the FC state evaluation. This gives the loop/fabric a chance 2425 * to settle. 2426 */ 2427 if (slp == 0 && isp->isp_osinfo.hysteresis) { 2428 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2429 "isp_kthread: sleep hysteresis tick time %d", 2430 isp->isp_osinfo.hysteresis * hz); 2431 (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT", 2432 (isp->isp_osinfo.hysteresis * hz)); 2433 } 2434 } 2435 } 2436 2437 static void 2438 isp_action(struct cam_sim *sim, union ccb *ccb) 2439 { 2440 int bus, tgt, error, lim; 2441 ispsoftc_t *isp; 2442 struct ccb_trans_settings *cts; 2443 2444 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2445 2446 isp = (ispsoftc_t *)cam_sim_softc(sim); 2447 ccb->ccb_h.sim_priv.entries[0].field = 0; 2448 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2449 if (isp->isp_state != ISP_RUNSTATE && 2450 ccb->ccb_h.func_code == XPT_SCSI_IO) { 2451 CAMLOCK_2_ISPLOCK(isp); 2452 isp_init(isp); 2453 if (isp->isp_state != ISP_INITSTATE) { 2454 ISP_UNLOCK(isp); 2455 /* 2456 * Lie. Say it was a selection timeout. 2457 */ 2458 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 2459 xpt_freeze_devq(ccb->ccb_h.path, 1); 2460 xpt_done(ccb); 2461 return; 2462 } 2463 isp->isp_state = ISP_RUNSTATE; 2464 ISPLOCK_2_CAMLOCK(isp); 2465 } 2466 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2467 2468 2469 switch (ccb->ccb_h.func_code) { 2470 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2471 /* 2472 * Do a couple of preliminary checks... 
2473 */ 2474 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2475 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2476 ccb->ccb_h.status = CAM_REQ_INVALID; 2477 xpt_done(ccb); 2478 break; 2479 } 2480 } 2481 #ifdef DIAGNOSTIC 2482 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2483 xpt_print(ccb->ccb_h.path, "invalid target\n"); 2484 ccb->ccb_h.status = CAM_PATH_INVALID; 2485 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2486 xpt_print(ccb->ccb_h.path, "invalid lun\n"); 2487 ccb->ccb_h.status = CAM_PATH_INVALID; 2488 } 2489 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2490 xpt_done(ccb); 2491 break; 2492 } 2493 #endif 2494 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 2495 CAMLOCK_2_ISPLOCK(isp); 2496 error = isp_start((XS_T *) ccb); 2497 switch (error) { 2498 case CMD_QUEUED: 2499 XS_CMD_S_CLEAR(ccb); 2500 ISPLOCK_2_CAMLOCK(isp); 2501 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2502 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2503 int ms = ccb->ccb_h.timeout; 2504 if (ms == CAM_TIME_DEFAULT) { 2505 ms = 60*1000; 2506 } 2507 ccb->ccb_h.timeout_ch = 2508 timeout(isp_watchdog, ccb, isp_mstohz(ms)); 2509 } else { 2510 callout_handle_init(&ccb->ccb_h.timeout_ch); 2511 } 2512 break; 2513 case CMD_RQLATER: 2514 /* 2515 * This can only happen for Fibre Channel 2516 */ 2517 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 2518 2519 /* 2520 * Handle initial and subsequent loop down cases 2521 */ 2522 if (FCPARAM(isp)->loop_seen_once == 0) { 2523 lim = isp_quickboot_time; 2524 } else { 2525 lim = isp->isp_osinfo.loop_down_limit; 2526 } 2527 if (isp->isp_osinfo.loop_down_time >= lim) { 2528 isp_prt(isp, ISP_LOGDEBUG0, 2529 "%d.%d downtime (%d) > lim (%d)", 2530 XS_TGT(ccb), XS_LUN(ccb), 2531 isp->isp_osinfo.loop_down_time, lim); 2532 ccb->ccb_h.status = 2533 CAM_SEL_TIMEOUT|CAM_DEV_QFRZN; 2534 xpt_freeze_devq(ccb->ccb_h.path, 1); 2535 ISPLOCK_2_CAMLOCK(isp); 2536 xpt_done(ccb); 2537 break; 2538 } 2539 isp_prt(isp, ISP_LOGDEBUG0, 2540 "%d.%d retry later", XS_TGT(ccb), XS_LUN(ccb)); 2541 /* 2542 * Otherwise, retry in a while. 2543 */ 2544 ISPLOCK_2_CAMLOCK(isp); 2545 cam_freeze_devq(ccb->ccb_h.path); 2546 cam_release_devq(ccb->ccb_h.path, 2547 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); 2548 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2549 xpt_done(ccb); 2550 break; 2551 case CMD_EAGAIN: 2552 ISPLOCK_2_CAMLOCK(isp); 2553 cam_freeze_devq(ccb->ccb_h.path); 2554 cam_release_devq(ccb->ccb_h.path, 2555 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); 2556 xpt_done(ccb); 2557 break; 2558 case CMD_COMPLETE: 2559 isp_done((struct ccb_scsiio *) ccb); 2560 ISPLOCK_2_CAMLOCK(isp); 2561 break; 2562 default: 2563 ISPLOCK_2_CAMLOCK(isp); 2564 isp_prt(isp, ISP_LOGERR, 2565 "What's this? 
0x%x at %d in file %s", 2566 error, __LINE__, __FILE__); 2567 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2568 xpt_done(ccb); 2569 } 2570 break; 2571 2572 #ifdef ISP_TARGET_MODE 2573 case XPT_EN_LUN: /* Enable LUN as a target */ 2574 { 2575 int seq, i; 2576 CAMLOCK_2_ISPLOCK(isp); 2577 seq = isp_en_lun(isp, ccb); 2578 if (seq < 0) { 2579 ISPLOCK_2_CAMLOCK(isp); 2580 xpt_done(ccb); 2581 break; 2582 } 2583 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) { 2584 uint32_t isr; 2585 uint16_t sema, mbox; 2586 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2587 isp_intr(isp, isr, sema, mbox); 2588 } 2589 DELAY(1000); 2590 } 2591 ISPLOCK_2_CAMLOCK(isp); 2592 break; 2593 } 2594 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2595 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2596 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2597 { 2598 tstate_t *tptr = 2599 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2600 if (tptr == NULL) { 2601 ccb->ccb_h.status = CAM_LUN_INVALID; 2602 xpt_done(ccb); 2603 break; 2604 } 2605 ccb->ccb_h.sim_priv.entries[0].field = 0; 2606 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2607 ccb->ccb_h.flags = 0; 2608 2609 CAMLOCK_2_ISPLOCK(isp); 2610 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2611 /* 2612 * Note that the command itself may not be done- 2613 * it may not even have had the first CTIO sent. 2614 */ 2615 tptr->atio_count++; 2616 isp_prt(isp, ISP_LOGTDEBUG0, 2617 "Put FREE ATIO, lun %d, count now %d", 2618 ccb->ccb_h.target_lun, tptr->atio_count); 2619 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2620 sim_links.sle); 2621 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2622 tptr->inot_count++; 2623 isp_prt(isp, ISP_LOGTDEBUG0, 2624 "Put FREE INOT, lun %d, count now %d", 2625 ccb->ccb_h.target_lun, tptr->inot_count); 2626 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2627 sim_links.sle); 2628 } else { 2629 isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");; 2630 } 2631 rls_lun_statep(isp, tptr); 2632 ccb->ccb_h.status = CAM_REQ_INPROG; 2633 ISPLOCK_2_CAMLOCK(isp); 2634 break; 2635 } 2636 case XPT_CONT_TARGET_IO: 2637 { 2638 CAMLOCK_2_ISPLOCK(isp); 2639 isp_target_start_ctio(isp, ccb); 2640 ISPLOCK_2_CAMLOCK(isp); 2641 break; 2642 } 2643 #endif 2644 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2645 2646 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2647 tgt = ccb->ccb_h.target_id; 2648 tgt |= (bus << 16); 2649 2650 CAMLOCK_2_ISPLOCK(isp); 2651 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2652 ISPLOCK_2_CAMLOCK(isp); 2653 if (error) { 2654 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2655 } else { 2656 ccb->ccb_h.status = CAM_REQ_CMP; 2657 } 2658 xpt_done(ccb); 2659 break; 2660 case XPT_ABORT: /* Abort the specified CCB */ 2661 { 2662 union ccb *accb = ccb->cab.abort_ccb; 2663 CAMLOCK_2_ISPLOCK(isp); 2664 switch (accb->ccb_h.func_code) { 2665 #ifdef ISP_TARGET_MODE 2666 case XPT_ACCEPT_TARGET_IO: 2667 case XPT_IMMED_NOTIFY: 2668 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2669 break; 2670 case XPT_CONT_TARGET_IO: 2671 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2672 ccb->ccb_h.status = CAM_UA_ABORT; 2673 break; 2674 #endif 2675 case XPT_SCSI_IO: 2676 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2677 if (error) { 2678 ccb->ccb_h.status = CAM_UA_ABORT; 2679 } else { 2680 ccb->ccb_h.status = CAM_REQ_CMP; 2681 } 2682 break; 2683 default: 2684 ccb->ccb_h.status = CAM_REQ_INVALID; 2685 break; 2686 } 2687 ISPLOCK_2_CAMLOCK(isp); 2688 xpt_done(ccb); 2689 break; 2690 } 2691 #ifdef CAM_NEW_TRAN_CODE 2692 
#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2693 #else 2694 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2695 #endif 2696 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2697 cts = &ccb->cts; 2698 if (!IS_CURRENT_SETTINGS(cts)) { 2699 ccb->ccb_h.status = CAM_REQ_INVALID; 2700 xpt_done(ccb); 2701 break; 2702 } 2703 tgt = cts->ccb_h.target_id; 2704 CAMLOCK_2_ISPLOCK(isp); 2705 if (IS_SCSI(isp)) { 2706 #ifndef CAM_NEW_TRAN_CODE 2707 sdparam *sdp = isp->isp_param; 2708 uint16_t *dptr; 2709 2710 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2711 2712 sdp += bus; 2713 /* 2714 * We always update (internally) from goal_flags 2715 * so any request to change settings just gets 2716 * vectored to that location. 2717 */ 2718 dptr = &sdp->isp_devparam[tgt].goal_flags; 2719 2720 /* 2721 * Note that these operations affect the 2722 * the goal flags (goal_flags)- not 2723 * the current state flags. Then we mark 2724 * things so that the next operation to 2725 * this HBA will cause the update to occur. 2726 */ 2727 if (cts->valid & CCB_TRANS_DISC_VALID) { 2728 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2729 *dptr |= DPARM_DISC; 2730 } else { 2731 *dptr &= ~DPARM_DISC; 2732 } 2733 } 2734 if (cts->valid & CCB_TRANS_TQ_VALID) { 2735 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2736 *dptr |= DPARM_TQING; 2737 } else { 2738 *dptr &= ~DPARM_TQING; 2739 } 2740 } 2741 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2742 switch (cts->bus_width) { 2743 case MSG_EXT_WDTR_BUS_16_BIT: 2744 *dptr |= DPARM_WIDE; 2745 break; 2746 default: 2747 *dptr &= ~DPARM_WIDE; 2748 } 2749 } 2750 /* 2751 * Any SYNC RATE of nonzero and SYNC_OFFSET 2752 * of nonzero will cause us to go to the 2753 * selected (from NVRAM) maximum value for 2754 * this device. At a later point, we'll 2755 * allow finer control. 2756 */ 2757 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2758 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2759 (cts->sync_offset > 0)) { 2760 *dptr |= DPARM_SYNC; 2761 } else { 2762 *dptr &= ~DPARM_SYNC; 2763 } 2764 *dptr |= DPARM_SAFE_DFLT; 2765 #else 2766 struct ccb_trans_settings_scsi *scsi = 2767 &cts->proto_specific.scsi; 2768 struct ccb_trans_settings_spi *spi = 2769 &cts->xport_specific.spi; 2770 sdparam *sdp = isp->isp_param; 2771 uint16_t *dptr; 2772 2773 if (spi->valid == 0 && scsi->valid == 0) { 2774 ISPLOCK_2_CAMLOCK(isp); 2775 ccb->ccb_h.status = CAM_REQ_CMP; 2776 xpt_done(ccb); 2777 break; 2778 } 2779 2780 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2781 sdp += bus; 2782 /* 2783 * We always update (internally) from goal_flags 2784 * so any request to change settings just gets 2785 * vectored to that location. 
2786 */ 2787 dptr = &sdp->isp_devparam[tgt].goal_flags; 2788 2789 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2790 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2791 *dptr |= DPARM_DISC; 2792 else 2793 *dptr &= ~DPARM_DISC; 2794 } 2795 2796 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2797 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2798 *dptr |= DPARM_TQING; 2799 else 2800 *dptr &= ~DPARM_TQING; 2801 } 2802 2803 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2804 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2805 *dptr |= DPARM_WIDE; 2806 else 2807 *dptr &= ~DPARM_WIDE; 2808 } 2809 2810 /* 2811 * XXX: FIX ME 2812 */ 2813 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2814 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2815 (spi->sync_period && spi->sync_offset)) { 2816 *dptr |= DPARM_SYNC; 2817 /* 2818 * XXX: CHECK FOR LEGALITY 2819 */ 2820 sdp->isp_devparam[tgt].goal_period = 2821 spi->sync_period; 2822 sdp->isp_devparam[tgt].goal_offset = 2823 spi->sync_offset; 2824 } else { 2825 *dptr &= ~DPARM_SYNC; 2826 } 2827 #endif 2828 isp_prt(isp, ISP_LOGDEBUG0, 2829 "SET (%d.%d.%d) to flags %x off %x per %x", 2830 bus, tgt, cts->ccb_h.target_lun, 2831 sdp->isp_devparam[tgt].goal_flags, 2832 sdp->isp_devparam[tgt].goal_offset, 2833 sdp->isp_devparam[tgt].goal_period); 2834 sdp->isp_devparam[tgt].dev_update = 1; 2835 isp->isp_update |= (1 << bus); 2836 } 2837 ISPLOCK_2_CAMLOCK(isp); 2838 ccb->ccb_h.status = CAM_REQ_CMP; 2839 xpt_done(ccb); 2840 break; 2841 case XPT_GET_TRAN_SETTINGS: 2842 cts = &ccb->cts; 2843 tgt = cts->ccb_h.target_id; 2844 CAMLOCK_2_ISPLOCK(isp); 2845 if (IS_FC(isp)) { 2846 #ifndef CAM_NEW_TRAN_CODE 2847 /* 2848 * a lot of normal SCSI things don't make sense. 2849 */ 2850 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2851 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2852 /* 2853 * How do you measure the width of a high 2854 * speed serial bus? Well, in bytes. 2855 * 2856 * Offset and period make no sense, though, so we set 2857 * (above) a 'base' transfer speed to be gigabit. 
2858 */ 2859 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2860 #else 2861 fcparam *fcp = isp->isp_param; 2862 struct ccb_trans_settings_scsi *scsi = 2863 &cts->proto_specific.scsi; 2864 struct ccb_trans_settings_fc *fc = 2865 &cts->xport_specific.fc; 2866 2867 cts->protocol = PROTO_SCSI; 2868 cts->protocol_version = SCSI_REV_2; 2869 cts->transport = XPORT_FC; 2870 cts->transport_version = 0; 2871 2872 scsi->valid = CTS_SCSI_VALID_TQ; 2873 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 2874 fc->valid = CTS_FC_VALID_SPEED; 2875 if (fcp->isp_gbspeed == 2) { 2876 fc->bitrate = 200000; 2877 } else { 2878 fc->bitrate = 100000; 2879 } 2880 if (tgt > 0 && tgt < MAX_FC_TARG) { 2881 fcportdb_t *lp = &fcp->portdb[tgt]; 2882 fc->wwnn = lp->node_wwn; 2883 fc->wwpn = lp->port_wwn; 2884 fc->port = lp->portid; 2885 fc->valid |= CTS_FC_VALID_WWNN | 2886 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2887 } 2888 #endif 2889 } else { 2890 #ifdef CAM_NEW_TRAN_CODE 2891 struct ccb_trans_settings_scsi *scsi = 2892 &cts->proto_specific.scsi; 2893 struct ccb_trans_settings_spi *spi = 2894 &cts->xport_specific.spi; 2895 #endif 2896 sdparam *sdp = isp->isp_param; 2897 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2898 uint16_t dval, pval, oval; 2899 2900 sdp += bus; 2901 2902 if (IS_CURRENT_SETTINGS(cts)) { 2903 sdp->isp_devparam[tgt].dev_refresh = 1; 2904 isp->isp_update |= (1 << bus); 2905 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2906 NULL); 2907 dval = sdp->isp_devparam[tgt].actv_flags; 2908 oval = sdp->isp_devparam[tgt].actv_offset; 2909 pval = sdp->isp_devparam[tgt].actv_period; 2910 } else { 2911 dval = sdp->isp_devparam[tgt].nvrm_flags; 2912 oval = sdp->isp_devparam[tgt].nvrm_offset; 2913 pval = sdp->isp_devparam[tgt].nvrm_period; 2914 } 2915 2916 #ifndef CAM_NEW_TRAN_CODE 2917 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2918 2919 if (dval & DPARM_DISC) { 2920 cts->flags |= CCB_TRANS_DISC_ENB; 2921 } 2922 if (dval & DPARM_TQING) { 2923 cts->flags |= CCB_TRANS_TAG_ENB; 2924 } 2925 if (dval & DPARM_WIDE) { 2926 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2927 } else { 2928 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2929 } 2930 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2931 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2932 2933 if ((dval & DPARM_SYNC) && oval != 0) { 2934 cts->sync_period = pval; 2935 cts->sync_offset = oval; 2936 cts->valid |= 2937 CCB_TRANS_SYNC_RATE_VALID | 2938 CCB_TRANS_SYNC_OFFSET_VALID; 2939 } 2940 #else 2941 cts->protocol = PROTO_SCSI; 2942 cts->protocol_version = SCSI_REV_2; 2943 cts->transport = XPORT_SPI; 2944 cts->transport_version = 2; 2945 2946 spi->valid = 0; 2947 scsi->valid = 0; 2948 spi->flags = 0; 2949 scsi->flags = 0; 2950 if (dval & DPARM_DISC) { 2951 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2952 } 2953 if ((dval & DPARM_SYNC) && oval && pval) { 2954 spi->sync_offset = oval; 2955 spi->sync_period = pval; 2956 } else { 2957 spi->sync_offset = 0; 2958 spi->sync_period = 0; 2959 } 2960 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2961 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2962 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2963 if (dval & DPARM_WIDE) { 2964 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2965 } else { 2966 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2967 } 2968 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2969 scsi->valid = CTS_SCSI_VALID_TQ; 2970 if (dval & DPARM_TQING) { 2971 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2972 } 2973 spi->valid |= CTS_SPI_VALID_DISC; 2974 } 2975 #endif 2976 isp_prt(isp, ISP_LOGDEBUG0, 2977 "GET %s (%d.%d.%d) to flags %x off %x per %x", 2978 
IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 2979 bus, tgt, cts->ccb_h.target_lun, dval, oval, pval); 2980 } 2981 ISPLOCK_2_CAMLOCK(isp); 2982 ccb->ccb_h.status = CAM_REQ_CMP; 2983 xpt_done(ccb); 2984 break; 2985 2986 case XPT_CALC_GEOMETRY: 2987 #if __FreeBSD_version < 500000 2988 { 2989 struct ccb_calc_geometry *ccg; 2990 u_int32_t secs_per_cylinder; 2991 u_int32_t size_mb; 2992 2993 ccg = &ccb->ccg; 2994 if (ccg->block_size == 0) { 2995 ccb->ccb_h.status = CAM_REQ_INVALID; 2996 xpt_done(ccb); 2997 break; 2998 } 2999 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 3000 if (size_mb > 1024) { 3001 ccg->heads = 255; 3002 ccg->secs_per_track = 63; 3003 } else { 3004 ccg->heads = 64; 3005 ccg->secs_per_track = 32; 3006 } 3007 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3008 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3009 ccb->ccb_h.status = CAM_REQ_CMP; 3010 xpt_done(ccb); 3011 break; 3012 } 3013 #else 3014 { 3015 cam_calc_geometry(&ccb->ccg, /*extended*/1); 3016 xpt_done(ccb); 3017 break; 3018 } 3019 #endif 3020 case XPT_RESET_BUS: /* Reset the specified bus */ 3021 bus = cam_sim_bus(sim); 3022 CAMLOCK_2_ISPLOCK(isp); 3023 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 3024 ISPLOCK_2_CAMLOCK(isp); 3025 if (error) 3026 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 3027 else { 3028 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 3029 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 3030 else if (isp->isp_path != NULL) 3031 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 3032 ccb->ccb_h.status = CAM_REQ_CMP; 3033 } 3034 xpt_done(ccb); 3035 break; 3036 3037 case XPT_TERM_IO: /* Terminate the I/O process */ 3038 ccb->ccb_h.status = CAM_REQ_INVALID; 3039 xpt_done(ccb); 3040 break; 3041 3042 case XPT_PATH_INQ: /* Path routing inquiry */ 3043 { 3044 struct ccb_pathinq *cpi = &ccb->cpi; 3045 3046 cpi->version_num = 1; 3047 #ifdef ISP_TARGET_MODE 3048 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3049 #else 3050 cpi->target_sprt = 0; 3051 #endif 3052 cpi->hba_eng_cnt = 0; 3053 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 3054 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 3055 cpi->bus_id = cam_sim_bus(sim); 3056 if (IS_FC(isp)) { 3057 cpi->hba_misc = PIM_NOBUSRESET; 3058 /* 3059 * Because our loop ID can shift from time to time, 3060 * make our initiator ID out of range of our bus. 3061 */ 3062 cpi->initiator_id = cpi->max_target + 1; 3063 3064 /* 3065 * Set base transfer capabilities for Fibre Channel. 3066 * Technically not correct because we don't know 3067 * what media we're running on top of- but we'll 3068 * look good if we always say 100MB/s. 
3069 */ 3070 if (FCPARAM(isp)->isp_gbspeed == 2) 3071 cpi->base_transfer_speed = 200000; 3072 else 3073 cpi->base_transfer_speed = 100000; 3074 cpi->hba_inquiry = PI_TAG_ABLE; 3075 #ifdef CAM_NEW_TRAN_CODE 3076 cpi->transport = XPORT_FC; 3077 cpi->transport_version = 0; 3078 #endif 3079 } else { 3080 sdparam *sdp = isp->isp_param; 3081 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 3082 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3083 cpi->hba_misc = 0; 3084 cpi->initiator_id = sdp->isp_initiator_id; 3085 cpi->base_transfer_speed = 3300; 3086 #ifdef CAM_NEW_TRAN_CODE 3087 cpi->transport = XPORT_SPI; 3088 cpi->transport_version = 2; 3089 #endif 3090 } 3091 #ifdef CAM_NEW_TRAN_CODE 3092 cpi->protocol = PROTO_SCSI; 3093 cpi->protocol_version = SCSI_REV_2; 3094 #endif 3095 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3096 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 3097 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3098 cpi->unit_number = cam_sim_unit(sim); 3099 cpi->ccb_h.status = CAM_REQ_CMP; 3100 xpt_done(ccb); 3101 break; 3102 } 3103 default: 3104 ccb->ccb_h.status = CAM_REQ_INVALID; 3105 xpt_done(ccb); 3106 break; 3107 } 3108 } 3109 3110 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 3111 3112 void 3113 isp_done(struct ccb_scsiio *sccb) 3114 { 3115 ispsoftc_t *isp = XS_ISP(sccb); 3116 3117 if (XS_NOERR(sccb)) 3118 XS_SETERR(sccb, CAM_REQ_CMP); 3119 3120 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 3121 (sccb->scsi_status != SCSI_STATUS_OK)) { 3122 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 3123 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 3124 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 3125 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 3126 } else { 3127 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 3128 } 3129 } 3130 3131 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3132 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3133 isp_prt(isp, ISP_LOGDEBUG0, 3134 "target %d lun %d CAM status 0x%x SCSI status 0x%x", 3135 XS_TGT(sccb), XS_LUN(sccb), sccb->ccb_h.status, 3136 sccb->scsi_status); 3137 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 3138 sccb->ccb_h.status |= CAM_DEV_QFRZN; 3139 xpt_freeze_devq(sccb->ccb_h.path, 1); 3140 } 3141 } 3142 3143 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 3144 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3145 xpt_print(sccb->ccb_h.path, 3146 "cam completion status 0x%x\n", sccb->ccb_h.status); 3147 } 3148 3149 XS_CMD_S_DONE(sccb); 3150 if (XS_CMD_WDOG_P(sccb) == 0) { 3151 untimeout(isp_watchdog, sccb, sccb->ccb_h.timeout_ch); 3152 if (XS_CMD_GRACE_P(sccb)) { 3153 isp_prt(isp, ISP_LOGDEBUG2, 3154 "finished command on borrowed time"); 3155 } 3156 XS_CMD_S_CLEAR(sccb); 3157 ISPLOCK_2_CAMLOCK(isp); 3158 xpt_done((union ccb *) sccb); 3159 CAMLOCK_2_ISPLOCK(isp); 3160 } 3161 } 3162 3163 int 3164 isp_async(ispsoftc_t *isp, ispasync_t cmd, void *arg) 3165 { 3166 int bus, rv = 0; 3167 static const char prom[] = 3168 "PortID 0x%06x handle 0x%x role %s %s\n" 3169 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3170 static const char prom2[] = 3171 "PortID 0x%06x handle 0x%x role %s %s tgt %u\n" 3172 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3173 char *msg = NULL; 3174 target_id_t tgt; 3175 fcportdb_t *lp; 3176 struct cam_path *tmppath; 3177 3178 switch (cmd) { 3179 case ISPASYNC_NEW_TGT_PARAMS: 3180 { 3181 #ifdef CAM_NEW_TRAN_CODE 3182 struct ccb_trans_settings_scsi *scsi; 3183 struct ccb_trans_settings_spi *spi; 3184 #endif 3185 int flags, tgt; 3186 sdparam *sdp = isp->isp_param; 3187 struct 
ccb_trans_settings cts; 3188 3189 memset(&cts, 0, sizeof (struct ccb_trans_settings)); 3190 3191 tgt = *((int *)arg); 3192 bus = (tgt >> 16) & 0xffff; 3193 tgt &= 0xffff; 3194 sdp += bus; 3195 ISPLOCK_2_CAMLOCK(isp); 3196 if (xpt_create_path(&tmppath, NULL, 3197 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), 3198 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 3199 CAMLOCK_2_ISPLOCK(isp); 3200 isp_prt(isp, ISP_LOGWARN, 3201 "isp_async cannot make temp path for %d.%d", 3202 tgt, bus); 3203 rv = -1; 3204 break; 3205 } 3206 CAMLOCK_2_ISPLOCK(isp); 3207 flags = sdp->isp_devparam[tgt].actv_flags; 3208 #ifdef CAM_NEW_TRAN_CODE 3209 cts.type = CTS_TYPE_CURRENT_SETTINGS; 3210 cts.protocol = PROTO_SCSI; 3211 cts.transport = XPORT_SPI; 3212 3213 scsi = &cts.proto_specific.scsi; 3214 spi = &cts.xport_specific.spi; 3215 3216 if (flags & DPARM_TQING) { 3217 scsi->valid |= CTS_SCSI_VALID_TQ; 3218 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3219 } 3220 3221 if (flags & DPARM_DISC) { 3222 spi->valid |= CTS_SPI_VALID_DISC; 3223 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3224 } 3225 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3226 if (flags & DPARM_WIDE) { 3227 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3228 } else { 3229 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3230 } 3231 if (flags & DPARM_SYNC) { 3232 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3233 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3234 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 3235 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 3236 } 3237 #else 3238 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 3239 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3240 if (flags & DPARM_DISC) { 3241 cts.flags |= CCB_TRANS_DISC_ENB; 3242 } 3243 if (flags & DPARM_TQING) { 3244 cts.flags |= CCB_TRANS_TAG_ENB; 3245 } 3246 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 3247 cts.bus_width = (flags & DPARM_WIDE)?
3248 MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT; 3249 cts.sync_period = sdp->isp_devparam[tgt].actv_period; 3250 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; 3251 if (flags & DPARM_SYNC) { 3252 cts.valid |= 3253 CCB_TRANS_SYNC_RATE_VALID | 3254 CCB_TRANS_SYNC_OFFSET_VALID; 3255 } 3256 #endif 3257 isp_prt(isp, ISP_LOGDEBUG2, 3258 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 3259 bus, tgt, sdp->isp_devparam[tgt].actv_period, 3260 sdp->isp_devparam[tgt].actv_offset, flags); 3261 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 3262 ISPLOCK_2_CAMLOCK(isp); 3263 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 3264 xpt_free_path(tmppath); 3265 CAMLOCK_2_ISPLOCK(isp); 3266 break; 3267 } 3268 case ISPASYNC_BUS_RESET: 3269 bus = *((int *)arg); 3270 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 3271 bus); 3272 if (bus > 0 && isp->isp_path2) { 3273 ISPLOCK_2_CAMLOCK(isp); 3274 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 3275 CAMLOCK_2_ISPLOCK(isp); 3276 } else if (isp->isp_path) { 3277 ISPLOCK_2_CAMLOCK(isp); 3278 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 3279 CAMLOCK_2_ISPLOCK(isp); 3280 } 3281 break; 3282 case ISPASYNC_LIP: 3283 if (msg == NULL) { 3284 msg = "LIP Received"; 3285 } 3286 /* FALLTHROUGH */ 3287 case ISPASYNC_LOOP_RESET: 3288 if (msg == NULL) { 3289 msg = "LOOP Reset"; 3290 } 3291 /* FALLTHROUGH */ 3292 case ISPASYNC_LOOP_DOWN: 3293 if (msg == NULL) { 3294 msg = "LOOP Down"; 3295 } 3296 if (isp->isp_path) { 3297 isp_freeze_loopdown(isp, msg); 3298 } 3299 if (isp->isp_osinfo.ldt_running == 0) { 3300 isp->isp_osinfo.ldt = timeout(isp_ldt, isp, 3301 isp->isp_osinfo.loop_down_limit * hz); 3302 isp->isp_osinfo.ldt_running = 1; 3303 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3304 "starting Loop Down Timer"); 3305 } 3306 isp_prt(isp, ISP_LOGINFO, msg); 3307 break; 3308 case ISPASYNC_LOOP_UP: 3309 /* 3310 * Now we just note that Loop has come up. We don't 3311 * actually do anything because we're waiting for a 3312 * Change Notify before activating the FC cleanup 3313 * thread to look at the state of the loop again.
3314 */ 3315 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 3316 break; 3317 case ISPASYNC_DEV_ARRIVED: 3318 lp = arg; 3319 lp->reserved = 0; 3320 if ((isp->isp_role & ISP_ROLE_INITIATOR) && 3321 (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) { 3322 int dbidx = lp - FCPARAM(isp)->portdb; 3323 int i; 3324 3325 for (i = 0; i < MAX_FC_TARG; i++) { 3326 if (i >= FL_ID && i <= SNS_ID) { 3327 continue; 3328 } 3329 if (FCPARAM(isp)->isp_ini_map[i] == 0) { 3330 break; 3331 } 3332 } 3333 if (i < MAX_FC_TARG) { 3334 FCPARAM(isp)->isp_ini_map[i] = dbidx + 1; 3335 lp->ini_map_idx = i + 1; 3336 } else { 3337 isp_prt(isp, ISP_LOGWARN, "out of target ids"); 3338 isp_dump_portdb(isp); 3339 } 3340 } 3341 if (lp->ini_map_idx) { 3342 tgt = lp->ini_map_idx - 1; 3343 isp_prt(isp, ISP_LOGCONFIG, prom2, 3344 lp->portid, lp->handle, 3345 roles[lp->roles], "arrived at", tgt, 3346 (uint32_t) (lp->node_wwn >> 32), 3347 (uint32_t) lp->node_wwn, 3348 (uint32_t) (lp->port_wwn >> 32), 3349 (uint32_t) lp->port_wwn); 3350 isp_make_here(isp, tgt); 3351 } else { 3352 isp_prt(isp, ISP_LOGCONFIG, prom, 3353 lp->portid, lp->handle, 3354 roles[lp->roles], "arrived", 3355 (uint32_t) (lp->node_wwn >> 32), 3356 (uint32_t) lp->node_wwn, 3357 (uint32_t) (lp->port_wwn >> 32), 3358 (uint32_t) lp->port_wwn); 3359 } 3360 break; 3361 case ISPASYNC_DEV_CHANGED: 3362 lp = arg; 3363 if (isp_change_is_bad) { 3364 lp->state = FC_PORTDB_STATE_NIL; 3365 if (lp->ini_map_idx) { 3366 tgt = lp->ini_map_idx - 1; 3367 FCPARAM(isp)->isp_ini_map[tgt] = 0; 3368 lp->ini_map_idx = 0; 3369 isp_prt(isp, ISP_LOGCONFIG, prom3, 3370 lp->portid, tgt, "change is bad"); 3371 isp_make_gone(isp, tgt); 3372 } else { 3373 isp_prt(isp, ISP_LOGCONFIG, prom, 3374 lp->portid, lp->handle, 3375 roles[lp->roles], 3376 "changed and departed", 3377 (uint32_t) (lp->node_wwn >> 32), 3378 (uint32_t) lp->node_wwn, 3379 (uint32_t) (lp->port_wwn >> 32), 3380 (uint32_t) lp->port_wwn); 3381 } 3382 } else { 3383 lp->portid = lp->new_portid; 3384 lp->roles = lp->new_roles; 3385 if (lp->ini_map_idx) { 3386 int t = lp->ini_map_idx - 1; 3387 FCPARAM(isp)->isp_ini_map[t] = 3388 (lp - FCPARAM(isp)->portdb) + 1; 3389 tgt = lp->ini_map_idx - 1; 3390 isp_prt(isp, ISP_LOGCONFIG, prom2, 3391 lp->portid, lp->handle, 3392 roles[lp->roles], "changed at", tgt, 3393 (uint32_t) (lp->node_wwn >> 32), 3394 (uint32_t) lp->node_wwn, 3395 (uint32_t) (lp->port_wwn >> 32), 3396 (uint32_t) lp->port_wwn); 3397 } else { 3398 isp_prt(isp, ISP_LOGCONFIG, prom, 3399 lp->portid, lp->handle, 3400 roles[lp->roles], "changed", 3401 (uint32_t) (lp->node_wwn >> 32), 3402 (uint32_t) lp->node_wwn, 3403 (uint32_t) (lp->port_wwn >> 32), 3404 (uint32_t) lp->port_wwn); 3405 } 3406 } 3407 break; 3408 case ISPASYNC_DEV_STAYED: 3409 lp = arg; 3410 if (lp->ini_map_idx) { 3411 tgt = lp->ini_map_idx - 1; 3412 isp_prt(isp, ISP_LOGCONFIG, prom2, 3413 lp->portid, lp->handle, 3414 roles[lp->roles], "stayed at", tgt, 3415 (uint32_t) (lp->node_wwn >> 32), 3416 (uint32_t) lp->node_wwn, 3417 (uint32_t) (lp->port_wwn >> 32), 3418 (uint32_t) lp->port_wwn); 3419 } else { 3420 isp_prt(isp, ISP_LOGCONFIG, prom, 3421 lp->portid, lp->handle, 3422 roles[lp->roles], "stayed", 3423 (uint32_t) (lp->node_wwn >> 32), 3424 (uint32_t) lp->node_wwn, 3425 (uint32_t) (lp->port_wwn >> 32), 3426 (uint32_t) lp->port_wwn); 3427 } 3428 break; 3429 case ISPASYNC_DEV_GONE: 3430 lp = arg; 3431 /* 3432 * If this has a virtual target and we haven't marked it 3433 * that we're going to have isp_gdt tell the OS it's gone, 3434 * set the isp_gdt timer running on it. 
3435 * 3436 * If it isn't marked that isp_gdt is going to get rid of it, 3437 * announce that it's gone. 3438 */ 3439 if (lp->ini_map_idx && lp->reserved == 0) { 3440 lp->reserved = 1; 3441 lp->new_reserved = isp->isp_osinfo.gone_device_time; 3442 lp->state = FC_PORTDB_STATE_ZOMBIE; 3443 if (isp->isp_osinfo.gdt_running == 0) { 3444 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3445 "starting Gone Device Timer"); 3446 isp->isp_osinfo.gdt = timeout(isp_gdt, isp, hz); 3447 isp->isp_osinfo.gdt_running = 1; 3448 } 3449 tgt = lp->ini_map_idx - 1; 3450 isp_prt(isp, ISP_LOGCONFIG, prom2, 3451 lp->portid, lp->handle, 3452 roles[lp->roles], "gone zombie at", tgt, 3453 (uint32_t) (lp->node_wwn >> 32), 3454 (uint32_t) lp->node_wwn, 3455 (uint32_t) (lp->port_wwn >> 32), 3456 (uint32_t) lp->port_wwn); 3457 } else if (lp->reserved == 0) { 3458 isp_prt(isp, ISP_LOGCONFIG, prom, 3459 lp->portid, lp->handle, 3460 roles[lp->roles], "departed", 3461 (uint32_t) (lp->node_wwn >> 32), 3462 (uint32_t) lp->node_wwn, 3463 (uint32_t) (lp->port_wwn >> 32), 3464 (uint32_t) lp->port_wwn); 3465 } 3466 break; 3467 case ISPASYNC_CHANGE_NOTIFY: 3468 { 3469 char *msg; 3470 if (arg == ISPASYNC_CHANGE_PDB) { 3471 msg = "Port Database Changed"; 3472 } else if (arg == ISPASYNC_CHANGE_SNS) { 3473 msg = "Name Server Database Changed"; 3474 } else { 3475 msg = "Other Change Notify"; 3476 } 3477 /* 3478 * If the loop down timer is running, cancel it. 3479 */ 3480 if (isp->isp_osinfo.ldt_running) { 3481 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3482 "Stopping Loop Down Timer"); 3483 isp->isp_osinfo.ldt_running = 0; 3484 untimeout(isp_ldt, isp, isp->isp_osinfo.ldt); 3485 callout_handle_init(&isp->isp_osinfo.ldt); 3486 } 3487 isp_prt(isp, ISP_LOGINFO, msg); 3488 isp_freeze_loopdown(isp, msg); 3489 #if __FreeBSD_version < 500000 3490 wakeup(&isp->isp_osinfo.kproc); 3491 #else 3492 #ifdef ISP_SMPLOCK 3493 cv_signal(&isp->isp_osinfo.kthread_cv); 3494 #else 3495 wakeup(&isp->isp_osinfo.kthread_cv); 3496 #endif 3497 #endif 3498 break; 3499 } 3500 #ifdef ISP_TARGET_MODE 3501 case ISPASYNC_TARGET_NOTIFY: 3502 { 3503 tmd_notify_t *nt = arg; 3504 isp_prt(isp, ISP_LOGALL, 3505 "target notify code 0x%x", nt->nt_ncode); 3506 break; 3507 } 3508 case ISPASYNC_TARGET_ACTION: 3509 switch (((isphdr_t *)arg)->rqs_entry_type) { 3510 default: 3511 isp_prt(isp, ISP_LOGWARN, 3512 "event 0x%x for unhandled target action", 3513 ((isphdr_t *)arg)->rqs_entry_type); 3514 break; 3515 case RQSTYPE_NOTIFY: 3516 if (IS_SCSI(isp)) { 3517 rv = isp_handle_platform_notify_scsi(isp, 3518 (in_entry_t *) arg); 3519 } else { 3520 rv = isp_handle_platform_notify_fc(isp, 3521 (in_fcentry_t *) arg); 3522 } 3523 break; 3524 case RQSTYPE_ATIO: 3525 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 3526 break; 3527 case RQSTYPE_ATIO2: 3528 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3529 break; 3530 case RQSTYPE_CTIO3: 3531 case RQSTYPE_CTIO2: 3532 case RQSTYPE_CTIO: 3533 rv = isp_handle_platform_ctio(isp, arg); 3534 break; 3535 case RQSTYPE_ENABLE_LUN: 3536 case RQSTYPE_MODIFY_LUN: 3537 isp_ledone(isp, (lun_entry_t *) arg); 3538 break; 3539 } 3540 break; 3541 #endif 3542 case ISPASYNC_FW_CRASH: 3543 { 3544 uint16_t mbox1, mbox6; 3545 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3546 if (IS_DUALBUS(isp)) { 3547 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3548 } else { 3549 mbox6 = 0; 3550 } 3551 isp_prt(isp, ISP_LOGERR, 3552 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3553 mbox6, mbox1); 3554 #ifdef ISP_FW_CRASH_DUMP 3555 /* 3556 * XXX: really need a thread to do this 
right. 3557 */ 3558 if (IS_FC(isp)) { 3559 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3560 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3561 isp_freeze_loopdown(isp, "f/w crash"); 3562 isp_fw_dump(isp); 3563 } 3564 isp_reinit(isp); 3565 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3566 #endif 3567 break; 3568 } 3569 case ISPASYNC_UNHANDLED_RESPONSE: 3570 break; 3571 default: 3572 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3573 break; 3574 } 3575 return (rv); 3576 } 3577 3578 3579 /* 3580 * Locks are held before coming here. 3581 */ 3582 void 3583 isp_uninit(ispsoftc_t *isp) 3584 { 3585 if (IS_24XX(isp)) { 3586 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); 3587 } else { 3588 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 3589 } 3590 ISP_DISABLE_INTS(isp); 3591 } 3592 3593 void 3594 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 3595 { 3596 va_list ap; 3597 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3598 return; 3599 } 3600 printf("%s: ", device_get_nameunit(isp->isp_dev)); 3601 va_start(ap, fmt); 3602 vprintf(fmt, ap); 3603 va_end(ap); 3604 printf("\n"); 3605 } 3606 3607 uint64_t 3608 isp_nanotime_sub(struct timespec *b, struct timespec *a) 3609 { 3610 uint64_t elapsed; 3611 struct timespec x = *b; 3612 timespecsub(&x, a); 3613 elapsed = GET_NANOSEC(&x); 3614 if (elapsed == 0) 3615 elapsed++; 3616 return (elapsed); 3617 } 3618 3619 int 3620 isp_mbox_acquire(ispsoftc_t *isp) 3621 { 3622 if (isp->isp_osinfo.mboxbsy) { 3623 return (1); 3624 } else { 3625 isp->isp_osinfo.mboxcmd_done = 0; 3626 isp->isp_osinfo.mboxbsy = 1; 3627 return (0); 3628 } 3629 } 3630 3631 void 3632 isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) 3633 { 3634 unsigned int usecs = mbp->timeout; 3635 unsigned int max, olim, ilim; 3636 3637 if (usecs == 0) { 3638 usecs = MBCMD_DEFAULT_TIMEOUT; 3639 } 3640 max = isp->isp_mbxwrk0 + 1; 3641 3642 if (isp->isp_osinfo.mbox_sleep_ok) { 3643 unsigned int ms = (usecs + 999) / 1000; 3644 3645 isp->isp_osinfo.mbox_sleep_ok = 0; 3646 isp->isp_osinfo.mbox_sleeping = 1; 3647 for (olim = 0; olim < max; olim++) { 3648 #if __FreeBSD_version < 500000 || !defined(ISP_SMPLOCK) 3649 tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", 3650 isp_mstohz(ms)); 3651 #else 3652 msleep(&isp->isp_mbxworkp, &isp->isp_mtx, PRIBIO, 3653 "ispmbx_sleep", isp_mstohz(ms)); 3654 #endif 3655 if (isp->isp_osinfo.mboxcmd_done) { 3656 break; 3657 } 3658 } 3659 isp->isp_osinfo.mbox_sleep_ok = 1; 3660 isp->isp_osinfo.mbox_sleeping = 0; 3661 } else { 3662 for (olim = 0; olim < max; olim++) { 3663 for (ilim = 0; ilim < usecs; ilim += 100) { 3664 uint32_t isr; 3665 uint16_t sema, mbox; 3666 if (isp->isp_osinfo.mboxcmd_done) { 3667 break; 3668 } 3669 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 3670 isp_intr(isp, isr, sema, mbox); 3671 if (isp->isp_osinfo.mboxcmd_done) { 3672 break; 3673 } 3674 } 3675 USEC_DELAY(100); 3676 } 3677 if (isp->isp_osinfo.mboxcmd_done) { 3678 break; 3679 } 3680 } 3681 } 3682 if (isp->isp_osinfo.mboxcmd_done == 0) { 3683 isp_prt(isp, ISP_LOGWARN, 3684 "%s Mailbox Command (0x%x) Timeout (%uus)", 3685 isp->isp_osinfo.mbox_sleep_ok? 
"Interrupting" : "Polled", 3686 isp->isp_lastmbxcmd, usecs); 3687 mbp->param[0] = MBOX_TIMEOUT; 3688 isp->isp_osinfo.mboxcmd_done = 1; 3689 } 3690 } 3691 3692 void 3693 isp_mbox_notify_done(ispsoftc_t *isp) 3694 { 3695 if (isp->isp_osinfo.mbox_sleeping) { 3696 wakeup(&isp->isp_mbxworkp); 3697 } 3698 isp->isp_osinfo.mboxcmd_done = 1; 3699 } 3700 3701 void 3702 isp_mbox_release(ispsoftc_t *isp) 3703 { 3704 isp->isp_osinfo.mboxbsy = 0; 3705 } 3706 3707 int 3708 isp_mstohz(int ms) 3709 { 3710 int hz; 3711 struct timeval t; 3712 t.tv_sec = ms / 1000; 3713 t.tv_usec = (ms % 1000) * 1000; 3714 hz = tvtohz(&t); 3715 if (hz < 0) { 3716 hz = 0x7fffffff; 3717 } 3718 if (hz == 0) { 3719 hz = 1; 3720 } 3721 return (hz); 3722 } 3723