1 /*- 2 * 3 * Copyright (c) 1997-2006 by Matthew Jacob 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice immediately at the beginning of the file, without modification, 11 * this list of conditions, and the following disclaimer. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 /* 29 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 30 */ 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 #include <dev/isp/isp_freebsd.h> 34 #include <sys/unistd.h> 35 #include <sys/kthread.h> 36 #include <machine/stdarg.h> /* for use by isp_prt below */ 37 #include <sys/conf.h> 38 #include <sys/module.h> 39 #include <sys/ioccom.h> 40 #include <dev/isp/isp_ioctl.h> 41 #if __FreeBSD_version >= 500000 42 #include <sys/sysctl.h> 43 #endif 44 #include <cam/cam_periph.h> 45 #include <cam/cam_xpt_periph.h> 46 47 #if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025 48 #define CAM_NEW_TRAN_CODE 1 49 #endif 50 51 52 MODULE_VERSION(isp, 1); 53 MODULE_DEPEND(isp, cam, 1, 1, 1); 54 int isp_announced = 0; 55 int isp_fabric_hysteresis = 5; 56 int isp_loop_down_limit = 300; /* default loop down limit */ 57 int isp_change_is_bad = 0; /* "changed" devices are bad */ 58 int isp_quickboot_time = 15; /* don't wait more than N secs for loop up */ 59 int isp_gone_device_time = 30; /* grace time before reporting device lost */ 60 static const char *roles[4] = { 61 "(none)", "Target", "Initiator", "Target/Initiator" 62 }; 63 static const char prom3[] = 64 "PortID 0x%06x Departed from Target %u because of %s"; 65 66 static void isp_freeze_loopdown(ispsoftc_t *, char *); 67 static d_ioctl_t ispioctl; 68 static void isp_intr_enable(void *); 69 static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); 70 static void isp_poll(struct cam_sim *); 71 static timeout_t isp_watchdog; 72 static timeout_t isp_ldt; 73 static void isp_kthread(void *); 74 static void isp_action(struct cam_sim *, union ccb *); 75 76 #if __FreeBSD_version < 700000 77 ispfwfunc *isp_get_firmware_p = NULL; 78 #endif 79 80 #if __FreeBSD_version < 500000 81 #define ISP_CDEV_MAJOR 248 82 static struct cdevsw isp_cdevsw = { 83 /* open */ nullopen, 84 /* close */ nullclose, 85 /* read */ noread, 86 /* write */ nowrite, 87 /* ioctl */ ispioctl, 88 /* poll */ nopoll, 89 /* mmap */ nommap, 90 /* strategy */ nostrategy, 91 /* name */ "isp", 92 /* maj */ ISP_CDEV_MAJOR, 93 /* dump */ nodump, 94 /* psize */ nopsize, 95 
/* flags */ D_TAPE, 96 }; 97 #define isp_sysctl_update(x) do { ; } while (0) 98 #else 99 static struct cdevsw isp_cdevsw = { 100 .d_version = D_VERSION, 101 .d_flags = D_NEEDGIANT, 102 .d_ioctl = ispioctl, 103 .d_name = "isp", 104 }; 105 static void isp_sysctl_update(ispsoftc_t *); 106 #endif 107 108 static ispsoftc_t *isplist = NULL; 109 110 void 111 isp_attach(ispsoftc_t *isp) 112 { 113 int primary, secondary; 114 struct ccb_setasync csa; 115 struct cam_devq *devq; 116 struct cam_sim *sim; 117 struct cam_path *path; 118 119 /* 120 * Establish (in case of 12X0) which bus is the primary. 121 */ 122 123 primary = 0; 124 secondary = 1; 125 126 /* 127 * Create the device queue for our SIM(s). 128 */ 129 devq = cam_simq_alloc(isp->isp_maxcmds); 130 if (devq == NULL) { 131 return; 132 } 133 134 /* 135 * Construct our SIM entry. 136 */ 137 ISPLOCK_2_CAMLOCK(isp); 138 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 139 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 140 if (sim == NULL) { 141 cam_simq_free(devq); 142 CAMLOCK_2_ISPLOCK(isp); 143 return; 144 } 145 CAMLOCK_2_ISPLOCK(isp); 146 147 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 148 isp->isp_osinfo.ehook.ich_arg = isp; 149 ISPLOCK_2_CAMLOCK(isp); 150 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 151 cam_sim_free(sim, TRUE); 152 CAMLOCK_2_ISPLOCK(isp); 153 isp_prt(isp, ISP_LOGERR, 154 "could not establish interrupt enable hook"); 155 return; 156 } 157 158 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 159 cam_sim_free(sim, TRUE); 160 CAMLOCK_2_ISPLOCK(isp); 161 return; 162 } 163 164 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 165 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 166 xpt_bus_deregister(cam_sim_path(sim)); 167 cam_sim_free(sim, TRUE); 168 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 169 CAMLOCK_2_ISPLOCK(isp); 170 return; 171 } 172 173 xpt_setup_ccb(&csa.ccb_h, path, 5); 174 csa.ccb_h.func_code = XPT_SASYNC_CB; 175 csa.event_enable = AC_LOST_DEVICE; 176 csa.callback = isp_cam_async; 177 csa.callback_arg = sim; 178 xpt_action((union ccb *)&csa); 179 CAMLOCK_2_ISPLOCK(isp); 180 isp->isp_sim = sim; 181 isp->isp_path = path; 182 /* 183 * Create a kernel thread for fibre channel instances. We 184 * don't have dual channel FC cards. 185 */ 186 if (IS_FC(isp)) { 187 ISPLOCK_2_CAMLOCK(isp); 188 #if __FreeBSD_version >= 500000 189 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); 190 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 191 RFHIGHPID, 0, "%s: fc_thrd", 192 device_get_nameunit(isp->isp_dev))) 193 #else 194 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 195 "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) 196 #endif 197 { 198 xpt_bus_deregister(cam_sim_path(sim)); 199 cam_sim_free(sim, TRUE); 200 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 201 CAMLOCK_2_ISPLOCK(isp); 202 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 203 return; 204 } 205 CAMLOCK_2_ISPLOCK(isp); 206 /* 207 * We start by being "loop down" if we have an initiator role 208 */ 209 if (isp->isp_role & ISP_ROLE_INITIATOR) { 210 isp_freeze_loopdown(isp, "isp_attach"); 211 isp->isp_osinfo.ldt = 212 timeout(isp_ldt, isp, isp_quickboot_time * hz); 213 isp->isp_osinfo.ldt_running = 1; 214 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 215 "Starting Initial Loop Down Timer"); 216 } 217 } 218 219 220 /* 221 * If we have a second channel, construct SIM entry for that. 
222 */ 223 if (IS_DUALBUS(isp)) { 224 ISPLOCK_2_CAMLOCK(isp); 225 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 226 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 227 if (sim == NULL) { 228 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 229 xpt_free_path(isp->isp_path); 230 cam_simq_free(devq); 231 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 232 return; 233 } 234 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 235 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 236 xpt_free_path(isp->isp_path); 237 cam_sim_free(sim, TRUE); 238 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 239 CAMLOCK_2_ISPLOCK(isp); 240 return; 241 } 242 243 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 244 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 245 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 246 xpt_free_path(isp->isp_path); 247 xpt_bus_deregister(cam_sim_path(sim)); 248 cam_sim_free(sim, TRUE); 249 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 250 CAMLOCK_2_ISPLOCK(isp); 251 return; 252 } 253 254 xpt_setup_ccb(&csa.ccb_h, path, 5); 255 csa.ccb_h.func_code = XPT_SASYNC_CB; 256 csa.event_enable = AC_LOST_DEVICE; 257 csa.callback = isp_cam_async; 258 csa.callback_arg = sim; 259 xpt_action((union ccb *)&csa); 260 CAMLOCK_2_ISPLOCK(isp); 261 isp->isp_sim2 = sim; 262 isp->isp_path2 = path; 263 } 264 265 /* 266 * Create device nodes 267 */ 268 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 269 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 270 271 if (isp->isp_role != ISP_ROLE_NONE) { 272 isp->isp_state = ISP_RUNSTATE; 273 ISP_ENABLE_INTS(isp); 274 } 275 if (isplist == NULL) { 276 isplist = isp; 277 } else { 278 ispsoftc_t *tmp = isplist; 279 while (tmp->isp_osinfo.next) { 280 tmp = tmp->isp_osinfo.next; 281 } 282 tmp->isp_osinfo.next = isp; 283 } 284 isp_sysctl_update(isp); 285 } 286 287 static void 288 isp_freeze_loopdown(ispsoftc_t *isp, char *msg) 289 { 290 if (isp->isp_osinfo.simqfrozen == 0) { 291 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 292 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 293 ISPLOCK_2_CAMLOCK(isp); 294 xpt_freeze_simq(isp->isp_sim, 1); 295 CAMLOCK_2_ISPLOCK(isp); 296 } else { 297 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 298 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 299 } 300 } 301 302 303 #if __FreeBSD_version < 500000 304 #define _DEV dev_t 305 #define _IOP struct proc 306 #else 307 #define _IOP struct thread 308 #define _DEV struct cdev * 309 #endif 310 311 static int 312 ispioctl(_DEV dev, u_long c, caddr_t addr, int flags, _IOP *td) 313 { 314 ispsoftc_t *isp; 315 int nr, retval = ENOTTY; 316 317 isp = isplist; 318 while (isp) { 319 if (minor(dev) == device_get_unit(isp->isp_dev)) { 320 break; 321 } 322 isp = isp->isp_osinfo.next; 323 } 324 if (isp == NULL) 325 return (ENXIO); 326 327 switch (c) { 328 #ifdef ISP_FW_CRASH_DUMP 329 case ISP_GET_FW_CRASH_DUMP: 330 if (IS_FC(isp)) { 331 uint16_t *ptr = FCPARAM(isp)->isp_dump_data; 332 size_t sz; 333 334 retval = 0; 335 if (IS_2200(isp)) { 336 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 337 } else { 338 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 339 } 340 ISP_LOCK(isp); 341 if (ptr && *ptr) { 342 void *uaddr = *((void **) addr); 343 if (copyout(ptr, uaddr, sz)) { 344 retval = EFAULT; 345 } else { 346 *ptr = 0; 347 } 348 } else { 349 retval = ENXIO; 350 } 351 ISP_UNLOCK(isp); 352 } 353 break; 354 case ISP_FORCE_CRASH_DUMP: 355 if (IS_FC(isp)) { 356 ISP_LOCK(isp); 357 isp_freeze_loopdown(isp, 358 
"ispioctl(ISP_FORCE_CRASH_DUMP)"); 359 isp_fw_dump(isp); 360 isp_reinit(isp); 361 ISP_UNLOCK(isp); 362 retval = 0; 363 } 364 break; 365 #endif 366 case ISP_SDBLEV: 367 { 368 int olddblev = isp->isp_dblev; 369 isp->isp_dblev = *(int *)addr; 370 *(int *)addr = olddblev; 371 retval = 0; 372 break; 373 } 374 case ISP_GETROLE: 375 *(int *)addr = isp->isp_role; 376 retval = 0; 377 break; 378 case ISP_SETROLE: 379 nr = *(int *)addr; 380 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 381 retval = EINVAL; 382 break; 383 } 384 *(int *)addr = isp->isp_role; 385 isp->isp_role = nr; 386 /* FALLTHROUGH */ 387 case ISP_RESETHBA: 388 ISP_LOCK(isp); 389 isp_reinit(isp); 390 ISP_UNLOCK(isp); 391 retval = 0; 392 break; 393 case ISP_RESCAN: 394 if (IS_FC(isp)) { 395 ISP_LOCK(isp); 396 if (isp_fc_runstate(isp, 5 * 1000000)) { 397 retval = EIO; 398 } else { 399 retval = 0; 400 } 401 ISP_UNLOCK(isp); 402 } 403 break; 404 case ISP_FC_LIP: 405 if (IS_FC(isp)) { 406 ISP_LOCK(isp); 407 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 408 retval = EIO; 409 } else { 410 retval = 0; 411 } 412 ISP_UNLOCK(isp); 413 } 414 break; 415 case ISP_FC_GETDINFO: 416 { 417 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 418 fcportdb_t *lp; 419 420 if (IS_SCSI(isp)) { 421 break; 422 } 423 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 424 retval = EINVAL; 425 break; 426 } 427 ISP_LOCK(isp); 428 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 429 if (lp->state == FC_PORTDB_STATE_VALID) { 430 ifc->role = lp->roles; 431 ifc->loopid = lp->handle; 432 ifc->portid = lp->portid; 433 ifc->node_wwn = lp->node_wwn; 434 ifc->port_wwn = lp->port_wwn; 435 retval = 0; 436 } else { 437 retval = ENODEV; 438 } 439 ISP_UNLOCK(isp); 440 break; 441 } 442 case ISP_GET_STATS: 443 { 444 isp_stats_t *sp = (isp_stats_t *) addr; 445 446 MEMZERO(sp, sizeof (*sp)); 447 sp->isp_stat_version = ISP_STATS_VERSION; 448 sp->isp_type = isp->isp_type; 449 sp->isp_revision = isp->isp_revision; 450 ISP_LOCK(isp); 451 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 452 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 453 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 454 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 455 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 456 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 457 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 458 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 459 ISP_UNLOCK(isp); 460 retval = 0; 461 break; 462 } 463 case ISP_CLR_STATS: 464 ISP_LOCK(isp); 465 isp->isp_intcnt = 0; 466 isp->isp_intbogus = 0; 467 isp->isp_intmboxc = 0; 468 isp->isp_intoasync = 0; 469 isp->isp_rsltccmplt = 0; 470 isp->isp_fphccmplt = 0; 471 isp->isp_rscchiwater = 0; 472 isp->isp_fpcchiwater = 0; 473 ISP_UNLOCK(isp); 474 retval = 0; 475 break; 476 case ISP_FC_GETHINFO: 477 { 478 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 479 MEMZERO(hba, sizeof (*hba)); 480 481 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 482 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 483 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 484 if (IS_FC(isp)) { 485 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 486 hba->fc_scsi_supported = 1; 487 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 488 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 489 hba->nvram_node_wwn = FCPARAM(isp)->isp_wwnn_nvram; 490 hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram; 491 hba->active_node_wwn = ISP_NODEWWN(isp); 492 hba->active_port_wwn = ISP_PORTWWN(isp); 493 } 494 retval = 0; 495 break; 496 } 497 case ISP_GET_FC_PARAM: 498 { 499 
struct isp_fc_param *f = (struct isp_fc_param *) addr; 500 501 if (IS_SCSI(isp)) { 502 break; 503 } 504 f->parameter = 0; 505 if (strcmp(f->param_name, "framelength") == 0) { 506 f->parameter = FCPARAM(isp)->isp_maxfrmlen; 507 retval = 0; 508 break; 509 } 510 if (strcmp(f->param_name, "exec_throttle") == 0) { 511 f->parameter = FCPARAM(isp)->isp_execthrottle; 512 retval = 0; 513 break; 514 } 515 if (strcmp(f->param_name, "fullduplex") == 0) { 516 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX) 517 f->parameter = 1; 518 retval = 0; 519 break; 520 } 521 if (strcmp(f->param_name, "loopid") == 0) { 522 f->parameter = FCPARAM(isp)->isp_loopid; 523 retval = 0; 524 break; 525 } 526 retval = EINVAL; 527 break; 528 } 529 case ISP_SET_FC_PARAM: 530 { 531 struct isp_fc_param *f = (struct isp_fc_param *) addr; 532 uint32_t param = f->parameter; 533 534 if (IS_SCSI(isp)) { 535 break; 536 } 537 f->parameter = 0; 538 if (strcmp(f->param_name, "framelength") == 0) { 539 if (param != 512 && param != 1024 && param != 2048) { 540 retval = EINVAL; 541 break; 542 } 543 FCPARAM(isp)->isp_maxfrmlen = param; 544 retval = 0; 545 break; 546 } 547 if (strcmp(f->param_name, "exec_throttle") == 0) { 548 if (param < 16 || param > 255) { 549 retval = EINVAL; 550 break; 551 } 552 FCPARAM(isp)->isp_execthrottle = param; 553 retval = 0; 554 break; 555 } 556 if (strcmp(f->param_name, "fullduplex") == 0) { 557 if (param != 0 && param != 1) { 558 retval = EINVAL; 559 break; 560 } 561 if (param) { 562 FCPARAM(isp)->isp_fwoptions |= 563 ICBOPT_FULL_DUPLEX; 564 } else { 565 FCPARAM(isp)->isp_fwoptions &= 566 ~ICBOPT_FULL_DUPLEX; 567 } 568 retval = 0; 569 break; 570 } 571 if (strcmp(f->param_name, "loopid") == 0) { 572 if (param < 0 || param > 125) { 573 retval = EINVAL; 574 break; 575 } 576 FCPARAM(isp)->isp_loopid = param; 577 retval = 0; 578 break; 579 } 580 retval = EINVAL; 581 break; 582 } 583 case ISP_TSK_MGMT: 584 { 585 int needmarker; 586 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; 587 uint16_t loopid; 588 mbreg_t mbs; 589 590 if (IS_SCSI(isp)) { 591 break; 592 } 593 594 memset(&mbs, 0, sizeof (mbs)); 595 needmarker = retval = 0; 596 loopid = fct->loopid; 597 if (FCPARAM(isp)->isp_2klogin == 0) { 598 loopid <<= 8; 599 } 600 switch (fct->action) { 601 case IPT_CLEAR_ACA: 602 mbs.param[0] = MBOX_CLEAR_ACA; 603 mbs.param[1] = loopid; 604 mbs.param[2] = fct->lun; 605 break; 606 case IPT_TARGET_RESET: 607 mbs.param[0] = MBOX_TARGET_RESET; 608 mbs.param[1] = loopid; 609 needmarker = 1; 610 break; 611 case IPT_LUN_RESET: 612 mbs.param[0] = MBOX_LUN_RESET; 613 mbs.param[1] = loopid; 614 mbs.param[2] = fct->lun; 615 needmarker = 1; 616 break; 617 case IPT_CLEAR_TASK_SET: 618 mbs.param[0] = MBOX_CLEAR_TASK_SET; 619 mbs.param[1] = loopid; 620 mbs.param[2] = fct->lun; 621 needmarker = 1; 622 break; 623 case IPT_ABORT_TASK_SET: 624 mbs.param[0] = MBOX_ABORT_TASK_SET; 625 mbs.param[1] = loopid; 626 mbs.param[2] = fct->lun; 627 needmarker = 1; 628 break; 629 default: 630 retval = EINVAL; 631 break; 632 } 633 if (retval == 0) { 634 ISP_LOCK(isp); 635 if (needmarker) { 636 isp->isp_sendmarker |= 1; 637 } 638 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); 639 ISP_UNLOCK(isp); 640 if (retval) 641 retval = EIO; 642 } 643 break; 644 } 645 default: 646 break; 647 } 648 return (retval); 649 } 650 651 #if __FreeBSD_version >= 500000 652 static void 653 isp_sysctl_update(ispsoftc_t *isp) 654 { 655 struct sysctl_ctx_list *ctx = 656 device_get_sysctl_ctx(isp->isp_osinfo.dev); 657 struct sysctl_oid *tree =
device_get_sysctl_tree(isp->isp_osinfo.dev); 658 659 if (IS_SCSI(isp)) { 660 return; 661 } 662 663 snprintf(isp->isp_osinfo.sysctl_info.fc.wwnn, 664 sizeof (isp->isp_osinfo.sysctl_info.fc.wwnn), "0x%08x%08x", 665 (uint32_t) (ISP_NODEWWN(isp) >> 32), (uint32_t) ISP_NODEWWN(isp)); 666 667 snprintf(isp->isp_osinfo.sysctl_info.fc.wwpn, 668 sizeof (isp->isp_osinfo.sysctl_info.fc.wwpn), "0x%08x%08x", 669 (uint32_t) (ISP_PORTWWN(isp) >> 32), (uint32_t) ISP_PORTWWN(isp)); 670 671 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 672 "wwnn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwnn, 0, 673 "World Wide Node Name"); 674 675 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 676 "wwpn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwpn, 0, 677 "World Wide Port Name"); 678 679 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 680 "loop_down_limit", 681 CTLFLAG_RW, &isp->isp_osinfo.loop_down_limit, 0, 682 "How long to wait for loop to come back up"); 683 684 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 685 "gone_device_time", 686 CTLFLAG_RW, &isp->isp_osinfo.gone_device_time, 0, 687 "How long to wait for a device to reappear"); 688 } 689 #endif 690 691 static void 692 isp_intr_enable(void *arg) 693 { 694 ispsoftc_t *isp = arg; 695 if (isp->isp_role != ISP_ROLE_NONE) { 696 ISP_ENABLE_INTS(isp); 697 } 698 /* Release our hook so that the boot can continue. */ 699 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 700 } 701 702 /* 703 * Put the target mode functions here, because some are inlines 704 */ 705 706 #ifdef ISP_TARGET_MODE 707 708 static __inline int is_lun_enabled(ispsoftc_t *, int, lun_id_t); 709 static __inline int are_any_luns_enabled(ispsoftc_t *, int); 710 static __inline tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); 711 static __inline void rls_lun_statep(ispsoftc_t *, tstate_t *); 712 static __inline atio_private_data_t *isp_get_atpd(ispsoftc_t *, int); 713 static cam_status 714 create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); 715 static void destroy_lun_state(ispsoftc_t *, tstate_t *); 716 static int isp_en_lun(ispsoftc_t *, union ccb *); 717 static void isp_ledone(ispsoftc_t *, lun_entry_t *); 718 static cam_status isp_abort_tgt_ccb(ispsoftc_t *, union ccb *); 719 static timeout_t isp_refire_putback_atio; 720 static void isp_complete_ctio(union ccb *); 721 static void isp_target_putback_atio(union ccb *); 722 static void isp_target_start_ctio(ispsoftc_t *, union ccb *); 723 static int isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); 724 static int isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); 725 static int isp_handle_platform_ctio(ispsoftc_t *, void *); 726 static int isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *); 727 static int isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *); 728 729 static __inline int 730 is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun) 731 { 732 tstate_t *tptr; 733 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 734 if (tptr == NULL) { 735 return (0); 736 } 737 do { 738 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { 739 return (1); 740 } 741 } while ((tptr = tptr->next) != NULL); 742 return (0); 743 } 744 745 static __inline int 746 are_any_luns_enabled(ispsoftc_t *isp, int port) 747 { 748 int lo, hi; 749 if (IS_DUALBUS(isp)) { 750 lo = (port * (LUN_HASH_SIZE >> 1)); 751 hi = lo + (LUN_HASH_SIZE >> 1); 752 } else { 753 lo = 0; 754 hi = LUN_HASH_SIZE; 755 } 756 for (lo = 0; lo < hi; lo++) { 757 if (isp->isp_osinfo.lun_hash[lo]) { 758 
return (1); 759 } 760 } 761 return (0); 762 } 763 764 static __inline tstate_t * 765 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) 766 { 767 tstate_t *tptr = NULL; 768 769 if (lun == CAM_LUN_WILDCARD) { 770 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 771 tptr = &isp->isp_osinfo.tsdflt[bus]; 772 tptr->hold++; 773 return (tptr); 774 } 775 return (NULL); 776 } else { 777 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 778 if (tptr == NULL) { 779 return (NULL); 780 } 781 } 782 783 do { 784 if (tptr->lun == lun && tptr->bus == bus) { 785 tptr->hold++; 786 return (tptr); 787 } 788 } while ((tptr = tptr->next) != NULL); 789 return (tptr); 790 } 791 792 static __inline void 793 rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr) 794 { 795 if (tptr->hold) 796 tptr->hold--; 797 } 798 799 static __inline atio_private_data_t * 800 isp_get_atpd(ispsoftc_t *isp, int tag) 801 { 802 atio_private_data_t *atp; 803 for (atp = isp->isp_osinfo.atpdp; 804 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 805 if (atp->tag == tag) 806 return (atp); 807 } 808 return (NULL); 809 } 810 811 static cam_status 812 create_lun_state(ispsoftc_t *isp, int bus, 813 struct cam_path *path, tstate_t **rslt) 814 { 815 cam_status status; 816 lun_id_t lun; 817 int hfx; 818 tstate_t *tptr, *new; 819 820 lun = xpt_path_lun_id(path); 821 if (lun < 0) { 822 return (CAM_LUN_INVALID); 823 } 824 if (is_lun_enabled(isp, bus, lun)) { 825 return (CAM_LUN_ALRDY_ENA); 826 } 827 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 828 if (new == NULL) { 829 return (CAM_RESRC_UNAVAIL); 830 } 831 832 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 833 xpt_path_target_id(path), xpt_path_lun_id(path)); 834 if (status != CAM_REQ_CMP) { 835 free(new, M_DEVBUF); 836 return (status); 837 } 838 new->bus = bus; 839 new->lun = lun; 840 SLIST_INIT(&new->atios); 841 SLIST_INIT(&new->inots); 842 new->hold = 1; 843 844 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 845 tptr = isp->isp_osinfo.lun_hash[hfx]; 846 if (tptr == NULL) { 847 isp->isp_osinfo.lun_hash[hfx] = new; 848 } else { 849 while (tptr->next) 850 tptr = tptr->next; 851 tptr->next = new; 852 } 853 *rslt = new; 854 return (CAM_REQ_CMP); 855 } 856 857 static __inline void 858 destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr) 859 { 860 int hfx; 861 tstate_t *lw, *pw; 862 863 if (tptr->hold) { 864 return; 865 } 866 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 867 pw = isp->isp_osinfo.lun_hash[hfx]; 868 if (pw == NULL) { 869 return; 870 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 871 isp->isp_osinfo.lun_hash[hfx] = pw->next; 872 } else { 873 lw = pw; 874 pw = lw->next; 875 while (pw) { 876 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 877 lw->next = pw->next; 878 break; 879 } 880 lw = pw; 881 pw = pw->next; 882 } 883 if (pw == NULL) { 884 return; 885 } 886 } 887 free(tptr, M_DEVBUF); 888 } 889 890 /* 891 * Enable luns. 
892 */ 893 static int 894 isp_en_lun(ispsoftc_t *isp, union ccb *ccb) 895 { 896 struct ccb_en_lun *cel = &ccb->cel; 897 tstate_t *tptr; 898 uint32_t seq; 899 int bus, cmd, av, wildcard, tm_on; 900 lun_id_t lun; 901 target_id_t tgt; 902 903 bus = XS_CHANNEL(ccb); 904 if (bus > 1) { 905 xpt_print(ccb->ccb_h.path, "illegal bus %d\n", bus); 906 ccb->ccb_h.status = CAM_PATH_INVALID; 907 return (-1); 908 } 909 tgt = ccb->ccb_h.target_id; 910 lun = ccb->ccb_h.target_lun; 911 912 if (isp->isp_dblev & ISP_LOGTDEBUG0) { 913 xpt_print(ccb->ccb_h.path, "%sabling lun 0x%x on channel %d\n", 914 cel->enable? "en" : "dis", lun, bus); 915 } 916 917 if ((lun != CAM_LUN_WILDCARD) && 918 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 919 ccb->ccb_h.status = CAM_LUN_INVALID; 920 return (-1); 921 } 922 923 if (IS_SCSI(isp)) { 924 sdparam *sdp = isp->isp_param; 925 sdp += bus; 926 if (tgt != CAM_TARGET_WILDCARD && 927 tgt != sdp->isp_initiator_id) { 928 ccb->ccb_h.status = CAM_TID_INVALID; 929 return (-1); 930 } 931 } else { 932 /* 933 * There's really no point in doing this yet w/o multi-tid 934 * capability. Even then, it's problematic. 935 */ 936 #if 0 937 if (tgt != CAM_TARGET_WILDCARD && 938 tgt != FCPARAM(isp)->isp_iid) { 939 ccb->ccb_h.status = CAM_TID_INVALID; 940 return (-1); 941 } 942 #endif 943 /* 944 * This is as a good a place as any to check f/w capabilities. 945 */ 946 if (FCPARAM(isp)->isp_tmode == 0) { 947 xpt_print(ccb->ccb_h.path, 948 "firmware does not support target mode\n"); 949 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 950 return (-1); 951 } 952 /* 953 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 954 * XXX: dork with our already fragile enable/disable code. 955 */ 956 if (FCPARAM(isp)->isp_sccfw == 0) { 957 xpt_print(ccb->ccb_h.path, 958 "firmware not SCCLUN capable\n"); 959 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 960 return (-1); 961 } 962 } 963 964 if (tgt == CAM_TARGET_WILDCARD) { 965 if (lun == CAM_LUN_WILDCARD) { 966 wildcard = 1; 967 } else { 968 ccb->ccb_h.status = CAM_LUN_INVALID; 969 return (-1); 970 } 971 } else { 972 wildcard = 0; 973 } 974 975 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0; 976 977 /* 978 * Next check to see whether this is a target/lun wildcard action. 979 * 980 * If so, we know that we can accept commands for luns that haven't 981 * been enabled yet and send them upstream. Otherwise, we have to 982 * handle them locally (if we see them at all). 983 */ 984 985 if (wildcard) { 986 tptr = &isp->isp_osinfo.tsdflt[bus]; 987 if (cel->enable) { 988 if (tm_on) { 989 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 990 return (-1); 991 } 992 ccb->ccb_h.status = 993 xpt_create_path(&tptr->owner, NULL, 994 xpt_path_path_id(ccb->ccb_h.path), 995 xpt_path_target_id(ccb->ccb_h.path), 996 xpt_path_lun_id(ccb->ccb_h.path)); 997 if (ccb->ccb_h.status != CAM_REQ_CMP) { 998 return (-1); 999 } 1000 SLIST_INIT(&tptr->atios); 1001 SLIST_INIT(&tptr->inots); 1002 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 1003 } else { 1004 if (tm_on == 0) { 1005 ccb->ccb_h.status = CAM_REQ_CMP; 1006 return (-1); 1007 } 1008 if (tptr->hold) { 1009 ccb->ccb_h.status = CAM_SCSI_BUSY; 1010 return (-1); 1011 } 1012 xpt_free_path(tptr->owner); 1013 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 1014 } 1015 } 1016 1017 /* 1018 * Now check to see whether this bus needs to be 1019 * enabled/disabled with respect to target mode. 
1020 */ 1021 av = bus << 31; 1022 if (cel->enable && tm_on == 0) { 1023 av |= ENABLE_TARGET_FLAG; 1024 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1025 if (av) { 1026 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1027 if (wildcard) { 1028 isp->isp_osinfo.tmflags[bus] &= 1029 ~TM_WILDCARD_ENABLED; 1030 xpt_free_path(tptr->owner); 1031 } 1032 return (-1); 1033 } 1034 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 1035 xpt_print(ccb->ccb_h.path, "Target Mode Enabled\n"); 1036 } else if (cel->enable == 0 && tm_on && wildcard) { 1037 if (are_any_luns_enabled(isp, bus)) { 1038 ccb->ccb_h.status = CAM_SCSI_BUSY; 1039 return (-1); 1040 } 1041 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1042 if (av) { 1043 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1044 return (-1); 1045 } 1046 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1047 xpt_print(ccb->ccb_h.path, "Target Mode Disabled\n"); 1048 } 1049 1050 if (wildcard) { 1051 ccb->ccb_h.status = CAM_REQ_CMP; 1052 return (-1); 1053 } 1054 1055 /* 1056 * Find an empty slot 1057 */ 1058 for (seq = 0; seq < NLEACT; seq++) { 1059 if (isp->isp_osinfo.leact[seq] == 0) { 1060 break; 1061 } 1062 } 1063 if (seq >= NLEACT) { 1064 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1065 return (-1); 1066 1067 } 1068 isp->isp_osinfo.leact[seq] = ccb; 1069 1070 if (cel->enable) { 1071 ccb->ccb_h.status = 1072 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 1073 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1074 isp->isp_osinfo.leact[seq] = 0; 1075 return (-1); 1076 } 1077 } else { 1078 tptr = get_lun_statep(isp, bus, lun); 1079 if (tptr == NULL) { 1080 ccb->ccb_h.status = CAM_LUN_INVALID; 1081 return (-1); 1082 } 1083 } 1084 1085 if (cel->enable) { 1086 int c, n, ulun = lun; 1087 1088 cmd = RQSTYPE_ENABLE_LUN; 1089 c = DFLT_CMND_CNT; 1090 n = DFLT_INOT_CNT; 1091 if (IS_FC(isp) && lun != 0) { 1092 cmd = RQSTYPE_MODIFY_LUN; 1093 n = 0; 1094 /* 1095 * For SCC firmware, we only deal with setting 1096 * (enabling or modifying) lun 0. 1097 */ 1098 ulun = 0; 1099 } 1100 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1101 rls_lun_statep(isp, tptr); 1102 ccb->ccb_h.status = CAM_REQ_INPROG; 1103 return (seq); 1104 } 1105 } else { 1106 int c, n, ulun = lun; 1107 1108 cmd = -RQSTYPE_MODIFY_LUN; 1109 c = DFLT_CMND_CNT; 1110 n = DFLT_INOT_CNT; 1111 if (IS_FC(isp) && lun != 0) { 1112 n = 0; 1113 /* 1114 * For SCC firmware, we only deal with setting 1115 * (enabling or modifying) lun 0. 
1116 */ 1117 ulun = 0; 1118 } 1119 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1120 rls_lun_statep(isp, tptr); 1121 ccb->ccb_h.status = CAM_REQ_INPROG; 1122 return (seq); 1123 } 1124 } 1125 rls_lun_statep(isp, tptr); 1126 xpt_print(ccb->ccb_h.path, "isp_lun_cmd failed\n"); 1127 isp->isp_osinfo.leact[seq] = 0; 1128 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1129 return (-1); 1130 } 1131 1132 static void 1133 isp_ledone(ispsoftc_t *isp, lun_entry_t *lep) 1134 { 1135 const char lfmt[] = "now %sabled for target mode"; 1136 union ccb *ccb; 1137 uint32_t seq; 1138 tstate_t *tptr; 1139 int av; 1140 struct ccb_en_lun *cel; 1141 1142 seq = lep->le_reserved - 1; 1143 if (seq >= NLEACT) { 1144 isp_prt(isp, ISP_LOGERR, 1145 "seq out of range (%u) in isp_ledone", seq); 1146 return; 1147 } 1148 ccb = isp->isp_osinfo.leact[seq]; 1149 if (ccb == 0) { 1150 isp_prt(isp, ISP_LOGERR, 1151 "no ccb for seq %u in isp_ledone", seq); 1152 return; 1153 } 1154 cel = &ccb->cel; 1155 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); 1156 if (tptr == NULL) { 1157 xpt_print(ccb->ccb_h.path, "null tptr in isp_ledone\n"); 1158 isp->isp_osinfo.leact[seq] = 0; 1159 return; 1160 } 1161 1162 if (lep->le_status != LUN_OK) { 1163 xpt_print(ccb->ccb_h.path, 1164 "ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status); 1165 err: 1166 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1167 rls_lun_statep(isp, tptr); 1168 isp->isp_osinfo.leact[seq] = 0; 1169 ISPLOCK_2_CAMLOCK(isp); 1170 xpt_done(ccb); 1171 CAMLOCK_2_ISPLOCK(isp); 1172 return; 1173 } else { 1174 isp_prt(isp, ISP_LOGTDEBUG0, 1175 "isp_ledone: ENABLE/MODIFY done okay"); 1176 } 1177 1178 1179 if (cel->enable) { 1180 ccb->ccb_h.status = CAM_REQ_CMP; 1181 xpt_print(ccb->ccb_h.path, lfmt, "en"); 1182 rls_lun_statep(isp, tptr); 1183 isp->isp_osinfo.leact[seq] = 0; 1184 ISPLOCK_2_CAMLOCK(isp); 1185 xpt_done(ccb); 1186 CAMLOCK_2_ISPLOCK(isp); 1187 return; 1188 } 1189 1190 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) { 1191 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb), 1192 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) { 1193 xpt_print(ccb->ccb_h.path, 1194 "isp_ledone: isp_lun_cmd failed\n"); 1195 goto err; 1196 } 1197 rls_lun_statep(isp, tptr); 1198 return; 1199 } 1200 1201 xpt_print(ccb->ccb_h.path, lfmt, "dis"); 1202 rls_lun_statep(isp, tptr); 1203 destroy_lun_state(isp, tptr); 1204 ccb->ccb_h.status = CAM_REQ_CMP; 1205 isp->isp_osinfo.leact[seq] = 0; 1206 ISPLOCK_2_CAMLOCK(isp); 1207 xpt_done(ccb); 1208 CAMLOCK_2_ISPLOCK(isp); 1209 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) { 1210 int bus = XS_CHANNEL(ccb); 1211 av = bus << 31; 1212 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1213 if (av) { 1214 isp_prt(isp, ISP_LOGWARN, 1215 "disable target mode on channel %d failed", bus); 1216 } 1217 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1218 } 1219 } 1220 1221 1222 static cam_status 1223 isp_abort_tgt_ccb(ispsoftc_t *isp, union ccb *ccb) 1224 { 1225 tstate_t *tptr; 1226 struct ccb_hdr_slist *lp; 1227 struct ccb_hdr *curelm; 1228 int found, *ctr; 1229 union ccb *accb = ccb->cab.abort_ccb; 1230 1231 xpt_print(ccb->ccb_h.path, "aborting ccb %p\n", accb); 1232 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 1233 int badpath = 0; 1234 if (IS_FC(isp) && (accb->ccb_h.target_id != 1235 ((fcparam *) isp->isp_param)->isp_loopid)) { 1236 badpath = 1; 1237 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 1238 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 1239 badpath = 1; 1240 } 1241 if (badpath) { 1242 /* 1243 * Being restrictive 
about target ids is really about 1244 * making sure we're aborting for the right multi-tid 1245 * path. This doesn't really make much sense at present. 1246 */ 1247 #if 0 1248 return (CAM_PATH_INVALID); 1249 #endif 1250 } 1251 } 1252 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 1253 if (tptr == NULL) { 1254 xpt_print(ccb->ccb_h.path, "can't get statep\n"); 1255 return (CAM_PATH_INVALID); 1256 } 1257 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1258 lp = &tptr->atios; 1259 ctr = &tptr->atio_count; 1260 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 1261 lp = &tptr->inots; 1262 ctr = &tptr->inot_count; 1263 } else { 1264 rls_lun_statep(isp, tptr); 1265 xpt_print(ccb->ccb_h.path, "bad function code %d\n", 1266 accb->ccb_h.func_code); 1267 return (CAM_UA_ABORT); 1268 } 1269 curelm = SLIST_FIRST(lp); 1270 found = 0; 1271 if (curelm == &accb->ccb_h) { 1272 found = 1; 1273 SLIST_REMOVE_HEAD(lp, sim_links.sle); 1274 } else { 1275 while(curelm != NULL) { 1276 struct ccb_hdr *nextelm; 1277 1278 nextelm = SLIST_NEXT(curelm, sim_links.sle); 1279 if (nextelm == &accb->ccb_h) { 1280 found = 1; 1281 SLIST_NEXT(curelm, sim_links.sle) = 1282 SLIST_NEXT(nextelm, sim_links.sle); 1283 break; 1284 } 1285 curelm = nextelm; 1286 } 1287 } 1288 rls_lun_statep(isp, tptr); 1289 if (found) { 1290 (*ctr)--; 1291 accb->ccb_h.status = CAM_REQ_ABORTED; 1292 xpt_done(accb); 1293 return (CAM_REQ_CMP); 1294 } 1295 xpt_print(ccb->ccb_h.path, "ccb %p not found\n", accb); 1296 return (CAM_PATH_INVALID); 1297 } 1298 1299 static void 1300 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb) 1301 { 1302 void *qe; 1303 struct ccb_scsiio *cso = &ccb->csio; 1304 uint32_t nxti, optr, handle; 1305 uint8_t local[QENTRY_LEN]; 1306 1307 1308 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1309 xpt_print(ccb->ccb_h.path, 1310 "Request Queue Overflow in isp_target_start_ctio\n"); 1311 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1312 goto out; 1313 } 1314 memset(local, 0, QENTRY_LEN); 1315 1316 /* 1317 * We're either moving data or completing a command here. 
1318 */ 1319 1320 if (IS_FC(isp)) { 1321 atio_private_data_t *atp; 1322 ct2_entry_t *cto = (ct2_entry_t *) local; 1323 1324 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1325 cto->ct_header.rqs_entry_count = 1; 1326 if (FCPARAM(isp)->isp_2klogin) { 1327 ((ct2e_entry_t *)cto)->ct_iid = cso->init_id; 1328 } else { 1329 cto->ct_iid = cso->init_id; 1330 if (FCPARAM(isp)->isp_sccfw == 0) { 1331 cto->ct_lun = ccb->ccb_h.target_lun; 1332 } 1333 } 1334 1335 atp = isp_get_atpd(isp, cso->tag_id); 1336 if (atp == NULL) { 1337 xpt_print(ccb->ccb_h.path, 1338 "cannot find private data adjunct for tag %x\n", 1339 cso->tag_id); 1340 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 1341 goto out; 1342 } 1343 1344 cto->ct_rxid = cso->tag_id; 1345 if (cso->dxfer_len == 0) { 1346 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1347 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1348 cto->ct_flags |= CT2_SENDSTATUS; 1349 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1350 cto->ct_resid = 1351 atp->orig_datalen - atp->bytes_xfered; 1352 if (cto->ct_resid < 0) { 1353 cto->rsp.m1.ct_scsi_status |= 1354 CT2_DATA_OVER; 1355 } else if (cto->ct_resid > 0) { 1356 cto->rsp.m1.ct_scsi_status |= 1357 CT2_DATA_UNDER; 1358 } 1359 } 1360 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1361 int m = min(cso->sense_len, MAXRESPLEN); 1362 memcpy(cto->rsp.m1.ct_resp, 1363 &cso->sense_data, m); 1364 cto->rsp.m1.ct_senselen = m; 1365 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1366 } 1367 } else { 1368 cto->ct_flags |= CT2_FLAG_MODE0; 1369 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1370 cto->ct_flags |= CT2_DATA_IN; 1371 } else { 1372 cto->ct_flags |= CT2_DATA_OUT; 1373 } 1374 cto->ct_reloff = atp->bytes_xfered; 1375 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1376 cto->ct_flags |= CT2_SENDSTATUS; 1377 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1378 cto->ct_resid = 1379 atp->orig_datalen - 1380 (atp->bytes_xfered + cso->dxfer_len); 1381 if (cto->ct_resid < 0) { 1382 cto->rsp.m0.ct_scsi_status |= 1383 CT2_DATA_OVER; 1384 } else if (cto->ct_resid > 0) { 1385 cto->rsp.m0.ct_scsi_status |= 1386 CT2_DATA_UNDER; 1387 } 1388 } else { 1389 atp->last_xframt = cso->dxfer_len; 1390 } 1391 /* 1392 * If we're sending data and status back together, 1393 * we can't also send back sense data as well. 
1394 */ 1395 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1396 } 1397 1398 if (cto->ct_flags & CT2_SENDSTATUS) { 1399 isp_prt(isp, ISP_LOGTDEBUG0, 1400 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1401 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1402 cso->dxfer_len, cto->ct_resid); 1403 cto->ct_flags |= CT2_CCINCR; 1404 atp->state = ATPD_STATE_LAST_CTIO; 1405 } else { 1406 atp->state = ATPD_STATE_CTIO; 1407 } 1408 cto->ct_timeout = 10; 1409 } else { 1410 ct_entry_t *cto = (ct_entry_t *) local; 1411 1412 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1413 cto->ct_header.rqs_entry_count = 1; 1414 cto->ct_iid = cso->init_id; 1415 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1416 cto->ct_tgt = ccb->ccb_h.target_id; 1417 cto->ct_lun = ccb->ccb_h.target_lun; 1418 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1419 if (AT_HAS_TAG(cso->tag_id)) { 1420 cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id); 1421 cto->ct_flags |= CT_TQAE; 1422 } 1423 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1424 cto->ct_flags |= CT_NODISC; 1425 } 1426 if (cso->dxfer_len == 0) { 1427 cto->ct_flags |= CT_NO_DATA; 1428 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1429 cto->ct_flags |= CT_DATA_IN; 1430 } else { 1431 cto->ct_flags |= CT_DATA_OUT; 1432 } 1433 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1434 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1435 cto->ct_scsi_status = cso->scsi_status; 1436 cto->ct_resid = cso->resid; 1437 isp_prt(isp, ISP_LOGTDEBUG0, 1438 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1439 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1440 cso->tag_id); 1441 } 1442 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1443 cto->ct_timeout = 10; 1444 } 1445 1446 if (isp_save_xs_tgt(isp, ccb, &handle)) { 1447 xpt_print(ccb->ccb_h.path, 1448 "No XFLIST pointers for isp_target_start_ctio\n"); 1449 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1450 goto out; 1451 } 1452 1453 1454 /* 1455 * Call the dma setup routines for this entry (and any subsequent 1456 * CTIOs) if there's data to move, and then tell the f/w it's got 1457 * new things to play with. As with isp_start's usage of DMA setup, 1458 * any swizzling is done in the machine dependent layer. Because 1459 * of this, we put the request onto the queue area first in native 1460 * format. 
1461 */ 1462 1463 if (IS_FC(isp)) { 1464 ct2_entry_t *cto = (ct2_entry_t *) local; 1465 cto->ct_syshandle = handle; 1466 } else { 1467 ct_entry_t *cto = (ct_entry_t *) local; 1468 cto->ct_syshandle = handle; 1469 } 1470 1471 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) { 1472 case CMD_QUEUED: 1473 ISP_ADD_REQUEST(isp, nxti); 1474 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1475 return; 1476 1477 case CMD_EAGAIN: 1478 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1479 break; 1480 1481 default: 1482 break; 1483 } 1484 isp_destroy_tgt_handle(isp, handle); 1485 1486 out: 1487 ISPLOCK_2_CAMLOCK(isp); 1488 xpt_done(ccb); 1489 CAMLOCK_2_ISPLOCK(isp); 1490 } 1491 1492 static void 1493 isp_refire_putback_atio(void *arg) 1494 { 1495 int s = splcam(); 1496 isp_target_putback_atio(arg); 1497 splx(s); 1498 } 1499 1500 static void 1501 isp_target_putback_atio(union ccb *ccb) 1502 { 1503 ispsoftc_t *isp; 1504 struct ccb_scsiio *cso; 1505 uint32_t nxti, optr; 1506 void *qe; 1507 1508 isp = XS_ISP(ccb); 1509 1510 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1511 xpt_print(ccb->ccb_h.path, 1512 "isp_target_putback_atio: Request Queue Overflow\n"); 1513 (void) timeout(isp_refire_putback_atio, ccb, 10); 1514 return; 1515 } 1516 memset(qe, 0, QENTRY_LEN); 1517 cso = &ccb->csio; 1518 if (IS_FC(isp)) { 1519 at2_entry_t local, *at = &local; 1520 MEMZERO(at, sizeof (at2_entry_t)); 1521 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 1522 at->at_header.rqs_entry_count = 1; 1523 if (FCPARAM(isp)->isp_sccfw) { 1524 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 1525 } else { 1526 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 1527 } 1528 at->at_status = CT_OK; 1529 at->at_rxid = cso->tag_id; 1530 at->at_iid = cso->ccb_h.target_id; 1531 isp_put_atio2(isp, at, qe); 1532 } else { 1533 at_entry_t local, *at = &local; 1534 MEMZERO(at, sizeof (at_entry_t)); 1535 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 1536 at->at_header.rqs_entry_count = 1; 1537 at->at_iid = cso->init_id; 1538 at->at_iid |= XS_CHANNEL(ccb) << 7; 1539 at->at_tgt = cso->ccb_h.target_id; 1540 at->at_lun = cso->ccb_h.target_lun; 1541 at->at_status = CT_OK; 1542 at->at_tag_val = AT_GET_TAG(cso->tag_id); 1543 at->at_handle = AT_GET_HANDLE(cso->tag_id); 1544 isp_put_atio(isp, at, qe); 1545 } 1546 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); 1547 ISP_ADD_REQUEST(isp, nxti); 1548 isp_complete_ctio(ccb); 1549 } 1550 1551 static void 1552 isp_complete_ctio(union ccb *ccb) 1553 { 1554 ISPLOCK_2_CAMLOCK(isp); 1555 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1556 ccb->ccb_h.status |= CAM_REQ_CMP; 1557 } 1558 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1559 xpt_done(ccb); 1560 CAMLOCK_2_ISPLOCK(isp); 1561 } 1562 1563 /* 1564 * Handle ATIO stuff that the generic code can't. 1565 * This means handling CDBs. 1566 */ 1567 1568 static int 1569 isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep) 1570 { 1571 tstate_t *tptr; 1572 int status, bus, iswildcard; 1573 struct ccb_accept_tio *atiop; 1574 1575 /* 1576 * The firmware status (except for the QLTM_SVALID bit) 1577 * indicates why this ATIO was sent to us. 1578 * 1579 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1580 * 1581 * If the DISCONNECTS DISABLED bit is set in the flags field, 1582 * we're still connected on the SCSI bus. 1583 */ 1584 status = aep->at_status; 1585 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 1586 /* 1587 * Bus Phase Sequence error. We should have sense data 1588 * suggested by the f/w. 
I'm not sure quite yet what 1589 * to do about this for CAM. 1590 */ 1591 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1592 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1593 return (0); 1594 } 1595 if ((status & ~QLTM_SVALID) != AT_CDB) { 1596 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1597 status); 1598 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1599 return (0); 1600 } 1601 1602 bus = GET_BUS_VAL(aep->at_iid); 1603 tptr = get_lun_statep(isp, bus, aep->at_lun); 1604 if (tptr == NULL) { 1605 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1606 if (tptr == NULL) { 1607 /* 1608 * Because we can't autofeed sense data back with 1609 * a command for parallel SCSI, we can't give back 1610 * a CHECK CONDITION. We'll give back a BUSY status 1611 * instead. This works out okay because the only 1612 * time we should, in fact, get this, is in the 1613 * case that somebody configured us without the 1614 * blackhole driver, so they get what they deserve. 1615 */ 1616 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1617 return (0); 1618 } 1619 iswildcard = 1; 1620 } else { 1621 iswildcard = 0; 1622 } 1623 1624 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1625 if (atiop == NULL) { 1626 /* 1627 * Because we can't autofeed sense data back with 1628 * a command for parallel SCSI, we can't give back 1629 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1630 * instead. This works out okay because the only time we 1631 * should, in fact, get this, is in the case that we've 1632 * run out of ATIOS. 1633 */ 1634 xpt_print(tptr->owner, 1635 "no ATIOS for lun %d from initiator %d on channel %d\n", 1636 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1637 if (aep->at_flags & AT_TQAE) 1638 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1639 else 1640 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1641 rls_lun_statep(isp, tptr); 1642 return (0); 1643 } 1644 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1645 tptr->atio_count--; 1646 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1647 aep->at_lun, tptr->atio_count); 1648 if (iswildcard) { 1649 atiop->ccb_h.target_id = aep->at_tgt; 1650 atiop->ccb_h.target_lun = aep->at_lun; 1651 } 1652 if (aep->at_flags & AT_NODISC) { 1653 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1654 } else { 1655 atiop->ccb_h.flags = 0; 1656 } 1657 1658 if (status & QLTM_SVALID) { 1659 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1660 atiop->sense_len = amt; 1661 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1662 } else { 1663 atiop->sense_len = 0; 1664 } 1665 1666 atiop->init_id = GET_IID_VAL(aep->at_iid); 1667 atiop->cdb_len = aep->at_cdblen; 1668 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1669 atiop->ccb_h.status = CAM_CDB_RECVD; 1670 /* 1671 * Construct a tag 'id' based upon tag value (which may be 0..255) 1672 * and the handle (which we have to preserve). 1673 */ 1674 AT_MAKE_TAGID(atiop->tag_id, bus, device_get_unit(isp->isp_dev), aep); 1675 if (aep->at_flags & AT_TQAE) { 1676 atiop->tag_action = aep->at_tag_type; 1677 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1678 } 1679 xpt_done((union ccb*)atiop); 1680 isp_prt(isp, ISP_LOGTDEBUG0, 1681 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", 1682 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), 1683 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, 1684 aep->at_tag_type, (aep->at_flags & AT_NODISC)? 
1685 "nondisc" : "disconnecting"); 1686 rls_lun_statep(isp, tptr); 1687 return (0); 1688 } 1689 1690 static int 1691 isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) 1692 { 1693 lun_id_t lun; 1694 tstate_t *tptr; 1695 struct ccb_accept_tio *atiop; 1696 atio_private_data_t *atp; 1697 1698 /* 1699 * The firmware status (except for the QLTM_SVALID bit) 1700 * indicates why this ATIO was sent to us. 1701 * 1702 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1703 */ 1704 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1705 isp_prt(isp, ISP_LOGWARN, 1706 "bogus atio (0x%x) leaked to platform", aep->at_status); 1707 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1708 return (0); 1709 } 1710 1711 if (FCPARAM(isp)->isp_sccfw) { 1712 lun = aep->at_scclun; 1713 } else { 1714 lun = aep->at_lun; 1715 } 1716 tptr = get_lun_statep(isp, 0, lun); 1717 if (tptr == NULL) { 1718 isp_prt(isp, ISP_LOGTDEBUG0, 1719 "[0x%x] no state pointer for lun %d", aep->at_rxid, lun); 1720 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1721 if (tptr == NULL) { 1722 isp_endcmd(isp, aep, 1723 SCSI_STATUS_CHECK_COND | ECMD_SVALID | 1724 (0x5 << 12) | (0x25 << 16), 0); 1725 return (0); 1726 } 1727 } 1728 1729 atp = isp_get_atpd(isp, 0); 1730 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1731 if (atiop == NULL || atp == NULL) { 1732 1733 /* 1734 * Because we can't autofeed sense data back with 1735 * a command for parallel SCSI, we can't give back 1736 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1737 * instead. This works out okay because the only time we 1738 * should, in fact, get this, is in the case that we've 1739 * run out of ATIOS. 1740 */ 1741 xpt_print(tptr->owner, 1742 "no %s for lun %d from initiator %d\n", 1743 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : 1744 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid); 1745 rls_lun_statep(isp, tptr); 1746 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1747 return (0); 1748 } 1749 atp->state = ATPD_STATE_ATIO; 1750 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1751 tptr->atio_count--; 1752 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1753 lun, tptr->atio_count); 1754 1755 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1756 atiop->ccb_h.target_id = FCPARAM(isp)->isp_loopid; 1757 atiop->ccb_h.target_lun = lun; 1758 } 1759 /* 1760 * We don't get 'suggested' sense data as we do with SCSI cards. 1761 */ 1762 atiop->sense_len = 0; 1763 1764 atiop->init_id = aep->at_iid; 1765 atiop->cdb_len = ATIO2_CDBLEN; 1766 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1767 atiop->ccb_h.status = CAM_CDB_RECVD; 1768 atiop->tag_id = aep->at_rxid; 1769 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1770 case ATIO2_TC_ATTR_SIMPLEQ: 1771 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1772 break; 1773 case ATIO2_TC_ATTR_HEADOFQ: 1774 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1775 break; 1776 case ATIO2_TC_ATTR_ORDERED: 1777 atiop->tag_action = MSG_ORDERED_Q_TAG; 1778 break; 1779 case ATIO2_TC_ATTR_ACAQ: /* ?? 
*/ 1780 case ATIO2_TC_ATTR_UNTAGGED: 1781 default: 1782 atiop->tag_action = 0; 1783 break; 1784 } 1785 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 1786 1787 atp->tag = atiop->tag_id; 1788 atp->lun = lun; 1789 atp->orig_datalen = aep->at_datalen; 1790 atp->last_xframt = 0; 1791 atp->bytes_xfered = 0; 1792 atp->state = ATPD_STATE_CAM; 1793 ISPLOCK_2_CAMLOCK(isp); 1794 xpt_done((union ccb*)atiop); 1795 1796 isp_prt(isp, ISP_LOGTDEBUG0, 1797 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u", 1798 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid, 1799 lun, aep->at_taskflags, aep->at_datalen); 1800 rls_lun_statep(isp, tptr); 1801 return (0); 1802 } 1803 1804 static int 1805 isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) 1806 { 1807 union ccb *ccb; 1808 int sentstatus, ok, notify_cam, resid = 0; 1809 uint16_t tval; 1810 1811 /* 1812 * CTIO and CTIO2 are close enough.... 1813 */ 1814 1815 ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle); 1816 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio")); 1817 isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle); 1818 1819 if (IS_FC(isp)) { 1820 ct2_entry_t *ct = arg; 1821 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid); 1822 if (atp == NULL) { 1823 isp_prt(isp, ISP_LOGERR, 1824 "cannot find adjunct for %x after I/O", 1825 ct->ct_rxid); 1826 return (0); 1827 } 1828 sentstatus = ct->ct_flags & CT2_SENDSTATUS; 1829 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1830 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { 1831 ccb->ccb_h.status |= CAM_SENT_SENSE; 1832 } 1833 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1834 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { 1835 resid = ct->ct_resid; 1836 atp->bytes_xfered += (atp->last_xframt - resid); 1837 atp->last_xframt = 0; 1838 } 1839 if (sentstatus || !ok) { 1840 atp->tag = 0; 1841 } 1842 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, 1843 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", 1844 ct->ct_rxid, ct->ct_status, ct->ct_flags, 1845 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, 1846 resid, sentstatus? "FIN" : "MID"); 1847 tval = ct->ct_rxid; 1848 1849 /* XXX: should really come after isp_complete_ctio */ 1850 atp->state = ATPD_STATE_PDON; 1851 } else { 1852 ct_entry_t *ct = arg; 1853 sentstatus = ct->ct_flags & CT_SENDSTATUS; 1854 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1855 /* 1856 * We *ought* to be able to get back to the original ATIO 1857 * here, but for some reason this gets lost. It's just as 1858 * well because it's squirrelled away as part of periph 1859 * private data. 1860 * 1861 * We can live without it as long as we continue to use 1862 * the auto-replenish feature for CTIOs. 1863 */ 1864 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1865 if (ct->ct_status & QLTM_SVALID) { 1866 char *sp = (char *)ct; 1867 sp += CTIO_SENSE_OFFSET; 1868 ccb->csio.sense_len = 1869 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); 1870 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); 1871 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1872 } 1873 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 1874 resid = ct->ct_resid; 1875 } 1876 isp_prt(isp, ISP_LOGTDEBUG0, 1877 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", 1878 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, 1879 ct->ct_status, ct->ct_flags, resid, 1880 sentstatus?
"FIN" : "MID"); 1881 tval = ct->ct_fwhandle; 1882 } 1883 ccb->csio.resid += resid; 1884 1885 /* 1886 * We're here either because intermediate data transfers are done 1887 * and/or the final status CTIO (which may have joined with a 1888 * Data Transfer) is done. 1889 * 1890 * In any case, for this platform, the upper layers figure out 1891 * what to do next, so all we do here is collect status and 1892 * pass information along. Any DMA handles have already been 1893 * freed. 1894 */ 1895 if (notify_cam == 0) { 1896 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1897 return (0); 1898 } 1899 1900 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1901 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1902 1903 if (!ok) { 1904 isp_target_putback_atio(ccb); 1905 } else { 1906 isp_complete_ctio(ccb); 1907 1908 } 1909 return (0); 1910 } 1911 1912 static int 1913 isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inp) 1914 { 1915 return (0); /* XXXX */ 1916 } 1917 1918 static int 1919 isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp) 1920 { 1921 1922 switch (inp->in_status) { 1923 case IN_PORT_LOGOUT: 1924 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1925 inp->in_iid); 1926 break; 1927 case IN_PORT_CHANGED: 1928 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1929 inp->in_iid); 1930 break; 1931 case IN_GLOBAL_LOGO: 1932 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1933 break; 1934 case IN_ABORT_TASK: 1935 { 1936 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1937 struct ccb_immed_notify *inot = NULL; 1938 1939 if (atp) { 1940 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1941 if (tptr) { 1942 inot = (struct ccb_immed_notify *) 1943 SLIST_FIRST(&tptr->inots); 1944 if (inot) { 1945 tptr->inot_count--; 1946 SLIST_REMOVE_HEAD(&tptr->inots, 1947 sim_links.sle); 1948 isp_prt(isp, ISP_LOGTDEBUG0, 1949 "Take FREE INOT count now %d", 1950 tptr->inot_count); 1951 } 1952 } 1953 isp_prt(isp, ISP_LOGWARN, 1954 "abort task RX_ID %x IID %d state %d", 1955 inp->in_seqid, inp->in_iid, atp->state); 1956 } else { 1957 isp_prt(isp, ISP_LOGWARN, 1958 "abort task RX_ID %x from iid %d, state unknown", 1959 inp->in_seqid, inp->in_iid); 1960 } 1961 if (inot) { 1962 inot->initiator_id = inp->in_iid; 1963 inot->sense_len = 0; 1964 inot->message_args[0] = MSG_ABORT_TAG; 1965 inot->message_args[1] = inp->in_seqid & 0xff; 1966 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; 1967 inot->ccb_h.status = CAM_MESSAGE_RECV; 1968 xpt_done((union ccb *)inot); 1969 } 1970 break; 1971 } 1972 default: 1973 break; 1974 } 1975 return (0); 1976 } 1977 #endif 1978 1979 static void 1980 isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) 1981 { 1982 struct cam_sim *sim; 1983 ispsoftc_t *isp; 1984 1985 sim = (struct cam_sim *)cbarg; 1986 isp = (ispsoftc_t *) cam_sim_softc(sim); 1987 switch (code) { 1988 case AC_LOST_DEVICE: 1989 if (IS_SCSI(isp)) { 1990 uint16_t oflags, nflags; 1991 sdparam *sdp = isp->isp_param; 1992 int tgt; 1993 1994 tgt = xpt_path_target_id(path); 1995 if (tgt >= 0) { 1996 sdp += cam_sim_bus(sim); 1997 ISP_LOCK(isp); 1998 nflags = sdp->isp_devparam[tgt].nvrm_flags; 1999 #ifndef ISP_TARGET_MODE 2000 nflags &= DPARM_SAFE_DFLT; 2001 if (isp->isp_loaded_fw) { 2002 nflags |= DPARM_NARROW | DPARM_ASYNC; 2003 } 2004 #else 2005 nflags = DPARM_DEFAULT; 2006 #endif 2007 oflags = sdp->isp_devparam[tgt].goal_flags; 2008 sdp->isp_devparam[tgt].goal_flags = nflags; 2009 sdp->isp_devparam[tgt].dev_update = 1; 2010 isp->isp_update |= (1 << 
cam_sim_bus(sim)); 2011 (void) isp_control(isp, 2012 ISPCTL_UPDATE_PARAMS, NULL); 2013 sdp->isp_devparam[tgt].goal_flags = oflags; 2014 ISP_UNLOCK(isp); 2015 } 2016 } 2017 break; 2018 default: 2019 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); 2020 break; 2021 } 2022 } 2023 2024 static void 2025 isp_poll(struct cam_sim *sim) 2026 { 2027 ispsoftc_t *isp = cam_sim_softc(sim); 2028 uint32_t isr; 2029 uint16_t sema, mbox; 2030 2031 ISP_LOCK(isp); 2032 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2033 isp_intr(isp, isr, sema, mbox); 2034 } 2035 ISP_UNLOCK(isp); 2036 } 2037 2038 2039 static int isp_watchdog_work(ispsoftc_t *, XS_T *); 2040 2041 static int 2042 isp_watchdog_work(ispsoftc_t *isp, XS_T *xs) 2043 { 2044 uint32_t handle; 2045 2046 /* 2047 * We've decided this command is dead. Make sure we're not trying 2048 * to kill a command that's already dead by getting its handle and 2049 * seeing whether it's still alive. 2050 */ 2051 ISP_LOCK(isp); 2052 handle = isp_find_handle(isp, xs); 2053 if (handle) { 2054 uint32_t isr; 2055 uint16_t sema, mbox; 2056 2057 if (XS_CMD_DONE_P(xs)) { 2058 isp_prt(isp, ISP_LOGDEBUG1, 2059 "watchdog found done cmd (handle 0x%x)", handle); 2060 ISP_UNLOCK(isp); 2061 return (1); 2062 } 2063 2064 if (XS_CMD_WDOG_P(xs)) { 2065 isp_prt(isp, ISP_LOGDEBUG2, 2066 "recursive watchdog (handle 0x%x)", handle); 2067 ISP_UNLOCK(isp); 2068 return (1); 2069 } 2070 2071 XS_CMD_S_WDOG(xs); 2072 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2073 isp_intr(isp, isr, sema, mbox); 2074 } 2075 if (XS_CMD_DONE_P(xs)) { 2076 isp_prt(isp, ISP_LOGDEBUG2, 2077 "watchdog cleanup for handle 0x%x", handle); 2078 ISPLOCK_2_CAMLOCK(isp); 2079 xpt_done((union ccb *) xs); 2080 CAMLOCK_2_ISPLOCK(isp); 2081 } else if (XS_CMD_GRACE_P(xs)) { 2082 /* 2083 * Make sure the command is *really* dead before we 2084 * release the handle (and DMA resources) for reuse. 2085 */ 2086 (void) isp_control(isp, ISPCTL_ABORT_CMD, xs); 2087 2088 /* 2089 * After this point, the command is really dead. 2090 */ 2091 if (XS_XFRLEN(xs)) { 2092 ISP_DMAFREE(isp, xs, handle); 2093 } 2094 isp_destroy_handle(isp, handle); 2095 xpt_print(xs->ccb_h.path, 2096 "watchdog timeout for handle 0x%x\n", handle); 2097 XS_SETERR(xs, CAM_CMD_TIMEOUT); 2098 XS_CMD_C_WDOG(xs); 2099 ISPLOCK_2_CAMLOCK(isp); 2100 isp_done(xs); 2101 CAMLOCK_2_ISPLOCK(isp); 2102 } else { 2103 XS_CMD_C_WDOG(xs); 2104 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 2105 XS_CMD_S_GRACE(xs); 2106 isp->isp_sendmarker |= 1 << XS_CHANNEL(xs); 2107 } 2108 ISP_UNLOCK(isp); 2109 return (1); 2110 } 2111 ISP_UNLOCK(isp); 2112 return (0); 2113 } 2114 2115 static void 2116 isp_watchdog(void *arg) 2117 { 2118 ispsoftc_t *isp; 2119 XS_T *xs = arg; 2120 for (isp = isplist; isp != NULL; isp = isp->isp_osinfo.next) { 2121 if (isp_watchdog_work(isp, xs)) { 2122 break; 2123 } 2124 } 2125 if (isp == NULL) { 2126 printf("isp_watchdog: nobody had %p active\n", arg); 2127 } 2128 } 2129 2130 2131 #if __FreeBSD_version >= 500000 2132 /* 2133 * Support functions for Found/Lost 2134 */ 2135 static void 2136 isp_make_here(ispsoftc_t *isp, int tgt) 2137 { 2138 union ccb *ccb; 2139 ISPLOCK_2_CAMLOCK(isp); 2140 /* 2141 * Allocate a CCB, create a wildcard path for this bus, 2142 * and schedule a rescan.
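* (xpt_rescan() is expected to take ownership of the CCB and of the path created for it, releasing both when the scan completes, so they are only freed here on the error paths.)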
2143 */ 2144 ccb = xpt_alloc_ccb_nowait(); 2145 if (ccb == NULL) { 2146 isp_prt(isp, ISP_LOGWARN, "unable to alloc CCB for rescan"); 2147 CAMLOCK_2_ISPLOCK(isp); 2148 return; 2149 } 2150 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, 2151 cam_sim_path(isp->isp_sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2152 CAMLOCK_2_ISPLOCK(isp); 2153 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); 2154 xpt_free_ccb(ccb); 2155 return; 2156 } 2157 xpt_rescan(ccb); 2158 CAMLOCK_2_ISPLOCK(isp); 2159 } 2160 2161 static void 2162 isp_make_gone(ispsoftc_t *isp, int tgt) 2163 { 2164 struct cam_path *tp; 2165 ISPLOCK_2_CAMLOCK(isp); 2166 if (xpt_create_path(&tp, NULL, cam_sim_path(isp->isp_sim), tgt, 2167 CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 2168 xpt_async(AC_LOST_DEVICE, tp, NULL); 2169 xpt_free_path(tp); 2170 } 2171 CAMLOCK_2_ISPLOCK(isp); 2172 } 2173 #else 2174 #define isp_make_here(isp, tgt) do { ; } while (0) 2175 #define isp_make_gone(isp, tgt) do { ; } while (0) 2176 #endif 2177 2178 2179 /* 2180 * Gone Device Timer Function- when we have decided that a device has gone 2181 * away, we wait a specific period of time prior to telling the OS it has 2182 * gone away. 2183 * 2184 * This timer function fires once a second and then scans the port database 2185 * for devices that are marked dead but still have a virtual target assigned. 2186 * We decrement a counter for that port database entry, and when it hits zero, 2187 * we tell the OS the device has gone away. 2188 */ 2189 static void 2190 isp_gdt(void *arg) 2191 { 2192 ispsoftc_t *isp = arg; 2193 fcportdb_t *lp; 2194 int dbidx, tgt, more_to_do = 0; 2195 2196 isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired"); 2197 ISP_LOCK(isp); 2198 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2199 lp = &FCPARAM(isp)->portdb[dbidx]; 2200 2201 if (lp->state != FC_PORTDB_STATE_ZOMBIE) { 2202 continue; 2203 } 2204 if (lp->ini_map_idx == 0) { 2205 continue; 2206 } 2207 if (lp->new_reserved == 0) { 2208 continue; 2209 } 2210 lp->new_reserved -= 1; 2211 if (lp->new_reserved != 0) { 2212 more_to_do++; 2213 continue; 2214 } 2215 tgt = lp->ini_map_idx - 1; 2216 FCPARAM(isp)->isp_ini_map[tgt] = 0; 2217 lp->ini_map_idx = 0; 2218 lp->state = FC_PORTDB_STATE_NIL; 2219 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, 2220 "Gone Device Timeout"); 2221 isp_make_gone(isp, tgt); 2222 } 2223 if (more_to_do) { 2224 isp->isp_osinfo.gdt = timeout(isp_gdt, isp, hz); 2225 } else { 2226 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2227 "stopping Gone Device Timer"); 2228 isp->isp_osinfo.gdt_running = 0; 2229 } 2230 ISP_UNLOCK(isp); 2231 } 2232 2233 /* 2234 * Loop Down Timer Function- when the loop goes down, a timer is started, 2235 * and after it expires we come here and take all probational devices that 2236 * the OS knows about and tell the OS that they've gone away. 2237 * 2238 * We don't clear the devices out of our port database because, when the loop 2239 * comes back up, we have to do some actual cleanup with the chip at that 2240 * point (implicit PLOGO, e.g., to get the chip's port database state right). 2241 */ 2242 static void 2243 isp_ldt(void *arg) 2244 { 2245 ispsoftc_t *isp = arg; 2246 fcportdb_t *lp; 2247 int dbidx, tgt; 2248 2249 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired"); 2250 ISP_LOCK(isp); 2251 2252 /* 2253 * Notify the OS of all targets that we now consider to have departed.
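* Each such target is announced via isp_make_gone(), which posts AC_LOST_DEVICE on a wildcard-lun path for that target.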
2254 */ 2255 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2256 lp = &FCPARAM(isp)->portdb[dbidx]; 2257 2258 if (lp->state != FC_PORTDB_STATE_PROBATIONAL) { 2259 continue; 2260 } 2261 if (lp->ini_map_idx == 0) { 2262 continue; 2263 } 2264 2265 /* 2266 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST! 2267 */ 2268 2269 /* 2270 * Mark that we've announced that this device is gone.... 2271 */ 2272 lp->reserved = 1; 2273 2274 /* 2275 * but *don't* change the state of the entry. Just clear 2276 * any target id stuff and announce to CAM that the 2277 * device is gone. This way any necessary PLOGO stuff 2278 * will happen when loop comes back up. 2279 */ 2280 2281 tgt = lp->ini_map_idx - 1; 2282 FCPARAM(isp)->isp_ini_map[tgt] = 0; 2283 lp->ini_map_idx = 0; 2284 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, 2285 "Loop Down Timeout"); 2286 isp_make_gone(isp, tgt); 2287 } 2288 2289 /* 2290 * The loop down timer has expired. Wake up the kthread 2291 * to notice that fact (or make it false). 2292 */ 2293 isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1; 2294 #if __FreeBSD_version < 500000 2295 wakeup(&isp->isp_osinfo.kproc); 2296 #else 2297 #ifdef ISP_SMPLOCK 2298 cv_signal(&isp->isp_osinfo.kthread_cv); 2299 #else 2300 wakeup(&isp->isp_osinfo.kthread_cv); 2301 #endif 2302 #endif 2303 ISP_UNLOCK(isp); 2304 } 2305 2306 static void 2307 isp_kthread(void *arg) 2308 { 2309 ispsoftc_t *isp = arg; 2310 int slp = 0; 2311 #if __FreeBSD_version < 500000 2312 int s; 2313 2314 s = splcam(); 2315 #else 2316 #ifdef ISP_SMPLOCK 2317 mtx_lock(&isp->isp_lock); 2318 #else 2319 mtx_lock(&Giant); 2320 #endif 2321 #endif 2322 /* 2323 * The first loop is for our usage where we have yet to have 2324 * gotten good fibre channel state. 2325 */ 2326 for (;;) { 2327 int wasfrozen, lb, lim; 2328 2329 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2330 "isp_kthread: checking FC state"); 2331 isp->isp_osinfo.mbox_sleep_ok = 1; 2332 lb = isp_fc_runstate(isp, 250000); 2333 isp->isp_osinfo.mbox_sleep_ok = 0; 2334 if (lb) { 2335 /* 2336 * Increment loop down time by the last sleep interval 2337 */ 2338 isp->isp_osinfo.loop_down_time += slp; 2339 2340 if (lb < 0) { 2341 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2342 "kthread: FC loop not up (down count %d)", 2343 isp->isp_osinfo.loop_down_time); 2344 } else { 2345 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2346 "kthread: FC got to %d (down count %d)", 2347 lb, isp->isp_osinfo.loop_down_time); 2348 } 2349 2350 2351 /* 2352 * If we've never seen loop up and we've waited longer 2353 * than quickboot time, or we've seen loop up but we've 2354 * waited longer than loop_down_limit, give up and go 2355 * to sleep until loop comes up. 
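* Otherwise pick a progressively longer polling interval (1, 5, 10, 20 and finally 30 seconds) as the accumulated loop down time grows.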
2356 */ 2357 if (FCPARAM(isp)->loop_seen_once == 0) { 2358 lim = isp_quickboot_time; 2359 } else { 2360 lim = isp->isp_osinfo.loop_down_limit; 2361 } 2362 if (isp->isp_osinfo.loop_down_time >= lim) { 2363 isp_freeze_loopdown(isp, "loop limit hit"); 2364 slp = 0; 2365 } else if (isp->isp_osinfo.loop_down_time < 10) { 2366 slp = 1; 2367 } else if (isp->isp_osinfo.loop_down_time < 30) { 2368 slp = 5; 2369 } else if (isp->isp_osinfo.loop_down_time < 60) { 2370 slp = 10; 2371 } else if (isp->isp_osinfo.loop_down_time < 120) { 2372 slp = 20; 2373 } else { 2374 slp = 30; 2375 } 2376 2377 } else { 2378 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2379 "isp_kthread: FC state OK"); 2380 isp->isp_osinfo.loop_down_time = 0; 2381 slp = 0; 2382 } 2383 2384 /* 2385 * If we'd frozen the simq, unfreeze it now so that CAM 2386 * can start sending us commands. If the FC state isn't 2387 * okay yet, they'll hit that in isp_start which will 2388 * freeze the queue again. 2389 */ 2390 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 2391 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 2392 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 2393 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2394 "isp_kthread: releasing simq"); 2395 ISPLOCK_2_CAMLOCK(isp); 2396 xpt_release_simq(isp->isp_sim, 1); 2397 CAMLOCK_2_ISPLOCK(isp); 2398 } 2399 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2400 "isp_kthread: sleep time %d", slp); 2401 #if __FreeBSD_version < 500000 2402 tsleep(&isp->isp_osinfo.kproc, PRIBIO, "ispf", 2403 slp * hz); 2404 #else 2405 #ifdef ISP_SMPLOCK 2406 cv_timed_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock, 2407 slp * hz); 2408 #else 2409 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "ispf", 2410 slp * hz); 2411 #endif 2412 #endif 2413 /* 2414 * If slp is zero, we're waking up for the first time after 2415 * things have been okay. In this case, we set a deferral state 2416 * for all commands and delay hysteresis seconds before starting 2417 * the FC state evaluation. This gives the loop/fabric a chance 2418 * to settle. 2419 */ 2420 if (slp == 0 && isp->isp_osinfo.hysteresis) { 2421 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2422 "isp_kthread: sleep hysteresis tick time %d", 2423 isp->isp_osinfo.hysteresis * hz); 2424 (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT", 2425 (isp->isp_osinfo.hysteresis * hz)); 2426 } 2427 } 2428 } 2429 2430 static void 2431 isp_action(struct cam_sim *sim, union ccb *ccb) 2432 { 2433 int bus, tgt, error, lim; 2434 ispsoftc_t *isp; 2435 struct ccb_trans_settings *cts; 2436 2437 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2438 2439 isp = (ispsoftc_t *)cam_sim_softc(sim); 2440 ccb->ccb_h.sim_priv.entries[0].field = 0; 2441 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2442 if (isp->isp_state != ISP_RUNSTATE && 2443 ccb->ccb_h.func_code == XPT_SCSI_IO) { 2444 CAMLOCK_2_ISPLOCK(isp); 2445 isp_init(isp); 2446 if (isp->isp_state != ISP_INITSTATE) { 2447 ISP_UNLOCK(isp); 2448 /* 2449 * Lie. Say it was a selection timeout. 2450 */ 2451 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 2452 xpt_freeze_devq(ccb->ccb_h.path, 1); 2453 xpt_done(ccb); 2454 return; 2455 } 2456 isp->isp_state = ISP_RUNSTATE; 2457 ISPLOCK_2_CAMLOCK(isp); 2458 } 2459 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2460 2461 2462 switch (ccb->ccb_h.func_code) { 2463 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2464 /* 2465 * Do a couple of preliminary checks... 
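* a CDB pointer, if supplied, must not be a physical address, and (under DIAGNOSTIC) the target and lun must be within this HBA's limits.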
2466 */ 2467 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2468 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2469 ccb->ccb_h.status = CAM_REQ_INVALID; 2470 xpt_done(ccb); 2471 break; 2472 } 2473 } 2474 #ifdef DIAGNOSTIC 2475 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2476 xpt_print(ccb->ccb_h.path, "invalid target\n"); 2477 ccb->ccb_h.status = CAM_PATH_INVALID; 2478 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2479 xpt_print(ccb->ccb_h.path, "invalid lun\n"); 2480 ccb->ccb_h.status = CAM_PATH_INVALID; 2481 } 2482 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2483 xpt_done(ccb); 2484 break; 2485 } 2486 #endif 2487 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 2488 CAMLOCK_2_ISPLOCK(isp); 2489 error = isp_start((XS_T *) ccb); 2490 switch (error) { 2491 case CMD_QUEUED: 2492 XS_CMD_S_CLEAR(ccb); 2493 ISPLOCK_2_CAMLOCK(isp); 2494 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2495 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2496 int ms = ccb->ccb_h.timeout; 2497 if (ms == CAM_TIME_DEFAULT) { 2498 ms = 60*1000; 2499 } 2500 ccb->ccb_h.timeout_ch = 2501 timeout(isp_watchdog, ccb, isp_mstohz(ms)); 2502 } else { 2503 callout_handle_init(&ccb->ccb_h.timeout_ch); 2504 } 2505 break; 2506 case CMD_RQLATER: 2507 /* 2508 * This can only happen for Fibre Channel 2509 */ 2510 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 2511 2512 /* 2513 * Handle initial and subsequent loop down cases 2514 */ 2515 if (FCPARAM(isp)->loop_seen_once == 0) { 2516 lim = isp_quickboot_time; 2517 } else { 2518 lim = isp->isp_osinfo.loop_down_limit; 2519 } 2520 if (isp->isp_osinfo.loop_down_time >= lim) { 2521 isp_prt(isp, ISP_LOGDEBUG0, 2522 "%d.%d downtime (%d) > lim (%d)", 2523 XS_TGT(ccb), XS_LUN(ccb), 2524 isp->isp_osinfo.loop_down_time, lim); 2525 ccb->ccb_h.status = 2526 CAM_SEL_TIMEOUT|CAM_DEV_QFRZN; 2527 xpt_freeze_devq(ccb->ccb_h.path, 1); 2528 ISPLOCK_2_CAMLOCK(isp); 2529 xpt_done(ccb); 2530 break; 2531 } 2532 isp_prt(isp, ISP_LOGDEBUG0, 2533 "%d.%d retry later", XS_TGT(ccb), XS_LUN(ccb)); 2534 /* 2535 * Otherwise, retry in a while. 2536 */ 2537 ISPLOCK_2_CAMLOCK(isp); 2538 cam_freeze_devq(ccb->ccb_h.path); 2539 cam_release_devq(ccb->ccb_h.path, 2540 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); 2541 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2542 xpt_done(ccb); 2543 break; 2544 case CMD_EAGAIN: 2545 ISPLOCK_2_CAMLOCK(isp); 2546 cam_freeze_devq(ccb->ccb_h.path); 2547 cam_release_devq(ccb->ccb_h.path, 2548 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); 2549 xpt_done(ccb); 2550 break; 2551 case CMD_COMPLETE: 2552 isp_done((struct ccb_scsiio *) ccb); 2553 ISPLOCK_2_CAMLOCK(isp); 2554 break; 2555 default: 2556 ISPLOCK_2_CAMLOCK(isp); 2557 isp_prt(isp, ISP_LOGERR, 2558 "What's this? 
0x%x at %d in file %s", 2559 error, __LINE__, __FILE__); 2560 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2561 xpt_done(ccb); 2562 } 2563 break; 2564 2565 #ifdef ISP_TARGET_MODE 2566 case XPT_EN_LUN: /* Enable LUN as a target */ 2567 { 2568 int seq, i; 2569 CAMLOCK_2_ISPLOCK(isp); 2570 seq = isp_en_lun(isp, ccb); 2571 if (seq < 0) { 2572 ISPLOCK_2_CAMLOCK(isp); 2573 xpt_done(ccb); 2574 break; 2575 } 2576 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) { 2577 uint32_t isr; 2578 uint16_t sema, mbox; 2579 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2580 isp_intr(isp, isr, sema, mbox); 2581 } 2582 DELAY(1000); 2583 } 2584 ISPLOCK_2_CAMLOCK(isp); 2585 break; 2586 } 2587 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2588 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2589 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2590 { 2591 tstate_t *tptr = 2592 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2593 if (tptr == NULL) { 2594 ccb->ccb_h.status = CAM_LUN_INVALID; 2595 xpt_done(ccb); 2596 break; 2597 } 2598 ccb->ccb_h.sim_priv.entries[0].field = 0; 2599 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2600 ccb->ccb_h.flags = 0; 2601 2602 CAMLOCK_2_ISPLOCK(isp); 2603 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2604 /* 2605 * Note that the command itself may not be done- 2606 * it may not even have had the first CTIO sent. 2607 */ 2608 tptr->atio_count++; 2609 isp_prt(isp, ISP_LOGTDEBUG0, 2610 "Put FREE ATIO, lun %d, count now %d", 2611 ccb->ccb_h.target_lun, tptr->atio_count); 2612 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2613 sim_links.sle); 2614 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2615 tptr->inot_count++; 2616 isp_prt(isp, ISP_LOGTDEBUG0, 2617 "Put FREE INOT, lun %d, count now %d", 2618 ccb->ccb_h.target_lun, tptr->inot_count); 2619 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2620 sim_links.sle); 2621 } else { 2622 isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");; 2623 } 2624 rls_lun_statep(isp, tptr); 2625 ccb->ccb_h.status = CAM_REQ_INPROG; 2626 ISPLOCK_2_CAMLOCK(isp); 2627 break; 2628 } 2629 case XPT_CONT_TARGET_IO: 2630 { 2631 CAMLOCK_2_ISPLOCK(isp); 2632 isp_target_start_ctio(isp, ccb); 2633 ISPLOCK_2_CAMLOCK(isp); 2634 break; 2635 } 2636 #endif 2637 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2638 2639 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2640 tgt = ccb->ccb_h.target_id; 2641 tgt |= (bus << 16); 2642 2643 CAMLOCK_2_ISPLOCK(isp); 2644 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2645 ISPLOCK_2_CAMLOCK(isp); 2646 if (error) { 2647 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2648 } else { 2649 ccb->ccb_h.status = CAM_REQ_CMP; 2650 } 2651 xpt_done(ccb); 2652 break; 2653 case XPT_ABORT: /* Abort the specified CCB */ 2654 { 2655 union ccb *accb = ccb->cab.abort_ccb; 2656 CAMLOCK_2_ISPLOCK(isp); 2657 switch (accb->ccb_h.func_code) { 2658 #ifdef ISP_TARGET_MODE 2659 case XPT_ACCEPT_TARGET_IO: 2660 case XPT_IMMED_NOTIFY: 2661 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2662 break; 2663 case XPT_CONT_TARGET_IO: 2664 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2665 ccb->ccb_h.status = CAM_UA_ABORT; 2666 break; 2667 #endif 2668 case XPT_SCSI_IO: 2669 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2670 if (error) { 2671 ccb->ccb_h.status = CAM_UA_ABORT; 2672 } else { 2673 ccb->ccb_h.status = CAM_REQ_CMP; 2674 } 2675 break; 2676 default: 2677 ccb->ccb_h.status = CAM_REQ_INVALID; 2678 break; 2679 } 2680 ISPLOCK_2_CAMLOCK(isp); 2681 xpt_done(ccb); 2682 break; 2683 } 2684 #ifdef CAM_NEW_TRAN_CODE 2685 
#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2686 #else 2687 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2688 #endif 2689 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2690 cts = &ccb->cts; 2691 if (!IS_CURRENT_SETTINGS(cts)) { 2692 ccb->ccb_h.status = CAM_REQ_INVALID; 2693 xpt_done(ccb); 2694 break; 2695 } 2696 tgt = cts->ccb_h.target_id; 2697 CAMLOCK_2_ISPLOCK(isp); 2698 if (IS_SCSI(isp)) { 2699 #ifndef CAM_NEW_TRAN_CODE 2700 sdparam *sdp = isp->isp_param; 2701 uint16_t *dptr; 2702 2703 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2704 2705 sdp += bus; 2706 /* 2707 * We always update (internally) from goal_flags 2708 * so any request to change settings just gets 2709 * vectored to that location. 2710 */ 2711 dptr = &sdp->isp_devparam[tgt].goal_flags; 2712 2713 /* 2714 * Note that these operations affect the 2715 * goal flags (goal_flags)- not 2716 * the current state flags. Then we mark 2717 * things so that the next operation to 2718 * this HBA will cause the update to occur. 2719 */ 2720 if (cts->valid & CCB_TRANS_DISC_VALID) { 2721 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2722 *dptr |= DPARM_DISC; 2723 } else { 2724 *dptr &= ~DPARM_DISC; 2725 } 2726 } 2727 if (cts->valid & CCB_TRANS_TQ_VALID) { 2728 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2729 *dptr |= DPARM_TQING; 2730 } else { 2731 *dptr &= ~DPARM_TQING; 2732 } 2733 } 2734 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2735 switch (cts->bus_width) { 2736 case MSG_EXT_WDTR_BUS_16_BIT: 2737 *dptr |= DPARM_WIDE; 2738 break; 2739 default: 2740 *dptr &= ~DPARM_WIDE; 2741 } 2742 } 2743 /* 2744 * Any SYNC RATE of nonzero and SYNC_OFFSET 2745 * of nonzero will cause us to go to the 2746 * selected (from NVRAM) maximum value for 2747 * this device. At a later point, we'll 2748 * allow finer control. 2749 */ 2750 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2751 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2752 (cts->sync_offset > 0)) { 2753 *dptr |= DPARM_SYNC; 2754 } else { 2755 *dptr &= ~DPARM_SYNC; 2756 } 2757 *dptr |= DPARM_SAFE_DFLT; 2758 #else 2759 struct ccb_trans_settings_scsi *scsi = 2760 &cts->proto_specific.scsi; 2761 struct ccb_trans_settings_spi *spi = 2762 &cts->xport_specific.spi; 2763 sdparam *sdp = isp->isp_param; 2764 uint16_t *dptr; 2765 2766 if (spi->valid == 0 && scsi->valid == 0) { 2767 ISPLOCK_2_CAMLOCK(isp); 2768 ccb->ccb_h.status = CAM_REQ_CMP; 2769 xpt_done(ccb); 2770 break; 2771 } 2772 2773 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2774 sdp += bus; 2775 /* 2776 * We always update (internally) from goal_flags 2777 * so any request to change settings just gets 2778 * vectored to that location.
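* The chip itself is not touched here; dev_update is set below so that the next command to the HBA pushes the new goal settings out.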
2779 */ 2780 dptr = &sdp->isp_devparam[tgt].goal_flags; 2781 2782 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2783 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2784 *dptr |= DPARM_DISC; 2785 else 2786 *dptr &= ~DPARM_DISC; 2787 } 2788 2789 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2790 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2791 *dptr |= DPARM_TQING; 2792 else 2793 *dptr &= ~DPARM_TQING; 2794 } 2795 2796 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2797 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2798 *dptr |= DPARM_WIDE; 2799 else 2800 *dptr &= ~DPARM_WIDE; 2801 } 2802 2803 /* 2804 * XXX: FIX ME 2805 */ 2806 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2807 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2808 (spi->sync_period && spi->sync_offset)) { 2809 *dptr |= DPARM_SYNC; 2810 /* 2811 * XXX: CHECK FOR LEGALITY 2812 */ 2813 sdp->isp_devparam[tgt].goal_period = 2814 spi->sync_period; 2815 sdp->isp_devparam[tgt].goal_offset = 2816 spi->sync_offset; 2817 } else { 2818 *dptr &= ~DPARM_SYNC; 2819 } 2820 #endif 2821 isp_prt(isp, ISP_LOGDEBUG0, 2822 "SET (%d.%d.%d) to flags %x off %x per %x", 2823 bus, tgt, cts->ccb_h.target_lun, 2824 sdp->isp_devparam[tgt].goal_flags, 2825 sdp->isp_devparam[tgt].goal_offset, 2826 sdp->isp_devparam[tgt].goal_period); 2827 sdp->isp_devparam[tgt].dev_update = 1; 2828 isp->isp_update |= (1 << bus); 2829 } 2830 ISPLOCK_2_CAMLOCK(isp); 2831 ccb->ccb_h.status = CAM_REQ_CMP; 2832 xpt_done(ccb); 2833 break; 2834 case XPT_GET_TRAN_SETTINGS: 2835 cts = &ccb->cts; 2836 tgt = cts->ccb_h.target_id; 2837 CAMLOCK_2_ISPLOCK(isp); 2838 if (IS_FC(isp)) { 2839 #ifndef CAM_NEW_TRAN_CODE 2840 /* 2841 * a lot of normal SCSI things don't make sense. 2842 */ 2843 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2844 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2845 /* 2846 * How do you measure the width of a high 2847 * speed serial bus? Well, in bytes. 2848 * 2849 * Offset and period make no sense, though, so we set 2850 * (above) a 'base' transfer speed to be gigabit. 
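* (The base transfer speed CAM actually sees is the one reported from the XPT_PATH_INQ handler further down.)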
2851 */ 2852 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2853 #else 2854 fcparam *fcp = isp->isp_param; 2855 struct ccb_trans_settings_scsi *scsi = 2856 &cts->proto_specific.scsi; 2857 struct ccb_trans_settings_fc *fc = 2858 &cts->xport_specific.fc; 2859 2860 cts->protocol = PROTO_SCSI; 2861 cts->protocol_version = SCSI_REV_2; 2862 cts->transport = XPORT_FC; 2863 cts->transport_version = 0; 2864 2865 scsi->valid = CTS_SCSI_VALID_TQ; 2866 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 2867 fc->valid = CTS_FC_VALID_SPEED; 2868 if (fcp->isp_gbspeed == 2) { 2869 fc->bitrate = 200000; 2870 } else { 2871 fc->bitrate = 100000; 2872 } 2873 if (tgt > 0 && tgt < MAX_FC_TARG) { 2874 fcportdb_t *lp = &fcp->portdb[tgt]; 2875 fc->wwnn = lp->node_wwn; 2876 fc->wwpn = lp->port_wwn; 2877 fc->port = lp->portid; 2878 fc->valid |= CTS_FC_VALID_WWNN | 2879 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2880 } 2881 #endif 2882 } else { 2883 #ifdef CAM_NEW_TRAN_CODE 2884 struct ccb_trans_settings_scsi *scsi = 2885 &cts->proto_specific.scsi; 2886 struct ccb_trans_settings_spi *spi = 2887 &cts->xport_specific.spi; 2888 #endif 2889 sdparam *sdp = isp->isp_param; 2890 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2891 uint16_t dval, pval, oval; 2892 2893 sdp += bus; 2894 2895 if (IS_CURRENT_SETTINGS(cts)) { 2896 sdp->isp_devparam[tgt].dev_refresh = 1; 2897 isp->isp_update |= (1 << bus); 2898 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2899 NULL); 2900 dval = sdp->isp_devparam[tgt].actv_flags; 2901 oval = sdp->isp_devparam[tgt].actv_offset; 2902 pval = sdp->isp_devparam[tgt].actv_period; 2903 } else { 2904 dval = sdp->isp_devparam[tgt].nvrm_flags; 2905 oval = sdp->isp_devparam[tgt].nvrm_offset; 2906 pval = sdp->isp_devparam[tgt].nvrm_period; 2907 } 2908 2909 #ifndef CAM_NEW_TRAN_CODE 2910 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2911 2912 if (dval & DPARM_DISC) { 2913 cts->flags |= CCB_TRANS_DISC_ENB; 2914 } 2915 if (dval & DPARM_TQING) { 2916 cts->flags |= CCB_TRANS_TAG_ENB; 2917 } 2918 if (dval & DPARM_WIDE) { 2919 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2920 } else { 2921 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2922 } 2923 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2924 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2925 2926 if ((dval & DPARM_SYNC) && oval != 0) { 2927 cts->sync_period = pval; 2928 cts->sync_offset = oval; 2929 cts->valid |= 2930 CCB_TRANS_SYNC_RATE_VALID | 2931 CCB_TRANS_SYNC_OFFSET_VALID; 2932 } 2933 #else 2934 cts->protocol = PROTO_SCSI; 2935 cts->protocol_version = SCSI_REV_2; 2936 cts->transport = XPORT_SPI; 2937 cts->transport_version = 2; 2938 2939 spi->valid = 0; 2940 scsi->valid = 0; 2941 spi->flags = 0; 2942 scsi->flags = 0; 2943 if (dval & DPARM_DISC) { 2944 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2945 } 2946 if ((dval & DPARM_SYNC) && oval && pval) { 2947 spi->sync_offset = oval; 2948 spi->sync_period = pval; 2949 } else { 2950 spi->sync_offset = 0; 2951 spi->sync_period = 0; 2952 } 2953 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2954 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2955 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2956 if (dval & DPARM_WIDE) { 2957 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2958 } else { 2959 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2960 } 2961 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2962 scsi->valid = CTS_SCSI_VALID_TQ; 2963 if (dval & DPARM_TQING) { 2964 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2965 } 2966 spi->valid |= CTS_SPI_VALID_DISC; 2967 } 2968 #endif 2969 isp_prt(isp, ISP_LOGDEBUG0, 2970 "GET %s (%d.%d.%d) to flags %x off %x per %x", 2971 
IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 2972 bus, tgt, cts->ccb_h.target_lun, dval, oval, pval); 2973 } 2974 ISPLOCK_2_CAMLOCK(isp); 2975 ccb->ccb_h.status = CAM_REQ_CMP; 2976 xpt_done(ccb); 2977 break; 2978 2979 case XPT_CALC_GEOMETRY: 2980 #if __FreeBSD_version < 500000 2981 { 2982 struct ccb_calc_geometry *ccg; 2983 u_int32_t secs_per_cylinder; 2984 u_int32_t size_mb; 2985 2986 ccg = &ccb->ccg; 2987 if (ccg->block_size == 0) { 2988 ccb->ccb_h.status = CAM_REQ_INVALID; 2989 xpt_done(ccb); 2990 break; 2991 } 2992 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2993 if (size_mb > 1024) { 2994 ccg->heads = 255; 2995 ccg->secs_per_track = 63; 2996 } else { 2997 ccg->heads = 64; 2998 ccg->secs_per_track = 32; 2999 } 3000 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3001 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3002 ccb->ccb_h.status = CAM_REQ_CMP; 3003 xpt_done(ccb); 3004 break; 3005 } 3006 #else 3007 { 3008 cam_calc_geometry(&ccb->ccg, /*extended*/1); 3009 xpt_done(ccb); 3010 break; 3011 } 3012 #endif 3013 case XPT_RESET_BUS: /* Reset the specified bus */ 3014 bus = cam_sim_bus(sim); 3015 CAMLOCK_2_ISPLOCK(isp); 3016 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 3017 ISPLOCK_2_CAMLOCK(isp); 3018 if (error) 3019 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 3020 else { 3021 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 3022 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 3023 else if (isp->isp_path != NULL) 3024 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 3025 ccb->ccb_h.status = CAM_REQ_CMP; 3026 } 3027 xpt_done(ccb); 3028 break; 3029 3030 case XPT_TERM_IO: /* Terminate the I/O process */ 3031 ccb->ccb_h.status = CAM_REQ_INVALID; 3032 xpt_done(ccb); 3033 break; 3034 3035 case XPT_PATH_INQ: /* Path routing inquiry */ 3036 { 3037 struct ccb_pathinq *cpi = &ccb->cpi; 3038 3039 cpi->version_num = 1; 3040 #ifdef ISP_TARGET_MODE 3041 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3042 #else 3043 cpi->target_sprt = 0; 3044 #endif 3045 cpi->hba_eng_cnt = 0; 3046 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 3047 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 3048 cpi->bus_id = cam_sim_bus(sim); 3049 if (IS_FC(isp)) { 3050 cpi->hba_misc = PIM_NOBUSRESET; 3051 /* 3052 * Because our loop ID can shift from time to time, 3053 * make our initiator ID out of range of our bus. 3054 */ 3055 cpi->initiator_id = cpi->max_target + 1; 3056 3057 /* 3058 * Set base transfer capabilities for Fibre Channel. 3059 * Technically not correct because we don't know 3060 * what media we're running on top of- but we'll 3061 * look good if we always say 100MB/s. 
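* (or 200MB/s when the chip reports a 2Gb link, which is what the isp_gbspeed check just below is for).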
3062 */ 3063 if (FCPARAM(isp)->isp_gbspeed == 2) 3064 cpi->base_transfer_speed = 200000; 3065 else 3066 cpi->base_transfer_speed = 100000; 3067 cpi->hba_inquiry = PI_TAG_ABLE; 3068 #ifdef CAM_NEW_TRAN_CODE 3069 cpi->transport = XPORT_FC; 3070 cpi->transport_version = 0; 3071 #endif 3072 } else { 3073 sdparam *sdp = isp->isp_param; 3074 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 3075 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3076 cpi->hba_misc = 0; 3077 cpi->initiator_id = sdp->isp_initiator_id; 3078 cpi->base_transfer_speed = 3300; 3079 #ifdef CAM_NEW_TRAN_CODE 3080 cpi->transport = XPORT_SPI; 3081 cpi->transport_version = 2; 3082 #endif 3083 } 3084 #ifdef CAM_NEW_TRAN_CODE 3085 cpi->protocol = PROTO_SCSI; 3086 cpi->protocol_version = SCSI_REV_2; 3087 #endif 3088 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3089 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 3090 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3091 cpi->unit_number = cam_sim_unit(sim); 3092 cpi->ccb_h.status = CAM_REQ_CMP; 3093 xpt_done(ccb); 3094 break; 3095 } 3096 default: 3097 ccb->ccb_h.status = CAM_REQ_INVALID; 3098 xpt_done(ccb); 3099 break; 3100 } 3101 } 3102 3103 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 3104 3105 void 3106 isp_done(struct ccb_scsiio *sccb) 3107 { 3108 ispsoftc_t *isp = XS_ISP(sccb); 3109 3110 if (XS_NOERR(sccb)) 3111 XS_SETERR(sccb, CAM_REQ_CMP); 3112 3113 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 3114 (sccb->scsi_status != SCSI_STATUS_OK)) { 3115 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 3116 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 3117 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 3118 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 3119 } else { 3120 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 3121 } 3122 } 3123 3124 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3125 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3126 isp_prt(isp, ISP_LOGDEBUG0, 3127 "target %d lun %d CAM status 0x%x SCSI status 0x%x", 3128 XS_TGT(sccb), XS_LUN(sccb), sccb->ccb_h.status, 3129 sccb->scsi_status); 3130 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 3131 sccb->ccb_h.status |= CAM_DEV_QFRZN; 3132 xpt_freeze_devq(sccb->ccb_h.path, 1); 3133 } 3134 } 3135 3136 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 3137 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3138 xpt_print(sccb->ccb_h.path, 3139 "cam completion status 0x%x\n", sccb->ccb_h.status); 3140 } 3141 3142 XS_CMD_S_DONE(sccb); 3143 if (XS_CMD_WDOG_P(sccb) == 0) { 3144 untimeout(isp_watchdog, sccb, sccb->ccb_h.timeout_ch); 3145 if (XS_CMD_GRACE_P(sccb)) { 3146 isp_prt(isp, ISP_LOGDEBUG2, 3147 "finished command on borrowed time"); 3148 } 3149 XS_CMD_S_CLEAR(sccb); 3150 ISPLOCK_2_CAMLOCK(isp); 3151 xpt_done((union ccb *) sccb); 3152 CAMLOCK_2_ISPLOCK(isp); 3153 } 3154 } 3155 3156 int 3157 isp_async(ispsoftc_t *isp, ispasync_t cmd, void *arg) 3158 { 3159 int bus, rv = 0; 3160 static const char prom[] = 3161 "PortID 0x%06x handle 0x%x role %s %s\n" 3162 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3163 static const char prom2[] = 3164 "PortID 0x%06x handle 0x%x role %s %s tgt %u\n" 3165 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3166 char *msg = NULL; 3167 target_id_t tgt; 3168 fcportdb_t *lp; 3169 struct cam_path *tmppath; 3170 3171 switch (cmd) { 3172 case ISPASYNC_NEW_TGT_PARAMS: 3173 { 3174 #ifdef CAM_NEW_TRAN_CODE 3175 struct ccb_trans_settings_scsi *scsi; 3176 struct ccb_trans_settings_spi *spi; 3177 #endif 3178 int flags, tgt; 3179 sdparam *sdp = isp->isp_param; 3180 struct 
ccb_trans_settings cts; 3181 3182 memset(&cts, 0, sizeof (struct ccb_trans_settings)); 3183 3184 tgt = *((int *)arg); 3185 bus = (tgt >> 16) & 0xffff; 3186 tgt &= 0xffff; 3187 sdp += bus; 3188 ISPLOCK_2_CAMLOCK(isp); 3189 if (xpt_create_path(&tmppath, NULL, 3190 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), 3191 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 3192 CAMLOCK_2_ISPLOCK(isp); 3193 isp_prt(isp, ISP_LOGWARN, 3194 "isp_async cannot make temp path for %d.%d", 3195 tgt, bus); 3196 rv = -1; 3197 break; 3198 } 3199 CAMLOCK_2_ISPLOCK(isp); 3200 flags = sdp->isp_devparam[tgt].actv_flags; 3201 #ifdef CAM_NEW_TRAN_CODE 3202 cts.type = CTS_TYPE_CURRENT_SETTINGS; 3203 cts.protocol = PROTO_SCSI; 3204 cts.transport = XPORT_SPI; 3205 3206 scsi = &cts.proto_specific.scsi; 3207 spi = &cts.xport_specific.spi; 3208 3209 if (flags & DPARM_TQING) { 3210 scsi->valid |= CTS_SCSI_VALID_TQ; 3211 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3212 } 3213 3214 if (flags & DPARM_DISC) { 3215 spi->valid |= CTS_SPI_VALID_DISC; 3216 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3217 } 3218 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3219 if (flags & DPARM_WIDE) { 3220 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3221 } else { 3222 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3223 } 3224 if (flags & DPARM_SYNC) { 3225 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3226 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3227 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 3228 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 3229 } 3230 #else 3231 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 3232 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3233 if (flags & DPARM_DISC) { 3234 cts.flags |= CCB_TRANS_DISC_ENB; 3235 } 3236 if (flags & DPARM_TQING) { 3237 cts.flags |= CCB_TRANS_TAG_ENB; 3238 } 3239 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 3240 cts.bus_width = (flags & DPARM_WIDE) == 0? 
3241 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; 3242 cts.sync_period = sdp->isp_devparam[tgt].actv_period; 3243 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; 3244 if (flags & DPARM_SYNC) { 3245 cts.valid |= 3246 CCB_TRANS_SYNC_RATE_VALID | 3247 CCB_TRANS_SYNC_OFFSET_VALID; 3248 } 3249 #endif 3250 isp_prt(isp, ISP_LOGDEBUG2, 3251 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 3252 bus, tgt, sdp->isp_devparam[tgt].actv_period, 3253 sdp->isp_devparam[tgt].actv_offset, flags); 3254 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 3255 ISPLOCK_2_CAMLOCK(isp); 3256 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 3257 xpt_free_path(tmppath); 3258 CAMLOCK_2_ISPLOCK(isp); 3259 break; 3260 } 3261 case ISPASYNC_BUS_RESET: 3262 bus = *((int *)arg); 3263 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 3264 bus); 3265 if (bus > 0 && isp->isp_path2) { 3266 ISPLOCK_2_CAMLOCK(isp); 3267 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 3268 CAMLOCK_2_ISPLOCK(isp); 3269 } else if (isp->isp_path) { 3270 ISPLOCK_2_CAMLOCK(isp); 3271 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 3272 CAMLOCK_2_ISPLOCK(isp); 3273 } 3274 break; 3275 case ISPASYNC_LIP: 3276 if (msg == NULL) { 3277 msg = "LIP Received"; 3278 } 3279 /* FALLTHROUGH */ 3280 case ISPASYNC_LOOP_RESET: 3281 if (msg == NULL) { 3282 msg = "LOOP Reset"; 3283 } 3284 /* FALLTHROUGH */ 3285 case ISPASYNC_LOOP_DOWN: 3286 if (msg == NULL) { 3287 msg = "LOOP Down"; 3288 } 3289 if (isp->isp_path) { 3290 isp_freeze_loopdown(isp, msg); 3291 } 3292 if (isp->isp_osinfo.ldt_running == 0) { 3293 isp->isp_osinfo.ldt = timeout(isp_ldt, isp, 3294 isp->isp_osinfo.loop_down_limit * hz); 3295 isp->isp_osinfo.ldt_running = 1; 3296 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3297 "starting Loop Down Timer"); 3298 } 3299 isp_prt(isp, ISP_LOGINFO, msg); 3300 break; 3301 case ISPASYNC_LOOP_UP: 3302 /* 3303 * Now we just note that Loop has come up. We don't 3304 * actually do anything because we're waiting for a 3305 * Change Notify before activating the FC cleanup 3306 * thread to look at the state of the loop again. 
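* It is the Change Notify handler below that wakes isp_kthread (via the kthread_cv) so it can re-evaluate loop state.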
3307 */ 3308 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 3309 break; 3310 case ISPASYNC_DEV_ARRIVED: 3311 lp = arg; 3312 lp->reserved = 0; 3313 if ((isp->isp_role & ISP_ROLE_INITIATOR) && 3314 (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) { 3315 int dbidx = lp - FCPARAM(isp)->portdb; 3316 int i; 3317 3318 for (i = 0; i < MAX_FC_TARG; i++) { 3319 if (i >= FL_ID && i <= SNS_ID) { 3320 continue; 3321 } 3322 if (FCPARAM(isp)->isp_ini_map[i] == 0) { 3323 break; 3324 } 3325 } 3326 if (i < MAX_FC_TARG) { 3327 FCPARAM(isp)->isp_ini_map[i] = dbidx + 1; 3328 lp->ini_map_idx = i + 1; 3329 } else { 3330 isp_prt(isp, ISP_LOGWARN, "out of target ids"); 3331 isp_dump_portdb(isp); 3332 } 3333 } 3334 if (lp->ini_map_idx) { 3335 tgt = lp->ini_map_idx - 1; 3336 isp_prt(isp, ISP_LOGCONFIG, prom2, 3337 lp->portid, lp->handle, 3338 roles[lp->roles], "arrived at", tgt, 3339 (uint32_t) (lp->node_wwn >> 32), 3340 (uint32_t) lp->node_wwn, 3341 (uint32_t) (lp->port_wwn >> 32), 3342 (uint32_t) lp->port_wwn); 3343 isp_make_here(isp, tgt); 3344 } else { 3345 isp_prt(isp, ISP_LOGCONFIG, prom, 3346 lp->portid, lp->handle, 3347 roles[lp->roles], "arrived", 3348 (uint32_t) (lp->node_wwn >> 32), 3349 (uint32_t) lp->node_wwn, 3350 (uint32_t) (lp->port_wwn >> 32), 3351 (uint32_t) lp->port_wwn); 3352 } 3353 break; 3354 case ISPASYNC_DEV_CHANGED: 3355 lp = arg; 3356 if (isp_change_is_bad) { 3357 lp->state = FC_PORTDB_STATE_NIL; 3358 if (lp->ini_map_idx) { 3359 tgt = lp->ini_map_idx - 1; 3360 FCPARAM(isp)->isp_ini_map[tgt] = 0; 3361 lp->ini_map_idx = 0; 3362 isp_prt(isp, ISP_LOGCONFIG, prom3, 3363 lp->portid, tgt, "change is bad"); 3364 isp_make_gone(isp, tgt); 3365 } else { 3366 isp_prt(isp, ISP_LOGCONFIG, prom, 3367 lp->portid, lp->handle, 3368 roles[lp->roles], 3369 "changed and departed", 3370 (uint32_t) (lp->node_wwn >> 32), 3371 (uint32_t) lp->node_wwn, 3372 (uint32_t) (lp->port_wwn >> 32), 3373 (uint32_t) lp->port_wwn); 3374 } 3375 } else { 3376 lp->portid = lp->new_portid; 3377 lp->roles = lp->new_roles; 3378 if (lp->ini_map_idx) { 3379 int t = lp->ini_map_idx - 1; 3380 FCPARAM(isp)->isp_ini_map[t] = 3381 (lp - FCPARAM(isp)->portdb) + 1; 3382 tgt = lp->ini_map_idx - 1; 3383 isp_prt(isp, ISP_LOGCONFIG, prom2, 3384 lp->portid, lp->handle, 3385 roles[lp->roles], "changed at", tgt, 3386 (uint32_t) (lp->node_wwn >> 32), 3387 (uint32_t) lp->node_wwn, 3388 (uint32_t) (lp->port_wwn >> 32), 3389 (uint32_t) lp->port_wwn); 3390 } else { 3391 isp_prt(isp, ISP_LOGCONFIG, prom, 3392 lp->portid, lp->handle, 3393 roles[lp->roles], "changed", 3394 (uint32_t) (lp->node_wwn >> 32), 3395 (uint32_t) lp->node_wwn, 3396 (uint32_t) (lp->port_wwn >> 32), 3397 (uint32_t) lp->port_wwn); 3398 } 3399 } 3400 break; 3401 case ISPASYNC_DEV_STAYED: 3402 lp = arg; 3403 if (lp->ini_map_idx) { 3404 tgt = lp->ini_map_idx - 1; 3405 isp_prt(isp, ISP_LOGCONFIG, prom2, 3406 lp->portid, lp->handle, 3407 roles[lp->roles], "stayed at", tgt, 3408 (uint32_t) (lp->node_wwn >> 32), 3409 (uint32_t) lp->node_wwn, 3410 (uint32_t) (lp->port_wwn >> 32), 3411 (uint32_t) lp->port_wwn); 3412 } else { 3413 isp_prt(isp, ISP_LOGCONFIG, prom, 3414 lp->portid, lp->handle, 3415 roles[lp->roles], "stayed", 3416 (uint32_t) (lp->node_wwn >> 32), 3417 (uint32_t) lp->node_wwn, 3418 (uint32_t) (lp->port_wwn >> 32), 3419 (uint32_t) lp->port_wwn); 3420 } 3421 break; 3422 case ISPASYNC_DEV_GONE: 3423 lp = arg; 3424 /* 3425 * If this has a virtual target and we haven't marked it 3426 * that we're going to have isp_gdt tell the OS it's gone, 3427 * set the isp_gdt timer running on it. 
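* The zombie entry then gets gone_device_time seconds of grace (counted down once a second by isp_gdt) before the OS is told.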
3428 * 3429 * If it isn't marked that isp_gdt is going to get rid of it, 3430 * announce that it's gone. 3431 */ 3432 if (lp->ini_map_idx && lp->reserved == 0) { 3433 lp->reserved = 1; 3434 lp->new_reserved = isp->isp_osinfo.gone_device_time; 3435 lp->state = FC_PORTDB_STATE_ZOMBIE; 3436 if (isp->isp_osinfo.gdt_running == 0) { 3437 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3438 "starting Gone Device Timer"); 3439 isp->isp_osinfo.gdt = timeout(isp_gdt, isp, hz); 3440 isp->isp_osinfo.gdt_running = 1; 3441 } 3442 tgt = lp->ini_map_idx - 1; 3443 isp_prt(isp, ISP_LOGCONFIG, prom2, 3444 lp->portid, lp->handle, 3445 roles[lp->roles], "gone zombie at", tgt, 3446 (uint32_t) (lp->node_wwn >> 32), 3447 (uint32_t) lp->node_wwn, 3448 (uint32_t) (lp->port_wwn >> 32), 3449 (uint32_t) lp->port_wwn); 3450 } else if (lp->reserved == 0) { 3451 isp_prt(isp, ISP_LOGCONFIG, prom, 3452 lp->portid, lp->handle, 3453 roles[lp->roles], "departed", 3454 (uint32_t) (lp->node_wwn >> 32), 3455 (uint32_t) lp->node_wwn, 3456 (uint32_t) (lp->port_wwn >> 32), 3457 (uint32_t) lp->port_wwn); 3458 } 3459 break; 3460 case ISPASYNC_CHANGE_NOTIFY: 3461 { 3462 char *msg; 3463 if (arg == ISPASYNC_CHANGE_PDB) { 3464 msg = "Port Database Changed"; 3465 } else if (arg == ISPASYNC_CHANGE_SNS) { 3466 msg = "Name Server Database Changed"; 3467 } else { 3468 msg = "Other Change Notify"; 3469 } 3470 /* 3471 * If the loop down timer is running, cancel it. 3472 */ 3473 if (isp->isp_osinfo.ldt_running) { 3474 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3475 "Stopping Loop Down Timer"); 3476 isp->isp_osinfo.ldt_running = 0; 3477 untimeout(isp_ldt, isp, isp->isp_osinfo.ldt); 3478 callout_handle_init(&isp->isp_osinfo.ldt); 3479 } 3480 isp_prt(isp, ISP_LOGINFO, msg); 3481 isp_freeze_loopdown(isp, msg); 3482 #if __FreeBSD_version < 500000 3483 wakeup(&isp->isp_osinfo.kproc); 3484 #else 3485 #ifdef ISP_SMPLOCK 3486 cv_signal(&isp->isp_osinfo.kthread_cv); 3487 #else 3488 wakeup(&isp->isp_osinfo.kthread_cv); 3489 #endif 3490 #endif 3491 break; 3492 } 3493 #ifdef ISP_TARGET_MODE 3494 case ISPASYNC_TARGET_NOTIFY: 3495 { 3496 tmd_notify_t *nt = arg; 3497 isp_prt(isp, ISP_LOGALL, 3498 "target notify code 0x%x", nt->nt_ncode); 3499 break; 3500 } 3501 case ISPASYNC_TARGET_ACTION: 3502 switch (((isphdr_t *)arg)->rqs_entry_type) { 3503 default: 3504 isp_prt(isp, ISP_LOGWARN, 3505 "event 0x%x for unhandled target action", 3506 ((isphdr_t *)arg)->rqs_entry_type); 3507 break; 3508 case RQSTYPE_NOTIFY: 3509 if (IS_SCSI(isp)) { 3510 rv = isp_handle_platform_notify_scsi(isp, 3511 (in_entry_t *) arg); 3512 } else { 3513 rv = isp_handle_platform_notify_fc(isp, 3514 (in_fcentry_t *) arg); 3515 } 3516 break; 3517 case RQSTYPE_ATIO: 3518 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 3519 break; 3520 case RQSTYPE_ATIO2: 3521 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3522 break; 3523 case RQSTYPE_CTIO3: 3524 case RQSTYPE_CTIO2: 3525 case RQSTYPE_CTIO: 3526 rv = isp_handle_platform_ctio(isp, arg); 3527 break; 3528 case RQSTYPE_ENABLE_LUN: 3529 case RQSTYPE_MODIFY_LUN: 3530 isp_ledone(isp, (lun_entry_t *) arg); 3531 break; 3532 } 3533 break; 3534 #endif 3535 case ISPASYNC_FW_CRASH: 3536 { 3537 uint16_t mbox1, mbox6; 3538 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3539 if (IS_DUALBUS(isp)) { 3540 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3541 } else { 3542 mbox6 = 0; 3543 } 3544 isp_prt(isp, ISP_LOGERR, 3545 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3546 mbox6, mbox1); 3547 #ifdef ISP_FW_CRASH_DUMP 3548 /* 3549 * XXX: really need a thread to do this 
right. 3550 */ 3551 if (IS_FC(isp)) { 3552 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3553 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3554 isp_freeze_loopdown(isp, "f/w crash"); 3555 isp_fw_dump(isp); 3556 } 3557 isp_reinit(isp); 3558 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3559 #endif 3560 break; 3561 } 3562 case ISPASYNC_UNHANDLED_RESPONSE: 3563 break; 3564 default: 3565 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3566 break; 3567 } 3568 return (rv); 3569 } 3570 3571 3572 /* 3573 * Locks are held before coming here. 3574 */ 3575 void 3576 isp_uninit(ispsoftc_t *isp) 3577 { 3578 if (IS_24XX(isp)) { 3579 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); 3580 } else { 3581 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 3582 } 3583 ISP_DISABLE_INTS(isp); 3584 } 3585 3586 void 3587 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 3588 { 3589 va_list ap; 3590 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3591 return; 3592 } 3593 printf("%s: ", device_get_nameunit(isp->isp_dev)); 3594 va_start(ap, fmt); 3595 vprintf(fmt, ap); 3596 va_end(ap); 3597 printf("\n"); 3598 } 3599 3600 uint64_t 3601 isp_nanotime_sub(struct timespec *b, struct timespec *a) 3602 { 3603 uint64_t elapsed; 3604 struct timespec x = *b; 3605 timespecsub(&x, a); 3606 elapsed = GET_NANOSEC(&x); 3607 if (elapsed == 0) 3608 elapsed++; 3609 return (elapsed); 3610 } 3611 3612 int 3613 isp_mbox_acquire(ispsoftc_t *isp) 3614 { 3615 if (isp->isp_osinfo.mboxbsy) { 3616 return (1); 3617 } else { 3618 isp->isp_osinfo.mboxcmd_done = 0; 3619 isp->isp_osinfo.mboxbsy = 1; 3620 return (0); 3621 } 3622 } 3623 3624 void 3625 isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) 3626 { 3627 unsigned int usecs = mbp->timeout; 3628 unsigned int max, olim, ilim; 3629 3630 if (usecs == 0) { 3631 usecs = MBCMD_DEFAULT_TIMEOUT; 3632 } 3633 max = isp->isp_mbxwrk0 + 1; 3634 3635 if (isp->isp_osinfo.mbox_sleep_ok) { 3636 unsigned int ms = (usecs + 999) / 1000; 3637 3638 isp->isp_osinfo.mbox_sleep_ok = 0; 3639 isp->isp_osinfo.mbox_sleeping = 1; 3640 for (olim = 0; olim < max; olim++) { 3641 #if __FreeBSD_version < 500000 || !defined(ISP_SMPLOCK) 3642 tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", 3643 isp_mstohz(ms)); 3644 #else 3645 msleep(&isp->isp_mbxworkp, &isp->isp_mtx, PRIBIO, 3646 "ispmbx_sleep", isp_mstohz(ms)); 3647 #endif 3648 if (isp->isp_osinfo.mboxcmd_done) { 3649 break; 3650 } 3651 } 3652 isp->isp_osinfo.mbox_sleep_ok = 1; 3653 isp->isp_osinfo.mbox_sleeping = 0; 3654 } else { 3655 for (olim = 0; olim < max; olim++) { 3656 for (ilim = 0; ilim < usecs; ilim += 100) { 3657 uint32_t isr; 3658 uint16_t sema, mbox; 3659 if (isp->isp_osinfo.mboxcmd_done) { 3660 break; 3661 } 3662 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 3663 isp_intr(isp, isr, sema, mbox); 3664 if (isp->isp_osinfo.mboxcmd_done) { 3665 break; 3666 } 3667 } 3668 USEC_DELAY(100); 3669 } 3670 if (isp->isp_osinfo.mboxcmd_done) { 3671 break; 3672 } 3673 } 3674 } 3675 if (isp->isp_osinfo.mboxcmd_done == 0) { 3676 isp_prt(isp, ISP_LOGWARN, 3677 "%s Mailbox Command (0x%x) Timeout (%uus)", 3678 isp->isp_osinfo.mbox_sleep_ok? 
"Interrupting" : "Polled", 3679 isp->isp_lastmbxcmd, usecs); 3680 mbp->param[0] = MBOX_TIMEOUT; 3681 isp->isp_osinfo.mboxcmd_done = 1; 3682 } 3683 } 3684 3685 void 3686 isp_mbox_notify_done(ispsoftc_t *isp) 3687 { 3688 if (isp->isp_osinfo.mbox_sleeping) { 3689 wakeup(&isp->isp_mbxworkp); 3690 } 3691 isp->isp_osinfo.mboxcmd_done = 1; 3692 } 3693 3694 void 3695 isp_mbox_release(ispsoftc_t *isp) 3696 { 3697 isp->isp_osinfo.mboxbsy = 0; 3698 } 3699 3700 int 3701 isp_mstohz(int ms) 3702 { 3703 int hz; 3704 struct timeval t; 3705 t.tv_sec = ms / 1000; 3706 t.tv_usec = (ms % 1000) * 1000; 3707 hz = tvtohz(&t); 3708 if (hz < 0) { 3709 hz = 0x7fffffff; 3710 } 3711 if (hz == 0) { 3712 hz = 1; 3713 } 3714 return (hz); 3715 } 3716