/*-
 * Copyright (c) 1997-2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/isp/isp_freebsd.h>
#include <sys/unistd.h>
#include <sys/kthread.h>
#include <machine/stdarg.h>	/* for use by isp_prt below */
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/ioccom.h>
#include <dev/isp/isp_ioctl.h>
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#else
#include <sys/devicestat.h>
#endif
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025
#define	CAM_NEW_TRAN_CODE	1
#endif


MODULE_VERSION(isp, 1);
MODULE_DEPEND(isp, cam, 1, 1, 1);
int isp_announced = 0;
int isp_fabric_hysteresis = 5;
int isp_loop_down_limit = 300;	/* default loop down limit */
int isp_change_is_bad = 0;	/* "changed" devices are bad */
int isp_quickboot_time = 15;	/* don't wait more than N secs for loop up */
int isp_gone_device_time = 30;	/* grace time before reporting device lost */
static const char *roles[4] = {
    "(none)", "Target", "Initiator", "Target/Initiator"
};
static const char prom3[] =
    "PortID 0x%06x Departed from Target %u because of %s";

static void isp_freeze_loopdown(ispsoftc_t *, char *);
static d_ioctl_t ispioctl;
static void isp_intr_enable(void *);
static void isp_cam_async(void *, uint32_t, struct cam_path *, void *);
static void isp_poll(struct cam_sim *);
static timeout_t isp_watchdog;
static timeout_t isp_ldt;
static void isp_kthread(void *);
static void isp_action(struct cam_sim *, union ccb *);

#if __FreeBSD_version < 700000
ispfwfunc *isp_get_firmware_p = NULL;
#endif

#if __FreeBSD_version < 500000
#define	ISP_CDEV_MAJOR	248
static struct cdevsw isp_cdevsw = {
	/* open */	nullopen,
	/* close */	nullclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	ispioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"isp",
	/* maj */	ISP_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	D_TAPE,
};
#define	isp_sysctl_update(x)	do { ; } while (0)
#else
static struct cdevsw isp_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_NEEDGIANT,
	.d_ioctl = ispioctl,
	.d_name = "isp",
};
static void isp_sysctl_update(ispsoftc_t *);
#endif

static ispsoftc_t *isplist = NULL;

void
isp_attach(ispsoftc_t *isp)
{
	int primary, secondary;
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;

	/*
	 * Establish (in case of 12X0) which bus is the primary.
	 */

	primary = 0;
	secondary = 1;

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(isp->isp_maxcmds);
	if (devq == NULL) {
		return;
	}

	/*
	 * Construct our SIM entry.
	 */
	ISPLOCK_2_CAMLOCK(isp);
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}
	CAMLOCK_2_ISPLOCK(isp);

	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
	isp->isp_osinfo.ehook.ich_arg = isp;
	ISPLOCK_2_CAMLOCK(isp);
	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
		cam_sim_free(sim, TRUE);
		CAMLOCK_2_ISPLOCK(isp);
		isp_prt(isp, ISP_LOGERR,
		    "could not establish interrupt enable hook");
		return;
	}

	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
		cam_sim_free(sim, TRUE);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, TRUE);
		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	xpt_setup_ccb(&csa.ccb_h, path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = isp_cam_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	CAMLOCK_2_ISPLOCK(isp);
	isp->isp_sim = sim;
	isp->isp_path = path;
	/*
	 * Create a kernel thread for fibre channel instances. We
	 * don't have dual channel FC cards.
	 */
	if (IS_FC(isp)) {
		ISPLOCK_2_CAMLOCK(isp);
#if __FreeBSD_version >= 500000
		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
		    RFHIGHPID, 0, "%s: fc_thrd",
		    device_get_nameunit(isp->isp_dev)))
#else
		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
		    "%s: fc_thrd", device_get_nameunit(isp->isp_dev)))
#endif
		{
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			isp_prt(isp, ISP_LOGERR, "could not create kthread");
			return;
		}
		CAMLOCK_2_ISPLOCK(isp);
		/*
		 * We start by being "loop down" if we have an initiator role
		 */
		if (isp->isp_role & ISP_ROLE_INITIATOR) {
			isp_freeze_loopdown(isp, "isp_attach");
			isp->isp_osinfo.ldt =
			    timeout(isp_ldt, isp, isp_quickboot_time * hz);
			isp->isp_osinfo.ldt_running = 1;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "Starting Initial Loop Down Timer");
		}
	}

	/*
	 * If we have a second channel, construct SIM entry for that.
	 */
	if (IS_DUALBUS(isp)) {
		ISPLOCK_2_CAMLOCK(isp);
		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
		if (sim == NULL) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_simq_free(devq);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			return;
		}
		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			return;
		}

		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
			xpt_free_path(isp->isp_path);
			xpt_bus_deregister(cam_sim_path(sim));
			cam_sim_free(sim, TRUE);
			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
			CAMLOCK_2_ISPLOCK(isp);
			return;
		}

		xpt_setup_ccb(&csa.ccb_h, path, 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = isp_cam_async;
		csa.callback_arg = sim;
		xpt_action((union ccb *)&csa);
		CAMLOCK_2_ISPLOCK(isp);
		isp->isp_sim2 = sim;
		isp->isp_path2 = path;
	}

	/*
	 * Create device nodes
	 */
	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));

	if (isp->isp_role != ISP_ROLE_NONE) {
		isp->isp_state = ISP_RUNSTATE;
		ISP_ENABLE_INTS(isp);
	}
	if (isplist == NULL) {
		isplist = isp;
	} else {
		ispsoftc_t *tmp = isplist;
		while (tmp->isp_osinfo.next) {
			tmp = tmp->isp_osinfo.next;
		}
		tmp->isp_osinfo.next = isp;
	}
	isp_sysctl_update(isp);
}

static void
isp_freeze_loopdown(ispsoftc_t *isp, char *msg)
{
	if (isp->isp_osinfo.simqfrozen == 0) {
		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
		ISPLOCK_2_CAMLOCK(isp);
		xpt_freeze_simq(isp->isp_sim, 1);
		CAMLOCK_2_ISPLOCK(isp);
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
	}
}


#if __FreeBSD_version < 500000
#define	_DEV	dev_t
#define	_IOP	struct proc
#else
#define	_IOP	struct thread
#define	_DEV	struct cdev *
#endif

static int
ispioctl(_DEV dev, u_long c, caddr_t addr, int flags, _IOP *td)
{
	ispsoftc_t *isp;
	int nr, retval = ENOTTY;

	isp = isplist;
	while (isp) {
		if (minor(dev) == device_get_unit(isp->isp_dev)) {
			break;
		}
		isp = isp->isp_osinfo.next;
	}
	if (isp == NULL)
		return (ENXIO);

	switch (c) {
#ifdef ISP_FW_CRASH_DUMP
	case ISP_GET_FW_CRASH_DUMP:
		if (IS_FC(isp)) {
			uint16_t *ptr = FCPARAM(isp)->isp_dump_data;
			size_t sz;

			retval = 0;
			if (IS_2200(isp)) {
				sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else {
				sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			ISP_LOCK(isp);
			if (ptr && *ptr) {
				void *uaddr = *((void **) addr);
				if (copyout(ptr, uaddr, sz)) {
					retval = EFAULT;
				} else {
					*ptr = 0;
				}
			} else {
				retval = ENXIO;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FORCE_CRASH_DUMP:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			isp_freeze_loopdown(isp,
			    "ispioctl(ISP_FORCE_CRASH_DUMP)");
"ispioctl(ISP_FORCE_CRASH_DUMP)"); 360 isp_fw_dump(isp); 361 isp_reinit(isp); 362 ISP_UNLOCK(isp); 363 retval = 0; 364 } 365 break; 366 #endif 367 case ISP_SDBLEV: 368 { 369 int olddblev = isp->isp_dblev; 370 isp->isp_dblev = *(int *)addr; 371 *(int *)addr = olddblev; 372 retval = 0; 373 break; 374 } 375 case ISP_GETROLE: 376 *(int *)addr = isp->isp_role; 377 retval = 0; 378 break; 379 case ISP_SETROLE: 380 nr = *(int *)addr; 381 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 382 retval = EINVAL; 383 break; 384 } 385 *(int *)addr = isp->isp_role; 386 isp->isp_role = nr; 387 /* FALLTHROUGH */ 388 case ISP_RESETHBA: 389 ISP_LOCK(isp); 390 isp_reinit(isp); 391 ISP_UNLOCK(isp); 392 retval = 0; 393 break; 394 case ISP_RESCAN: 395 if (IS_FC(isp)) { 396 ISP_LOCK(isp); 397 if (isp_fc_runstate(isp, 5 * 1000000)) { 398 retval = EIO; 399 } else { 400 retval = 0; 401 } 402 ISP_UNLOCK(isp); 403 } 404 break; 405 case ISP_FC_LIP: 406 if (IS_FC(isp)) { 407 ISP_LOCK(isp); 408 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 409 retval = EIO; 410 } else { 411 retval = 0; 412 } 413 ISP_UNLOCK(isp); 414 } 415 break; 416 case ISP_FC_GETDINFO: 417 { 418 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 419 fcportdb_t *lp; 420 421 if (IS_SCSI(isp)) { 422 break; 423 } 424 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 425 retval = EINVAL; 426 break; 427 } 428 ISP_LOCK(isp); 429 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 430 if (lp->state == FC_PORTDB_STATE_VALID) { 431 ifc->role = lp->roles; 432 ifc->loopid = lp->handle; 433 ifc->portid = lp->portid; 434 ifc->node_wwn = lp->node_wwn; 435 ifc->port_wwn = lp->port_wwn; 436 retval = 0; 437 } else { 438 retval = ENODEV; 439 } 440 ISP_UNLOCK(isp); 441 break; 442 } 443 case ISP_GET_STATS: 444 { 445 isp_stats_t *sp = (isp_stats_t *) addr; 446 447 MEMZERO(sp, sizeof (*sp)); 448 sp->isp_stat_version = ISP_STATS_VERSION; 449 sp->isp_type = isp->isp_type; 450 sp->isp_revision = isp->isp_revision; 451 ISP_LOCK(isp); 452 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 453 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 454 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 455 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 456 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 457 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 458 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 459 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 460 ISP_UNLOCK(isp); 461 retval = 0; 462 break; 463 } 464 case ISP_CLR_STATS: 465 ISP_LOCK(isp); 466 isp->isp_intcnt = 0; 467 isp->isp_intbogus = 0; 468 isp->isp_intmboxc = 0; 469 isp->isp_intoasync = 0; 470 isp->isp_rsltccmplt = 0; 471 isp->isp_fphccmplt = 0; 472 isp->isp_rscchiwater = 0; 473 isp->isp_fpcchiwater = 0; 474 ISP_UNLOCK(isp); 475 retval = 0; 476 break; 477 case ISP_FC_GETHINFO: 478 { 479 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 480 MEMZERO(hba, sizeof (*hba)); 481 482 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 483 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 484 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 485 if (IS_FC(isp)) { 486 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 487 hba->fc_scsi_supported = 1; 488 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 489 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 490 hba->nvram_node_wwn = FCPARAM(isp)->isp_wwnn_nvram; 491 hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram; 492 hba->active_node_wwn = ISP_NODEWWN(isp); 493 hba->active_port_wwn = ISP_PORTWWN(isp); 494 } 495 retval = 0; 496 break; 497 } 498 case ISP_GET_FC_PARAM: 499 { 500 
		struct isp_fc_param *f = (struct isp_fc_param *) addr;

		if (IS_SCSI(isp)) {
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			f->parameter = FCPARAM(isp)->isp_execthrottle;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
				f->parameter = 1;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			f->parameter = FCPARAM(isp)->isp_loopid;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	case ISP_SET_FC_PARAM:
	{
		struct isp_fc_param *f = (struct isp_fc_param *) addr;
		uint32_t param = f->parameter;

		if (IS_SCSI(isp)) {
			break;
		}
		f->parameter = 0;
		if (strcmp(f->param_name, "framelength") == 0) {
			if (param != 512 && param != 1024 && param != 2048) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_maxfrmlen = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "exec_throttle") == 0) {
			if (param < 16 || param > 255) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_execthrottle = param;
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "fullduplex") == 0) {
			if (param != 0 && param != 1) {
				retval = EINVAL;
				break;
			}
			if (param) {
				FCPARAM(isp)->isp_fwoptions |=
				    ICBOPT_FULL_DUPLEX;
			} else {
				FCPARAM(isp)->isp_fwoptions &=
				    ~ICBOPT_FULL_DUPLEX;
			}
			retval = 0;
			break;
		}
		if (strcmp(f->param_name, "loopid") == 0) {
			if (param < 0 || param > 125) {
				retval = EINVAL;
				break;
			}
			FCPARAM(isp)->isp_loopid = param;
			retval = 0;
			break;
		}
		retval = EINVAL;
		break;
	}
	case ISP_TSK_MGMT:
	{
		int needmarker;
		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
		uint16_t loopid;
		mbreg_t mbs;

		if (IS_SCSI(isp)) {
			break;
		}

		memset(&mbs, 0, sizeof (mbs));
		needmarker = retval = 0;
		loopid = fct->loopid;
		if (FCPARAM(isp)->isp_2klogin == 0) {
			loopid <<= 8;
		}
		switch (fct->action) {
		case IPT_CLEAR_ACA:
			mbs.param[0] = MBOX_CLEAR_ACA;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			break;
		case IPT_TARGET_RESET:
			mbs.param[0] = MBOX_TARGET_RESET;
			mbs.param[1] = loopid;
			needmarker = 1;
			break;
		case IPT_LUN_RESET:
			mbs.param[0] = MBOX_LUN_RESET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case IPT_CLEAR_TASK_SET:
			mbs.param[0] = MBOX_CLEAR_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		case IPT_ABORT_TASK_SET:
			mbs.param[0] = MBOX_ABORT_TASK_SET;
			mbs.param[1] = loopid;
			mbs.param[2] = fct->lun;
			needmarker = 1;
			break;
		default:
			retval = EINVAL;
			break;
		}
		if (retval == 0) {
			ISP_LOCK(isp);
			if (needmarker) {
				isp->isp_sendmarker |= 1;
			}
			retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
			ISP_UNLOCK(isp);
			if (retval)
				retval = EIO;
		}
		break;
	}
	default:
		break;
	}
	return (retval);
}

#if __FreeBSD_version >= 500000
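/*
 * Export the per-instance FC sysctl nodes (the world wide node/port name
 * strings plus the tunable loop_down_limit and gone_device_time values)
 * under this device's sysctl tree. SCSI instances have nothing to export.
 */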
static void
isp_sysctl_update(ispsoftc_t *isp)
{
	struct sysctl_ctx_list *ctx =
	    device_get_sysctl_ctx(isp->isp_osinfo.dev);
	struct sysctl_oid *tree =
	    device_get_sysctl_tree(isp->isp_osinfo.dev);

	if (IS_SCSI(isp)) {
		return;
	}

	snprintf(isp->isp_osinfo.sysctl_info.fc.wwnn,
	    sizeof (isp->isp_osinfo.sysctl_info.fc.wwnn), "0x%08x%08x",
	    (uint32_t) (ISP_NODEWWN(isp) >> 32), (uint32_t) ISP_NODEWWN(isp));

	snprintf(isp->isp_osinfo.sysctl_info.fc.wwpn,
	    sizeof (isp->isp_osinfo.sysctl_info.fc.wwpn), "0x%08x%08x",
	    (uint32_t) (ISP_PORTWWN(isp) >> 32), (uint32_t) ISP_PORTWWN(isp));

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwnn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwnn, 0,
	    "World Wide Node Name");

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwpn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwpn, 0,
	    "World Wide Port Name");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "loop_down_limit",
	    CTLFLAG_RW, &isp->isp_osinfo.loop_down_limit, 0,
	    "How long to wait for loop to come back up");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "gone_device_time",
	    CTLFLAG_RW, &isp->isp_osinfo.gone_device_time, 0,
	    "How long to wait for a device to reappear");
}
#endif

static void
isp_intr_enable(void *arg)
{
	ispsoftc_t *isp = arg;
	if (isp->isp_role != ISP_ROLE_NONE) {
		ISP_ENABLE_INTS(isp);
	}
	/* Release our hook so that the boot can continue. */
	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
}

/*
 * Put the target mode functions here, because some are inlines
 */

#ifdef ISP_TARGET_MODE

static __inline int is_lun_enabled(ispsoftc_t *, int, lun_id_t);
static __inline int are_any_luns_enabled(ispsoftc_t *, int);
static __inline tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
static __inline void rls_lun_statep(ispsoftc_t *, tstate_t *);
static __inline atio_private_data_t *isp_get_atpd(ispsoftc_t *, int);
static cam_status
create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **);
static void destroy_lun_state(ispsoftc_t *, tstate_t *);
static int isp_en_lun(ispsoftc_t *, union ccb *);
static void isp_ledone(ispsoftc_t *, lun_entry_t *);
static cam_status isp_abort_tgt_ccb(ispsoftc_t *, union ccb *);
static timeout_t isp_refire_putback_atio;
static void isp_complete_ctio(union ccb *);
static void isp_target_putback_atio(union ccb *);
static void isp_target_start_ctio(ispsoftc_t *, union ccb *);
static int isp_handle_platform_atio(ispsoftc_t *, at_entry_t *);
static int isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *);
static int isp_handle_platform_ctio(ispsoftc_t *, void *);
static int isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *);
static int isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *);

static __inline int
is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr;
	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
	if (tptr == NULL) {
		return (0);
	}
	do {
		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
			return (1);
		}
	} while ((tptr = tptr->next) != NULL);
	return (0);
}
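
/*
 * Lun state bookkeeping: lun states (tstate_t) hang off a small per-softc
 * hash table. get_lun_statep()/rls_lun_statep() take and drop a hold
 * reference on an entry, and are_any_luns_enabled() reports whether any
 * lun is still active on the given port.
 */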
static __inline int
are_any_luns_enabled(ispsoftc_t *isp, int port)
{
	int lo, hi;
	if (IS_DUALBUS(isp)) {
		lo = (port * (LUN_HASH_SIZE >> 1));
		hi = lo + (LUN_HASH_SIZE >> 1);
	} else {
		lo = 0;
		hi = LUN_HASH_SIZE;
	}
	for (; lo < hi; lo++) {
		if (isp->isp_osinfo.lun_hash[lo]) {
			return (1);
		}
	}
	return (0);
}

static __inline tstate_t *
get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun)
{
	tstate_t *tptr = NULL;

	if (lun == CAM_LUN_WILDCARD) {
		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
			tptr = &isp->isp_osinfo.tsdflt[bus];
			tptr->hold++;
			return (tptr);
		}
		return (NULL);
	} else {
		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
		if (tptr == NULL) {
			return (NULL);
		}
	}

	do {
		if (tptr->lun == lun && tptr->bus == bus) {
			tptr->hold++;
			return (tptr);
		}
	} while ((tptr = tptr->next) != NULL);
	return (tptr);
}

static __inline void
rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr)
{
	if (tptr->hold)
		tptr->hold--;
}

static __inline atio_private_data_t *
isp_get_atpd(ispsoftc_t *isp, int tag)
{
	atio_private_data_t *atp;
	for (atp = isp->isp_osinfo.atpdp;
	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
		if (atp->tag == tag)
			return (atp);
	}
	return (NULL);
}

static cam_status
create_lun_state(ispsoftc_t *isp, int bus,
    struct cam_path *path, tstate_t **rslt)
{
	cam_status status;
	lun_id_t lun;
	int hfx;
	tstate_t *tptr, *new;

	lun = xpt_path_lun_id(path);
	if (lun < 0) {
		return (CAM_LUN_INVALID);
	}
	if (is_lun_enabled(isp, bus, lun)) {
		return (CAM_LUN_ALRDY_ENA);
	}
	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (new == NULL) {
		return (CAM_RESRC_UNAVAIL);
	}

	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
	    xpt_path_target_id(path), xpt_path_lun_id(path));
	if (status != CAM_REQ_CMP) {
		free(new, M_DEVBUF);
		return (status);
	}
	new->bus = bus;
	new->lun = lun;
	SLIST_INIT(&new->atios);
	SLIST_INIT(&new->inots);
	new->hold = 1;

	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
	tptr = isp->isp_osinfo.lun_hash[hfx];
	if (tptr == NULL) {
		isp->isp_osinfo.lun_hash[hfx] = new;
	} else {
		while (tptr->next)
			tptr = tptr->next;
		tptr->next = new;
	}
	*rslt = new;
	return (CAM_REQ_CMP);
}

static __inline void
destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr)
{
	int hfx;
	tstate_t *lw, *pw;

	if (tptr->hold) {
		return;
	}
	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
	pw = isp->isp_osinfo.lun_hash[hfx];
	if (pw == NULL) {
		return;
	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
		isp->isp_osinfo.lun_hash[hfx] = pw->next;
	} else {
		lw = pw;
		pw = lw->next;
		while (pw) {
			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
				lw->next = pw->next;
				break;
			}
			lw = pw;
			pw = pw->next;
		}
		if (pw == NULL) {
			return;
		}
	}
	free(tptr, M_DEVBUF);
}
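
/*
 * Lun enable/disable is asynchronous with respect to the firmware:
 * isp_en_lun() validates the request, toggles target mode on the bus if
 * needed, and queues an ENABLE/MODIFY LUN request; the CCB is parked in
 * isp_osinfo.leact[] and completed later from isp_ledone() when the
 * firmware response arrives.
 */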
/*
 * Enable luns.
 */
static int
isp_en_lun(ispsoftc_t *isp, union ccb *ccb)
{
	struct ccb_en_lun *cel = &ccb->cel;
	tstate_t *tptr;
	uint32_t seq;
	int bus, cmd, av, wildcard, tm_on;
	lun_id_t lun;
	target_id_t tgt;

	bus = XS_CHANNEL(ccb);
	if (bus > 1) {
		xpt_print(ccb->ccb_h.path, "illegal bus %d\n", bus);
		ccb->ccb_h.status = CAM_PATH_INVALID;
		return (-1);
	}
	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	if (isp->isp_dblev & ISP_LOGTDEBUG0) {
		xpt_print(ccb->ccb_h.path, "%sabling lun 0x%x on channel %d\n",
		    cel->enable? "en" : "dis", lun, bus);
	}

	if ((lun != CAM_LUN_WILDCARD) &&
	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		return (-1);
	}

	if (IS_SCSI(isp)) {
		sdparam *sdp = isp->isp_param;
		sdp += bus;
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != sdp->isp_initiator_id) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return (-1);
		}
	} else {
		/*
		 * There's really no point in doing this yet w/o multi-tid
		 * capability. Even then, it's problematic.
		 */
#if 0
		if (tgt != CAM_TARGET_WILDCARD &&
		    tgt != FCPARAM(isp)->isp_iid) {
			ccb->ccb_h.status = CAM_TID_INVALID;
			return (-1);
		}
#endif
		/*
		 * This is as good a place as any to check f/w capabilities.
		 */
		if (FCPARAM(isp)->isp_tmode == 0) {
			xpt_print(ccb->ccb_h.path,
			    "firmware does not support target mode\n");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return (-1);
		}
		/*
		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
		 * XXX: dork with our already fragile enable/disable code.
		 */
		if (FCPARAM(isp)->isp_sccfw == 0) {
			xpt_print(ccb->ccb_h.path,
			    "firmware not SCCLUN capable\n");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return (-1);
		}
	}

	if (tgt == CAM_TARGET_WILDCARD) {
		if (lun == CAM_LUN_WILDCARD) {
			wildcard = 1;
		} else {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return (-1);
		}
	} else {
		wildcard = 0;
	}

	tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0;

	/*
	 * Next check to see whether this is a target/lun wildcard action.
	 *
	 * If so, we know that we can accept commands for luns that haven't
	 * been enabled yet and send them upstream. Otherwise, we have to
	 * handle them locally (if we see them at all).
	 */

	if (wildcard) {
		tptr = &isp->isp_osinfo.tsdflt[bus];
		if (cel->enable) {
			if (tm_on) {
				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
				return (-1);
			}
			ccb->ccb_h.status =
			    xpt_create_path(&tptr->owner, NULL,
			    xpt_path_path_id(ccb->ccb_h.path),
			    xpt_path_target_id(ccb->ccb_h.path),
			    xpt_path_lun_id(ccb->ccb_h.path));
			if (ccb->ccb_h.status != CAM_REQ_CMP) {
				return (-1);
			}
			SLIST_INIT(&tptr->atios);
			SLIST_INIT(&tptr->inots);
			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
		} else {
			if (tm_on == 0) {
				ccb->ccb_h.status = CAM_REQ_CMP;
				return (-1);
			}
			if (tptr->hold) {
				ccb->ccb_h.status = CAM_SCSI_BUSY;
				return (-1);
			}
			xpt_free_path(tptr->owner);
			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
		}
	}

	/*
	 * Now check to see whether this bus needs to be
	 * enabled/disabled with respect to target mode.
	 */
	av = bus << 31;
	if (cel->enable && tm_on == 0) {
		av |= ENABLE_TARGET_FLAG;
		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
		if (av) {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			if (wildcard) {
				isp->isp_osinfo.tmflags[bus] &=
				    ~TM_WILDCARD_ENABLED;
				xpt_free_path(tptr->owner);
			}
			return (-1);
		}
		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
		xpt_print(ccb->ccb_h.path, "Target Mode Enabled\n");
	} else if (cel->enable == 0 && tm_on && wildcard) {
		if (are_any_luns_enabled(isp, bus)) {
			ccb->ccb_h.status = CAM_SCSI_BUSY;
			return (-1);
		}
		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
		if (av) {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return (-1);
		}
		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
		xpt_print(ccb->ccb_h.path, "Target Mode Disabled\n");
	}

	if (wildcard) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		return (-1);
	}

	/*
	 * Find an empty slot
	 */
	for (seq = 0; seq < NLEACT; seq++) {
		if (isp->isp_osinfo.leact[seq] == 0) {
			break;
		}
	}
	if (seq >= NLEACT) {
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		return (-1);

	}
	isp->isp_osinfo.leact[seq] = ccb;

	if (cel->enable) {
		ccb->ccb_h.status =
		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			isp->isp_osinfo.leact[seq] = 0;
			return (-1);
		}
	} else {
		tptr = get_lun_statep(isp, bus, lun);
		if (tptr == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return (-1);
		}
	}

	if (cel->enable) {
		int c, n, ulun = lun;

		cmd = RQSTYPE_ENABLE_LUN;
		c = DFLT_CMND_CNT;
		n = DFLT_INOT_CNT;
		if (IS_FC(isp) && lun != 0) {
			cmd = RQSTYPE_MODIFY_LUN;
			n = 0;
			/*
			 * For SCC firmware, we only deal with setting
			 * (enabling or modifying) lun 0.
			 */
			ulun = 0;
		}
		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
			rls_lun_statep(isp, tptr);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			return (seq);
		}
	} else {
		int c, n, ulun = lun;

		cmd = -RQSTYPE_MODIFY_LUN;
		c = DFLT_CMND_CNT;
		n = DFLT_INOT_CNT;
		if (IS_FC(isp) && lun != 0) {
			n = 0;
			/*
			 * For SCC firmware, we only deal with setting
			 * (enabling or modifying) lun 0.
			 */
			ulun = 0;
		}
		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
			rls_lun_statep(isp, tptr);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			return (seq);
		}
	}
	rls_lun_statep(isp, tptr);
	xpt_print(ccb->ccb_h.path, "isp_lun_cmd failed\n");
	isp->isp_osinfo.leact[seq] = 0;
	ccb->ccb_h.status = CAM_REQ_CMP_ERR;
	return (-1);
}

static void
isp_ledone(ispsoftc_t *isp, lun_entry_t *lep)
{
	const char lfmt[] = "now %sabled for target mode";
	union ccb *ccb;
	uint32_t seq;
	tstate_t *tptr;
	int av;
	struct ccb_en_lun *cel;

	seq = lep->le_reserved - 1;
	if (seq >= NLEACT) {
		isp_prt(isp, ISP_LOGERR,
		    "seq out of range (%u) in isp_ledone", seq);
		return;
	}
	ccb = isp->isp_osinfo.leact[seq];
	if (ccb == 0) {
		isp_prt(isp, ISP_LOGERR,
		    "no ccb for seq %u in isp_ledone", seq);
		return;
	}
	cel = &ccb->cel;
	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb));
	if (tptr == NULL) {
		xpt_print(ccb->ccb_h.path, "null tptr in isp_ledone\n");
		isp->isp_osinfo.leact[seq] = 0;
		return;
	}

	if (lep->le_status != LUN_OK) {
		xpt_print(ccb->ccb_h.path,
		    "ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status);
err:
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		rls_lun_statep(isp, tptr);
		isp->isp_osinfo.leact[seq] = 0;
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done(ccb);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	} else {
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "isp_ledone: ENABLE/MODIFY done okay");
	}


	if (cel->enable) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print(ccb->ccb_h.path, lfmt, "en");
		rls_lun_statep(isp, tptr);
		isp->isp_osinfo.leact[seq] = 0;
		ISPLOCK_2_CAMLOCK(isp);
		xpt_done(ccb);
		CAMLOCK_2_ISPLOCK(isp);
		return;
	}

	if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) {
		if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb),
		    XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) {
			xpt_print(ccb->ccb_h.path,
			    "isp_ledone: isp_lun_cmd failed\n");
			goto err;
		}
		rls_lun_statep(isp, tptr);
		return;
	}

	xpt_print(ccb->ccb_h.path, lfmt, "dis");
	rls_lun_statep(isp, tptr);
	destroy_lun_state(isp, tptr);
	ccb->ccb_h.status = CAM_REQ_CMP;
	isp->isp_osinfo.leact[seq] = 0;
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done(ccb);
	CAMLOCK_2_ISPLOCK(isp);
	if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) {
		int bus = XS_CHANNEL(ccb);
		av = bus << 31;
		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
		if (av) {
			isp_prt(isp, ISP_LOGWARN,
			    "disable target mode on channel %d failed", bus);
		}
		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
	}
}
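
/*
 * Abort a target mode CCB: find the matching ACCEPT TARGET I/O or
 * IMMEDIATE NOTIFY CCB queued on the lun state and complete it with
 * CAM_REQ_ABORTED.
 */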
static cam_status
isp_abort_tgt_ccb(ispsoftc_t *isp, union ccb *ccb)
{
	tstate_t *tptr;
	struct ccb_hdr_slist *lp;
	struct ccb_hdr *curelm;
	int found, *ctr;
	union ccb *accb = ccb->cab.abort_ccb;

	xpt_print(ccb->ccb_h.path, "aborting ccb %p\n", accb);
	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		int badpath = 0;
		if (IS_FC(isp) && (accb->ccb_h.target_id !=
		    ((fcparam *) isp->isp_param)->isp_loopid)) {
			badpath = 1;
		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
			badpath = 1;
		}
		if (badpath) {
			/*
			 * Being restrictive about target ids is really about
			 * making sure we're aborting for the right multi-tid
			 * path. This doesn't really make much sense at
			 * present.
			 */
#if 0
			return (CAM_PATH_INVALID);
#endif
		}
	}
	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
	if (tptr == NULL) {
		xpt_print(ccb->ccb_h.path, "can't get statep\n");
		return (CAM_PATH_INVALID);
	}
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &tptr->atios;
		ctr = &tptr->atio_count;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &tptr->inots;
		ctr = &tptr->inot_count;
	} else {
		rls_lun_statep(isp, tptr);
		xpt_print(ccb->ccb_h.path, "bad function code %d\n",
		    accb->ccb_h.func_code);
		return (CAM_UA_ABORT);
	}
	curelm = SLIST_FIRST(lp);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(lp, sim_links.sle);
	} else {
		while (curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
				    SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	rls_lun_statep(isp, tptr);
	if (found) {
		(*ctr)--;
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	xpt_print(ccb->ccb_h.path, "ccb %p not found\n", accb);
	return (CAM_PATH_INVALID);
}

static void
isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_scsiio *cso = &ccb->csio;
	uint32_t nxti, optr, handle;
	uint8_t local[QENTRY_LEN];


	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		xpt_print(ccb->ccb_h.path,
		    "Request Queue Overflow in isp_target_start_ctio\n");
		XS_SETERR(ccb, CAM_REQUEUE_REQ);
		goto out;
	}
	memset(local, 0, QENTRY_LEN);

	/*
	 * We're either moving data or completing a command here.
	 */
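	/*
	 * For fibre channel the CTIO2 is built in mode 1 (status and,
	 * optionally, sense only) when there is no data to move, and in
	 * mode 0 (data, with status possibly piggy-backed) otherwise; the
	 * residual is computed against the original ATIO data length.
	 */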
	if (IS_FC(isp)) {
		atio_private_data_t *atp;
		ct2_entry_t *cto = (ct2_entry_t *) local;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
		cto->ct_header.rqs_entry_count = 1;
		if (FCPARAM(isp)->isp_2klogin) {
			((ct2e_entry_t *)cto)->ct_iid = cso->init_id;
		} else {
			cto->ct_iid = cso->init_id;
			if (FCPARAM(isp)->isp_sccfw == 0) {
				cto->ct_lun = ccb->ccb_h.target_lun;
			}
		}

		atp = isp_get_atpd(isp, cso->tag_id);
		if (atp == NULL) {
			xpt_print(ccb->ccb_h.path,
			    "cannot find private data adjunct for tag %x\n",
			    cso->tag_id);
			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
			goto out;
		}

		cto->ct_rxid = cso->tag_id;
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
				cto->ct_resid =
				    atp->orig_datalen - atp->bytes_xfered;
				if (cto->ct_resid < 0) {
					cto->rsp.m1.ct_scsi_status |=
					    CT2_DATA_OVER;
				} else if (cto->ct_resid > 0) {
					cto->rsp.m1.ct_scsi_status |=
					    CT2_DATA_UNDER;
				}
			}
			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
				int m = min(cso->sense_len, MAXRESPLEN);
				memcpy(cto->rsp.m1.ct_resp,
				    &cso->sense_data, m);
				cto->rsp.m1.ct_senselen = m;
				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
			}
		} else {
			cto->ct_flags |= CT2_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT2_DATA_IN;
			} else {
				cto->ct_flags |= CT2_DATA_OUT;
			}
			cto->ct_reloff = atp->bytes_xfered;
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
				cto->ct_resid =
				    atp->orig_datalen -
				    (atp->bytes_xfered + cso->dxfer_len);
				if (cto->ct_resid < 0) {
					cto->rsp.m0.ct_scsi_status |=
					    CT2_DATA_OVER;
				} else if (cto->ct_resid > 0) {
					cto->rsp.m0.ct_scsi_status |=
					    CT2_DATA_UNDER;
				}
			} else {
				atp->last_xframt = cso->dxfer_len;
			}
			/*
			 * If we're sending data and status back together,
			 * we can't also send back sense data as well.
			 */
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}

		if (cto->ct_flags & CT2_SENDSTATUS) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
			    cso->dxfer_len, cto->ct_resid);
			cto->ct_flags |= CT2_CCINCR;
			atp->state = ATPD_STATE_LAST_CTIO;
		} else {
			atp->state = ATPD_STATE_CTIO;
		}
		cto->ct_timeout = 10;
	} else {
		ct_entry_t *cto = (ct_entry_t *) local;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
		cto->ct_tgt = ccb->ccb_h.target_id;
		cto->ct_lun = ccb->ccb_h.target_lun;
		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
		if (AT_HAS_TAG(cso->tag_id)) {
			cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id);
			cto->ct_flags |= CT_TQAE;
		}
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			cto->ct_flags |= CT_NODISC;
		}
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT_NO_DATA;
		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
			cto->ct_scsi_status = cso->scsi_status;
			cto->ct_resid = cso->resid;
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
			    cso->tag_id);
		}
		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		cto->ct_timeout = 10;
	}

	if (isp_save_xs_tgt(isp, ccb, &handle)) {
		xpt_print(ccb->ccb_h.path,
		    "No XFLIST pointers for isp_target_start_ctio\n");
		XS_SETERR(ccb, CAM_REQUEUE_REQ);
		goto out;
	}


	/*
	 * Call the dma setup routines for this entry (and any subsequent
	 * CTIOs) if there's data to move, and then tell the f/w it's got
	 * new things to play with. As with isp_start's usage of DMA setup,
	 * any swizzling is done in the machine dependent layer. Because
	 * of this, we put the request onto the queue area first in native
	 * format.
	 */
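	/*
	 * The handle saved by isp_save_xs_tgt() is stored in the CTIO's
	 * ct_syshandle field so that isp_handle_platform_ctio() can find
	 * this CCB again when the firmware completes the entry.
	 */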
	if (IS_FC(isp)) {
		ct2_entry_t *cto = (ct2_entry_t *) local;
		cto->ct_syshandle = handle;
	} else {
		ct_entry_t *cto = (ct_entry_t *) local;
		cto->ct_syshandle = handle;
	}

	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
	case CMD_QUEUED:
		ISP_ADD_REQUEST(isp, nxti);
		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		return;

	case CMD_EAGAIN:
		XS_SETERR(ccb, CAM_REQUEUE_REQ);
		break;

	default:
		break;
	}
	isp_destroy_tgt_handle(isp, handle);

out:
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done(ccb);
	CAMLOCK_2_ISPLOCK(isp);
}

static void
isp_refire_putback_atio(void *arg)
{
	int s = splcam();
	isp_target_putback_atio(arg);
	splx(s);
}

static void
isp_target_putback_atio(union ccb *ccb)
{
	ispsoftc_t *isp;
	struct ccb_scsiio *cso;
	uint32_t nxti, optr;
	void *qe;

	isp = XS_ISP(ccb);

	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		xpt_print(ccb->ccb_h.path,
		    "isp_target_putback_atio: Request Queue Overflow\n");
		(void) timeout(isp_refire_putback_atio, ccb, 10);
		return;
	}
	memset(qe, 0, QENTRY_LEN);
	cso = &ccb->csio;
	if (IS_FC(isp)) {
		at2_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at2_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		if (FCPARAM(isp)->isp_sccfw) {
			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = cso->tag_id;
		at->at_iid = cso->ccb_h.target_id;
		isp_put_atio2(isp, at, qe);
	} else {
		at_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = cso->init_id;
		at->at_iid |= XS_CHANNEL(ccb) << 7;
		at->at_tgt = cso->ccb_h.target_id;
		at->at_lun = cso->ccb_h.target_lun;
		at->at_status = CT_OK;
		at->at_tag_val = AT_GET_TAG(cso->tag_id);
		at->at_handle = AT_GET_HANDLE(cso->tag_id);
		isp_put_atio(isp, at, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, nxti);
	isp_complete_ctio(ccb);
}

static void
isp_complete_ctio(union ccb *ccb)
{
	ISPLOCK_2_CAMLOCK(isp);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
	CAMLOCK_2_ISPLOCK(isp);
}

/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

static int
isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status, bus, iswildcard;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
		    status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	bus = GET_BUS_VAL(aep->at_iid);
	tptr = get_lun_statep(isp, bus, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
		if (tptr == NULL) {
			/*
			 * Because we can't autofeed sense data back with
			 * a command for parallel SCSI, we can't give back
			 * a CHECK CONDITION. We'll give back a BUSY status
			 * instead. This works out okay because the only
			 * time we should, in fact, get this, is in the
			 * case that somebody configured us without the
			 * blackhole driver, so they get what they deserve.
			 */
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
			return (0);
		}
		iswildcard = 1;
	} else {
		iswildcard = 0;
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print(tptr->owner,
		    "no ATIOS for lun %d from initiator %d on channel %d\n",
		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		rls_lun_statep(isp, tptr);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	tptr->atio_count--;
	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
	    aep->at_lun, tptr->atio_count);
	if (iswildcard) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = GET_IID_VAL(aep->at_iid);
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	/*
	 * Construct a tag 'id' based upon tag value (which may be 0..255)
	 * and the handle (which we have to preserve).
	 */
	AT_MAKE_TAGID(atiop->tag_id, bus, device_get_unit(isp->isp_dev), aep);
	if (aep->at_flags & AT_TQAE) {
		atiop->tag_action = aep->at_tag_type;
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
	    "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}
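
/*
 * Fibre channel flavor of the above: map the (SCC) lun to a lun state,
 * bind an atio_private_data_t adjunct so data transfer progress for this
 * exchange (RX_ID) can be tracked, and hand the CDB up to CAM as an
 * ACCEPT TARGET I/O.
 */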
1686 "nondisc" : "disconnecting"); 1687 rls_lun_statep(isp, tptr); 1688 return (0); 1689 } 1690 1691 static int 1692 isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) 1693 { 1694 lun_id_t lun; 1695 tstate_t *tptr; 1696 struct ccb_accept_tio *atiop; 1697 atio_private_data_t *atp; 1698 1699 /* 1700 * The firmware status (except for the QLTM_SVALID bit) 1701 * indicates why this ATIO was sent to us. 1702 * 1703 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1704 */ 1705 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1706 isp_prt(isp, ISP_LOGWARN, 1707 "bogus atio (0x%x) leaked to platform", aep->at_status); 1708 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1709 return (0); 1710 } 1711 1712 if (FCPARAM(isp)->isp_sccfw) { 1713 lun = aep->at_scclun; 1714 } else { 1715 lun = aep->at_lun; 1716 } 1717 tptr = get_lun_statep(isp, 0, lun); 1718 if (tptr == NULL) { 1719 isp_prt(isp, ISP_LOGTDEBUG0, 1720 "[0x%x] no state pointer for lun %d", aep->at_rxid, lun); 1721 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1722 if (tptr == NULL) { 1723 isp_endcmd(isp, aep, 1724 SCSI_STATUS_CHECK_COND | ECMD_SVALID | 1725 (0x5 << 12) | (0x25 << 16), 0); 1726 return (0); 1727 } 1728 } 1729 1730 atp = isp_get_atpd(isp, 0); 1731 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1732 if (atiop == NULL || atp == NULL) { 1733 1734 /* 1735 * Because we can't autofeed sense data back with 1736 * a command for parallel SCSI, we can't give back 1737 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1738 * instead. This works out okay because the only time we 1739 * should, in fact, get this, is in the case that we've 1740 * run out of ATIOS. 1741 */ 1742 xpt_print(tptr->owner, 1743 "no %s for lun %d from initiator %d\n", 1744 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : 1745 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid); 1746 rls_lun_statep(isp, tptr); 1747 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1748 return (0); 1749 } 1750 atp->state = ATPD_STATE_ATIO; 1751 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1752 tptr->atio_count--; 1753 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1754 lun, tptr->atio_count); 1755 1756 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1757 atiop->ccb_h.target_id = FCPARAM(isp)->isp_loopid; 1758 atiop->ccb_h.target_lun = lun; 1759 } 1760 /* 1761 * We don't get 'suggested' sense data as we do with SCSI cards. 1762 */ 1763 atiop->sense_len = 0; 1764 1765 atiop->init_id = aep->at_iid; 1766 atiop->cdb_len = ATIO2_CDBLEN; 1767 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1768 atiop->ccb_h.status = CAM_CDB_RECVD; 1769 atiop->tag_id = aep->at_rxid; 1770 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1771 case ATIO2_TC_ATTR_SIMPLEQ: 1772 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1773 break; 1774 case ATIO2_TC_ATTR_HEADOFQ: 1775 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1776 break; 1777 case ATIO2_TC_ATTR_ORDERED: 1778 atiop->tag_action = MSG_ORDERED_Q_TAG; 1779 break; 1780 case ATIO2_TC_ATTR_ACAQ: /* ?? 
	case ATIO2_TC_ATTR_UNTAGGED:
	default:
		atiop->tag_action = 0;
		break;
	}
	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;

	atp->tag = atiop->tag_id;
	atp->lun = lun;
	atp->orig_datalen = aep->at_datalen;
	atp->last_xframt = 0;
	atp->bytes_xfered = 0;
	atp->state = ATPD_STATE_CAM;
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done((union ccb*)atiop);

	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
	    lun, aep->at_taskflags, aep->at_datalen);
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_ctio(ispsoftc_t *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;
	uint16_t tval;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find adjunct for %x after I/O",
			    ct->ct_rxid);
			return (0);
		}
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			resid = ct->ct_resid;
			atp->bytes_xfered += (atp->last_xframt - resid);
			atp->last_xframt = 0;
		}
		if (sentstatus || !ok) {
			atp->tag = 0;
		}
		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    resid, sentstatus? "FIN" : "MID");
		tval = ct->ct_rxid;

		/* XXX: should really come after isp_complete_ctio */
		atp->state = ATPD_STATE_PDON;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
		 */
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if (ct->ct_status & QLTM_SVALID) {
			char *sp = (char *)ct;
			sp += CTIO_SENSE_OFFSET;
			ccb->csio.sense_len =
			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
			resid = ct->ct_resid;
		}
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
		    ct->ct_status, ct->ct_flags, resid,
		    sentstatus? "FIN" : "MID");
		tval = ct->ct_fwhandle;
	}
"FIN" : "MID"); 1882 tval = ct->ct_fwhandle; 1883 } 1884 ccb->csio.resid += resid; 1885 1886 /* 1887 * We're here either because intermediate data transfers are done 1888 * and/or the final status CTIO (which may have joined with a 1889 * Data Transfer) is done. 1890 * 1891 * In any case, for this platform, the upper layers figure out 1892 * what to do next, so all we do here is collect status and 1893 * pass information along. Any DMA handles have already been 1894 * freed. 1895 */ 1896 if (notify_cam == 0) { 1897 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1898 return (0); 1899 } 1900 1901 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1902 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1903 1904 if (!ok) { 1905 isp_target_putback_atio(ccb); 1906 } else { 1907 isp_complete_ctio(ccb); 1908 1909 } 1910 return (0); 1911 } 1912 1913 static int 1914 isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inp) 1915 { 1916 return (0); /* XXXX */ 1917 } 1918 1919 static int 1920 isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp) 1921 { 1922 1923 switch (inp->in_status) { 1924 case IN_PORT_LOGOUT: 1925 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1926 inp->in_iid); 1927 break; 1928 case IN_PORT_CHANGED: 1929 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1930 inp->in_iid); 1931 break; 1932 case IN_GLOBAL_LOGO: 1933 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1934 break; 1935 case IN_ABORT_TASK: 1936 { 1937 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1938 struct ccb_immed_notify *inot = NULL; 1939 1940 if (atp) { 1941 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1942 if (tptr) { 1943 inot = (struct ccb_immed_notify *) 1944 SLIST_FIRST(&tptr->inots); 1945 if (inot) { 1946 tptr->inot_count--; 1947 SLIST_REMOVE_HEAD(&tptr->inots, 1948 sim_links.sle); 1949 isp_prt(isp, ISP_LOGTDEBUG0, 1950 "Take FREE INOT count now %d", 1951 tptr->inot_count); 1952 } 1953 } 1954 isp_prt(isp, ISP_LOGWARN, 1955 "abort task RX_ID %x IID %d state %d", 1956 inp->in_seqid, inp->in_iid, atp->state); 1957 } else { 1958 isp_prt(isp, ISP_LOGWARN, 1959 "abort task RX_ID %x from iid %d, state unknown", 1960 inp->in_seqid, inp->in_iid); 1961 } 1962 if (inot) { 1963 inot->initiator_id = inp->in_iid; 1964 inot->sense_len = 0; 1965 inot->message_args[0] = MSG_ABORT_TAG; 1966 inot->message_args[1] = inp->in_seqid & 0xff; 1967 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; 1968 inot->ccb_h.status = CAM_MESSAGE_RECV; 1969 xpt_done((union ccb *)inot); 1970 } 1971 break; 1972 } 1973 default: 1974 break; 1975 } 1976 return (0); 1977 } 1978 #endif 1979 1980 static void 1981 isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) 1982 { 1983 struct cam_sim *sim; 1984 ispsoftc_t *isp; 1985 1986 sim = (struct cam_sim *)cbarg; 1987 isp = (ispsoftc_t *) cam_sim_softc(sim); 1988 switch (code) { 1989 case AC_LOST_DEVICE: 1990 if (IS_SCSI(isp)) { 1991 uint16_t oflags, nflags; 1992 sdparam *sdp = isp->isp_param; 1993 int tgt; 1994 1995 tgt = xpt_path_target_id(path); 1996 if (tgt >= 0) { 1997 sdp += cam_sim_bus(sim); 1998 ISP_LOCK(isp); 1999 nflags = sdp->isp_devparam[tgt].nvrm_flags; 2000 #ifndef ISP_TARGET_MODE 2001 nflags &= DPARM_SAFE_DFLT; 2002 if (isp->isp_loaded_fw) { 2003 nflags |= DPARM_NARROW | DPARM_ASYNC; 2004 } 2005 #else 2006 nflags = DPARM_DEFAULT; 2007 #endif 2008 oflags = sdp->isp_devparam[tgt].goal_flags; 2009 sdp->isp_devparam[tgt].goal_flags = nflags; 2010 sdp->isp_devparam[tgt].dev_update = 1; 2011 isp->isp_update |= (1 << 
cam_sim_bus(sim)); 2012 (void) isp_control(isp, 2013 ISPCTL_UPDATE_PARAMS, NULL); 2014 sdp->isp_devparam[tgt].goal_flags = oflags; 2015 ISP_UNLOCK(isp); 2016 } 2017 } 2018 break; 2019 default: 2020 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); 2021 break; 2022 } 2023 } 2024 2025 static void 2026 isp_poll(struct cam_sim *sim) 2027 { 2028 ispsoftc_t *isp = cam_sim_softc(sim); 2029 uint32_t isr; 2030 uint16_t sema, mbox; 2031 2032 ISP_LOCK(isp); 2033 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2034 isp_intr(isp, isr, sema, mbox); 2035 } 2036 ISP_UNLOCK(isp); 2037 } 2038 2039 2040 static int isp_watchdog_work(ispsoftc_t *, XS_T *); 2041 2042 static int 2043 isp_watchdog_work(ispsoftc_t *isp, XS_T *xs) 2044 { 2045 uint32_t handle; 2046 2047 /* 2048 * We've decided this command is dead. Make sure we're not trying 2049 * to kill a command that's already dead by getting its handle and 2050 * seeing whether it's still alive. 2051 */ 2052 ISP_LOCK(isp); 2053 handle = isp_find_handle(isp, xs); 2054 if (handle) { 2055 uint32_t isr; 2056 uint16_t sema, mbox; 2057 2058 if (XS_CMD_DONE_P(xs)) { 2059 isp_prt(isp, ISP_LOGDEBUG1, 2060 "watchdog found done cmd (handle 0x%x)", handle); 2061 ISP_UNLOCK(isp); 2062 return (1); 2063 } 2064 2065 if (XS_CMD_WDOG_P(xs)) { 2066 isp_prt(isp, ISP_LOGDEBUG2, 2067 "recursive watchdog (handle 0x%x)", handle); 2068 ISP_UNLOCK(isp); 2069 return (1); 2070 } 2071 2072 XS_CMD_S_WDOG(xs); 2073 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2074 isp_intr(isp, isr, sema, mbox); 2075 } 2076 if (XS_CMD_DONE_P(xs)) { 2077 isp_prt(isp, ISP_LOGDEBUG2, 2078 "watchdog cleanup for handle 0x%x", handle); 2079 ISPLOCK_2_CAMLOCK(isp); 2080 xpt_done((union ccb *) xs); 2081 CAMLOCK_2_ISPLOCK(isp); 2082 } else if (XS_CMD_GRACE_P(xs)) { 2083 /* 2084 * Make sure the command is *really* dead before we 2085 * release the handle (and DMA resources) for reuse. 2086 */ 2087 (void) isp_control(isp, ISPCTL_ABORT_CMD, xs); 2088 2089 /* 2090 * After this point, the command is really dead. 2091 */ 2092 if (XS_XFRLEN(xs)) { 2093 ISP_DMAFREE(isp, xs, handle); 2094 } 2095 isp_destroy_handle(isp, handle); 2096 xpt_print(xs->ccb_h.path, 2097 "watchdog timeout for handle 0x%x\n", handle); 2098 XS_SETERR(xs, CAM_CMD_TIMEOUT); 2099 XS_CMD_C_WDOG(xs); 2100 ISPLOCK_2_CAMLOCK(isp); 2101 isp_done(xs); 2102 CAMLOCK_2_ISPLOCK(isp); 2103 } else { 2104 XS_CMD_C_WDOG(xs); 2105 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); 2106 XS_CMD_S_GRACE(xs); 2107 isp->isp_sendmarker |= 1 << XS_CHANNEL(xs); 2108 } 2109 ISP_UNLOCK(isp); 2110 return (1); 2111 } 2112 ISP_UNLOCK(isp); 2113 return (0); 2114 } 2115 2116 static void 2117 isp_watchdog(void *arg) 2118 { 2119 ispsoftc_t *isp; 2120 XS_T *xs = arg; 2121 for (isp = isplist; isp != NULL; isp = isp->isp_osinfo.next) { 2122 if (isp_watchdog_work(isp, xs)) { 2123 break; 2124 } 2125 } 2126 if (isp == NULL) { 2127 printf("isp_watchdog: nobody had %p active\n", arg); 2128 } 2129 } 2130 2131 2132 #if __FreeBSD_version >= 600000 2133 static void 2134 isp_make_here(ispsoftc_t *isp, int tgt) 2135 { 2136 union ccb *ccb; 2137 ISPLOCK_2_CAMLOCK(isp); 2138 /* 2139 * Allocate a CCB, create a wildcard path for this bus, 2140 * and schedule a rescan.
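 * xpt_rescan() takes over the CCB and performs the scan asynchronously.
 * This path is only compiled on FreeBSD 6.0 and later; older kernels get
 * the no-op macros below instead.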
2141 */ 2142 ccb = xpt_alloc_ccb_nowait(); 2143 if (ccb == NULL) { 2144 isp_prt(isp, ISP_LOGWARN, "unable to alloc CCB for rescan"); 2145 CAMLOCK_2_ISPLOCK(isp); 2146 return; 2147 } 2148 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, 2149 cam_sim_path(isp->isp_sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2150 CAMLOCK_2_ISPLOCK(isp); 2151 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); 2152 xpt_free_ccb(ccb); 2153 return; 2154 } 2155 xpt_rescan(ccb); 2156 CAMLOCK_2_ISPLOCK(isp); 2157 } 2158 2159 static void 2160 isp_make_gone(ispsoftc_t *isp, int tgt) 2161 { 2162 struct cam_path *tp; 2163 ISPLOCK_2_CAMLOCK(isp); 2164 if (xpt_create_path(&tp, NULL, cam_sim_path(isp->isp_sim), tgt, 2165 CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 2166 xpt_async(AC_LOST_DEVICE, tp, NULL); 2167 xpt_free_path(tp); 2168 } 2169 CAMLOCK_2_ISPLOCK(isp); 2170 } 2171 #else 2172 #define isp_make_here(isp, tgt) do { ; } while (0) 2173 #define isp_make_gone(isp, tgt) do { ; } while (0) 2174 #endif 2175 2176 2177 /* 2178 * Gone Device Timer Function- when we have decided that a device has gone 2179 * away, we wait a specific period of time prior to telling the OS it has 2180 * gone away. 2181 * 2182 * This timer function fires once a second and then scans the port database 2183 * for devices that are marked dead but still have a virtual target assigned. 2184 * We decrement a counter for that port database entry, and when it hits zero, 2185 * we tell the OS the device has gone away. 2186 */ 2187 static void 2188 isp_gdt(void *arg) 2189 { 2190 ispsoftc_t *isp = arg; 2191 fcportdb_t *lp; 2192 int dbidx, tgt, more_to_do = 0; 2193 2194 isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired"); 2195 ISP_LOCK(isp); 2196 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2197 lp = &FCPARAM(isp)->portdb[dbidx]; 2198 2199 if (lp->state != FC_PORTDB_STATE_ZOMBIE) { 2200 continue; 2201 } 2202 if (lp->ini_map_idx == 0) { 2203 continue; 2204 } 2205 if (lp->new_reserved == 0) { 2206 continue; 2207 } 2208 lp->new_reserved -= 1; 2209 if (lp->new_reserved != 0) { 2210 more_to_do++; 2211 continue; 2212 } 2213 tgt = lp->ini_map_idx - 1; 2214 FCPARAM(isp)->isp_ini_map[tgt] = 0; 2215 lp->ini_map_idx = 0; 2216 lp->state = FC_PORTDB_STATE_NIL; 2217 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, 2218 "Gone Device Timeout"); 2219 isp_make_gone(isp, tgt); 2220 } 2221 if (more_to_do) { 2222 isp->isp_osinfo.gdt = timeout(isp_gdt, isp, hz); 2223 } else { 2224 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2225 "stopping Gone Device Timer"); 2226 isp->isp_osinfo.gdt_running = 0; 2227 } 2228 ISP_UNLOCK(isp); 2229 } 2230 2231 /* 2232 * Loop Down Timer Function- when the loop goes down, a timer is started, 2233 * and after it expires we come here and take all probational devices that 2234 * the OS knows about and tell the OS that they've gone away. 2235 * 2236 * We don't clear the devices out of our port database because, when the loop 2237 * comes back up, we have to do some actual cleanup with the chip at that 2238 * point (implicit PLOGO, e.g., to get the chip's port database state right). 2239 */ 2240 static void 2241 isp_ldt(void *arg) 2242 { 2243 ispsoftc_t *isp = arg; 2244 fcportdb_t *lp; 2245 int dbidx, tgt; 2246 2247 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired"); 2248 ISP_LOCK(isp); 2249 2250 /* 2251 * Notify the OS of all targets that we now consider to have departed.
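 * Only entries that are still in PROBATIONAL state and that have a
 * target id assigned (ini_map_idx != 0) are announced here.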
2252 */ 2253 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2254 lp = &FCPARAM(isp)->portdb[dbidx]; 2255 2256 if (lp->state != FC_PORTDB_STATE_PROBATIONAL) { 2257 continue; 2258 } 2259 if (lp->ini_map_idx == 0) { 2260 continue; 2261 } 2262 2263 /* 2264 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST! 2265 */ 2266 2267 /* 2268 * Mark that we've announced that this device is gone.... 2269 */ 2270 lp->reserved = 1; 2271 2272 /* 2273 * but *don't* change the state of the entry. Just clear 2274 * any target id stuff and announce to CAM that the 2275 * device is gone. This way any necessary PLOGO stuff 2276 * will happen when loop comes back up. 2277 */ 2278 2279 tgt = lp->ini_map_idx - 1; 2280 FCPARAM(isp)->isp_ini_map[tgt] = 0; 2281 lp->ini_map_idx = 0; 2282 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, 2283 "Loop Down Timeout"); 2284 isp_make_gone(isp, tgt); 2285 } 2286 2287 /* 2288 * The loop down timer has expired. Wake up the kthread 2289 * to notice that fact (or make it false). 2290 */ 2291 isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1; 2292 #if __FreeBSD_version < 500000 2293 wakeup(&isp->isp_osinfo.kproc); 2294 #else 2295 #ifdef ISP_SMPLOCK 2296 cv_signal(&isp->isp_osinfo.kthread_cv); 2297 #else 2298 wakeup(&isp->isp_osinfo.kthread_cv); 2299 #endif 2300 #endif 2301 ISP_UNLOCK(isp); 2302 } 2303 2304 static void 2305 isp_kthread(void *arg) 2306 { 2307 ispsoftc_t *isp = arg; 2308 int slp = 0; 2309 #if __FreeBSD_version < 500000 2310 int s; 2311 2312 s = splcam(); 2313 #else 2314 #ifdef ISP_SMPLOCK 2315 mtx_lock(&isp->isp_lock); 2316 #else 2317 mtx_lock(&Giant); 2318 #endif 2319 #endif 2320 /* 2321 * The first loop is for our usage where we have yet to have 2322 * gotten good fibre channel state. 2323 */ 2324 for (;;) { 2325 int wasfrozen, lb, lim; 2326 2327 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2328 "isp_kthread: checking FC state"); 2329 isp->isp_osinfo.mbox_sleep_ok = 1; 2330 lb = isp_fc_runstate(isp, 250000); 2331 isp->isp_osinfo.mbox_sleep_ok = 0; 2332 if (lb) { 2333 /* 2334 * Increment loop down time by the last sleep interval 2335 */ 2336 isp->isp_osinfo.loop_down_time += slp; 2337 2338 if (lb < 0) { 2339 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2340 "kthread: FC loop not up (down count %d)", 2341 isp->isp_osinfo.loop_down_time); 2342 } else { 2343 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2344 "kthread: FC got to %d (down count %d)", 2345 lb, isp->isp_osinfo.loop_down_time); 2346 } 2347 2348 2349 /* 2350 * If we've never seen loop up and we've waited longer 2351 * than quickboot time, or we've seen loop up but we've 2352 * waited longer than loop_down_limit, give up and go 2353 * to sleep until loop comes up. 
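 * When we do keep polling, the sleep interval below backs off with the
 * accumulated down time: 1, 5, 10, 20 and finally 30 seconds between checks.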
2354 */ 2355 if (FCPARAM(isp)->loop_seen_once == 0) { 2356 lim = isp_quickboot_time; 2357 } else { 2358 lim = isp->isp_osinfo.loop_down_limit; 2359 } 2360 if (isp->isp_osinfo.loop_down_time >= lim) { 2361 isp_freeze_loopdown(isp, "loop limit hit"); 2362 slp = 0; 2363 } else if (isp->isp_osinfo.loop_down_time < 10) { 2364 slp = 1; 2365 } else if (isp->isp_osinfo.loop_down_time < 30) { 2366 slp = 5; 2367 } else if (isp->isp_osinfo.loop_down_time < 60) { 2368 slp = 10; 2369 } else if (isp->isp_osinfo.loop_down_time < 120) { 2370 slp = 20; 2371 } else { 2372 slp = 30; 2373 } 2374 2375 } else { 2376 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2377 "isp_kthread: FC state OK"); 2378 isp->isp_osinfo.loop_down_time = 0; 2379 slp = 0; 2380 } 2381 2382 /* 2383 * If we'd frozen the simq, unfreeze it now so that CAM 2384 * can start sending us commands. If the FC state isn't 2385 * okay yet, they'll hit that in isp_start which will 2386 * freeze the queue again. 2387 */ 2388 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 2389 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 2390 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 2391 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2392 "isp_kthread: releasing simq"); 2393 ISPLOCK_2_CAMLOCK(isp); 2394 xpt_release_simq(isp->isp_sim, 1); 2395 CAMLOCK_2_ISPLOCK(isp); 2396 } 2397 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2398 "isp_kthread: sleep time %d", slp); 2399 #if __FreeBSD_version < 500000 2400 tsleep(&isp->isp_osinfo.kproc, PRIBIO, "ispf", 2401 slp * hz); 2402 #else 2403 #ifdef ISP_SMPLOCK 2404 cv_timed_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock, 2405 slp * hz); 2406 #else 2407 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "ispf", 2408 slp * hz); 2409 #endif 2410 #endif 2411 /* 2412 * If slp is zero, we're waking up for the first time after 2413 * things have been okay. In this case, we set a deferral state 2414 * for all commands and delay hysteresis seconds before starting 2415 * the FC state evaluation. This gives the loop/fabric a chance 2416 * to settle. 2417 */ 2418 if (slp == 0 && isp->isp_osinfo.hysteresis) { 2419 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2420 "isp_kthread: sleep hysteresis tick time %d", 2421 isp->isp_osinfo.hysteresis * hz); 2422 (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT", 2423 (isp->isp_osinfo.hysteresis * hz)); 2424 } 2425 } 2426 } 2427 2428 static void 2429 isp_action(struct cam_sim *sim, union ccb *ccb) 2430 { 2431 int bus, tgt, error, lim; 2432 ispsoftc_t *isp; 2433 struct ccb_trans_settings *cts; 2434 2435 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2436 2437 isp = (ispsoftc_t *)cam_sim_softc(sim); 2438 ccb->ccb_h.sim_priv.entries[0].field = 0; 2439 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2440 if (isp->isp_state != ISP_RUNSTATE && 2441 ccb->ccb_h.func_code == XPT_SCSI_IO) { 2442 CAMLOCK_2_ISPLOCK(isp); 2443 isp_init(isp); 2444 if (isp->isp_state != ISP_INITSTATE) { 2445 ISP_UNLOCK(isp); 2446 /* 2447 * Lie. Say it was a selection timeout. 2448 */ 2449 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 2450 xpt_freeze_devq(ccb->ccb_h.path, 1); 2451 xpt_done(ccb); 2452 return; 2453 } 2454 isp->isp_state = ISP_RUNSTATE; 2455 ISPLOCK_2_CAMLOCK(isp); 2456 } 2457 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2458 2459 2460 switch (ccb->ccb_h.func_code) { 2461 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2462 /* 2463 * Do a couple of preliminary checks... 
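 * We refuse CDB pointers that are physical addresses and, under
 * DIAGNOSTIC, also sanity check the target and lun against this
 * adapter's limits.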
2464 */ 2465 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2466 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2467 ccb->ccb_h.status = CAM_REQ_INVALID; 2468 xpt_done(ccb); 2469 break; 2470 } 2471 } 2472 #ifdef DIAGNOSTIC 2473 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2474 xpt_print(ccb->ccb_h.path, "invalid target\n"); 2475 ccb->ccb_h.status = CAM_PATH_INVALID; 2476 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2477 xpt_print(ccb->ccb_h.path, "invalid lun\n"); 2478 ccb->ccb_h.status = CAM_PATH_INVALID; 2479 } 2480 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2481 xpt_done(ccb); 2482 break; 2483 } 2484 #endif 2485 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 2486 CAMLOCK_2_ISPLOCK(isp); 2487 error = isp_start((XS_T *) ccb); 2488 switch (error) { 2489 case CMD_QUEUED: 2490 XS_CMD_S_CLEAR(ccb); 2491 ISPLOCK_2_CAMLOCK(isp); 2492 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2493 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2494 int ms = ccb->ccb_h.timeout; 2495 if (ms == CAM_TIME_DEFAULT) { 2496 ms = 60*1000; 2497 } 2498 ccb->ccb_h.timeout_ch = 2499 timeout(isp_watchdog, ccb, isp_mstohz(ms)); 2500 } else { 2501 callout_handle_init(&ccb->ccb_h.timeout_ch); 2502 } 2503 break; 2504 case CMD_RQLATER: 2505 /* 2506 * This can only happen for Fibre Channel 2507 */ 2508 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 2509 2510 /* 2511 * Handle initial and subsequent loop down cases 2512 */ 2513 if (FCPARAM(isp)->loop_seen_once == 0) { 2514 lim = isp_quickboot_time; 2515 } else { 2516 lim = isp->isp_osinfo.loop_down_limit; 2517 } 2518 if (isp->isp_osinfo.loop_down_time >= lim) { 2519 isp_prt(isp, ISP_LOGDEBUG0, 2520 "%d.%d downtime (%d) > lim (%d)", 2521 XS_TGT(ccb), XS_LUN(ccb), 2522 isp->isp_osinfo.loop_down_time, lim); 2523 ccb->ccb_h.status = 2524 CAM_SEL_TIMEOUT|CAM_DEV_QFRZN; 2525 xpt_freeze_devq(ccb->ccb_h.path, 1); 2526 ISPLOCK_2_CAMLOCK(isp); 2527 xpt_done(ccb); 2528 break; 2529 } 2530 isp_prt(isp, ISP_LOGDEBUG0, 2531 "%d.%d retry later", XS_TGT(ccb), XS_LUN(ccb)); 2532 /* 2533 * Otherwise, retry in a while. 2534 */ 2535 ISPLOCK_2_CAMLOCK(isp); 2536 cam_freeze_devq(ccb->ccb_h.path); 2537 cam_release_devq(ccb->ccb_h.path, 2538 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); 2539 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2540 xpt_done(ccb); 2541 break; 2542 case CMD_EAGAIN: 2543 ISPLOCK_2_CAMLOCK(isp); 2544 cam_freeze_devq(ccb->ccb_h.path); 2545 cam_release_devq(ccb->ccb_h.path, 2546 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); 2547 xpt_done(ccb); 2548 break; 2549 case CMD_COMPLETE: 2550 isp_done((struct ccb_scsiio *) ccb); 2551 ISPLOCK_2_CAMLOCK(isp); 2552 break; 2553 default: 2554 ISPLOCK_2_CAMLOCK(isp); 2555 isp_prt(isp, ISP_LOGERR, 2556 "What's this? 
0x%x at %d in file %s", 2557 error, __LINE__, __FILE__); 2558 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2559 xpt_done(ccb); 2560 } 2561 break; 2562 2563 #ifdef ISP_TARGET_MODE 2564 case XPT_EN_LUN: /* Enable LUN as a target */ 2565 { 2566 int seq, i; 2567 CAMLOCK_2_ISPLOCK(isp); 2568 seq = isp_en_lun(isp, ccb); 2569 if (seq < 0) { 2570 ISPLOCK_2_CAMLOCK(isp); 2571 xpt_done(ccb); 2572 break; 2573 } 2574 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) { 2575 uint32_t isr; 2576 uint16_t sema, mbox; 2577 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2578 isp_intr(isp, isr, sema, mbox); 2579 } 2580 DELAY(1000); 2581 } 2582 ISPLOCK_2_CAMLOCK(isp); 2583 break; 2584 } 2585 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2586 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2587 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2588 { 2589 tstate_t *tptr = 2590 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2591 if (tptr == NULL) { 2592 ccb->ccb_h.status = CAM_LUN_INVALID; 2593 xpt_done(ccb); 2594 break; 2595 } 2596 ccb->ccb_h.sim_priv.entries[0].field = 0; 2597 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2598 ccb->ccb_h.flags = 0; 2599 2600 CAMLOCK_2_ISPLOCK(isp); 2601 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2602 /* 2603 * Note that the command itself may not be done- 2604 * it may not even have had the first CTIO sent. 2605 */ 2606 tptr->atio_count++; 2607 isp_prt(isp, ISP_LOGTDEBUG0, 2608 "Put FREE ATIO, lun %d, count now %d", 2609 ccb->ccb_h.target_lun, tptr->atio_count); 2610 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2611 sim_links.sle); 2612 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2613 tptr->inot_count++; 2614 isp_prt(isp, ISP_LOGTDEBUG0, 2615 "Put FREE INOT, lun %d, count now %d", 2616 ccb->ccb_h.target_lun, tptr->inot_count); 2617 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2618 sim_links.sle); 2619 } else { 2620 isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");; 2621 } 2622 rls_lun_statep(isp, tptr); 2623 ccb->ccb_h.status = CAM_REQ_INPROG; 2624 ISPLOCK_2_CAMLOCK(isp); 2625 break; 2626 } 2627 case XPT_CONT_TARGET_IO: 2628 { 2629 CAMLOCK_2_ISPLOCK(isp); 2630 isp_target_start_ctio(isp, ccb); 2631 ISPLOCK_2_CAMLOCK(isp); 2632 break; 2633 } 2634 #endif 2635 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2636 2637 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2638 tgt = ccb->ccb_h.target_id; 2639 tgt |= (bus << 16); 2640 2641 CAMLOCK_2_ISPLOCK(isp); 2642 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2643 ISPLOCK_2_CAMLOCK(isp); 2644 if (error) { 2645 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2646 } else { 2647 ccb->ccb_h.status = CAM_REQ_CMP; 2648 } 2649 xpt_done(ccb); 2650 break; 2651 case XPT_ABORT: /* Abort the specified CCB */ 2652 { 2653 union ccb *accb = ccb->cab.abort_ccb; 2654 CAMLOCK_2_ISPLOCK(isp); 2655 switch (accb->ccb_h.func_code) { 2656 #ifdef ISP_TARGET_MODE 2657 case XPT_ACCEPT_TARGET_IO: 2658 case XPT_IMMED_NOTIFY: 2659 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2660 break; 2661 case XPT_CONT_TARGET_IO: 2662 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2663 ccb->ccb_h.status = CAM_UA_ABORT; 2664 break; 2665 #endif 2666 case XPT_SCSI_IO: 2667 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2668 if (error) { 2669 ccb->ccb_h.status = CAM_UA_ABORT; 2670 } else { 2671 ccb->ccb_h.status = CAM_REQ_CMP; 2672 } 2673 break; 2674 default: 2675 ccb->ccb_h.status = CAM_REQ_INVALID; 2676 break; 2677 } 2678 ISPLOCK_2_CAMLOCK(isp); 2679 xpt_done(ccb); 2680 break; 2681 } 2682 #ifdef CAM_NEW_TRAN_CODE 2683 
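/*
 * With the new transport code, current-versus-user settings are indicated
 * by cts->type; the older transport code encodes this in cts->flags.
 */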
#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2684 #else 2685 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2686 #endif 2687 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2688 cts = &ccb->cts; 2689 if (!IS_CURRENT_SETTINGS(cts)) { 2690 ccb->ccb_h.status = CAM_REQ_INVALID; 2691 xpt_done(ccb); 2692 break; 2693 } 2694 tgt = cts->ccb_h.target_id; 2695 CAMLOCK_2_ISPLOCK(isp); 2696 if (IS_SCSI(isp)) { 2697 #ifndef CAM_NEW_TRAN_CODE 2698 sdparam *sdp = isp->isp_param; 2699 uint16_t *dptr; 2700 2701 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2702 2703 sdp += bus; 2704 /* 2705 * We always update (internally) from goal_flags 2706 * so any request to change settings just gets 2707 * vectored to that location. 2708 */ 2709 dptr = &sdp->isp_devparam[tgt].goal_flags; 2710 2711 /* 2712 * Note that these operations affect the 2713 * the goal flags (goal_flags)- not 2714 * the current state flags. Then we mark 2715 * things so that the next operation to 2716 * this HBA will cause the update to occur. 2717 */ 2718 if (cts->valid & CCB_TRANS_DISC_VALID) { 2719 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2720 *dptr |= DPARM_DISC; 2721 } else { 2722 *dptr &= ~DPARM_DISC; 2723 } 2724 } 2725 if (cts->valid & CCB_TRANS_TQ_VALID) { 2726 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2727 *dptr |= DPARM_TQING; 2728 } else { 2729 *dptr &= ~DPARM_TQING; 2730 } 2731 } 2732 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2733 switch (cts->bus_width) { 2734 case MSG_EXT_WDTR_BUS_16_BIT: 2735 *dptr |= DPARM_WIDE; 2736 break; 2737 default: 2738 *dptr &= ~DPARM_WIDE; 2739 } 2740 } 2741 /* 2742 * Any SYNC RATE of nonzero and SYNC_OFFSET 2743 * of nonzero will cause us to go to the 2744 * selected (from NVRAM) maximum value for 2745 * this device. At a later point, we'll 2746 * allow finer control. 2747 */ 2748 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2749 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2750 (cts->sync_offset > 0)) { 2751 *dptr |= DPARM_SYNC; 2752 } else { 2753 *dptr &= ~DPARM_SYNC; 2754 } 2755 *dptr |= DPARM_SAFE_DFLT; 2756 #else 2757 struct ccb_trans_settings_scsi *scsi = 2758 &cts->proto_specific.scsi; 2759 struct ccb_trans_settings_spi *spi = 2760 &cts->xport_specific.spi; 2761 sdparam *sdp = isp->isp_param; 2762 uint16_t *dptr; 2763 2764 if (spi->valid == 0 && scsi->valid == 0) { 2765 ISPLOCK_2_CAMLOCK(isp); 2766 ccb->ccb_h.status = CAM_REQ_CMP; 2767 xpt_done(ccb); 2768 break; 2769 } 2770 2771 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2772 sdp += bus; 2773 /* 2774 * We always update (internally) from goal_flags 2775 * so any request to change settings just gets 2776 * vectored to that location. 
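 * Nothing is sent to the chip here; dev_update is set below and the new
 * goal flags take effect on the next ISPCTL_UPDATE_PARAMS operation.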
2777 */ 2778 dptr = &sdp->isp_devparam[tgt].goal_flags; 2779 2780 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2781 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2782 *dptr |= DPARM_DISC; 2783 else 2784 *dptr &= ~DPARM_DISC; 2785 } 2786 2787 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2788 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2789 *dptr |= DPARM_TQING; 2790 else 2791 *dptr &= ~DPARM_TQING; 2792 } 2793 2794 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2795 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2796 *dptr |= DPARM_WIDE; 2797 else 2798 *dptr &= ~DPARM_WIDE; 2799 } 2800 2801 /* 2802 * XXX: FIX ME 2803 */ 2804 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2805 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2806 (spi->sync_period && spi->sync_offset)) { 2807 *dptr |= DPARM_SYNC; 2808 /* 2809 * XXX: CHECK FOR LEGALITY 2810 */ 2811 sdp->isp_devparam[tgt].goal_period = 2812 spi->sync_period; 2813 sdp->isp_devparam[tgt].goal_offset = 2814 spi->sync_offset; 2815 } else { 2816 *dptr &= ~DPARM_SYNC; 2817 } 2818 #endif 2819 isp_prt(isp, ISP_LOGDEBUG0, 2820 "SET (%d.%d.%d) to flags %x off %x per %x", 2821 bus, tgt, cts->ccb_h.target_lun, 2822 sdp->isp_devparam[tgt].goal_flags, 2823 sdp->isp_devparam[tgt].goal_offset, 2824 sdp->isp_devparam[tgt].goal_period); 2825 sdp->isp_devparam[tgt].dev_update = 1; 2826 isp->isp_update |= (1 << bus); 2827 } 2828 ISPLOCK_2_CAMLOCK(isp); 2829 ccb->ccb_h.status = CAM_REQ_CMP; 2830 xpt_done(ccb); 2831 break; 2832 case XPT_GET_TRAN_SETTINGS: 2833 cts = &ccb->cts; 2834 tgt = cts->ccb_h.target_id; 2835 CAMLOCK_2_ISPLOCK(isp); 2836 if (IS_FC(isp)) { 2837 #ifndef CAM_NEW_TRAN_CODE 2838 /* 2839 * a lot of normal SCSI things don't make sense. 2840 */ 2841 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2842 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2843 /* 2844 * How do you measure the width of a high 2845 * speed serial bus? Well, in bytes. 2846 * 2847 * Offset and period make no sense, though, so we set 2848 * (above) a 'base' transfer speed to be gigabit. 
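 * The new transport code branch below reports the link rate explicitly
 * instead: fc->bitrate is set to 200000 for 2Gb capable parts and
 * 100000 otherwise.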
2849 */ 2850 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2851 #else 2852 fcparam *fcp = isp->isp_param; 2853 struct ccb_trans_settings_scsi *scsi = 2854 &cts->proto_specific.scsi; 2855 struct ccb_trans_settings_fc *fc = 2856 &cts->xport_specific.fc; 2857 2858 cts->protocol = PROTO_SCSI; 2859 cts->protocol_version = SCSI_REV_2; 2860 cts->transport = XPORT_FC; 2861 cts->transport_version = 0; 2862 2863 scsi->valid = CTS_SCSI_VALID_TQ; 2864 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 2865 fc->valid = CTS_FC_VALID_SPEED; 2866 if (fcp->isp_gbspeed == 2) { 2867 fc->bitrate = 200000; 2868 } else { 2869 fc->bitrate = 100000; 2870 } 2871 if (tgt > 0 && tgt < MAX_FC_TARG) { 2872 fcportdb_t *lp = &fcp->portdb[tgt]; 2873 fc->wwnn = lp->node_wwn; 2874 fc->wwpn = lp->port_wwn; 2875 fc->port = lp->portid; 2876 fc->valid |= CTS_FC_VALID_WWNN | 2877 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2878 } 2879 #endif 2880 } else { 2881 #ifdef CAM_NEW_TRAN_CODE 2882 struct ccb_trans_settings_scsi *scsi = 2883 &cts->proto_specific.scsi; 2884 struct ccb_trans_settings_spi *spi = 2885 &cts->xport_specific.spi; 2886 #endif 2887 sdparam *sdp = isp->isp_param; 2888 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2889 uint16_t dval, pval, oval; 2890 2891 sdp += bus; 2892 2893 if (IS_CURRENT_SETTINGS(cts)) { 2894 sdp->isp_devparam[tgt].dev_refresh = 1; 2895 isp->isp_update |= (1 << bus); 2896 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2897 NULL); 2898 dval = sdp->isp_devparam[tgt].actv_flags; 2899 oval = sdp->isp_devparam[tgt].actv_offset; 2900 pval = sdp->isp_devparam[tgt].actv_period; 2901 } else { 2902 dval = sdp->isp_devparam[tgt].nvrm_flags; 2903 oval = sdp->isp_devparam[tgt].nvrm_offset; 2904 pval = sdp->isp_devparam[tgt].nvrm_period; 2905 } 2906 2907 #ifndef CAM_NEW_TRAN_CODE 2908 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2909 2910 if (dval & DPARM_DISC) { 2911 cts->flags |= CCB_TRANS_DISC_ENB; 2912 } 2913 if (dval & DPARM_TQING) { 2914 cts->flags |= CCB_TRANS_TAG_ENB; 2915 } 2916 if (dval & DPARM_WIDE) { 2917 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2918 } else { 2919 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2920 } 2921 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2922 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2923 2924 if ((dval & DPARM_SYNC) && oval != 0) { 2925 cts->sync_period = pval; 2926 cts->sync_offset = oval; 2927 cts->valid |= 2928 CCB_TRANS_SYNC_RATE_VALID | 2929 CCB_TRANS_SYNC_OFFSET_VALID; 2930 } 2931 #else 2932 cts->protocol = PROTO_SCSI; 2933 cts->protocol_version = SCSI_REV_2; 2934 cts->transport = XPORT_SPI; 2935 cts->transport_version = 2; 2936 2937 spi->valid = 0; 2938 scsi->valid = 0; 2939 spi->flags = 0; 2940 scsi->flags = 0; 2941 if (dval & DPARM_DISC) { 2942 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2943 } 2944 if ((dval & DPARM_SYNC) && oval && pval) { 2945 spi->sync_offset = oval; 2946 spi->sync_period = pval; 2947 } else { 2948 spi->sync_offset = 0; 2949 spi->sync_period = 0; 2950 } 2951 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2952 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2953 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2954 if (dval & DPARM_WIDE) { 2955 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2956 } else { 2957 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2958 } 2959 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2960 scsi->valid = CTS_SCSI_VALID_TQ; 2961 if (dval & DPARM_TQING) { 2962 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2963 } 2964 spi->valid |= CTS_SPI_VALID_DISC; 2965 } 2966 #endif 2967 isp_prt(isp, ISP_LOGDEBUG0, 2968 "GET %s (%d.%d.%d) to flags %x off %x per %x", 2969 
IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 2970 bus, tgt, cts->ccb_h.target_lun, dval, oval, pval); 2971 } 2972 ISPLOCK_2_CAMLOCK(isp); 2973 ccb->ccb_h.status = CAM_REQ_CMP; 2974 xpt_done(ccb); 2975 break; 2976 2977 case XPT_CALC_GEOMETRY: 2978 #if __FreeBSD_version < 500000 2979 { 2980 struct ccb_calc_geometry *ccg; 2981 u_int32_t secs_per_cylinder; 2982 u_int32_t size_mb; 2983 2984 ccg = &ccb->ccg; 2985 if (ccg->block_size == 0) { 2986 ccb->ccb_h.status = CAM_REQ_INVALID; 2987 xpt_done(ccb); 2988 break; 2989 } 2990 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2991 if (size_mb > 1024) { 2992 ccg->heads = 255; 2993 ccg->secs_per_track = 63; 2994 } else { 2995 ccg->heads = 64; 2996 ccg->secs_per_track = 32; 2997 } 2998 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2999 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3000 ccb->ccb_h.status = CAM_REQ_CMP; 3001 xpt_done(ccb); 3002 break; 3003 } 3004 #else 3005 { 3006 cam_calc_geometry(&ccb->ccg, /*extended*/1); 3007 xpt_done(ccb); 3008 break; 3009 } 3010 #endif 3011 case XPT_RESET_BUS: /* Reset the specified bus */ 3012 bus = cam_sim_bus(sim); 3013 CAMLOCK_2_ISPLOCK(isp); 3014 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 3015 ISPLOCK_2_CAMLOCK(isp); 3016 if (error) 3017 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 3018 else { 3019 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 3020 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 3021 else if (isp->isp_path != NULL) 3022 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 3023 ccb->ccb_h.status = CAM_REQ_CMP; 3024 } 3025 xpt_done(ccb); 3026 break; 3027 3028 case XPT_TERM_IO: /* Terminate the I/O process */ 3029 ccb->ccb_h.status = CAM_REQ_INVALID; 3030 xpt_done(ccb); 3031 break; 3032 3033 case XPT_PATH_INQ: /* Path routing inquiry */ 3034 { 3035 struct ccb_pathinq *cpi = &ccb->cpi; 3036 3037 cpi->version_num = 1; 3038 #ifdef ISP_TARGET_MODE 3039 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3040 #else 3041 cpi->target_sprt = 0; 3042 #endif 3043 cpi->hba_eng_cnt = 0; 3044 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 3045 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 3046 cpi->bus_id = cam_sim_bus(sim); 3047 if (IS_FC(isp)) { 3048 cpi->hba_misc = PIM_NOBUSRESET; 3049 /* 3050 * Because our loop ID can shift from time to time, 3051 * make our initiator ID out of range of our bus. 3052 */ 3053 cpi->initiator_id = cpi->max_target + 1; 3054 3055 /* 3056 * Set base transfer capabilities for Fibre Channel. 3057 * Technically not correct because we don't know 3058 * what media we're running on top of- but we'll 3059 * look good if we always say 100MB/s. 
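 * If the firmware reports a 2Gb link (isp_gbspeed == 2), report 200000
 * instead.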
3060 */ 3061 if (FCPARAM(isp)->isp_gbspeed == 2) 3062 cpi->base_transfer_speed = 200000; 3063 else 3064 cpi->base_transfer_speed = 100000; 3065 cpi->hba_inquiry = PI_TAG_ABLE; 3066 #ifdef CAM_NEW_TRAN_CODE 3067 cpi->transport = XPORT_FC; 3068 cpi->transport_version = 0; 3069 #endif 3070 } else { 3071 sdparam *sdp = isp->isp_param; 3072 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 3073 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3074 cpi->hba_misc = 0; 3075 cpi->initiator_id = sdp->isp_initiator_id; 3076 cpi->base_transfer_speed = 3300; 3077 #ifdef CAM_NEW_TRAN_CODE 3078 cpi->transport = XPORT_SPI; 3079 cpi->transport_version = 2; 3080 #endif 3081 } 3082 #ifdef CAM_NEW_TRAN_CODE 3083 cpi->protocol = PROTO_SCSI; 3084 cpi->protocol_version = SCSI_REV_2; 3085 #endif 3086 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3087 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 3088 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3089 cpi->unit_number = cam_sim_unit(sim); 3090 cpi->ccb_h.status = CAM_REQ_CMP; 3091 xpt_done(ccb); 3092 break; 3093 } 3094 default: 3095 ccb->ccb_h.status = CAM_REQ_INVALID; 3096 xpt_done(ccb); 3097 break; 3098 } 3099 } 3100 3101 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 3102 3103 void 3104 isp_done(struct ccb_scsiio *sccb) 3105 { 3106 ispsoftc_t *isp = XS_ISP(sccb); 3107 3108 if (XS_NOERR(sccb)) 3109 XS_SETERR(sccb, CAM_REQ_CMP); 3110 3111 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 3112 (sccb->scsi_status != SCSI_STATUS_OK)) { 3113 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 3114 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 3115 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 3116 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 3117 } else { 3118 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 3119 } 3120 } 3121 3122 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3123 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3124 isp_prt(isp, ISP_LOGDEBUG0, 3125 "target %d lun %d CAM status 0x%x SCSI status 0x%x", 3126 XS_TGT(sccb), XS_LUN(sccb), sccb->ccb_h.status, 3127 sccb->scsi_status); 3128 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 3129 sccb->ccb_h.status |= CAM_DEV_QFRZN; 3130 xpt_freeze_devq(sccb->ccb_h.path, 1); 3131 } 3132 } 3133 3134 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 3135 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3136 xpt_print(sccb->ccb_h.path, 3137 "cam completion status 0x%x\n", sccb->ccb_h.status); 3138 } 3139 3140 XS_CMD_S_DONE(sccb); 3141 if (XS_CMD_WDOG_P(sccb) == 0) { 3142 untimeout(isp_watchdog, sccb, sccb->ccb_h.timeout_ch); 3143 if (XS_CMD_GRACE_P(sccb)) { 3144 isp_prt(isp, ISP_LOGDEBUG2, 3145 "finished command on borrowed time"); 3146 } 3147 XS_CMD_S_CLEAR(sccb); 3148 ISPLOCK_2_CAMLOCK(isp); 3149 xpt_done((union ccb *) sccb); 3150 CAMLOCK_2_ISPLOCK(isp); 3151 } 3152 } 3153 3154 int 3155 isp_async(ispsoftc_t *isp, ispasync_t cmd, void *arg) 3156 { 3157 int bus, rv = 0; 3158 static const char prom[] = 3159 "PortID 0x%06x handle 0x%x role %s %s\n" 3160 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3161 static const char prom2[] = 3162 "PortID 0x%06x handle 0x%x role %s %s tgt %u\n" 3163 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3164 char *msg = NULL; 3165 target_id_t tgt; 3166 fcportdb_t *lp; 3167 struct cam_path *tmppath; 3168 3169 switch (cmd) { 3170 case ISPASYNC_NEW_TGT_PARAMS: 3171 { 3172 #ifdef CAM_NEW_TRAN_CODE 3173 struct ccb_trans_settings_scsi *scsi; 3174 struct ccb_trans_settings_spi *spi; 3175 #endif 3176 int flags, tgt; 3177 sdparam *sdp = isp->isp_param; 3178 struct 
ccb_trans_settings cts; 3179 3180 memset(&cts, 0, sizeof (struct ccb_trans_settings)); 3181 3182 tgt = *((int *)arg); 3183 bus = (tgt >> 16) & 0xffff; 3184 tgt &= 0xffff; 3185 sdp += bus; 3186 ISPLOCK_2_CAMLOCK(isp); 3187 if (xpt_create_path(&tmppath, NULL, 3188 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), 3189 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 3190 CAMLOCK_2_ISPLOCK(isp); 3191 isp_prt(isp, ISP_LOGWARN, 3192 "isp_async cannot make temp path for %d.%d", 3193 tgt, bus); 3194 rv = -1; 3195 break; 3196 } 3197 CAMLOCK_2_ISPLOCK(isp); 3198 flags = sdp->isp_devparam[tgt].actv_flags; 3199 #ifdef CAM_NEW_TRAN_CODE 3200 cts.type = CTS_TYPE_CURRENT_SETTINGS; 3201 cts.protocol = PROTO_SCSI; 3202 cts.transport = XPORT_SPI; 3203 3204 scsi = &cts.proto_specific.scsi; 3205 spi = &cts.xport_specific.spi; 3206 3207 if (flags & DPARM_TQING) { 3208 scsi->valid |= CTS_SCSI_VALID_TQ; 3209 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3210 } 3211 3212 if (flags & DPARM_DISC) { 3213 spi->valid |= CTS_SPI_VALID_DISC; 3214 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3215 } 3216 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3217 if (flags & DPARM_WIDE) { 3218 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3219 } else { 3220 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3221 } 3222 if (flags & DPARM_SYNC) { 3223 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3224 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3225 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 3226 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 3227 } 3228 #else 3229 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 3230 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3231 if (flags & DPARM_DISC) { 3232 cts.flags |= CCB_TRANS_DISC_ENB; 3233 } 3234 if (flags & DPARM_TQING) { 3235 cts.flags |= CCB_TRANS_TAG_ENB; 3236 } 3237 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 3238 cts.bus_width = (flags & DPARM_WIDE)? 3239 MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT; 3240 cts.sync_period = sdp->isp_devparam[tgt].actv_period; 3241 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; 3242 if (flags & DPARM_SYNC) { 3243 cts.valid |= 3244 CCB_TRANS_SYNC_RATE_VALID | 3245 CCB_TRANS_SYNC_OFFSET_VALID; 3246 } 3247 #endif 3248 isp_prt(isp, ISP_LOGDEBUG2, 3249 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 3250 bus, tgt, sdp->isp_devparam[tgt].actv_period, 3251 sdp->isp_devparam[tgt].actv_offset, flags); 3252 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 3253 ISPLOCK_2_CAMLOCK(isp); 3254 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 3255 xpt_free_path(tmppath); 3256 CAMLOCK_2_ISPLOCK(isp); 3257 break; 3258 } 3259 case ISPASYNC_BUS_RESET: 3260 bus = *((int *)arg); 3261 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 3262 bus); 3263 if (bus > 0 && isp->isp_path2) { 3264 ISPLOCK_2_CAMLOCK(isp); 3265 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 3266 CAMLOCK_2_ISPLOCK(isp); 3267 } else if (isp->isp_path) { 3268 ISPLOCK_2_CAMLOCK(isp); 3269 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 3270 CAMLOCK_2_ISPLOCK(isp); 3271 } 3272 break; 3273 case ISPASYNC_LIP: 3274 if (msg == NULL) { 3275 msg = "LIP Received"; 3276 } 3277 /* FALLTHROUGH */ 3278 case ISPASYNC_LOOP_RESET: 3279 if (msg == NULL) { 3280 msg = "LOOP Reset"; 3281 } 3282 /* FALLTHROUGH */ 3283 case ISPASYNC_LOOP_DOWN: 3284 if (msg == NULL) { 3285 msg = "LOOP Down"; 3286 } 3287 if (isp->isp_path) { 3288 isp_freeze_loopdown(isp, msg); 3289 } 3290 if (isp->isp_osinfo.ldt_running == 0) { 3291 isp->isp_osinfo.ldt = timeout(isp_ldt, isp, 3292 isp->isp_osinfo.loop_down_limit * hz); 3293 isp->isp_osinfo.ldt_running = 1; 3294 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3295 "starting Loop Down Timer"); 3296 } 3297 isp_prt(isp, ISP_LOGINFO, msg); 3298 break; 3299 case ISPASYNC_LOOP_UP: 3300 /* 3301 * Now we just note that Loop has come up. We don't 3302 * actually do anything because we're waiting for a 3303 * Change Notify before activating the FC cleanup 3304 * thread to look at the state of the loop again.
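 * The Change Notify handler below cancels any running Loop Down Timer
 * and wakes isp_kthread so it can re-evaluate loop state.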
3305 */ 3306 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 3307 break; 3308 case ISPASYNC_DEV_ARRIVED: 3309 lp = arg; 3310 lp->reserved = 0; 3311 if ((isp->isp_role & ISP_ROLE_INITIATOR) && 3312 (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) { 3313 int dbidx = lp - FCPARAM(isp)->portdb; 3314 int i; 3315 3316 for (i = 0; i < MAX_FC_TARG; i++) { 3317 if (i >= FL_ID && i <= SNS_ID) { 3318 continue; 3319 } 3320 if (FCPARAM(isp)->isp_ini_map[i] == 0) { 3321 break; 3322 } 3323 } 3324 if (i < MAX_FC_TARG) { 3325 FCPARAM(isp)->isp_ini_map[i] = dbidx + 1; 3326 lp->ini_map_idx = i + 1; 3327 } else { 3328 isp_prt(isp, ISP_LOGWARN, "out of target ids"); 3329 isp_dump_portdb(isp); 3330 } 3331 } 3332 if (lp->ini_map_idx) { 3333 tgt = lp->ini_map_idx - 1; 3334 isp_prt(isp, ISP_LOGCONFIG, prom2, 3335 lp->portid, lp->handle, 3336 roles[lp->roles], "arrived at", tgt, 3337 (uint32_t) (lp->node_wwn >> 32), 3338 (uint32_t) lp->node_wwn, 3339 (uint32_t) (lp->port_wwn >> 32), 3340 (uint32_t) lp->port_wwn); 3341 isp_make_here(isp, tgt); 3342 } else { 3343 isp_prt(isp, ISP_LOGCONFIG, prom, 3344 lp->portid, lp->handle, 3345 roles[lp->roles], "arrived", 3346 (uint32_t) (lp->node_wwn >> 32), 3347 (uint32_t) lp->node_wwn, 3348 (uint32_t) (lp->port_wwn >> 32), 3349 (uint32_t) lp->port_wwn); 3350 } 3351 break; 3352 case ISPASYNC_DEV_CHANGED: 3353 lp = arg; 3354 if (isp_change_is_bad) { 3355 lp->state = FC_PORTDB_STATE_NIL; 3356 if (lp->ini_map_idx) { 3357 tgt = lp->ini_map_idx - 1; 3358 FCPARAM(isp)->isp_ini_map[tgt] = 0; 3359 lp->ini_map_idx = 0; 3360 isp_prt(isp, ISP_LOGCONFIG, prom3, 3361 lp->portid, tgt, "change is bad"); 3362 isp_make_gone(isp, tgt); 3363 } else { 3364 isp_prt(isp, ISP_LOGCONFIG, prom, 3365 lp->portid, lp->handle, 3366 roles[lp->roles], 3367 "changed and departed", 3368 (uint32_t) (lp->node_wwn >> 32), 3369 (uint32_t) lp->node_wwn, 3370 (uint32_t) (lp->port_wwn >> 32), 3371 (uint32_t) lp->port_wwn); 3372 } 3373 } else { 3374 lp->portid = lp->new_portid; 3375 lp->roles = lp->new_roles; 3376 if (lp->ini_map_idx) { 3377 int t = lp->ini_map_idx - 1; 3378 FCPARAM(isp)->isp_ini_map[t] = 3379 (lp - FCPARAM(isp)->portdb) + 1; 3380 tgt = lp->ini_map_idx - 1; 3381 isp_prt(isp, ISP_LOGCONFIG, prom2, 3382 lp->portid, lp->handle, 3383 roles[lp->roles], "changed at", tgt, 3384 (uint32_t) (lp->node_wwn >> 32), 3385 (uint32_t) lp->node_wwn, 3386 (uint32_t) (lp->port_wwn >> 32), 3387 (uint32_t) lp->port_wwn); 3388 } else { 3389 isp_prt(isp, ISP_LOGCONFIG, prom, 3390 lp->portid, lp->handle, 3391 roles[lp->roles], "changed", 3392 (uint32_t) (lp->node_wwn >> 32), 3393 (uint32_t) lp->node_wwn, 3394 (uint32_t) (lp->port_wwn >> 32), 3395 (uint32_t) lp->port_wwn); 3396 } 3397 } 3398 break; 3399 case ISPASYNC_DEV_STAYED: 3400 lp = arg; 3401 if (lp->ini_map_idx) { 3402 tgt = lp->ini_map_idx - 1; 3403 isp_prt(isp, ISP_LOGCONFIG, prom2, 3404 lp->portid, lp->handle, 3405 roles[lp->roles], "stayed at", tgt, 3406 (uint32_t) (lp->node_wwn >> 32), 3407 (uint32_t) lp->node_wwn, 3408 (uint32_t) (lp->port_wwn >> 32), 3409 (uint32_t) lp->port_wwn); 3410 } else { 3411 isp_prt(isp, ISP_LOGCONFIG, prom, 3412 lp->portid, lp->handle, 3413 roles[lp->roles], "stayed", 3414 (uint32_t) (lp->node_wwn >> 32), 3415 (uint32_t) lp->node_wwn, 3416 (uint32_t) (lp->port_wwn >> 32), 3417 (uint32_t) lp->port_wwn); 3418 } 3419 break; 3420 case ISPASYNC_DEV_GONE: 3421 lp = arg; 3422 /* 3423 * If this has a virtual target and we haven't marked it 3424 * that we're going to have isp_gdt tell the OS it's gone, 3425 * set the isp_gdt timer running on it. 
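 * (new_reserved holds the remaining grace period in seconds, taken from
 * gone_device_time and counted down once a second by isp_gdt.)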
3426 * 3427 * If it isn't marked that isp_gdt is going to get rid of it, 3428 * announce that it's gone. 3429 */ 3430 if (lp->ini_map_idx && lp->reserved == 0) { 3431 lp->reserved = 1; 3432 lp->new_reserved = isp->isp_osinfo.gone_device_time; 3433 lp->state = FC_PORTDB_STATE_ZOMBIE; 3434 if (isp->isp_osinfo.gdt_running == 0) { 3435 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3436 "starting Gone Device Timer"); 3437 isp->isp_osinfo.gdt = timeout(isp_gdt, isp, hz); 3438 isp->isp_osinfo.gdt_running = 1; 3439 } 3440 tgt = lp->ini_map_idx - 1; 3441 isp_prt(isp, ISP_LOGCONFIG, prom2, 3442 lp->portid, lp->handle, 3443 roles[lp->roles], "gone zombie at", tgt, 3444 (uint32_t) (lp->node_wwn >> 32), 3445 (uint32_t) lp->node_wwn, 3446 (uint32_t) (lp->port_wwn >> 32), 3447 (uint32_t) lp->port_wwn); 3448 } else if (lp->reserved == 0) { 3449 isp_prt(isp, ISP_LOGCONFIG, prom, 3450 lp->portid, lp->handle, 3451 roles[lp->roles], "departed", 3452 (uint32_t) (lp->node_wwn >> 32), 3453 (uint32_t) lp->node_wwn, 3454 (uint32_t) (lp->port_wwn >> 32), 3455 (uint32_t) lp->port_wwn); 3456 } 3457 break; 3458 case ISPASYNC_CHANGE_NOTIFY: 3459 { 3460 char *msg; 3461 if (arg == ISPASYNC_CHANGE_PDB) { 3462 msg = "Port Database Changed"; 3463 } else if (arg == ISPASYNC_CHANGE_SNS) { 3464 msg = "Name Server Database Changed"; 3465 } else { 3466 msg = "Other Change Notify"; 3467 } 3468 /* 3469 * If the loop down timer is running, cancel it. 3470 */ 3471 if (isp->isp_osinfo.ldt_running) { 3472 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3473 "Stopping Loop Down Timer"); 3474 isp->isp_osinfo.ldt_running = 0; 3475 untimeout(isp_ldt, isp, isp->isp_osinfo.ldt); 3476 callout_handle_init(&isp->isp_osinfo.ldt); 3477 } 3478 isp_prt(isp, ISP_LOGINFO, msg); 3479 isp_freeze_loopdown(isp, msg); 3480 #if __FreeBSD_version < 500000 3481 wakeup(&isp->isp_osinfo.kproc); 3482 #else 3483 #ifdef ISP_SMPLOCK 3484 cv_signal(&isp->isp_osinfo.kthread_cv); 3485 #else 3486 wakeup(&isp->isp_osinfo.kthread_cv); 3487 #endif 3488 #endif 3489 break; 3490 } 3491 #ifdef ISP_TARGET_MODE 3492 case ISPASYNC_TARGET_NOTIFY: 3493 { 3494 tmd_notify_t *nt = arg; 3495 isp_prt(isp, ISP_LOGALL, 3496 "target notify code 0x%x", nt->nt_ncode); 3497 break; 3498 } 3499 case ISPASYNC_TARGET_ACTION: 3500 switch (((isphdr_t *)arg)->rqs_entry_type) { 3501 default: 3502 isp_prt(isp, ISP_LOGWARN, 3503 "event 0x%x for unhandled target action", 3504 ((isphdr_t *)arg)->rqs_entry_type); 3505 break; 3506 case RQSTYPE_NOTIFY: 3507 if (IS_SCSI(isp)) { 3508 rv = isp_handle_platform_notify_scsi(isp, 3509 (in_entry_t *) arg); 3510 } else { 3511 rv = isp_handle_platform_notify_fc(isp, 3512 (in_fcentry_t *) arg); 3513 } 3514 break; 3515 case RQSTYPE_ATIO: 3516 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 3517 break; 3518 case RQSTYPE_ATIO2: 3519 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3520 break; 3521 case RQSTYPE_CTIO3: 3522 case RQSTYPE_CTIO2: 3523 case RQSTYPE_CTIO: 3524 rv = isp_handle_platform_ctio(isp, arg); 3525 break; 3526 case RQSTYPE_ENABLE_LUN: 3527 case RQSTYPE_MODIFY_LUN: 3528 isp_ledone(isp, (lun_entry_t *) arg); 3529 break; 3530 } 3531 break; 3532 #endif 3533 case ISPASYNC_FW_CRASH: 3534 { 3535 uint16_t mbox1, mbox6; 3536 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3537 if (IS_DUALBUS(isp)) { 3538 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3539 } else { 3540 mbox6 = 0; 3541 } 3542 isp_prt(isp, ISP_LOGERR, 3543 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3544 mbox6, mbox1); 3545 #ifdef ISP_FW_CRASH_DUMP 3546 /* 3547 * XXX: really need a thread to do this 
right. 3548 */ 3549 if (IS_FC(isp)) { 3550 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3551 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3552 isp_freeze_loopdown(isp, "f/w crash"); 3553 isp_fw_dump(isp); 3554 } 3555 isp_reinit(isp); 3556 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3557 #endif 3558 break; 3559 } 3560 case ISPASYNC_UNHANDLED_RESPONSE: 3561 break; 3562 default: 3563 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3564 break; 3565 } 3566 return (rv); 3567 } 3568 3569 3570 /* 3571 * Locks are held before coming here. 3572 */ 3573 void 3574 isp_uninit(ispsoftc_t *isp) 3575 { 3576 if (IS_24XX(isp)) { 3577 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); 3578 } else { 3579 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 3580 } 3581 ISP_DISABLE_INTS(isp); 3582 } 3583 3584 void 3585 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 3586 { 3587 va_list ap; 3588 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3589 return; 3590 } 3591 printf("%s: ", device_get_nameunit(isp->isp_dev)); 3592 va_start(ap, fmt); 3593 vprintf(fmt, ap); 3594 va_end(ap); 3595 printf("\n"); 3596 } 3597 3598 uint64_t 3599 isp_nanotime_sub(struct timespec *b, struct timespec *a) 3600 { 3601 uint64_t elapsed; 3602 struct timespec x = *b; 3603 timespecsub(&x, a); 3604 elapsed = GET_NANOSEC(&x); 3605 if (elapsed == 0) 3606 elapsed++; 3607 return (elapsed); 3608 } 3609 3610 int 3611 isp_mbox_acquire(ispsoftc_t *isp) 3612 { 3613 if (isp->isp_osinfo.mboxbsy) { 3614 return (1); 3615 } else { 3616 isp->isp_osinfo.mboxcmd_done = 0; 3617 isp->isp_osinfo.mboxbsy = 1; 3618 return (0); 3619 } 3620 } 3621 3622 void 3623 isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) 3624 { 3625 unsigned int usecs = mbp->timeout; 3626 unsigned int max, olim, ilim; 3627 3628 if (usecs == 0) { 3629 usecs = MBCMD_DEFAULT_TIMEOUT; 3630 } 3631 max = isp->isp_mbxwrk0 + 1; 3632 3633 if (isp->isp_osinfo.mbox_sleep_ok) { 3634 unsigned int ms = (usecs + 999) / 1000; 3635 3636 isp->isp_osinfo.mbox_sleep_ok = 0; 3637 isp->isp_osinfo.mbox_sleeping = 1; 3638 for (olim = 0; olim < max; olim++) { 3639 #if __FreeBSD_version < 500000 || !defined(ISP_SMPLOCK) 3640 tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", 3641 isp_mstohz(ms)); 3642 #else 3643 msleep(&isp->isp_mbxworkp, &isp->isp_mtx, PRIBIO, 3644 "ispmbx_sleep", isp_mstohz(ms)); 3645 #endif 3646 if (isp->isp_osinfo.mboxcmd_done) { 3647 break; 3648 } 3649 } 3650 isp->isp_osinfo.mbox_sleep_ok = 1; 3651 isp->isp_osinfo.mbox_sleeping = 0; 3652 } else { 3653 for (olim = 0; olim < max; olim++) { 3654 for (ilim = 0; ilim < usecs; ilim += 100) { 3655 uint32_t isr; 3656 uint16_t sema, mbox; 3657 if (isp->isp_osinfo.mboxcmd_done) { 3658 break; 3659 } 3660 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 3661 isp_intr(isp, isr, sema, mbox); 3662 if (isp->isp_osinfo.mboxcmd_done) { 3663 break; 3664 } 3665 } 3666 USEC_DELAY(100); 3667 } 3668 if (isp->isp_osinfo.mboxcmd_done) { 3669 break; 3670 } 3671 } 3672 } 3673 if (isp->isp_osinfo.mboxcmd_done == 0) { 3674 isp_prt(isp, ISP_LOGWARN, 3675 "%s Mailbox Command (0x%x) Timeout (%uus)", 3676 isp->isp_osinfo.mbox_sleep_ok? 
"Interrupting" : "Polled", 3677 isp->isp_lastmbxcmd, usecs); 3678 mbp->param[0] = MBOX_TIMEOUT; 3679 isp->isp_osinfo.mboxcmd_done = 1; 3680 } 3681 } 3682 3683 void 3684 isp_mbox_notify_done(ispsoftc_t *isp) 3685 { 3686 if (isp->isp_osinfo.mbox_sleeping) { 3687 wakeup(&isp->isp_mbxworkp); 3688 } 3689 isp->isp_osinfo.mboxcmd_done = 1; 3690 } 3691 3692 void 3693 isp_mbox_release(ispsoftc_t *isp) 3694 { 3695 isp->isp_osinfo.mboxbsy = 0; 3696 } 3697 3698 int 3699 isp_mstohz(int ms) 3700 { 3701 int hz; 3702 struct timeval t; 3703 t.tv_sec = ms / 1000; 3704 t.tv_usec = (ms % 1000) * 1000; 3705 hz = tvtohz(&t); 3706 if (hz < 0) { 3707 hz = 0x7fffffff; 3708 } 3709 if (hz == 0) { 3710 hz = 1; 3711 } 3712 return (hz); 3713 } 3714