1 /*- 2 * Copyright (c) 1997-2006 by Matthew Jacob 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 /* 28 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 #include <dev/isp/isp_freebsd.h> 33 #include <sys/unistd.h> 34 #include <sys/kthread.h> 35 #include <machine/stdarg.h> /* for use by isp_prt below */ 36 #include <sys/conf.h> 37 #include <sys/module.h> 38 #include <sys/ioccom.h> 39 #include <dev/isp/isp_ioctl.h> 40 #if __FreeBSD_version >= 500000 41 #include <sys/sysctl.h> 42 #else 43 #include <sys/devicestat.h> 44 #endif 45 #include <cam/cam_periph.h> 46 #include <cam/cam_xpt_periph.h> 47 48 #if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025 49 #define CAM_NEW_TRAN_CODE 1 50 #endif 51 52 53 MODULE_VERSION(isp, 1); 54 MODULE_DEPEND(isp, cam, 1, 1, 1); 55 int isp_announced = 0; 56 int isp_fabric_hysteresis = 5; 57 int isp_loop_down_limit = 300; /* default loop down limit */ 58 int isp_change_is_bad = 0; /* "changed" devices are bad */ 59 int isp_quickboot_time = 15; /* don't wait more than N secs for loop up */ 60 int isp_gone_device_time = 30; /* grace time before reporting device lost */ 61 static const char *roles[4] = { 62 "(none)", "Target", "Initiator", "Target/Initiator" 63 }; 64 static const char prom3[] = 65 "PortID 0x%06x Departed from Target %u because of %s"; 66 67 static void isp_freeze_loopdown(ispsoftc_t *, char *); 68 static d_ioctl_t ispioctl; 69 static void isp_intr_enable(void *); 70 static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); 71 static void isp_poll(struct cam_sim *); 72 static timeout_t isp_watchdog; 73 static timeout_t isp_ldt; 74 static void isp_kthread(void *); 75 static void isp_action(struct cam_sim *, union ccb *); 76 77 #if __FreeBSD_version < 700000 78 ispfwfunc *isp_get_firmware_p = NULL; 79 #endif 80 81 #if __FreeBSD_version < 500000 82 #define ISP_CDEV_MAJOR 248 83 static struct cdevsw isp_cdevsw = { 84 /* open */ nullopen, 85 /* close */ nullclose, 86 /* read */ noread, 87 /* write */ nowrite, 88 /* ioctl */ ispioctl, 89 /* poll */ nopoll, 90 /* mmap */ nommap, 91 /* strategy */ nostrategy, 92 /* name */ "isp", 93 /* maj */ ISP_CDEV_MAJOR, 94 /* dump */ 
nodump, 95 /* psize */ nopsize, 96 /* flags */ D_TAPE, 97 }; 98 #define isp_sysctl_update(x) do { ; } while (0) 99 #else 100 static struct cdevsw isp_cdevsw = { 101 .d_version = D_VERSION, 102 #if __FreeBSD_version < 700037 103 .d_flags = D_NEEDGIANT, 104 #endif 105 .d_ioctl = ispioctl, 106 .d_name = "isp", 107 }; 108 static void isp_sysctl_update(ispsoftc_t *); 109 #endif 110 111 static ispsoftc_t *isplist = NULL; 112 113 void 114 isp_attach(ispsoftc_t *isp) 115 { 116 int primary, secondary; 117 struct ccb_setasync csa; 118 struct cam_devq *devq; 119 struct cam_sim *sim; 120 struct cam_path *path; 121 122 /* 123 * Establish (in case of 12X0) which bus is the primary. 124 */ 125 126 primary = 0; 127 secondary = 1; 128 129 /* 130 * Create the device queue for our SIM(s). 131 */ 132 devq = cam_simq_alloc(isp->isp_maxcmds); 133 if (devq == NULL) { 134 return; 135 } 136 137 /* 138 * Construct our SIM entry. 139 */ 140 sim = isp_sim_alloc(isp_action, isp_poll, "isp", isp, 141 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 142 if (sim == NULL) { 143 cam_simq_free(devq); 144 return; 145 } 146 147 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 148 isp->isp_osinfo.ehook.ich_arg = isp; 149 ISP_UNLOCK(isp); 150 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 151 ISP_LOCK(isp); 152 cam_sim_free(sim, TRUE); 153 isp_prt(isp, ISP_LOGERR, 154 "could not establish interrupt enable hook"); 155 return; 156 } 157 ISP_LOCK(isp); 158 159 if (xpt_bus_register(sim, isp->isp_dev, primary) != CAM_SUCCESS) { 160 cam_sim_free(sim, TRUE); 161 return; 162 } 163 164 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 165 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 166 xpt_bus_deregister(cam_sim_path(sim)); 167 cam_sim_free(sim, TRUE); 168 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 169 return; 170 } 171 172 xpt_setup_ccb(&csa.ccb_h, path, 5); 173 csa.ccb_h.func_code = XPT_SASYNC_CB; 174 csa.event_enable = AC_LOST_DEVICE; 175 csa.callback = isp_cam_async; 176 csa.callback_arg = sim; 177 xpt_action((union ccb *)&csa); 178 isp->isp_sim = sim; 179 isp->isp_path = path; 180 181 /* 182 * If we have a second channel, construct SIM entry for that. 
183 */ 184 if (IS_DUALBUS(isp)) { 185 sim = isp_sim_alloc(isp_action, isp_poll, "isp", isp, 186 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 187 if (sim == NULL) { 188 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 189 xpt_free_path(isp->isp_path); 190 cam_simq_free(devq); 191 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 192 return; 193 } 194 if (xpt_bus_register(sim, isp->isp_dev, secondary) != 195 CAM_SUCCESS) { 196 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 197 xpt_free_path(isp->isp_path); 198 cam_sim_free(sim, TRUE); 199 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 200 return; 201 } 202 203 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 204 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 205 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 206 xpt_free_path(isp->isp_path); 207 xpt_bus_deregister(cam_sim_path(sim)); 208 cam_sim_free(sim, TRUE); 209 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 210 return; 211 } 212 213 xpt_setup_ccb(&csa.ccb_h, path, 5); 214 csa.ccb_h.func_code = XPT_SASYNC_CB; 215 csa.event_enable = AC_LOST_DEVICE; 216 csa.callback = isp_cam_async; 217 csa.callback_arg = sim; 218 xpt_action((union ccb *)&csa); 219 isp->isp_sim2 = sim; 220 isp->isp_path2 = path; 221 } 222 223 /* 224 * Create device nodes 225 */ 226 ISP_UNLOCK(isp); 227 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 228 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 229 isp_sysctl_update(isp); 230 ISP_LOCK(isp); 231 232 if (isp->isp_role != ISP_ROLE_NONE) { 233 isp->isp_state = ISP_RUNSTATE; 234 ISP_ENABLE_INTS(isp); 235 } 236 if (isplist == NULL) { 237 isplist = isp; 238 } else { 239 ispsoftc_t *tmp = isplist; 240 while (tmp->isp_osinfo.next) { 241 tmp = tmp->isp_osinfo.next; 242 } 243 tmp->isp_osinfo.next = isp; 244 } 245 246 /* 247 * Create a kernel thread for fibre channel instances. 
248 */ 249 if (IS_FC(isp)) { 250 isp_callout_init(&isp->isp_osinfo.ldt); 251 isp_callout_init(&isp->isp_osinfo.gdt); 252 ISP_UNLOCK(isp); 253 #if __FreeBSD_version >= 500000 254 if (kproc_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 255 RFHIGHPID, 0, "%s: fc_thrd", 256 device_get_nameunit(isp->isp_dev))) 257 #else 258 if (kproc_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 259 "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) 260 #endif 261 { 262 ISP_LOCK(isp); 263 xpt_bus_deregister(cam_sim_path(sim)); 264 cam_sim_free(sim, TRUE); 265 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 266 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 267 return; 268 } 269 ISP_LOCK(isp); 270 /* 271 * We start by being "loop down" if we have an initiator role 272 */ 273 if (isp->isp_role & ISP_ROLE_INITIATOR) { 274 isp_freeze_loopdown(isp, "isp_attach"); 275 isp->isp_osinfo.ldt_running = 1; 276 callout_reset(&isp->isp_osinfo.ldt, 277 isp_quickboot_time * hz, isp_ldt, isp); 278 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 279 "Starting Initial Loop Down Timer"); 280 } 281 } 282 } 283 284 static void 285 isp_freeze_loopdown(ispsoftc_t *isp, char *msg) 286 { 287 if (isp->isp_osinfo.simqfrozen == 0) { 288 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 289 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 290 xpt_freeze_simq(isp->isp_sim, 1); 291 } else { 292 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 293 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 294 } 295 } 296 297 298 #if __FreeBSD_version < 500000 299 #define _DEV dev_t 300 #define _IOP struct proc 301 #else 302 #define _IOP struct thread 303 #define _DEV struct cdev * 304 #endif 305 306 static int 307 ispioctl(_DEV dev, u_long c, caddr_t addr, int flags, _IOP *td) 308 { 309 ispsoftc_t *isp; 310 int nr, retval = ENOTTY; 311 312 isp = isplist; 313 while (isp) { 314 if (dev2unit(dev) == device_get_unit(isp->isp_dev)) { 315 break; 316 } 317 isp = isp->isp_osinfo.next; 318 } 319 if (isp == NULL) { 320 return (ENXIO); 321 } 322 323 switch (c) { 324 #ifdef ISP_FW_CRASH_DUMP 325 case ISP_GET_FW_CRASH_DUMP: 326 if (IS_FC(isp)) { 327 uint16_t *ptr = FCPARAM(isp)->isp_dump_data; 328 size_t sz; 329 330 retval = 0; 331 if (IS_2200(isp)) { 332 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 333 } else { 334 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 335 } 336 if (ptr && *ptr) { 337 void *uaddr = *((void **) addr); 338 if (copyout(ptr, uaddr, sz)) { 339 retval = EFAULT; 340 } else { 341 *ptr = 0; 342 } 343 } else { 344 retval = ENXIO; 345 } 346 } 347 break; 348 case ISP_FORCE_CRASH_DUMP: 349 if (IS_FC(isp)) { 350 ISP_LOCK(isp); 351 isp_freeze_loopdown(isp, 352 "ispioctl(ISP_FORCE_CRASH_DUMP)"); 353 isp_fw_dump(isp); 354 isp_reinit(isp); 355 ISP_UNLOCK(isp); 356 retval = 0; 357 } 358 break; 359 #endif 360 case ISP_SDBLEV: 361 { 362 int olddblev = isp->isp_dblev; 363 isp->isp_dblev = *(int *)addr; 364 *(int *)addr = olddblev; 365 retval = 0; 366 break; 367 } 368 case ISP_GETROLE: 369 *(int *)addr = isp->isp_role; 370 retval = 0; 371 break; 372 case ISP_SETROLE: 373 nr = *(int *)addr; 374 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 375 retval = EINVAL; 376 break; 377 } 378 *(int *)addr = isp->isp_role; 379 isp->isp_role = nr; 380 /* FALLTHROUGH */ 381 case ISP_RESETHBA: 382 ISP_LOCK(isp); 383 isp_reinit(isp); 384 ISP_UNLOCK(isp); 385 retval = 0; 386 break; 387 case ISP_RESCAN: 388 if (IS_FC(isp)) { 389 ISP_LOCK(isp); 390 if (isp_fc_runstate(isp, 5 * 1000000)) { 391 retval = EIO; 392 } else { 393 retval = 0; 394 } 395 ISP_UNLOCK(isp); 
396 } 397 break; 398 case ISP_FC_LIP: 399 if (IS_FC(isp)) { 400 ISP_LOCK(isp); 401 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 402 retval = EIO; 403 } else { 404 retval = 0; 405 } 406 ISP_UNLOCK(isp); 407 } 408 break; 409 case ISP_FC_GETDINFO: 410 { 411 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 412 fcportdb_t *lp; 413 414 if (IS_SCSI(isp)) { 415 break; 416 } 417 if (ifc->loopid >= MAX_FC_TARG) { 418 retval = EINVAL; 419 break; 420 } 421 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 422 if (lp->state == FC_PORTDB_STATE_VALID) { 423 ifc->role = lp->roles; 424 ifc->loopid = lp->handle; 425 ifc->portid = lp->portid; 426 ifc->node_wwn = lp->node_wwn; 427 ifc->port_wwn = lp->port_wwn; 428 retval = 0; 429 } else { 430 retval = ENODEV; 431 } 432 break; 433 } 434 case ISP_GET_STATS: 435 { 436 isp_stats_t *sp = (isp_stats_t *) addr; 437 438 MEMZERO(sp, sizeof (*sp)); 439 sp->isp_stat_version = ISP_STATS_VERSION; 440 sp->isp_type = isp->isp_type; 441 sp->isp_revision = isp->isp_revision; 442 ISP_LOCK(isp); 443 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 444 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 445 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 446 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 447 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 448 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 449 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 450 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 451 ISP_UNLOCK(isp); 452 retval = 0; 453 break; 454 } 455 case ISP_CLR_STATS: 456 ISP_LOCK(isp); 457 isp->isp_intcnt = 0; 458 isp->isp_intbogus = 0; 459 isp->isp_intmboxc = 0; 460 isp->isp_intoasync = 0; 461 isp->isp_rsltccmplt = 0; 462 isp->isp_fphccmplt = 0; 463 isp->isp_rscchiwater = 0; 464 isp->isp_fpcchiwater = 0; 465 ISP_UNLOCK(isp); 466 retval = 0; 467 break; 468 case ISP_FC_GETHINFO: 469 { 470 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 471 MEMZERO(hba, sizeof (*hba)); 472 473 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 474 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 475 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 476 if (IS_FC(isp)) { 477 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 478 hba->fc_scsi_supported = 1; 479 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 480 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 481 hba->nvram_node_wwn = FCPARAM(isp)->isp_wwnn_nvram; 482 hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram; 483 hba->active_node_wwn = ISP_NODEWWN(isp); 484 hba->active_port_wwn = ISP_PORTWWN(isp); 485 } 486 retval = 0; 487 break; 488 } 489 case ISP_TSK_MGMT: 490 { 491 int needmarker; 492 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; 493 uint16_t loopid; 494 mbreg_t mbs; 495 496 if (IS_SCSI(isp)) { 497 break; 498 } 499 500 memset(&mbs, 0, sizeof (mbs)); 501 needmarker = retval = 0; 502 loopid = fct->loopid; 503 if (FCPARAM(isp)->isp_2klogin == 0) { 504 loopid <<= 8; 505 } 506 switch (fct->action) { 507 case IPT_CLEAR_ACA: 508 mbs.param[0] = MBOX_CLEAR_ACA; 509 mbs.param[1] = loopid; 510 mbs.param[2] = fct->lun; 511 break; 512 case IPT_TARGET_RESET: 513 mbs.param[0] = MBOX_TARGET_RESET; 514 mbs.param[1] = loopid; 515 needmarker = 1; 516 break; 517 case IPT_LUN_RESET: 518 mbs.param[0] = MBOX_LUN_RESET; 519 mbs.param[1] = loopid; 520 mbs.param[2] = fct->lun; 521 needmarker = 1; 522 break; 523 case IPT_CLEAR_TASK_SET: 524 mbs.param[0] = MBOX_CLEAR_TASK_SET; 525 mbs.param[1] = loopid; 526 mbs.param[2] = fct->lun; 527 needmarker = 1; 528 break; 529 case IPT_ABORT_TASK_SET: 530 mbs.param[0] = 
MBOX_ABORT_TASK_SET; 531 mbs.param[1] = loopid; 532 mbs.param[2] = fct->lun; 533 needmarker = 1; 534 break; 535 default: 536 retval = EINVAL; 537 break; 538 } 539 if (retval == 0) { 540 if (needmarker) { 541 isp->isp_sendmarker |= 1; 542 } 543 ISP_LOCK(isp); 544 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); 545 ISP_UNLOCK(isp); 546 if (retval) 547 retval = EIO; 548 } 549 break; 550 } 551 default: 552 break; 553 } 554 return (retval); 555 } 556 557 #if __FreeBSD_version >= 500000 558 static void 559 isp_sysctl_update(ispsoftc_t *isp) 560 { 561 struct sysctl_ctx_list *ctx = 562 device_get_sysctl_ctx(isp->isp_osinfo.dev); 563 struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); 564 565 if (IS_SCSI(isp)) { 566 return; 567 } 568 569 snprintf(isp->isp_osinfo.sysctl_info.fc.wwnn, 570 sizeof (isp->isp_osinfo.sysctl_info.fc.wwnn), "0x%08x%08x", 571 (uint32_t) (ISP_NODEWWN(isp) >> 32), (uint32_t) ISP_NODEWWN(isp)); 572 573 snprintf(isp->isp_osinfo.sysctl_info.fc.wwpn, 574 sizeof (isp->isp_osinfo.sysctl_info.fc.wwpn), "0x%08x%08x", 575 (uint32_t) (ISP_PORTWWN(isp) >> 32), (uint32_t) ISP_PORTWWN(isp)); 576 577 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 578 "wwnn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwnn, 0, 579 "World Wide Node Name"); 580 581 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 582 "wwpn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwpn, 0, 583 "World Wide Port Name"); 584 585 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 586 "loop_down_limit", 587 CTLFLAG_RW, &isp->isp_osinfo.loop_down_limit, 0, 588 "How long to wait for loop to come back up"); 589 590 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 591 "gone_device_time", 592 CTLFLAG_RW, &isp->isp_osinfo.gone_device_time, 0, 593 "How long to wait for a device to reappear"); 594 } 595 #endif 596 597 static void 598 isp_intr_enable(void *arg) 599 { 600 ispsoftc_t *isp = arg; 601 ISP_LOCK(isp); 602 if (isp->isp_role != ISP_ROLE_NONE) { 603 ISP_ENABLE_INTS(isp); 604 } 605 ISP_UNLOCK(isp); 606 /* Release our hook so that the boot can continue. 
*/ 607 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 608 } 609 610 /* 611 * Put the target mode functions here, because some are inlines 612 */ 613 614 #ifdef ISP_TARGET_MODE 615 616 static __inline int is_lun_enabled(ispsoftc_t *, int, lun_id_t); 617 static __inline int are_any_luns_enabled(ispsoftc_t *, int); 618 static __inline tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); 619 static __inline void rls_lun_statep(ispsoftc_t *, tstate_t *); 620 static __inline atio_private_data_t *isp_get_atpd(ispsoftc_t *, int); 621 static cam_status 622 create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); 623 static void destroy_lun_state(ispsoftc_t *, tstate_t *); 624 static int isp_en_lun(ispsoftc_t *, union ccb *); 625 static void isp_ledone(ispsoftc_t *, lun_entry_t *); 626 static cam_status isp_abort_tgt_ccb(ispsoftc_t *, union ccb *); 627 static timeout_t isp_refire_putback_atio; 628 static void isp_complete_ctio(union ccb *); 629 static void isp_target_putback_atio(union ccb *); 630 static void isp_target_start_ctio(ispsoftc_t *, union ccb *); 631 static int isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); 632 static int isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); 633 static int isp_handle_platform_ctio(ispsoftc_t *, void *); 634 static int isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *); 635 static int isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *); 636 637 static __inline int 638 is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun) 639 { 640 tstate_t *tptr; 641 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 642 if (tptr == NULL) { 643 return (0); 644 } 645 do { 646 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { 647 return (1); 648 } 649 } while ((tptr = tptr->next) != NULL); 650 return (0); 651 } 652 653 static __inline int 654 are_any_luns_enabled(ispsoftc_t *isp, int port) 655 { 656 int lo, hi; 657 if (IS_DUALBUS(isp)) { 658 lo = (port * (LUN_HASH_SIZE >> 1)); 659 hi = lo + (LUN_HASH_SIZE >> 1); 660 } else { 661 lo = 0; 662 hi = LUN_HASH_SIZE; 663 } 664 for (lo = 0; lo < hi; lo++) { 665 if (isp->isp_osinfo.lun_hash[lo]) { 666 return (1); 667 } 668 } 669 return (0); 670 } 671 672 static __inline tstate_t * 673 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) 674 { 675 tstate_t *tptr = NULL; 676 677 if (lun == CAM_LUN_WILDCARD) { 678 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 679 tptr = &isp->isp_osinfo.tsdflt[bus]; 680 tptr->hold++; 681 return (tptr); 682 } 683 return (NULL); 684 } else { 685 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 686 if (tptr == NULL) { 687 return (NULL); 688 } 689 } 690 691 do { 692 if (tptr->lun == lun && tptr->bus == bus) { 693 tptr->hold++; 694 return (tptr); 695 } 696 } while ((tptr = tptr->next) != NULL); 697 return (tptr); 698 } 699 700 static __inline void 701 rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr) 702 { 703 if (tptr->hold) 704 tptr->hold--; 705 } 706 707 static __inline atio_private_data_t * 708 isp_get_atpd(ispsoftc_t *isp, int tag) 709 { 710 atio_private_data_t *atp; 711 for (atp = isp->isp_osinfo.atpdp; 712 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 713 if (atp->tag == tag) 714 return (atp); 715 } 716 return (NULL); 717 } 718 719 static cam_status 720 create_lun_state(ispsoftc_t *isp, int bus, 721 struct cam_path *path, tstate_t **rslt) 722 { 723 cam_status status; 724 lun_id_t lun; 725 int hfx; 726 tstate_t *tptr, *new; 727 728 lun = xpt_path_lun_id(path); 729 if (lun >= ISP_MAX_LUNS(isp)) { 730 return 
(CAM_LUN_INVALID); 731 } 732 if (is_lun_enabled(isp, bus, lun)) { 733 return (CAM_LUN_ALRDY_ENA); 734 } 735 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 736 if (new == NULL) { 737 return (CAM_RESRC_UNAVAIL); 738 } 739 740 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 741 xpt_path_target_id(path), xpt_path_lun_id(path)); 742 if (status != CAM_REQ_CMP) { 743 free(new, M_DEVBUF); 744 return (status); 745 } 746 new->bus = bus; 747 new->lun = lun; 748 SLIST_INIT(&new->atios); 749 SLIST_INIT(&new->inots); 750 new->hold = 1; 751 752 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 753 tptr = isp->isp_osinfo.lun_hash[hfx]; 754 if (tptr == NULL) { 755 isp->isp_osinfo.lun_hash[hfx] = new; 756 } else { 757 while (tptr->next) 758 tptr = tptr->next; 759 tptr->next = new; 760 } 761 *rslt = new; 762 return (CAM_REQ_CMP); 763 } 764 765 static __inline void 766 destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr) 767 { 768 int hfx; 769 tstate_t *lw, *pw; 770 771 if (tptr->hold) { 772 return; 773 } 774 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 775 pw = isp->isp_osinfo.lun_hash[hfx]; 776 if (pw == NULL) { 777 return; 778 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 779 isp->isp_osinfo.lun_hash[hfx] = pw->next; 780 } else { 781 lw = pw; 782 pw = lw->next; 783 while (pw) { 784 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 785 lw->next = pw->next; 786 break; 787 } 788 lw = pw; 789 pw = pw->next; 790 } 791 if (pw == NULL) { 792 return; 793 } 794 } 795 free(tptr, M_DEVBUF); 796 } 797 798 /* 799 * Enable luns. 800 */ 801 static int 802 isp_en_lun(ispsoftc_t *isp, union ccb *ccb) 803 { 804 struct ccb_en_lun *cel = &ccb->cel; 805 tstate_t *tptr = NULL; 806 uint32_t seq; 807 int bus, cmd, av, wildcard, tm_on; 808 lun_id_t lun; 809 target_id_t tgt; 810 811 bus = XS_CHANNEL(ccb); 812 if (bus > 1) { 813 xpt_print(ccb->ccb_h.path, "illegal bus %d\n", bus); 814 ccb->ccb_h.status = CAM_PATH_INVALID; 815 return (-1); 816 } 817 tgt = ccb->ccb_h.target_id; 818 lun = ccb->ccb_h.target_lun; 819 820 if (isp->isp_dblev & ISP_LOGTDEBUG0) { 821 xpt_print(ccb->ccb_h.path, "%sabling lun 0x%x on channel %d\n", 822 cel->enable? "en" : "dis", lun, bus); 823 } 824 825 if ((lun != CAM_LUN_WILDCARD) && 826 (lun >= (lun_id_t) isp->isp_maxluns)) { 827 ccb->ccb_h.status = CAM_LUN_INVALID; 828 return (-1); 829 } 830 831 if (IS_SCSI(isp)) { 832 sdparam *sdp = isp->isp_param; 833 sdp += bus; 834 if (tgt != CAM_TARGET_WILDCARD && 835 tgt != sdp->isp_initiator_id) { 836 ccb->ccb_h.status = CAM_TID_INVALID; 837 return (-1); 838 } 839 } else { 840 /* 841 * There's really no point in doing this yet w/o multi-tid 842 * capability. Even then, it's problematic. 843 */ 844 #if 0 845 if (tgt != CAM_TARGET_WILDCARD && 846 tgt != FCPARAM(isp)->isp_iid) { 847 ccb->ccb_h.status = CAM_TID_INVALID; 848 return (-1); 849 } 850 #endif 851 /* 852 * This is as a good a place as any to check f/w capabilities. 853 */ 854 if (FCPARAM(isp)->isp_tmode == 0) { 855 xpt_print(ccb->ccb_h.path, 856 "firmware does not support target mode\n"); 857 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 858 return (-1); 859 } 860 /* 861 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 862 * XXX: dork with our already fragile enable/disable code. 
863 */ 864 if (FCPARAM(isp)->isp_sccfw == 0) { 865 xpt_print(ccb->ccb_h.path, 866 "firmware not SCCLUN capable\n"); 867 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 868 return (-1); 869 } 870 } 871 872 if (tgt == CAM_TARGET_WILDCARD) { 873 if (lun == CAM_LUN_WILDCARD) { 874 wildcard = 1; 875 } else { 876 ccb->ccb_h.status = CAM_LUN_INVALID; 877 return (-1); 878 } 879 } else { 880 wildcard = 0; 881 } 882 883 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0; 884 885 /* 886 * Next check to see whether this is a target/lun wildcard action. 887 * 888 * If so, we know that we can accept commands for luns that haven't 889 * been enabled yet and send them upstream. Otherwise, we have to 890 * handle them locally (if we see them at all). 891 */ 892 893 if (wildcard) { 894 tptr = &isp->isp_osinfo.tsdflt[bus]; 895 if (cel->enable) { 896 if (tm_on) { 897 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 898 return (-1); 899 } 900 ccb->ccb_h.status = 901 xpt_create_path(&tptr->owner, NULL, 902 xpt_path_path_id(ccb->ccb_h.path), 903 xpt_path_target_id(ccb->ccb_h.path), 904 xpt_path_lun_id(ccb->ccb_h.path)); 905 if (ccb->ccb_h.status != CAM_REQ_CMP) { 906 return (-1); 907 } 908 SLIST_INIT(&tptr->atios); 909 SLIST_INIT(&tptr->inots); 910 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 911 } else { 912 if (tm_on == 0) { 913 ccb->ccb_h.status = CAM_REQ_CMP; 914 return (-1); 915 } 916 if (tptr->hold) { 917 ccb->ccb_h.status = CAM_SCSI_BUSY; 918 return (-1); 919 } 920 xpt_free_path(tptr->owner); 921 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 922 } 923 } 924 925 /* 926 * Now check to see whether this bus needs to be 927 * enabled/disabled with respect to target mode. 928 */ 929 av = bus << 31; 930 if (cel->enable && tm_on == 0) { 931 av |= ENABLE_TARGET_FLAG; 932 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 933 if (av) { 934 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 935 if (wildcard) { 936 isp->isp_osinfo.tmflags[bus] &= 937 ~TM_WILDCARD_ENABLED; 938 xpt_free_path(tptr->owner); 939 } 940 return (-1); 941 } 942 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 943 xpt_print(ccb->ccb_h.path, "Target Mode Enabled\n"); 944 } else if (cel->enable == 0 && tm_on && wildcard) { 945 if (are_any_luns_enabled(isp, bus)) { 946 ccb->ccb_h.status = CAM_SCSI_BUSY; 947 return (-1); 948 } 949 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 950 if (av) { 951 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 952 return (-1); 953 } 954 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 955 xpt_print(ccb->ccb_h.path, "Target Mode Disabled\n"); 956 } 957 958 if (wildcard) { 959 ccb->ccb_h.status = CAM_REQ_CMP; 960 return (-1); 961 } 962 963 /* 964 * Find an empty slot 965 */ 966 for (seq = 0; seq < NLEACT; seq++) { 967 if (isp->isp_osinfo.leact[seq] == 0) { 968 break; 969 } 970 } 971 if (seq >= NLEACT) { 972 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 973 return (-1); 974 975 } 976 isp->isp_osinfo.leact[seq] = ccb; 977 978 if (cel->enable) { 979 ccb->ccb_h.status = 980 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 981 if (ccb->ccb_h.status != CAM_REQ_CMP) { 982 isp->isp_osinfo.leact[seq] = 0; 983 return (-1); 984 } 985 } else { 986 tptr = get_lun_statep(isp, bus, lun); 987 if (tptr == NULL) { 988 ccb->ccb_h.status = CAM_LUN_INVALID; 989 return (-1); 990 } 991 } 992 993 if (cel->enable) { 994 int c, n, ulun = lun; 995 996 cmd = RQSTYPE_ENABLE_LUN; 997 c = DFLT_CMND_CNT; 998 n = DFLT_INOT_CNT; 999 if (IS_FC(isp) && lun != 0) { 1000 cmd = RQSTYPE_MODIFY_LUN; 1001 n = 0; 1002 /* 1003 * For SCC firmware, we only deal with setting 
1004 * (enabling or modifying) lun 0. 1005 */ 1006 ulun = 0; 1007 } 1008 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1009 rls_lun_statep(isp, tptr); 1010 ccb->ccb_h.status = CAM_REQ_INPROG; 1011 return (seq); 1012 } 1013 } else { 1014 int c, n, ulun = lun; 1015 1016 cmd = -RQSTYPE_MODIFY_LUN; 1017 c = DFLT_CMND_CNT; 1018 n = DFLT_INOT_CNT; 1019 if (IS_FC(isp) && lun != 0) { 1020 n = 0; 1021 /* 1022 * For SCC firmware, we only deal with setting 1023 * (enabling or modifying) lun 0. 1024 */ 1025 ulun = 0; 1026 } 1027 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1028 rls_lun_statep(isp, tptr); 1029 ccb->ccb_h.status = CAM_REQ_INPROG; 1030 return (seq); 1031 } 1032 } 1033 rls_lun_statep(isp, tptr); 1034 xpt_print(ccb->ccb_h.path, "isp_lun_cmd failed\n"); 1035 isp->isp_osinfo.leact[seq] = 0; 1036 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1037 return (-1); 1038 } 1039 1040 static void 1041 isp_ledone(ispsoftc_t *isp, lun_entry_t *lep) 1042 { 1043 const char lfmt[] = "now %sabled for target mode\n"; 1044 union ccb *ccb; 1045 uint32_t seq; 1046 tstate_t *tptr; 1047 int av; 1048 struct ccb_en_lun *cel; 1049 1050 seq = lep->le_reserved - 1; 1051 if (seq >= NLEACT) { 1052 isp_prt(isp, ISP_LOGERR, 1053 "seq out of range (%u) in isp_ledone", seq); 1054 return; 1055 } 1056 ccb = isp->isp_osinfo.leact[seq]; 1057 if (ccb == 0) { 1058 isp_prt(isp, ISP_LOGERR, 1059 "no ccb for seq %u in isp_ledone", seq); 1060 return; 1061 } 1062 cel = &ccb->cel; 1063 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); 1064 if (tptr == NULL) { 1065 xpt_print(ccb->ccb_h.path, "null tptr in isp_ledone\n"); 1066 isp->isp_osinfo.leact[seq] = 0; 1067 return; 1068 } 1069 1070 if (lep->le_status != LUN_OK) { 1071 xpt_print(ccb->ccb_h.path, 1072 "ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status); 1073 err: 1074 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1075 rls_lun_statep(isp, tptr); 1076 isp->isp_osinfo.leact[seq] = 0; 1077 xpt_done(ccb); 1078 return; 1079 } else { 1080 isp_prt(isp, ISP_LOGTDEBUG0, 1081 "isp_ledone: ENABLE/MODIFY done okay"); 1082 } 1083 1084 1085 if (cel->enable) { 1086 ccb->ccb_h.status = CAM_REQ_CMP; 1087 xpt_print(ccb->ccb_h.path, lfmt, "en"); 1088 rls_lun_statep(isp, tptr); 1089 isp->isp_osinfo.leact[seq] = 0; 1090 xpt_done(ccb); 1091 return; 1092 } 1093 1094 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) { 1095 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb), 1096 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) { 1097 xpt_print(ccb->ccb_h.path, 1098 "isp_ledone: isp_lun_cmd failed\n"); 1099 goto err; 1100 } 1101 rls_lun_statep(isp, tptr); 1102 return; 1103 } 1104 1105 xpt_print(ccb->ccb_h.path, lfmt, "dis"); 1106 rls_lun_statep(isp, tptr); 1107 destroy_lun_state(isp, tptr); 1108 ccb->ccb_h.status = CAM_REQ_CMP; 1109 isp->isp_osinfo.leact[seq] = 0; 1110 xpt_done(ccb); 1111 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) { 1112 int bus = XS_CHANNEL(ccb); 1113 av = bus << 31; 1114 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1115 if (av) { 1116 isp_prt(isp, ISP_LOGWARN, 1117 "disable target mode on channel %d failed", bus); 1118 } 1119 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1120 } 1121 } 1122 1123 1124 static cam_status 1125 isp_abort_tgt_ccb(ispsoftc_t *isp, union ccb *ccb) 1126 { 1127 tstate_t *tptr; 1128 struct ccb_hdr_slist *lp; 1129 struct ccb_hdr *curelm; 1130 int found, *ctr; 1131 union ccb *accb = ccb->cab.abort_ccb; 1132 1133 xpt_print(ccb->ccb_h.path, "aborting ccb %p\n", accb); 1134 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 
1135 int badpath = 0; 1136 if (IS_FC(isp) && (accb->ccb_h.target_id != 1137 ((fcparam *) isp->isp_param)->isp_loopid)) { 1138 badpath = 1; 1139 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 1140 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 1141 badpath = 1; 1142 } 1143 if (badpath) { 1144 /* 1145 * Being restrictive about target ids is really about 1146 * making sure we're aborting for the right multi-tid 1147 * path. This doesn't really make much sense at present. 1148 */ 1149 #if 0 1150 return (CAM_PATH_INVALID); 1151 #endif 1152 } 1153 } 1154 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); 1155 if (tptr == NULL) { 1156 xpt_print(ccb->ccb_h.path, "can't get statep\n"); 1157 return (CAM_PATH_INVALID); 1158 } 1159 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 1160 lp = &tptr->atios; 1161 ctr = &tptr->atio_count; 1162 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 1163 lp = &tptr->inots; 1164 ctr = &tptr->inot_count; 1165 } else { 1166 rls_lun_statep(isp, tptr); 1167 xpt_print(ccb->ccb_h.path, "bad function code %d\n", 1168 accb->ccb_h.func_code); 1169 return (CAM_UA_ABORT); 1170 } 1171 curelm = SLIST_FIRST(lp); 1172 found = 0; 1173 if (curelm == &accb->ccb_h) { 1174 found = 1; 1175 SLIST_REMOVE_HEAD(lp, sim_links.sle); 1176 } else { 1177 while(curelm != NULL) { 1178 struct ccb_hdr *nextelm; 1179 1180 nextelm = SLIST_NEXT(curelm, sim_links.sle); 1181 if (nextelm == &accb->ccb_h) { 1182 found = 1; 1183 SLIST_NEXT(curelm, sim_links.sle) = 1184 SLIST_NEXT(nextelm, sim_links.sle); 1185 break; 1186 } 1187 curelm = nextelm; 1188 } 1189 } 1190 rls_lun_statep(isp, tptr); 1191 if (found) { 1192 (*ctr)--; 1193 accb->ccb_h.status = CAM_REQ_ABORTED; 1194 xpt_done(accb); 1195 return (CAM_REQ_CMP); 1196 } 1197 xpt_print(ccb->ccb_h.path, "ccb %p not found\n", accb); 1198 return (CAM_PATH_INVALID); 1199 } 1200 1201 static void 1202 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb) 1203 { 1204 void *qe; 1205 struct ccb_scsiio *cso = &ccb->csio; 1206 uint32_t nxti, optr, handle; 1207 uint8_t local[QENTRY_LEN]; 1208 1209 1210 if (isp_getrqentry(isp, &nxti, &optr, &qe)) { 1211 xpt_print(ccb->ccb_h.path, 1212 "Request Queue Overflow in isp_target_start_ctio\n"); 1213 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1214 goto out; 1215 } 1216 memset(local, 0, QENTRY_LEN); 1217 1218 /* 1219 * We're either moving data or completing a command here. 
1220 */ 1221 1222 if (IS_FC(isp)) { 1223 atio_private_data_t *atp; 1224 ct2_entry_t *cto = (ct2_entry_t *) local; 1225 1226 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1227 cto->ct_header.rqs_entry_count = 1; 1228 if (FCPARAM(isp)->isp_2klogin) { 1229 ((ct2e_entry_t *)cto)->ct_iid = cso->init_id; 1230 } else { 1231 cto->ct_iid = cso->init_id; 1232 if (FCPARAM(isp)->isp_sccfw == 0) { 1233 cto->ct_lun = ccb->ccb_h.target_lun; 1234 } 1235 } 1236 1237 atp = isp_get_atpd(isp, cso->tag_id); 1238 if (atp == NULL) { 1239 xpt_print(ccb->ccb_h.path, 1240 "cannot find private data adjunct for tag %x\n", 1241 cso->tag_id); 1242 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 1243 goto out; 1244 } 1245 1246 cto->ct_rxid = cso->tag_id; 1247 if (cso->dxfer_len == 0) { 1248 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; 1249 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1250 cto->ct_flags |= CT2_SENDSTATUS; 1251 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1252 cto->ct_resid = 1253 atp->orig_datalen - atp->bytes_xfered; 1254 if (cto->ct_resid < 0) { 1255 cto->rsp.m1.ct_scsi_status |= 1256 CT2_DATA_OVER; 1257 } else if (cto->ct_resid > 0) { 1258 cto->rsp.m1.ct_scsi_status |= 1259 CT2_DATA_UNDER; 1260 } 1261 } 1262 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { 1263 int m = min(cso->sense_len, MAXRESPLEN); 1264 memcpy(cto->rsp.m1.ct_resp, 1265 &cso->sense_data, m); 1266 cto->rsp.m1.ct_senselen = m; 1267 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1268 } 1269 } else { 1270 cto->ct_flags |= CT2_FLAG_MODE0; 1271 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1272 cto->ct_flags |= CT2_DATA_IN; 1273 } else { 1274 cto->ct_flags |= CT2_DATA_OUT; 1275 } 1276 cto->ct_reloff = atp->bytes_xfered; 1277 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 1278 cto->ct_flags |= CT2_SENDSTATUS; 1279 cto->rsp.m0.ct_scsi_status = cso->scsi_status; 1280 cto->ct_resid = 1281 atp->orig_datalen - 1282 (atp->bytes_xfered + cso->dxfer_len); 1283 if (cto->ct_resid < 0) { 1284 cto->rsp.m0.ct_scsi_status |= 1285 CT2_DATA_OVER; 1286 } else if (cto->ct_resid > 0) { 1287 cto->rsp.m0.ct_scsi_status |= 1288 CT2_DATA_UNDER; 1289 } 1290 } else { 1291 atp->last_xframt = cso->dxfer_len; 1292 } 1293 /* 1294 * If we're sending data and status back together, 1295 * we can't also send back sense data as well. 
1296 */ 1297 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1298 } 1299 1300 if (cto->ct_flags & CT2_SENDSTATUS) { 1301 isp_prt(isp, ISP_LOGTDEBUG0, 1302 "CTIO2[%x] STATUS %x origd %u curd %u resid %u", 1303 cto->ct_rxid, cso->scsi_status, atp->orig_datalen, 1304 cso->dxfer_len, cto->ct_resid); 1305 cto->ct_flags |= CT2_CCINCR; 1306 atp->state = ATPD_STATE_LAST_CTIO; 1307 } else { 1308 atp->state = ATPD_STATE_CTIO; 1309 } 1310 cto->ct_timeout = 10; 1311 } else { 1312 ct_entry_t *cto = (ct_entry_t *) local; 1313 1314 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1315 cto->ct_header.rqs_entry_count = 1; 1316 cto->ct_iid = cso->init_id; 1317 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 1318 cto->ct_tgt = ccb->ccb_h.target_id; 1319 cto->ct_lun = ccb->ccb_h.target_lun; 1320 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); 1321 if (AT_HAS_TAG(cso->tag_id)) { 1322 cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id); 1323 cto->ct_flags |= CT_TQAE; 1324 } 1325 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1326 cto->ct_flags |= CT_NODISC; 1327 } 1328 if (cso->dxfer_len == 0) { 1329 cto->ct_flags |= CT_NO_DATA; 1330 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1331 cto->ct_flags |= CT_DATA_IN; 1332 } else { 1333 cto->ct_flags |= CT_DATA_OUT; 1334 } 1335 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1336 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 1337 cto->ct_scsi_status = cso->scsi_status; 1338 cto->ct_resid = cso->resid; 1339 isp_prt(isp, ISP_LOGTDEBUG0, 1340 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", 1341 cto->ct_fwhandle, cso->scsi_status, cso->resid, 1342 cso->tag_id); 1343 } 1344 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1345 cto->ct_timeout = 10; 1346 } 1347 1348 if (isp_save_xs_tgt(isp, ccb, &handle)) { 1349 xpt_print(ccb->ccb_h.path, 1350 "No XFLIST pointers for isp_target_start_ctio\n"); 1351 XS_SETERR(ccb, CAM_REQUEUE_REQ); 1352 goto out; 1353 } 1354 1355 1356 /* 1357 * Call the dma setup routines for this entry (and any subsequent 1358 * CTIOs) if there's data to move, and then tell the f/w it's got 1359 * new things to play with. As with isp_start's usage of DMA setup, 1360 * any swizzling is done in the machine dependent layer. Because 1361 * of this, we put the request onto the queue area first in native 1362 * format. 
	 */

	if (IS_FC(isp)) {
		ct2_entry_t *cto = (ct2_entry_t *) local;
		cto->ct_syshandle = handle;
	} else {
		ct_entry_t *cto = (ct_entry_t *) local;
		cto->ct_syshandle = handle;
	}

	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
	case CMD_QUEUED:
		ISP_ADD_REQUEST(isp, nxti);
		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		return;

	case CMD_EAGAIN:
		XS_SETERR(ccb, CAM_REQUEUE_REQ);
		break;

	default:
		break;
	}
	isp_destroy_tgt_handle(isp, handle);

out:
	xpt_done(ccb);
}

static void
isp_refire_putback_atio(void *arg)
{
	int s = splcam();
	isp_target_putback_atio(arg);
	splx(s);
}

static void
isp_target_putback_atio(union ccb *ccb)
{
	ispsoftc_t *isp;
	struct ccb_scsiio *cso;
	uint32_t nxti, optr;
	void *qe;

	isp = XS_ISP(ccb);

	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		xpt_print(ccb->ccb_h.path,
		    "isp_target_putback_atio: Request Queue Overflow\n");
		(void) timeout(isp_refire_putback_atio, ccb, 10);
		return;
	}
	memset(qe, 0, QENTRY_LEN);
	cso = &ccb->csio;
	if (IS_FC(isp)) {
		at2_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at2_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		if (FCPARAM(isp)->isp_sccfw) {
			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = cso->tag_id;
		at->at_iid = cso->ccb_h.target_id;
		isp_put_atio2(isp, at, qe);
	} else {
		at_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = cso->init_id;
		at->at_iid |= XS_CHANNEL(ccb) << 7;
		at->at_tgt = cso->ccb_h.target_id;
		at->at_lun = cso->ccb_h.target_lun;
		at->at_status = CT_OK;
		at->at_tag_val = AT_GET_TAG(cso->tag_id);
		at->at_handle = AT_GET_HANDLE(cso->tag_id);
		isp_put_atio(isp, at, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, nxti);
	isp_complete_ctio(ccb);
}

static void
isp_complete_ctio(union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}
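/*
 * The routines above build and queue outgoing CTIOs and keep the firmware
 * stocked with ATIOs.  The handlers below process entries coming back from
 * the firmware: new commands (ATIO/ATIO2), CTIO completions, and immediate
 * notifies.
 */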
/*
 * Handle ATIO stuff that the generic code can't.
 * This means handling CDBs.
 */

static int
isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep)
{
	tstate_t *tptr;
	int status, bus, iswildcard;
	struct ccb_accept_tio *atiop;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 *
	 * If the DISCONNECTS DISABLED bit is set in the flags field,
	 * we're still connected on the SCSI bus.
	 */
	status = aep->at_status;
	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
		/*
		 * Bus Phase Sequence error. We should have sense data
		 * suggested by the f/w. I'm not sure quite yet what
		 * to do about this for CAM.
		 */
		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}
	if ((status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
		    status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	bus = GET_BUS_VAL(aep->at_iid);
	tptr = get_lun_statep(isp, bus, aep->at_lun);
	if (tptr == NULL) {
		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
		if (tptr == NULL) {
			/*
			 * Because we can't autofeed sense data back with
			 * a command for parallel SCSI, we can't give back
			 * a CHECK CONDITION. We'll give back a BUSY status
			 * instead. This works out okay because the only
			 * time we should, in fact, get this, is in the
			 * case that somebody configured us without the
			 * blackhole driver, so they get what they deserve.
			 */
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
			return (0);
		}
		iswildcard = 1;
	} else {
		iswildcard = 0;
	}

	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print(tptr->owner,
		    "no ATIOS for lun %d from initiator %d on channel %d\n",
		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
		if (aep->at_flags & AT_TQAE)
			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		else
			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		rls_lun_statep(isp, tptr);
		return (0);
	}
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	tptr->atio_count--;
	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
	    aep->at_lun, tptr->atio_count);
	if (iswildcard) {
		atiop->ccb_h.target_id = aep->at_tgt;
		atiop->ccb_h.target_lun = aep->at_lun;
	}
	if (aep->at_flags & AT_NODISC) {
		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
	} else {
		atiop->ccb_h.flags = 0;
	}

	if (status & QLTM_SVALID) {
		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
		atiop->sense_len = amt;
		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
	} else {
		atiop->sense_len = 0;
	}

	atiop->init_id = GET_IID_VAL(aep->at_iid);
	atiop->cdb_len = aep->at_cdblen;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	/*
	 * Construct a tag 'id' based upon tag value (which may be 0..255)
	 * and the handle (which we have to preserve).
	 */
	AT_MAKE_TAGID(atiop->tag_id, bus, device_get_unit(isp->isp_dev), aep);
	if (aep->at_flags & AT_TQAE) {
		atiop->tag_action = aep->at_tag_type;
		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
	}
	xpt_done((union ccb*)atiop);
	isp_prt(isp, ISP_LOGTDEBUG0,
	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
	    "nondisc" : "disconnecting");
	rls_lun_statep(isp, tptr);
	return (0);
}

static int
isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep)
{
	lun_id_t lun;
	tstate_t *tptr;
	struct ccb_accept_tio *atiop;
	atio_private_data_t *atp;

	/*
	 * The firmware status (except for the QLTM_SVALID bit)
	 * indicates why this ATIO was sent to us.
	 *
	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
	 */
	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
		isp_prt(isp, ISP_LOGWARN,
		    "bogus atio (0x%x) leaked to platform", aep->at_status);
		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
		return (0);
	}

	if (FCPARAM(isp)->isp_sccfw) {
		lun = aep->at_scclun;
	} else {
		lun = aep->at_lun;
	}
	tptr = get_lun_statep(isp, 0, lun);
	if (tptr == NULL) {
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "[0x%x] no state pointer for lun %d", aep->at_rxid, lun);
		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
		if (tptr == NULL) {
			isp_endcmd(isp, aep,
			    SCSI_STATUS_CHECK_COND | ECMD_SVALID |
			    (0x5 << 12) | (0x25 << 16), 0);
			return (0);
		}
	}

	atp = isp_get_atpd(isp, 0);
	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
	if (atiop == NULL || atp == NULL) {
		/*
		 * Because we can't autofeed sense data back with
		 * a command for parallel SCSI, we can't give back
		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
		 * instead. This works out okay because the only time we
		 * should, in fact, get this, is in the case that we've
		 * run out of ATIOS.
		 */
		xpt_print(tptr->owner,
		    "no %s for lun %d from initiator %d\n",
		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
		rls_lun_statep(isp, tptr);
		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
		return (0);
	}
	atp->state = ATPD_STATE_ATIO;
	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
	tptr->atio_count--;
	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
	    lun, tptr->atio_count);

	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
		atiop->ccb_h.target_id = FCPARAM(isp)->isp_loopid;
		atiop->ccb_h.target_lun = lun;
	}
	/*
	 * We don't get 'suggested' sense data as we do with SCSI cards.
	 */
	atiop->sense_len = 0;

	atiop->init_id = aep->at_iid;
	atiop->cdb_len = ATIO2_CDBLEN;
	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->tag_id = aep->at_rxid;
	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
	case ATIO2_TC_ATTR_SIMPLEQ:
		atiop->tag_action = MSG_SIMPLE_Q_TAG;
		break;
	case ATIO2_TC_ATTR_HEADOFQ:
		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ORDERED:
		atiop->tag_action = MSG_ORDERED_Q_TAG;
		break;
	case ATIO2_TC_ATTR_ACAQ:	/* ??
*/ 1678 case ATIO2_TC_ATTR_UNTAGGED: 1679 default: 1680 atiop->tag_action = 0; 1681 break; 1682 } 1683 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 1684 1685 atp->tag = atiop->tag_id; 1686 atp->lun = lun; 1687 atp->orig_datalen = aep->at_datalen; 1688 atp->last_xframt = 0; 1689 atp->bytes_xfered = 0; 1690 atp->state = ATPD_STATE_CAM; 1691 xpt_done((union ccb*)atiop); 1692 1693 isp_prt(isp, ISP_LOGTDEBUG0, 1694 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u", 1695 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid, 1696 lun, aep->at_taskflags, aep->at_datalen); 1697 rls_lun_statep(isp, tptr); 1698 return (0); 1699 } 1700 1701 static int 1702 isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) 1703 { 1704 union ccb *ccb; 1705 int sentstatus, ok, notify_cam, resid = 0; 1706 uint16_t tval; 1707 1708 /* 1709 * CTIO and CTIO2 are close enough.... 1710 */ 1711 1712 ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle); 1713 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio")); 1714 isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle); 1715 1716 if (IS_FC(isp)) { 1717 ct2_entry_t *ct = arg; 1718 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid); 1719 if (atp == NULL) { 1720 isp_prt(isp, ISP_LOGERR, 1721 "cannot find adjunct for %x after I/O", 1722 ct->ct_rxid); 1723 return (0); 1724 } 1725 sentstatus = ct->ct_flags & CT2_SENDSTATUS; 1726 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1727 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { 1728 ccb->ccb_h.status |= CAM_SENT_SENSE; 1729 } 1730 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1731 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { 1732 resid = ct->ct_resid; 1733 atp->bytes_xfered += (atp->last_xframt - resid); 1734 atp->last_xframt = 0; 1735 } 1736 if (sentstatus || !ok) { 1737 atp->tag = 0; 1738 } 1739 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, 1740 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", 1741 ct->ct_rxid, ct->ct_status, ct->ct_flags, 1742 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, 1743 resid, sentstatus? "FIN" : "MID"); 1744 tval = ct->ct_rxid; 1745 1746 /* XXX: should really come after isp_complete_ctio */ 1747 atp->state = ATPD_STATE_PDON; 1748 } else { 1749 ct_entry_t *ct = arg; 1750 sentstatus = ct->ct_flags & CT_SENDSTATUS; 1751 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 1752 /* 1753 * We *ought* to be able to get back to the original ATIO 1754 * here, but for some reason this gets lost. It's just as 1755 * well because it's squirrelled away as part of periph 1756 * private data. 1757 * 1758 * We can live without it as long as we continue to use 1759 * the auto-replenish feature for CTIOs. 1760 */ 1761 notify_cam = ct->ct_header.rqs_seqno & 0x1; 1762 if (ct->ct_status & QLTM_SVALID) { 1763 char *sp = (char *)ct; 1764 sp += CTIO_SENSE_OFFSET; 1765 ccb->csio.sense_len = 1766 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); 1767 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); 1768 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1769 } 1770 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 1771 resid = ct->ct_resid; 1772 } 1773 isp_prt(isp, ISP_LOGTDEBUG0, 1774 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s", 1775 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, 1776 ct->ct_status, ct->ct_flags, resid, 1777 sentstatus? 
"FIN" : "MID"); 1778 tval = ct->ct_fwhandle; 1779 } 1780 ccb->csio.resid += resid; 1781 1782 /* 1783 * We're here either because intermediate data transfers are done 1784 * and/or the final status CTIO (which may have joined with a 1785 * Data Transfer) is done. 1786 * 1787 * In any case, for this platform, the upper layers figure out 1788 * what to do next, so all we do here is collect status and 1789 * pass information along. Any DMA handles have already been 1790 * freed. 1791 */ 1792 if (notify_cam == 0) { 1793 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval); 1794 return (0); 1795 } 1796 1797 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", 1798 (sentstatus)? " FINAL " : "MIDTERM ", tval); 1799 1800 if (!ok) { 1801 isp_target_putback_atio(ccb); 1802 } else { 1803 isp_complete_ctio(ccb); 1804 1805 } 1806 return (0); 1807 } 1808 1809 static int 1810 isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inp) 1811 { 1812 return (0); /* XXXX */ 1813 } 1814 1815 static int 1816 isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp) 1817 { 1818 1819 switch (inp->in_status) { 1820 case IN_PORT_LOGOUT: 1821 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d", 1822 inp->in_iid); 1823 break; 1824 case IN_PORT_CHANGED: 1825 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d", 1826 inp->in_iid); 1827 break; 1828 case IN_GLOBAL_LOGO: 1829 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 1830 break; 1831 case IN_ABORT_TASK: 1832 { 1833 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid); 1834 struct ccb_immed_notify *inot = NULL; 1835 1836 if (atp) { 1837 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun); 1838 if (tptr) { 1839 inot = (struct ccb_immed_notify *) 1840 SLIST_FIRST(&tptr->inots); 1841 if (inot) { 1842 tptr->inot_count--; 1843 SLIST_REMOVE_HEAD(&tptr->inots, 1844 sim_links.sle); 1845 isp_prt(isp, ISP_LOGTDEBUG0, 1846 "Take FREE INOT count now %d", 1847 tptr->inot_count); 1848 } 1849 } 1850 isp_prt(isp, ISP_LOGWARN, 1851 "abort task RX_ID %x IID %d state %d", 1852 inp->in_seqid, inp->in_iid, atp->state); 1853 } else { 1854 isp_prt(isp, ISP_LOGWARN, 1855 "abort task RX_ID %x from iid %d, state unknown", 1856 inp->in_seqid, inp->in_iid); 1857 } 1858 if (inot) { 1859 inot->initiator_id = inp->in_iid; 1860 inot->sense_len = 0; 1861 inot->message_args[0] = MSG_ABORT_TAG; 1862 inot->message_args[1] = inp->in_seqid & 0xff; 1863 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff; 1864 inot->ccb_h.status = CAM_MESSAGE_RECV; 1865 xpt_done((union ccb *)inot); 1866 } 1867 break; 1868 } 1869 default: 1870 break; 1871 } 1872 return (0); 1873 } 1874 #endif 1875 1876 static void 1877 isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) 1878 { 1879 struct cam_sim *sim; 1880 ispsoftc_t *isp; 1881 1882 sim = (struct cam_sim *)cbarg; 1883 isp = (ispsoftc_t *) cam_sim_softc(sim); 1884 switch (code) { 1885 case AC_LOST_DEVICE: 1886 if (IS_SCSI(isp)) { 1887 uint16_t oflags, nflags; 1888 sdparam *sdp = isp->isp_param; 1889 int tgt; 1890 1891 tgt = xpt_path_target_id(path); 1892 if (tgt >= 0) { 1893 sdp += cam_sim_bus(sim); 1894 nflags = sdp->isp_devparam[tgt].nvrm_flags; 1895 #ifndef ISP_TARGET_MODE 1896 nflags &= DPARM_SAFE_DFLT; 1897 if (isp->isp_loaded_fw) { 1898 nflags |= DPARM_NARROW | DPARM_ASYNC; 1899 } 1900 #else 1901 nflags = DPARM_DEFAULT; 1902 #endif 1903 oflags = sdp->isp_devparam[tgt].goal_flags; 1904 sdp->isp_devparam[tgt].goal_flags = nflags; 1905 sdp->isp_devparam[tgt].dev_update = 1; 1906 isp->isp_update |= (1 << cam_sim_bus(sim)); 
				(void) isp_control(isp,
				    ISPCTL_UPDATE_PARAMS, NULL);
				sdp->isp_devparam[tgt].goal_flags = oflags;
			}
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}

static void
isp_poll(struct cam_sim *sim)
{
	ispsoftc_t *isp = cam_sim_softc(sim);
	uint32_t isr;
	uint16_t sema, mbox;

	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
		isp_intr(isp, isr, sema, mbox);
	}
}


static int isp_watchdog_work(ispsoftc_t *, XS_T *);

static int
isp_watchdog_work(ispsoftc_t *isp, XS_T *xs)
{
	uint32_t handle;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		uint32_t isr;
		uint16_t sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			return (1);
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			return (1);
		}

		XS_CMD_S_WDOG(xs);
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup for handle 0x%x", handle);
			isp_free_pcmd(isp, (union ccb *)xs);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, xs);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			xpt_print(xs->ccb_h.path,
			    "watchdog timeout for handle 0x%x\n", handle);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			XS_CMD_C_WDOG(xs);
			callout_reset(&PISP_PCMD((union ccb *)xs)->wdog, hz,
			    isp_watchdog, xs);
			XS_CMD_S_GRACE(xs);
			isp->isp_sendmarker |= 1 << XS_CHANNEL(xs);
		}
		return (1);
	}
	return (0);
}

static void
isp_watchdog(void *arg)
{
	ispsoftc_t *isp;
	XS_T *xs = arg;
	int r;

	for (r = 0, isp = isplist; isp != NULL; isp = isp->isp_osinfo.next) {
		ISP_LOCK(isp);
		r = isp_watchdog_work(isp, xs);
		ISP_UNLOCK(isp);
		if (r) {
			break;
		}
	}
	if (isp == NULL) {
		printf("isp_watchdog: nobody had %p active\n", arg);
	}
}
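/*
 * isp_make_here/isp_make_gone announce the arrival or loss of a Fibre
 * Channel target to CAM: a rescan is scheduled for a newly seen device,
 * and an AC_LOST_DEVICE async event is posted for a departed one.  On
 * kernels older than 6.0 they compile away to no-ops.
 */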
2027 */ 2028 ccb = xpt_alloc_ccb_nowait(); 2029 if (ccb == NULL) { 2030 isp_prt(isp, ISP_LOGWARN, "unable to alloc CCB for rescan"); 2031 return; 2032 } 2033 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, 2034 cam_sim_path(isp->isp_sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2035 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); 2036 xpt_free_ccb(ccb); 2037 return; 2038 } 2039 xpt_rescan(ccb); 2040 } 2041 2042 static void 2043 isp_make_gone(ispsoftc_t *isp, int tgt) 2044 { 2045 struct cam_path *tp; 2046 if (xpt_create_path(&tp, NULL, cam_sim_path(isp->isp_sim), tgt, 2047 CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 2048 xpt_async(AC_LOST_DEVICE, tp, NULL); 2049 xpt_free_path(tp); 2050 } 2051 } 2052 #else 2053 #define isp_make_here(isp, tgt) do { ; } while (0) 2054 #define isp_make_gone(isp, tgt) do { ; } while (0) 2055 #endif 2056 2057 2058 /* 2059 * Gone Device Timer Function- when we have decided that a device has gone 2060 * away, we wait a specific period of time prior to telling the OS it has 2061 * gone away. 2062 * 2063 * This timer function fires once a second and then scans the port database 2064 * for devices that are marked dead but still have a virtual target assigned. 2065 * We decrement a counter for that port database entry, and when it hits zero, 2066 * we tell the OS the device has gone away. 2067 */ 2068 static void 2069 isp_gdt(void *arg) 2070 { 2071 ispsoftc_t *isp = arg; 2072 fcportdb_t *lp; 2073 int dbidx, tgt, more_to_do = 0; 2074 2075 ISP_LOCK(isp); 2076 isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired"); 2077 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2078 lp = &FCPARAM(isp)->portdb[dbidx]; 2079 2080 if (lp->state != FC_PORTDB_STATE_ZOMBIE) { 2081 continue; 2082 } 2083 if (lp->ini_map_idx == 0) { 2084 continue; 2085 } 2086 if (lp->new_reserved == 0) { 2087 continue; 2088 } 2089 lp->new_reserved -= 1; 2090 if (lp->new_reserved != 0) { 2091 more_to_do++; 2092 continue; 2093 } 2094 tgt = lp->ini_map_idx - 1; 2095 FCPARAM(isp)->isp_ini_map[tgt] = 0; 2096 lp->ini_map_idx = 0; 2097 lp->state = FC_PORTDB_STATE_NIL; 2098 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, 2099 "Gone Device Timeout"); 2100 isp_make_gone(isp, tgt); 2101 } 2102 if (more_to_do) { 2103 isp->isp_osinfo.gdt_running = 1; 2104 callout_reset(&isp->isp_osinfo.gdt, hz, isp_gdt, isp); 2105 } else { 2106 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2107 "stopping Gone Device Timer"); 2108 isp->isp_osinfo.gdt_running = 0; 2109 } 2110 ISP_UNLOCK(isp); 2111 } 2112 2113 /* 2114 * Loop Down Timer Function- when the loop goes down, a timer is started, 2115 * and after it expires we come here and take all probational devices that 2116 * the OS knows about and tell the OS that they've gone away. 2117 * 2118 * We don't clear the devices out of our port database because, when the loop 2119 * comes back up, we have to do some actual cleanup with the chip at that 2120 * point (implicit PLOGO, e.g., to get the chip's port database state right). 2121 */ 2122 static void 2123 isp_ldt(void *arg) 2124 { 2125 ispsoftc_t *isp = arg; 2126 fcportdb_t *lp; 2127 int dbidx, tgt; 2128 2129 ISP_LOCK(isp); 2130 2131 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired"); 2132 2133 /* 2134 * Notify the OS of all targets that we now consider to have departed.
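	 * Only port database entries still in the PROBATIONAL state that have
	 * a target id mapped are announced here.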
2135 */ 2136 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2137 lp = &FCPARAM(isp)->portdb[dbidx]; 2138 2139 if (lp->state != FC_PORTDB_STATE_PROBATIONAL) { 2140 continue; 2141 } 2142 if (lp->ini_map_idx == 0) { 2143 continue; 2144 } 2145 2146 /* 2147 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST! 2148 */ 2149 2150 /* 2151 * Mark that we've announced that this device is gone.... 2152 */ 2153 lp->reserved = 1; 2154 2155 /* 2156 * but *don't* change the state of the entry. Just clear 2157 * any target id stuff and announce to CAM that the 2158 * device is gone. This way any necessary PLOGO stuff 2159 * will happen when loop comes back up. 2160 */ 2161 2162 tgt = lp->ini_map_idx - 1; 2163 FCPARAM(isp)->isp_ini_map[tgt] = 0; 2164 lp->ini_map_idx = 0; 2165 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt, 2166 "Loop Down Timeout"); 2167 isp_make_gone(isp, tgt); 2168 } 2169 2170 /* 2171 * The loop down timer has expired. Wake up the kthread 2172 * to notice that fact (or make it false). 2173 */ 2174 isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1; 2175 wakeup(ISP_KT_WCHAN(isp)); 2176 ISP_UNLOCK(isp); 2177 } 2178 2179 static void 2180 isp_kthread(void *arg) 2181 { 2182 ispsoftc_t *isp = arg; 2183 int slp = 0; 2184 #if __FreeBSD_version < 500000 2185 int s = splcam(); 2186 #elif __FreeBSD_version < 700037 2187 mtx_lock(&Giant); 2188 #else 2189 mtx_lock(&isp->isp_osinfo.lock); 2190 #endif 2191 /* 2192 * The first loop is for our usage where we have yet to have 2193 * gotten good fibre channel state. 2194 */ 2195 for (;;) { 2196 int wasfrozen, lb, lim; 2197 2198 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2199 "isp_kthread: checking FC state"); 2200 isp->isp_osinfo.mbox_sleep_ok = 1; 2201 lb = isp_fc_runstate(isp, 250000); 2202 isp->isp_osinfo.mbox_sleep_ok = 0; 2203 if (lb) { 2204 /* 2205 * Increment loop down time by the last sleep interval 2206 */ 2207 isp->isp_osinfo.loop_down_time += slp; 2208 2209 if (lb < 0) { 2210 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2211 "kthread: FC loop not up (down count %d)", 2212 isp->isp_osinfo.loop_down_time); 2213 } else { 2214 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2215 "kthread: FC got to %d (down count %d)", 2216 lb, isp->isp_osinfo.loop_down_time); 2217 } 2218 2219 2220 /* 2221 * If we've never seen loop up and we've waited longer 2222 * than quickboot time, or we've seen loop up but we've 2223 * waited longer than loop_down_limit, give up and go 2224 * to sleep until loop comes up. 2225 */ 2226 if (FCPARAM(isp)->loop_seen_once == 0) { 2227 lim = isp_quickboot_time; 2228 } else { 2229 lim = isp->isp_osinfo.loop_down_limit; 2230 } 2231 if (isp->isp_osinfo.loop_down_time >= lim) { 2232 isp_freeze_loopdown(isp, "loop limit hit"); 2233 slp = 0; 2234 } else if (isp->isp_osinfo.loop_down_time < 10) { 2235 slp = 1; 2236 } else if (isp->isp_osinfo.loop_down_time < 30) { 2237 slp = 5; 2238 } else if (isp->isp_osinfo.loop_down_time < 60) { 2239 slp = 10; 2240 } else if (isp->isp_osinfo.loop_down_time < 120) { 2241 slp = 20; 2242 } else { 2243 slp = 30; 2244 } 2245 2246 } else { 2247 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2248 "isp_kthread: FC state OK"); 2249 isp->isp_osinfo.loop_down_time = 0; 2250 slp = 0; 2251 } 2252 2253 /* 2254 * If we'd frozen the simq, unfreeze it now so that CAM 2255 * can start sending us commands. If the FC state isn't 2256 * okay yet, they'll hit that in isp_start which will 2257 * freeze the queue again. 
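		 * Note that SIMQFRZ_LOOPDOWN is only one of the possible freeze
		 * reasons; the simq is released only if no other freeze bits remain.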
2258 */ 2259 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; 2260 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; 2261 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { 2262 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2263 "isp_kthread: releasing simq"); 2264 xpt_release_simq(isp->isp_sim, 1); 2265 } 2266 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2267 "isp_kthread: sleep time %d", slp); 2268 #if __FreeBSD_version < 700037 2269 tsleep(ISP_KT_WCHAN(isp), PRIBIO, "ispf", slp * hz); 2270 #else 2271 msleep(ISP_KT_WCHAN(isp), &isp->isp_osinfo.lock, 2272 PRIBIO, "ispf", slp * hz); 2273 #endif 2274 /* 2275 * If slp is zero, we're waking up for the first time after 2276 * things have been okay. In this case, we set a deferral state 2277 * for all commands and delay hysteresis seconds before starting 2278 * the FC state evaluation. This gives the loop/fabric a chance 2279 * to settle. 2280 */ 2281 if (slp == 0 && isp->isp_osinfo.hysteresis) { 2282 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 2283 "isp_kthread: sleep hysteresis tick time %d", 2284 isp->isp_osinfo.hysteresis * hz); 2285 #if __FreeBSD_version < 700037 2286 (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT", 2287 (isp->isp_osinfo.hysteresis * hz)); 2288 #else 2289 (void) msleep(&isp_fabric_hysteresis, 2290 &isp->isp_osinfo.lock, PRIBIO, "ispT", 2291 (isp->isp_osinfo.hysteresis * hz)); 2292 #endif 2293 } 2294 } 2295 #if __FreeBSD_version < 500000 2296 splx(s); 2297 #elif __FreeBSD_version < 700037 2298 mtx_unlock(&Giant); 2299 #else 2300 mtx_unlock(&isp->isp_osinfo.lock); 2301 #endif 2302 } 2303 2304 #if __FreeBSD_version < 500000 2305 static void isp_action_wrk(struct cam_sim *, union ccb *); 2306 static void 2307 isp_action(struct cam_sim *sim, union ccb *ccb) 2308 { 2309 ispsoftc_t *isp = (ispsoftc_t *)cam_sim_softc(sim); 2310 ISP_LOCK(isp); 2311 isp_action_wrk(sim, ccb); 2312 ISP_UNLOCK(isp); 2313 } 2314 #define isp_action isp_action_wrk 2315 #endif 2316 2317 static void 2318 isp_action(struct cam_sim *sim, union ccb *ccb) 2319 { 2320 int bus, tgt, ts, error, lim; 2321 ispsoftc_t *isp; 2322 struct ccb_trans_settings *cts; 2323 2324 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2325 2326 isp = (ispsoftc_t *)cam_sim_softc(sim); 2327 if (isp->isp_state != ISP_RUNSTATE && 2328 ccb->ccb_h.func_code == XPT_SCSI_IO) { 2329 isp_init(isp); 2330 if (isp->isp_state != ISP_INITSTATE) { 2331 /* 2332 * Lie. Say it was a selection timeout. 2333 */ 2334 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 2335 xpt_freeze_devq(ccb->ccb_h.path, 1); 2336 xpt_done(ccb); 2337 return; 2338 } 2339 isp->isp_state = ISP_RUNSTATE; 2340 } 2341 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2342 ISP_PCMD(ccb) = NULL; 2343 2344 switch (ccb->ccb_h.func_code) { 2345 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2346 /* 2347 * Do a couple of preliminary checks... 
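		 * (reject CDB pointers that are physical addresses and, under
		 * DIAGNOSTIC, sanity check the target and lun numbers)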
2348 */ 2349 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2350 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2351 ccb->ccb_h.status = CAM_REQ_INVALID; 2352 xpt_done(ccb); 2353 break; 2354 } 2355 } 2356 #ifdef DIAGNOSTIC 2357 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2358 xpt_print(ccb->ccb_h.path, "invalid target\n"); 2359 ccb->ccb_h.status = CAM_PATH_INVALID; 2360 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2361 xpt_print(ccb->ccb_h.path, "invalid lun\n"); 2362 ccb->ccb_h.status = CAM_PATH_INVALID; 2363 } 2364 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2365 xpt_done(ccb); 2366 break; 2367 } 2368 #endif 2369 ccb->csio.scsi_status = SCSI_STATUS_OK; 2370 if (isp_get_pcmd(isp, ccb)) { 2371 isp_prt(isp, ISP_LOGWARN, "out of PCMDs"); 2372 cam_freeze_devq(ccb->ccb_h.path); 2373 cam_release_devq(ccb->ccb_h.path, 2374 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); 2375 xpt_done(ccb); 2376 break; 2377 } 2378 error = isp_start((XS_T *) ccb); 2379 switch (error) { 2380 case CMD_QUEUED: 2381 XS_CMD_S_CLEAR(ccb); 2382 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2383 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) { 2384 break; 2385 } 2386 ts = ccb->ccb_h.timeout; 2387 if (ts == CAM_TIME_DEFAULT) { 2388 ts = 60*1000; 2389 } 2390 ts = isp_mstohz(ts); 2391 callout_reset(&PISP_PCMD(ccb)->wdog, ts, 2392 isp_watchdog, ccb); 2393 break; 2394 case CMD_RQLATER: 2395 /* 2396 * Handle initial and subsequent loop down cases 2397 */ 2398 if (FCPARAM(isp)->loop_seen_once == 0) { 2399 lim = isp_quickboot_time; 2400 } else { 2401 lim = isp->isp_osinfo.loop_down_limit; 2402 } 2403 if (isp->isp_osinfo.loop_down_time >= lim) { 2404 isp_prt(isp, ISP_LOGDEBUG0, 2405 "%d.%d downtime (%d) > lim (%d)", 2406 XS_TGT(ccb), XS_LUN(ccb), 2407 isp->isp_osinfo.loop_down_time, lim); 2408 ccb->ccb_h.status = 2409 CAM_SEL_TIMEOUT|CAM_DEV_QFRZN; 2410 xpt_freeze_devq(ccb->ccb_h.path, 1); 2411 isp_free_pcmd(isp, ccb); 2412 xpt_done(ccb); 2413 break; 2414 } 2415 isp_prt(isp, ISP_LOGDEBUG0, 2416 "%d.%d retry later", XS_TGT(ccb), XS_LUN(ccb)); 2417 /* 2418 * Otherwise, retry in a while. 2419 */ 2420 cam_freeze_devq(ccb->ccb_h.path); 2421 cam_release_devq(ccb->ccb_h.path, 2422 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); 2423 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2424 isp_free_pcmd(isp, ccb); 2425 xpt_done(ccb); 2426 break; 2427 case CMD_EAGAIN: 2428 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2429 isp_free_pcmd(isp, ccb); 2430 xpt_done(ccb); 2431 break; 2432 case CMD_COMPLETE: 2433 isp_done((struct ccb_scsiio *) ccb); 2434 break; 2435 default: 2436 isp_prt(isp, ISP_LOGERR, 2437 "What's this? 
0x%x at %d in file %s", 2438 error, __LINE__, __FILE__); 2439 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2440 isp_free_pcmd(isp, ccb); 2441 xpt_done(ccb); 2442 } 2443 break; 2444 2445 #ifdef ISP_TARGET_MODE 2446 case XPT_EN_LUN: /* Enable LUN as a target */ 2447 { 2448 int seq, i; 2449 seq = isp_en_lun(isp, ccb); 2450 if (seq < 0) { 2451 xpt_done(ccb); 2452 break; 2453 } 2454 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) { 2455 uint32_t isr; 2456 uint16_t sema, mbox; 2457 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2458 isp_intr(isp, isr, sema, mbox); 2459 } 2460 DELAY(1000); 2461 } 2462 break; 2463 } 2464 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2465 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2466 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2467 { 2468 tstate_t *tptr = 2469 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2470 if (tptr == NULL) { 2471 ccb->ccb_h.status = CAM_LUN_INVALID; 2472 xpt_done(ccb); 2473 break; 2474 } 2475 ccb->ccb_h.sim_priv.entries[0].field = 0; 2476 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2477 ccb->ccb_h.flags = 0; 2478 2479 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2480 /* 2481 * Note that the command itself may not be done- 2482 * it may not even have had the first CTIO sent. 2483 */ 2484 tptr->atio_count++; 2485 isp_prt(isp, ISP_LOGTDEBUG0, 2486 "Put FREE ATIO, lun %d, count now %d", 2487 ccb->ccb_h.target_lun, tptr->atio_count); 2488 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2489 sim_links.sle); 2490 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2491 tptr->inot_count++; 2492 isp_prt(isp, ISP_LOGTDEBUG0, 2493 "Put FREE INOT, lun %d, count now %d", 2494 ccb->ccb_h.target_lun, tptr->inot_count); 2495 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2496 sim_links.sle); 2497 } else { 2498 isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");; 2499 } 2500 rls_lun_statep(isp, tptr); 2501 ccb->ccb_h.status = CAM_REQ_INPROG; 2502 break; 2503 } 2504 case XPT_CONT_TARGET_IO: 2505 { 2506 isp_target_start_ctio(isp, ccb); 2507 break; 2508 } 2509 #endif 2510 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2511 2512 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2513 tgt = ccb->ccb_h.target_id; 2514 tgt |= (bus << 16); 2515 2516 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2517 if (error) { 2518 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2519 } else { 2520 ccb->ccb_h.status = CAM_REQ_CMP; 2521 } 2522 xpt_done(ccb); 2523 break; 2524 case XPT_ABORT: /* Abort the specified CCB */ 2525 { 2526 union ccb *accb = ccb->cab.abort_ccb; 2527 switch (accb->ccb_h.func_code) { 2528 #ifdef ISP_TARGET_MODE 2529 case XPT_ACCEPT_TARGET_IO: 2530 case XPT_IMMED_NOTIFY: 2531 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2532 break; 2533 case XPT_CONT_TARGET_IO: 2534 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2535 ccb->ccb_h.status = CAM_UA_ABORT; 2536 break; 2537 #endif 2538 case XPT_SCSI_IO: 2539 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2540 if (error) { 2541 ccb->ccb_h.status = CAM_UA_ABORT; 2542 } else { 2543 ccb->ccb_h.status = CAM_REQ_CMP; 2544 } 2545 break; 2546 default: 2547 ccb->ccb_h.status = CAM_REQ_INVALID; 2548 break; 2549 } 2550 xpt_done(ccb); 2551 break; 2552 } 2553 #ifdef CAM_NEW_TRAN_CODE 2554 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2555 #else 2556 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2557 #endif 2558 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2559 cts = &ccb->cts; 2560 if (!IS_CURRENT_SETTINGS(cts)) { 2561 
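			/* Only current settings may be changed; requests to set user (saved) settings are rejected as invalid. */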
ccb->ccb_h.status = CAM_REQ_INVALID; 2562 xpt_done(ccb); 2563 break; 2564 } 2565 tgt = cts->ccb_h.target_id; 2566 if (IS_SCSI(isp)) { 2567 #ifndef CAM_NEW_TRAN_CODE 2568 sdparam *sdp = isp->isp_param; 2569 uint16_t *dptr; 2570 2571 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2572 2573 sdp += bus; 2574 /* 2575 * We always update (internally) from goal_flags 2576 * so any request to change settings just gets 2577 * vectored to that location. 2578 */ 2579 dptr = &sdp->isp_devparam[tgt].goal_flags; 2580 2581 /* 2582 * Note that these operations affect the 2583 * goal flags (goal_flags)- not 2584 * the current state flags. Then we mark 2585 * things so that the next operation to 2586 * this HBA will cause the update to occur. 2587 */ 2588 if (cts->valid & CCB_TRANS_DISC_VALID) { 2589 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2590 *dptr |= DPARM_DISC; 2591 } else { 2592 *dptr &= ~DPARM_DISC; 2593 } 2594 } 2595 if (cts->valid & CCB_TRANS_TQ_VALID) { 2596 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2597 *dptr |= DPARM_TQING; 2598 } else { 2599 *dptr &= ~DPARM_TQING; 2600 } 2601 } 2602 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2603 switch (cts->bus_width) { 2604 case MSG_EXT_WDTR_BUS_16_BIT: 2605 *dptr |= DPARM_WIDE; 2606 break; 2607 default: 2608 *dptr &= ~DPARM_WIDE; 2609 } 2610 } 2611 /* 2612 * Any SYNC RATE of nonzero and SYNC_OFFSET 2613 * of nonzero will cause us to go to the 2614 * selected (from NVRAM) maximum value for 2615 * this device. At a later point, we'll 2616 * allow finer control. 2617 */ 2618 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2619 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2620 (cts->sync_offset > 0)) { 2621 *dptr |= DPARM_SYNC; 2622 } else { 2623 *dptr &= ~DPARM_SYNC; 2624 } 2625 *dptr |= DPARM_SAFE_DFLT; 2626 #else 2627 struct ccb_trans_settings_scsi *scsi = 2628 &cts->proto_specific.scsi; 2629 struct ccb_trans_settings_spi *spi = 2630 &cts->xport_specific.spi; 2631 sdparam *sdp = isp->isp_param; 2632 uint16_t *dptr; 2633 2634 if (spi->valid == 0 && scsi->valid == 0) { 2635 ccb->ccb_h.status = CAM_REQ_CMP; 2636 xpt_done(ccb); 2637 break; 2638 } 2639 2640 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2641 sdp += bus; 2642 /* 2643 * We always update (internally) from goal_flags 2644 * so any request to change settings just gets 2645 * vectored to that location.
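			 * The dev_update flag and the per-bus bit in isp_update are set
			 * after these edits so that the next operation to this HBA
			 * causes the actual chip update to occur.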
2646 */ 2647 dptr = &sdp->isp_devparam[tgt].goal_flags; 2648 2649 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2650 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2651 *dptr |= DPARM_DISC; 2652 else 2653 *dptr &= ~DPARM_DISC; 2654 } 2655 2656 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2657 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2658 *dptr |= DPARM_TQING; 2659 else 2660 *dptr &= ~DPARM_TQING; 2661 } 2662 2663 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2664 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2665 *dptr |= DPARM_WIDE; 2666 else 2667 *dptr &= ~DPARM_WIDE; 2668 } 2669 2670 /* 2671 * XXX: FIX ME 2672 */ 2673 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2674 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2675 (spi->sync_period && spi->sync_offset)) { 2676 *dptr |= DPARM_SYNC; 2677 /* 2678 * XXX: CHECK FOR LEGALITY 2679 */ 2680 sdp->isp_devparam[tgt].goal_period = 2681 spi->sync_period; 2682 sdp->isp_devparam[tgt].goal_offset = 2683 spi->sync_offset; 2684 } else { 2685 *dptr &= ~DPARM_SYNC; 2686 } 2687 #endif 2688 isp_prt(isp, ISP_LOGDEBUG0, 2689 "SET (%d.%d.%d) to flags %x off %x per %x", 2690 bus, tgt, cts->ccb_h.target_lun, 2691 sdp->isp_devparam[tgt].goal_flags, 2692 sdp->isp_devparam[tgt].goal_offset, 2693 sdp->isp_devparam[tgt].goal_period); 2694 sdp->isp_devparam[tgt].dev_update = 1; 2695 isp->isp_update |= (1 << bus); 2696 } 2697 ccb->ccb_h.status = CAM_REQ_CMP; 2698 xpt_done(ccb); 2699 break; 2700 case XPT_GET_TRAN_SETTINGS: 2701 cts = &ccb->cts; 2702 tgt = cts->ccb_h.target_id; 2703 if (IS_FC(isp)) { 2704 #ifndef CAM_NEW_TRAN_CODE 2705 /* 2706 * a lot of normal SCSI things don't make sense. 2707 */ 2708 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2709 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2710 /* 2711 * How do you measure the width of a high 2712 * speed serial bus? Well, in bytes. 2713 * 2714 * Offset and period make no sense, though, so we set 2715 * (above) a 'base' transfer speed to be gigabit. 
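			 * (The CAM_NEW_TRAN_CODE path below reports the link speed
			 * explicitly via CTS_FC_VALID_SPEED and fc->bitrate instead.)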
2716 */ 2717 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2718 #else 2719 fcparam *fcp = isp->isp_param; 2720 struct ccb_trans_settings_scsi *scsi = 2721 &cts->proto_specific.scsi; 2722 struct ccb_trans_settings_fc *fc = 2723 &cts->xport_specific.fc; 2724 2725 cts->protocol = PROTO_SCSI; 2726 cts->protocol_version = SCSI_REV_2; 2727 cts->transport = XPORT_FC; 2728 cts->transport_version = 0; 2729 2730 scsi->valid = CTS_SCSI_VALID_TQ; 2731 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 2732 fc->valid = CTS_FC_VALID_SPEED; 2733 fc->bitrate = 100000; 2734 if (fcp->isp_gbspeed == 4 || fcp->isp_gbspeed == 2) 2735 fc->bitrate *= fcp->isp_gbspeed; 2736 if (tgt > 0 && tgt < MAX_FC_TARG) { 2737 fcportdb_t *lp = &fcp->portdb[tgt]; 2738 fc->wwnn = lp->node_wwn; 2739 fc->wwpn = lp->port_wwn; 2740 fc->port = lp->portid; 2741 fc->valid |= CTS_FC_VALID_WWNN | 2742 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2743 } 2744 #endif 2745 } else { 2746 #ifdef CAM_NEW_TRAN_CODE 2747 struct ccb_trans_settings_scsi *scsi = 2748 &cts->proto_specific.scsi; 2749 struct ccb_trans_settings_spi *spi = 2750 &cts->xport_specific.spi; 2751 #endif 2752 sdparam *sdp = isp->isp_param; 2753 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2754 uint16_t dval, pval, oval; 2755 2756 sdp += bus; 2757 2758 if (IS_CURRENT_SETTINGS(cts)) { 2759 sdp->isp_devparam[tgt].dev_refresh = 1; 2760 isp->isp_update |= (1 << bus); 2761 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2762 NULL); 2763 dval = sdp->isp_devparam[tgt].actv_flags; 2764 oval = sdp->isp_devparam[tgt].actv_offset; 2765 pval = sdp->isp_devparam[tgt].actv_period; 2766 } else { 2767 dval = sdp->isp_devparam[tgt].nvrm_flags; 2768 oval = sdp->isp_devparam[tgt].nvrm_offset; 2769 pval = sdp->isp_devparam[tgt].nvrm_period; 2770 } 2771 2772 #ifndef CAM_NEW_TRAN_CODE 2773 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2774 2775 if (dval & DPARM_DISC) { 2776 cts->flags |= CCB_TRANS_DISC_ENB; 2777 } 2778 if (dval & DPARM_TQING) { 2779 cts->flags |= CCB_TRANS_TAG_ENB; 2780 } 2781 if (dval & DPARM_WIDE) { 2782 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2783 } else { 2784 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2785 } 2786 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2787 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2788 2789 if ((dval & DPARM_SYNC) && oval != 0) { 2790 cts->sync_period = pval; 2791 cts->sync_offset = oval; 2792 cts->valid |= 2793 CCB_TRANS_SYNC_RATE_VALID | 2794 CCB_TRANS_SYNC_OFFSET_VALID; 2795 } 2796 #else 2797 cts->protocol = PROTO_SCSI; 2798 cts->protocol_version = SCSI_REV_2; 2799 cts->transport = XPORT_SPI; 2800 cts->transport_version = 2; 2801 2802 spi->valid = 0; 2803 scsi->valid = 0; 2804 spi->flags = 0; 2805 scsi->flags = 0; 2806 if (dval & DPARM_DISC) { 2807 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2808 } 2809 if ((dval & DPARM_SYNC) && oval && pval) { 2810 spi->sync_offset = oval; 2811 spi->sync_period = pval; 2812 } else { 2813 spi->sync_offset = 0; 2814 spi->sync_period = 0; 2815 } 2816 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2817 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2818 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2819 if (dval & DPARM_WIDE) { 2820 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2821 } else { 2822 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2823 } 2824 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2825 scsi->valid = CTS_SCSI_VALID_TQ; 2826 if (dval & DPARM_TQING) { 2827 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2828 } 2829 spi->valid |= CTS_SPI_VALID_DISC; 2830 } 2831 #endif 2832 isp_prt(isp, ISP_LOGDEBUG0, 2833 "GET %s (%d.%d.%d) to flags %x off %x per %x", 2834 
IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 2835 bus, tgt, cts->ccb_h.target_lun, dval, oval, pval); 2836 } 2837 ccb->ccb_h.status = CAM_REQ_CMP; 2838 xpt_done(ccb); 2839 break; 2840 2841 case XPT_CALC_GEOMETRY: 2842 #if __FreeBSD_version < 500000 2843 { 2844 struct ccb_calc_geometry *ccg; 2845 u_int32_t secs_per_cylinder; 2846 u_int32_t size_mb; 2847 2848 ccg = &ccb->ccg; 2849 if (ccg->block_size == 0) { 2850 ccb->ccb_h.status = CAM_REQ_INVALID; 2851 xpt_done(ccb); 2852 break; 2853 } 2854 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2855 if (size_mb > 1024) { 2856 ccg->heads = 255; 2857 ccg->secs_per_track = 63; 2858 } else { 2859 ccg->heads = 64; 2860 ccg->secs_per_track = 32; 2861 } 2862 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2863 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2864 ccb->ccb_h.status = CAM_REQ_CMP; 2865 xpt_done(ccb); 2866 break; 2867 } 2868 #else 2869 { 2870 cam_calc_geometry(&ccb->ccg, /*extended*/1); 2871 xpt_done(ccb); 2872 break; 2873 } 2874 #endif 2875 case XPT_RESET_BUS: /* Reset the specified bus */ 2876 bus = cam_sim_bus(sim); 2877 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2878 if (error) 2879 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2880 else { 2881 if (bootverbose) { 2882 xpt_print(ccb->ccb_h.path, "reset bus\n"); 2883 } 2884 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2885 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2886 else if (isp->isp_path != NULL) 2887 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2888 ccb->ccb_h.status = CAM_REQ_CMP; 2889 } 2890 xpt_done(ccb); 2891 break; 2892 2893 case XPT_TERM_IO: /* Terminate the I/O process */ 2894 ccb->ccb_h.status = CAM_REQ_INVALID; 2895 xpt_done(ccb); 2896 break; 2897 2898 case XPT_PATH_INQ: /* Path routing inquiry */ 2899 { 2900 struct ccb_pathinq *cpi = &ccb->cpi; 2901 2902 cpi->version_num = 1; 2903 #ifdef ISP_TARGET_MODE 2904 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2905 #else 2906 cpi->target_sprt = 0; 2907 #endif 2908 cpi->hba_eng_cnt = 0; 2909 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2910 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2911 cpi->bus_id = cam_sim_bus(sim); 2912 if (IS_FC(isp)) { 2913 cpi->hba_misc = PIM_NOBUSRESET; 2914 /* 2915 * Because our loop ID can shift from time to time, 2916 * make our initiator ID out of range of our bus. 2917 */ 2918 cpi->initiator_id = cpi->max_target + 1; 2919 2920 /* 2921 * Set base transfer capabilities for Fibre Channel. 2922 * Technically not correct because we don't know 2923 * what media we're running on top of- but we'll 2924 * look good if we always say 100MB/s. 
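			 * (If the chip reports 2Gb or 4Gb operation, the value is
			 * scaled by isp_gbspeed just below.)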
2925 */ 2926 cpi->base_transfer_speed = 100000; 2927 if (FCPARAM(isp)->isp_gbspeed == 4 || 2928 FCPARAM(isp)->isp_gbspeed == 2) 2929 cpi->base_transfer_speed *= 2930 FCPARAM(isp)->isp_gbspeed; 2931 cpi->hba_inquiry = PI_TAG_ABLE; 2932 #ifdef CAM_NEW_TRAN_CODE 2933 cpi->transport = XPORT_FC; 2934 cpi->transport_version = 0; 2935 #endif 2936 } else { 2937 sdparam *sdp = isp->isp_param; 2938 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2939 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2940 cpi->hba_misc = 0; 2941 cpi->initiator_id = sdp->isp_initiator_id; 2942 cpi->base_transfer_speed = 3300; 2943 #ifdef CAM_NEW_TRAN_CODE 2944 cpi->transport = XPORT_SPI; 2945 cpi->transport_version = 2; 2946 #endif 2947 } 2948 #ifdef CAM_NEW_TRAN_CODE 2949 cpi->protocol = PROTO_SCSI; 2950 cpi->protocol_version = SCSI_REV_2; 2951 #endif 2952 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2953 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2954 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2955 cpi->unit_number = cam_sim_unit(sim); 2956 cpi->ccb_h.status = CAM_REQ_CMP; 2957 xpt_done(ccb); 2958 break; 2959 } 2960 default: 2961 ccb->ccb_h.status = CAM_REQ_INVALID; 2962 xpt_done(ccb); 2963 break; 2964 } 2965 } 2966 2967 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 2968 2969 void 2970 isp_done(struct ccb_scsiio *sccb) 2971 { 2972 ispsoftc_t *isp = XS_ISP(sccb); 2973 2974 if (XS_NOERR(sccb)) 2975 XS_SETERR(sccb, CAM_REQ_CMP); 2976 2977 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2978 (sccb->scsi_status != SCSI_STATUS_OK)) { 2979 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2980 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 2981 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2982 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2983 } else { 2984 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2985 } 2986 } 2987 2988 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2989 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2990 isp_prt(isp, ISP_LOGDEBUG0, 2991 "target %d lun %d CAM status 0x%x SCSI status 0x%x", 2992 XS_TGT(sccb), XS_LUN(sccb), sccb->ccb_h.status, 2993 sccb->scsi_status); 2994 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2995 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2996 xpt_freeze_devq(sccb->ccb_h.path, 1); 2997 } 2998 } 2999 3000 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 3001 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3002 xpt_print(sccb->ccb_h.path, 3003 "cam completion status 0x%x\n", sccb->ccb_h.status); 3004 } 3005 3006 XS_CMD_S_DONE(sccb); 3007 if (XS_CMD_WDOG_P(sccb) == 0) { 3008 callout_stop(&PISP_PCMD(sccb)->wdog); 3009 if (XS_CMD_GRACE_P(sccb)) { 3010 isp_prt(isp, ISP_LOGDEBUG2, 3011 "finished command on borrowed time"); 3012 } 3013 XS_CMD_S_CLEAR(sccb); 3014 isp_free_pcmd(isp, (union ccb *) sccb); 3015 xpt_done((union ccb *) sccb); 3016 } 3017 } 3018 3019 int 3020 isp_async(ispsoftc_t *isp, ispasync_t cmd, void *arg) 3021 { 3022 int bus, rv = 0; 3023 static const char prom[] = 3024 "PortID 0x%06x handle 0x%x role %s %s\n" 3025 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3026 static const char prom2[] = 3027 "PortID 0x%06x handle 0x%x role %s %s tgt %u\n" 3028 " WWNN 0x%08x%08x WWPN 0x%08x%08x"; 3029 char *msg = NULL; 3030 target_id_t tgt; 3031 fcportdb_t *lp; 3032 struct cam_path *tmppath; 3033 3034 switch (cmd) { 3035 case ISPASYNC_NEW_TGT_PARAMS: 3036 { 3037 #ifdef CAM_NEW_TRAN_CODE 3038 struct ccb_trans_settings_scsi *scsi; 3039 struct ccb_trans_settings_spi *spi; 3040 #endif 3041 int flags, tgt; 3042 sdparam *sdp = 
isp->isp_param; 3043 struct ccb_trans_settings cts; 3044 3045 memset(&cts, 0, sizeof (struct ccb_trans_settings)); 3046 3047 tgt = *((int *)arg); 3048 bus = (tgt >> 16) & 0xffff; 3049 tgt &= 0xffff; 3050 sdp += bus; 3051 if (xpt_create_path(&tmppath, NULL, 3052 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), 3053 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 3054 isp_prt(isp, ISP_LOGWARN, 3055 "isp_async cannot make temp path for %d.%d", 3056 tgt, bus); 3057 rv = -1; 3058 break; 3059 } 3060 flags = sdp->isp_devparam[tgt].actv_flags; 3061 #ifdef CAM_NEW_TRAN_CODE 3062 cts.type = CTS_TYPE_CURRENT_SETTINGS; 3063 cts.protocol = PROTO_SCSI; 3064 cts.transport = XPORT_SPI; 3065 3066 scsi = &cts.proto_specific.scsi; 3067 spi = &cts.xport_specific.spi; 3068 3069 if (flags & DPARM_TQING) { 3070 scsi->valid |= CTS_SCSI_VALID_TQ; 3071 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3072 } 3073 3074 if (flags & DPARM_DISC) { 3075 spi->valid |= CTS_SPI_VALID_DISC; 3076 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3077 } 3078 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3079 if (flags & DPARM_WIDE) { 3080 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3081 } else { 3082 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3083 } 3084 if (flags & DPARM_SYNC) { 3085 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3086 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3087 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 3088 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 3089 } 3090 #else 3091 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 3092 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3093 if (flags & DPARM_DISC) { 3094 cts.flags |= CCB_TRANS_DISC_ENB; 3095 } 3096 if (flags & DPARM_TQING) { 3097 cts.flags |= CCB_TRANS_TAG_ENB; 3098 } 3099 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 3100 cts.bus_width = (flags & DPARM_WIDE)? 3101 MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT; 3102 cts.sync_period = sdp->isp_devparam[tgt].actv_period; 3103 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; 3104 if (flags & DPARM_SYNC) { 3105 cts.valid |= 3106 CCB_TRANS_SYNC_RATE_VALID | 3107 CCB_TRANS_SYNC_OFFSET_VALID; 3108 } 3109 #endif 3110 isp_prt(isp, ISP_LOGDEBUG2, 3111 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 3112 bus, tgt, sdp->isp_devparam[tgt].actv_period, 3113 sdp->isp_devparam[tgt].actv_offset, flags); 3114 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 3115 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 3116 xpt_free_path(tmppath); 3117 break; 3118 } 3119 case ISPASYNC_BUS_RESET: 3120 bus = *((int *)arg); 3121 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 3122 bus); 3123 if (bus > 0 && isp->isp_path2) { 3124 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 3125 } else if (isp->isp_path) { 3126 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 3127 } 3128 break; 3129 case ISPASYNC_LIP: 3130 if (msg == NULL) { 3131 msg = "LIP Received"; 3132 } 3133 /* FALLTHROUGH */ 3134 case ISPASYNC_LOOP_RESET: 3135 if (msg == NULL) { 3136 msg = "LOOP Reset"; 3137 } 3138 /* FALLTHROUGH */ 3139 case ISPASYNC_LOOP_DOWN: 3140 if (msg == NULL) { 3141 msg = "LOOP Down"; 3142 } 3143 if (isp->isp_path) { 3144 isp_freeze_loopdown(isp, msg); 3145 } 3146 if (isp->isp_osinfo.ldt_running == 0) { 3147 isp->isp_osinfo.ldt_running = 1; 3148 callout_reset(&isp->isp_osinfo.ldt, 3149 isp->isp_osinfo.loop_down_limit * hz, isp_ldt, isp); 3150 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3151 "starting Loop Down Timer"); 3152 } 3153 isp_prt(isp, ISP_LOGINFO, msg); 3154 break; 3155 case ISPASYNC_LOOP_UP: 3156 /* 3157 * Now we just note that Loop has come up.
We don't 3158 * actually do anything because we're waiting for a 3159 * Change Notify before activating the FC cleanup 3160 * thread to look at the state of the loop again. 3161 */ 3162 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 3163 break; 3164 case ISPASYNC_DEV_ARRIVED: 3165 lp = arg; 3166 lp->reserved = 0; 3167 if ((isp->isp_role & ISP_ROLE_INITIATOR) && 3168 (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) { 3169 int dbidx = lp - FCPARAM(isp)->portdb; 3170 int i; 3171 3172 for (i = 0; i < MAX_FC_TARG; i++) { 3173 if (i >= FL_ID && i <= SNS_ID) { 3174 continue; 3175 } 3176 if (FCPARAM(isp)->isp_ini_map[i] == 0) { 3177 break; 3178 } 3179 } 3180 if (i < MAX_FC_TARG) { 3181 FCPARAM(isp)->isp_ini_map[i] = dbidx + 1; 3182 lp->ini_map_idx = i + 1; 3183 } else { 3184 isp_prt(isp, ISP_LOGWARN, "out of target ids"); 3185 isp_dump_portdb(isp); 3186 } 3187 } 3188 if (lp->ini_map_idx) { 3189 tgt = lp->ini_map_idx - 1; 3190 isp_prt(isp, ISP_LOGCONFIG, prom2, 3191 lp->portid, lp->handle, 3192 roles[lp->roles], "arrived at", tgt, 3193 (uint32_t) (lp->node_wwn >> 32), 3194 (uint32_t) lp->node_wwn, 3195 (uint32_t) (lp->port_wwn >> 32), 3196 (uint32_t) lp->port_wwn); 3197 isp_make_here(isp, tgt); 3198 } else { 3199 isp_prt(isp, ISP_LOGCONFIG, prom, 3200 lp->portid, lp->handle, 3201 roles[lp->roles], "arrived", 3202 (uint32_t) (lp->node_wwn >> 32), 3203 (uint32_t) lp->node_wwn, 3204 (uint32_t) (lp->port_wwn >> 32), 3205 (uint32_t) lp->port_wwn); 3206 } 3207 break; 3208 case ISPASYNC_DEV_CHANGED: 3209 lp = arg; 3210 if (isp_change_is_bad) { 3211 lp->state = FC_PORTDB_STATE_NIL; 3212 if (lp->ini_map_idx) { 3213 tgt = lp->ini_map_idx - 1; 3214 FCPARAM(isp)->isp_ini_map[tgt] = 0; 3215 lp->ini_map_idx = 0; 3216 isp_prt(isp, ISP_LOGCONFIG, prom3, 3217 lp->portid, tgt, "change is bad"); 3218 isp_make_gone(isp, tgt); 3219 } else { 3220 isp_prt(isp, ISP_LOGCONFIG, prom, 3221 lp->portid, lp->handle, 3222 roles[lp->roles], 3223 "changed and departed", 3224 (uint32_t) (lp->node_wwn >> 32), 3225 (uint32_t) lp->node_wwn, 3226 (uint32_t) (lp->port_wwn >> 32), 3227 (uint32_t) lp->port_wwn); 3228 } 3229 } else { 3230 lp->portid = lp->new_portid; 3231 lp->roles = lp->new_roles; 3232 if (lp->ini_map_idx) { 3233 int t = lp->ini_map_idx - 1; 3234 FCPARAM(isp)->isp_ini_map[t] = 3235 (lp - FCPARAM(isp)->portdb) + 1; 3236 tgt = lp->ini_map_idx - 1; 3237 isp_prt(isp, ISP_LOGCONFIG, prom2, 3238 lp->portid, lp->handle, 3239 roles[lp->roles], "changed at", tgt, 3240 (uint32_t) (lp->node_wwn >> 32), 3241 (uint32_t) lp->node_wwn, 3242 (uint32_t) (lp->port_wwn >> 32), 3243 (uint32_t) lp->port_wwn); 3244 } else { 3245 isp_prt(isp, ISP_LOGCONFIG, prom, 3246 lp->portid, lp->handle, 3247 roles[lp->roles], "changed", 3248 (uint32_t) (lp->node_wwn >> 32), 3249 (uint32_t) lp->node_wwn, 3250 (uint32_t) (lp->port_wwn >> 32), 3251 (uint32_t) lp->port_wwn); 3252 } 3253 } 3254 break; 3255 case ISPASYNC_DEV_STAYED: 3256 lp = arg; 3257 if (lp->ini_map_idx) { 3258 tgt = lp->ini_map_idx - 1; 3259 isp_prt(isp, ISP_LOGCONFIG, prom2, 3260 lp->portid, lp->handle, 3261 roles[lp->roles], "stayed at", tgt, 3262 (uint32_t) (lp->node_wwn >> 32), 3263 (uint32_t) lp->node_wwn, 3264 (uint32_t) (lp->port_wwn >> 32), 3265 (uint32_t) lp->port_wwn); 3266 } else { 3267 isp_prt(isp, ISP_LOGCONFIG, prom, 3268 lp->portid, lp->handle, 3269 roles[lp->roles], "stayed", 3270 (uint32_t) (lp->node_wwn >> 32), 3271 (uint32_t) lp->node_wwn, 3272 (uint32_t) (lp->port_wwn >> 32), 3273 (uint32_t) lp->port_wwn); 3274 } 3275 break; 3276 case ISPASYNC_DEV_GONE: 3277 lp = arg; 3278 /* 3279 
* If this has a virtual target and we haven't marked it 3280 * that we're going to have isp_gdt tell the OS it's gone, 3281 * set the isp_gdt timer running on it. 3282 * 3283 * If it isn't marked that isp_gdt is going to get rid of it, 3284 * announce that it's gone. 3285 */ 3286 if (lp->ini_map_idx && lp->reserved == 0) { 3287 lp->reserved = 1; 3288 lp->new_reserved = isp->isp_osinfo.gone_device_time; 3289 lp->state = FC_PORTDB_STATE_ZOMBIE; 3290 if (isp->isp_osinfo.gdt_running == 0) { 3291 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3292 "starting Gone Device Timer"); 3293 isp->isp_osinfo.gdt_running = 1; 3294 callout_reset(&isp->isp_osinfo.gdt, hz, 3295 isp_gdt, isp); 3296 } 3297 tgt = lp->ini_map_idx - 1; 3298 isp_prt(isp, ISP_LOGCONFIG, prom2, 3299 lp->portid, lp->handle, 3300 roles[lp->roles], "gone zombie at", tgt, 3301 (uint32_t) (lp->node_wwn >> 32), 3302 (uint32_t) lp->node_wwn, 3303 (uint32_t) (lp->port_wwn >> 32), 3304 (uint32_t) lp->port_wwn); 3305 } else if (lp->reserved == 0) { 3306 isp_prt(isp, ISP_LOGCONFIG, prom, 3307 lp->portid, lp->handle, 3308 roles[lp->roles], "departed", 3309 (uint32_t) (lp->node_wwn >> 32), 3310 (uint32_t) lp->node_wwn, 3311 (uint32_t) (lp->port_wwn >> 32), 3312 (uint32_t) lp->port_wwn); 3313 } 3314 break; 3315 case ISPASYNC_CHANGE_NOTIFY: 3316 { 3317 char *msg; 3318 if (arg == ISPASYNC_CHANGE_PDB) { 3319 msg = "Port Database Changed"; 3320 } else if (arg == ISPASYNC_CHANGE_SNS) { 3321 msg = "Name Server Database Changed"; 3322 } else { 3323 msg = "Other Change Notify"; 3324 } 3325 /* 3326 * If the loop down timer is running, cancel it. 3327 */ 3328 if (isp->isp_osinfo.ldt_running) { 3329 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, 3330 "Stopping Loop Down Timer"); 3331 isp->isp_osinfo.ldt_running = 0; 3332 callout_stop(&isp->isp_osinfo.ldt); 3333 } 3334 isp_prt(isp, ISP_LOGINFO, msg); 3335 isp_freeze_loopdown(isp, msg); 3336 wakeup(ISP_KT_WCHAN(isp)); 3337 break; 3338 } 3339 #ifdef ISP_TARGET_MODE 3340 case ISPASYNC_TARGET_NOTIFY: 3341 { 3342 tmd_notify_t *nt = arg; 3343 isp_prt(isp, ISP_LOGALL, 3344 "target notify code 0x%x", nt->nt_ncode); 3345 break; 3346 } 3347 case ISPASYNC_TARGET_ACTION: 3348 switch (((isphdr_t *)arg)->rqs_entry_type) { 3349 default: 3350 isp_prt(isp, ISP_LOGWARN, 3351 "event 0x%x for unhandled target action", 3352 ((isphdr_t *)arg)->rqs_entry_type); 3353 break; 3354 case RQSTYPE_NOTIFY: 3355 if (IS_SCSI(isp)) { 3356 rv = isp_handle_platform_notify_scsi(isp, 3357 (in_entry_t *) arg); 3358 } else { 3359 rv = isp_handle_platform_notify_fc(isp, 3360 (in_fcentry_t *) arg); 3361 } 3362 break; 3363 case RQSTYPE_ATIO: 3364 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); 3365 break; 3366 case RQSTYPE_ATIO2: 3367 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3368 break; 3369 case RQSTYPE_CTIO3: 3370 case RQSTYPE_CTIO2: 3371 case RQSTYPE_CTIO: 3372 rv = isp_handle_platform_ctio(isp, arg); 3373 break; 3374 case RQSTYPE_ENABLE_LUN: 3375 case RQSTYPE_MODIFY_LUN: 3376 isp_ledone(isp, (lun_entry_t *) arg); 3377 break; 3378 } 3379 break; 3380 #endif 3381 case ISPASYNC_FW_CRASH: 3382 { 3383 uint16_t mbox1, mbox6; 3384 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3385 if (IS_DUALBUS(isp)) { 3386 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3387 } else { 3388 mbox6 = 0; 3389 } 3390 isp_prt(isp, ISP_LOGERR, 3391 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3392 mbox6, mbox1); 3393 #ifdef ISP_FW_CRASH_DUMP 3394 mbox1 = isp->isp_osinfo.mbox_sleep_ok; 3395 isp->isp_osinfo.mbox_sleep_ok = 0; 3396 if (IS_FC(isp)) { 3397 
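			/*
			 * Reset the firmware and loop state and freeze the loop before
			 * taking the firmware dump; isp_reinit() below brings the chip
			 * back up afterwards.
			 */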
FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3398 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3399 isp_freeze_loopdown(isp, "f/w crash"); 3400 isp_fw_dump(isp); 3401 } 3402 isp_reinit(isp); 3403 isp->isp_osinfo.mbox_sleep_ok = mbox1; 3404 #else 3405 mbox1 = isp->isp_osinfo.mbox_sleep_ok; 3406 isp->isp_osinfo.mbox_sleep_ok = 0; 3407 isp_reinit(isp); 3408 isp->isp_osinfo.mbox_sleep_ok = mbox1; 3409 #endif 3410 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3411 break; 3412 } 3413 case ISPASYNC_UNHANDLED_RESPONSE: 3414 break; 3415 default: 3416 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3417 break; 3418 } 3419 return (rv); 3420 } 3421 3422 3423 /* 3424 * Locks are held before coming here. 3425 */ 3426 void 3427 isp_uninit(ispsoftc_t *isp) 3428 { 3429 if (IS_24XX(isp)) { 3430 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); 3431 } else { 3432 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 3433 } 3434 ISP_DISABLE_INTS(isp); 3435 } 3436 3437 void 3438 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 3439 { 3440 va_list ap; 3441 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3442 return; 3443 } 3444 printf("%s: ", device_get_nameunit(isp->isp_dev)); 3445 va_start(ap, fmt); 3446 vprintf(fmt, ap); 3447 va_end(ap); 3448 printf("\n"); 3449 } 3450 3451 uint64_t 3452 isp_nanotime_sub(struct timespec *b, struct timespec *a) 3453 { 3454 uint64_t elapsed; 3455 struct timespec x = *b; 3456 timespecsub(&x, a); 3457 elapsed = GET_NANOSEC(&x); 3458 if (elapsed == 0) 3459 elapsed++; 3460 return (elapsed); 3461 } 3462 3463 int 3464 isp_mbox_acquire(ispsoftc_t *isp) 3465 { 3466 if (isp->isp_osinfo.mboxbsy) { 3467 return (1); 3468 } else { 3469 isp->isp_osinfo.mboxcmd_done = 0; 3470 isp->isp_osinfo.mboxbsy = 1; 3471 return (0); 3472 } 3473 } 3474 3475 void 3476 isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) 3477 { 3478 unsigned int usecs = mbp->timeout; 3479 unsigned int max, olim, ilim; 3480 3481 if (usecs == 0) { 3482 usecs = MBCMD_DEFAULT_TIMEOUT; 3483 } 3484 max = isp->isp_mbxwrk0 + 1; 3485 3486 if (isp->isp_osinfo.mbox_sleep_ok) { 3487 unsigned int ms = (usecs + 999) / 1000; 3488 3489 isp->isp_osinfo.mbox_sleep_ok = 0; 3490 isp->isp_osinfo.mbox_sleeping = 1; 3491 for (olim = 0; olim < max; olim++) { 3492 #if __FreeBSD_version < 700037 3493 tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", 3494 isp_mstohz(ms)); 3495 #else 3496 msleep(&isp->isp_mbxworkp, &isp->isp_osinfo.lock, 3497 PRIBIO, "ispmbx_sleep", isp_mstohz(ms)); 3498 #endif 3499 if (isp->isp_osinfo.mboxcmd_done) { 3500 break; 3501 } 3502 } 3503 isp->isp_osinfo.mbox_sleep_ok = 1; 3504 isp->isp_osinfo.mbox_sleeping = 0; 3505 } else { 3506 for (olim = 0; olim < max; olim++) { 3507 for (ilim = 0; ilim < usecs; ilim += 100) { 3508 uint32_t isr; 3509 uint16_t sema, mbox; 3510 if (isp->isp_osinfo.mboxcmd_done) { 3511 break; 3512 } 3513 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 3514 isp_intr(isp, isr, sema, mbox); 3515 if (isp->isp_osinfo.mboxcmd_done) { 3516 break; 3517 } 3518 } 3519 USEC_DELAY(100); 3520 } 3521 if (isp->isp_osinfo.mboxcmd_done) { 3522 break; 3523 } 3524 } 3525 } 3526 if (isp->isp_osinfo.mboxcmd_done == 0) { 3527 isp_prt(isp, ISP_LOGWARN, 3528 "%s Mailbox Command (0x%x) Timeout (%uus)", 3529 isp->isp_osinfo.mbox_sleep_ok? 
"Interrupting" : "Polled", 3530 isp->isp_lastmbxcmd, usecs); 3531 mbp->param[0] = MBOX_TIMEOUT; 3532 isp->isp_osinfo.mboxcmd_done = 1; 3533 } 3534 } 3535 3536 void 3537 isp_mbox_notify_done(ispsoftc_t *isp) 3538 { 3539 if (isp->isp_osinfo.mbox_sleeping) { 3540 wakeup(&isp->isp_mbxworkp); 3541 } 3542 isp->isp_osinfo.mboxcmd_done = 1; 3543 } 3544 3545 void 3546 isp_mbox_release(ispsoftc_t *isp) 3547 { 3548 isp->isp_osinfo.mboxbsy = 0; 3549 } 3550 3551 int 3552 isp_mstohz(int ms) 3553 { 3554 int hz; 3555 struct timeval t; 3556 t.tv_sec = ms / 1000; 3557 t.tv_usec = (ms % 1000) * 1000; 3558 hz = tvtohz(&t); 3559 if (hz < 0) { 3560 hz = 0x7fffffff; 3561 } 3562 if (hz == 0) { 3563 hz = 1; 3564 } 3565 return (hz); 3566 } 3567 3568 void 3569 isp_platform_intr(void *arg) 3570 { 3571 ispsoftc_t *isp = arg; 3572 uint32_t isr; 3573 uint16_t sema, mbox; 3574 3575 ISP_LOCK(isp); 3576 isp->isp_intcnt++; 3577 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 3578 isp->isp_intbogus++; 3579 } else { 3580 isp_intr(isp, isr, sema, mbox); 3581 } 3582 ISP_UNLOCK(isp); 3583 } 3584 3585 void 3586 isp_common_dmateardown(ispsoftc_t *isp, struct ccb_scsiio *csio, uint32_t hdl) 3587 { 3588 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 3589 bus_dmamap_sync(isp->isp_osinfo.dmat, 3590 PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTREAD); 3591 } else { 3592 bus_dmamap_sync(isp->isp_osinfo.dmat, 3593 PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTWRITE); 3594 } 3595 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); 3596 } 3597