1 /*- 2 * Copyright (c) 1997-2009 by Matthew Jacob 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 /* 28 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <dev/isp/isp_freebsd.h> 34 #include <sys/unistd.h> 35 #include <sys/kthread.h> 36 #include <sys/conf.h> 37 #include <sys/module.h> 38 #include <sys/ioccom.h> 39 #include <dev/isp/isp_ioctl.h> 40 #include <sys/devicestat.h> 41 #include <cam/cam_periph.h> 42 #include <cam/cam_xpt_periph.h> 43 44 #if __FreeBSD_version < 800002 45 #define THREAD_CREATE kthread_create 46 #else 47 #define THREAD_CREATE kproc_create 48 #endif 49 50 MODULE_VERSION(isp, 1); 51 MODULE_DEPEND(isp, cam, 1, 1, 1); 52 int isp_announced = 0; 53 int isp_fabric_hysteresis = 5; 54 int isp_loop_down_limit = 60; /* default loop down limit */ 55 int isp_quickboot_time = 7; /* don't wait more than N secs for loop up */ 56 int isp_gone_device_time = 30; /* grace time before reporting device lost */ 57 static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s"; 58 59 static void isp_freeze_loopdown(ispsoftc_t *, int, char *); 60 static d_ioctl_t ispioctl; 61 static void isp_intr_enable(void *); 62 static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); 63 static void isp_poll(struct cam_sim *); 64 static timeout_t isp_watchdog; 65 static timeout_t isp_gdt; 66 static task_fn_t isp_gdt_task; 67 static timeout_t isp_ldt; 68 static task_fn_t isp_ldt_task; 69 static void isp_kthread(void *); 70 static void isp_action(struct cam_sim *, union ccb *); 71 static int isp_timer_count; 72 static void isp_timer(void *); 73 74 static struct cdevsw isp_cdevsw = { 75 .d_version = D_VERSION, 76 .d_ioctl = ispioctl, 77 .d_name = "isp", 78 }; 79 80 static int 81 isp_role_sysctl(SYSCTL_HANDLER_ARGS) 82 { 83 ispsoftc_t *isp = (ispsoftc_t *)arg1; 84 int chan = arg2; 85 int error, old, value; 86 87 value = FCPARAM(isp, chan)->role; 88 89 error = sysctl_handle_int(oidp, &value, 0, req); 90 if ((error != 0) || (req->newptr == NULL)) 91 return (error); 92 93 if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH) 94 return (EINVAL); 95 96 ISP_LOCK(isp); 97 old = FCPARAM(isp, chan)->role; 98 99 /* 
We don't allow target mode switch from here. */ 100 value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR); 101 102 /* If nothing has changed -- we are done. */ 103 if (value == old) { 104 ISP_UNLOCK(isp); 105 return (0); 106 } 107 108 /* Actually change the role. */ 109 error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value); 110 ISP_UNLOCK(isp); 111 return (error); 112 } 113 114 static int 115 isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan) 116 { 117 struct ccb_setasync csa; 118 struct cam_sim *sim; 119 struct cam_path *path; 120 121 /* 122 * Construct our SIM entry. 123 */ 124 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, device_get_unit(isp->isp_dev), &isp->isp_osinfo.lock, isp->isp_maxcmds, isp->isp_maxcmds, devq); 125 126 if (sim == NULL) { 127 return (ENOMEM); 128 } 129 130 ISP_LOCK(isp); 131 if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) { 132 ISP_UNLOCK(isp); 133 cam_sim_free(sim, FALSE); 134 return (EIO); 135 } 136 ISP_UNLOCK(isp); 137 if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 138 ISP_LOCK(isp); 139 xpt_bus_deregister(cam_sim_path(sim)); 140 ISP_UNLOCK(isp); 141 cam_sim_free(sim, FALSE); 142 return (ENXIO); 143 } 144 xpt_setup_ccb(&csa.ccb_h, path, 5); 145 csa.ccb_h.func_code = XPT_SASYNC_CB; 146 csa.event_enable = AC_LOST_DEVICE; 147 csa.callback = isp_cam_async; 148 csa.callback_arg = sim; 149 150 ISP_LOCK(isp); 151 xpt_action((union ccb *)&csa); 152 ISP_UNLOCK(isp); 153 154 if (IS_SCSI(isp)) { 155 struct isp_spi *spi = ISP_SPI_PC(isp, chan); 156 spi->sim = sim; 157 spi->path = path; 158 } else { 159 fcparam *fcp = FCPARAM(isp, chan); 160 struct isp_fc *fc = ISP_FC_PC(isp, chan); 161 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev); 162 struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); 163 char name[16]; 164 165 ISP_LOCK(isp); 166 fc->sim = sim; 167 fc->path = path; 168 fc->isp = isp; 169 fc->ready = 1; 170 171 callout_init_mtx(&fc->ldt, &isp->isp_osinfo.lock, 0); 172 callout_init_mtx(&fc->gdt, &isp->isp_osinfo.lock, 0); 173 TASK_INIT(&fc->ltask, 1, isp_ldt_task, fc); 174 TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc); 175 176 /* 177 * We start by being "loop down" if we have an initiator role 178 */ 179 if (fcp->role & ISP_ROLE_INITIATOR) { 180 isp_freeze_loopdown(isp, chan, "isp_attach"); 181 callout_reset(&fc->ldt, isp_quickboot_time * hz, isp_ldt, fc); 182 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Starting Initial Loop Down Timer @ %lu", (unsigned long) time_uptime); 183 } 184 ISP_UNLOCK(isp); 185 if (THREAD_CREATE(isp_kthread, fc, &fc->kproc, 0, 0, "%s: fc_thrd%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) { 186 xpt_free_path(fc->path); 187 ISP_LOCK(isp); 188 if (callout_active(&fc->ldt)) 189 callout_stop(&fc->ldt); 190 xpt_bus_deregister(cam_sim_path(fc->sim)); 191 ISP_UNLOCK(isp); 192 cam_sim_free(fc->sim, FALSE); 193 return (ENOMEM); 194 } 195 fc->num_threads += 1; 196 if (chan > 0) { 197 snprintf(name, sizeof(name), "chan%d", chan); 198 tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), 199 OID_AUTO, name, CTLFLAG_RW, 0, "Virtual channel"); 200 } 201 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 202 "wwnn", CTLFLAG_RD, &fcp->isp_wwnn, 203 "World Wide Node Name"); 204 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 205 "wwpn", CTLFLAG_RD, &fcp->isp_wwpn, 206 "World Wide Port Name"); 207 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 208 "loop_down_limit", CTLFLAG_RW, 
&fc->loop_down_limit, 0, 209 "Loop Down Limit"); 210 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 211 "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0, 212 "Gone Device Time"); 213 #if defined(ISP_TARGET_MODE) && defined(DEBUG) 214 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 215 "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0, 216 "Cause a Lost Frame on a Read"); 217 #endif 218 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 219 "role", CTLTYPE_INT | CTLFLAG_RW, isp, chan, 220 isp_role_sysctl, "I", "Current role"); 221 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 222 "speed", CTLFLAG_RD, &fcp->isp_gbspeed, 0, 223 "Connection speed in gigabits"); 224 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 225 "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0, 226 "Link state"); 227 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 228 "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0, 229 "Firmware state"); 230 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 231 "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0, 232 "Loop state"); 233 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 234 "topo", CTLFLAG_RD, &fcp->isp_topo, 0, 235 "Connection topology"); 236 } 237 return (0); 238 } 239 240 static void 241 isp_detach_chan(ispsoftc_t *isp, int chan) 242 { 243 struct cam_sim *sim; 244 struct cam_path *path; 245 struct ccb_setasync csa; 246 int *num_threads; 247 248 ISP_GET_PC(isp, chan, sim, sim); 249 ISP_GET_PC(isp, chan, path, path); 250 ISP_GET_PC_ADDR(isp, chan, num_threads, num_threads); 251 252 xpt_setup_ccb(&csa.ccb_h, path, 5); 253 csa.ccb_h.func_code = XPT_SASYNC_CB; 254 csa.event_enable = 0; 255 csa.callback = isp_cam_async; 256 csa.callback_arg = sim; 257 xpt_action((union ccb *)&csa); 258 xpt_free_path(path); 259 xpt_bus_deregister(cam_sim_path(sim)); 260 cam_sim_free(sim, FALSE); 261 262 /* Wait for the channel's spawned threads to exit. */ 263 wakeup(isp->isp_osinfo.pc.ptr); 264 while (*num_threads != 0) 265 mtx_sleep(isp, &isp->isp_osinfo.lock, PRIBIO, "isp_reap", 100); 266 } 267 268 int 269 isp_attach(ispsoftc_t *isp) 270 { 271 const char *nu = device_get_nameunit(isp->isp_osinfo.dev); 272 int du = device_get_unit(isp->isp_dev); 273 int chan; 274 275 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 276 isp->isp_osinfo.ehook.ich_arg = isp; 277 /* 278 * Haha. Set this first, because if we're loaded as a module isp_intr_enable 279 * will be called right away, which will clear isp_osinfo.ehook_active, 280 * which would be unwise to then set again later. 281 */ 282 isp->isp_osinfo.ehook_active = 1; 283 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 284 isp_prt(isp, ISP_LOGERR, "could not establish interrupt enable hook"); 285 return (-EIO); 286 } 287 288 /* 289 * Create the device queue for our SIM(s).
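 * (One queue, sized to isp_maxcmds, is shared by every channel's SIM, so
 *  CAM never queues more commands to us than the firmware says it can take.)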
290 */ 291 isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds); 292 if (isp->isp_osinfo.devq == NULL) { 293 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 294 return (EIO); 295 } 296 297 for (chan = 0; chan < isp->isp_nchan; chan++) { 298 if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) { 299 goto unwind; 300 } 301 } 302 303 callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_osinfo.lock, 0); 304 isp_timer_count = hz >> 2; 305 callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); 306 isp->isp_osinfo.timer_active = 1; 307 308 isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu); 309 if (isp->isp_osinfo.cdev) { 310 isp->isp_osinfo.cdev->si_drv1 = isp; 311 } 312 return (0); 313 314 unwind: 315 while (--chan >= 0) { 316 struct cam_sim *sim; 317 struct cam_path *path; 318 319 ISP_GET_PC(isp, chan, sim, sim); 320 ISP_GET_PC(isp, chan, path, path); 321 xpt_free_path(path); 322 ISP_LOCK(isp); 323 xpt_bus_deregister(cam_sim_path(sim)); 324 ISP_UNLOCK(isp); 325 cam_sim_free(sim, FALSE); 326 } 327 if (isp->isp_osinfo.ehook_active) { 328 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 329 isp->isp_osinfo.ehook_active = 0; 330 } 331 if (isp->isp_osinfo.cdev) { 332 destroy_dev(isp->isp_osinfo.cdev); 333 isp->isp_osinfo.cdev = NULL; 334 } 335 cam_simq_free(isp->isp_osinfo.devq); 336 isp->isp_osinfo.devq = NULL; 337 return (-1); 338 } 339 340 int 341 isp_detach(ispsoftc_t *isp) 342 { 343 struct cam_sim *sim; 344 int chan; 345 346 ISP_LOCK(isp); 347 for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) { 348 ISP_GET_PC(isp, chan, sim, sim); 349 if (sim->refcount > 2) { 350 ISP_UNLOCK(isp); 351 return (EBUSY); 352 } 353 } 354 /* Tell spawned threads that we're exiting. */ 355 isp->isp_osinfo.is_exiting = 1; 356 if (isp->isp_osinfo.timer_active) { 357 callout_stop(&isp->isp_osinfo.tmo); 358 isp->isp_osinfo.timer_active = 0; 359 } 360 for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) 361 isp_detach_chan(isp, chan); 362 ISP_UNLOCK(isp); 363 364 if (isp->isp_osinfo.cdev) { 365 destroy_dev(isp->isp_osinfo.cdev); 366 isp->isp_osinfo.cdev = NULL; 367 } 368 if (isp->isp_osinfo.ehook_active) { 369 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 370 isp->isp_osinfo.ehook_active = 0; 371 } 372 if (isp->isp_osinfo.devq != NULL) { 373 cam_simq_free(isp->isp_osinfo.devq); 374 isp->isp_osinfo.devq = NULL; 375 } 376 return (0); 377 } 378 379 static void 380 isp_freeze_loopdown(ispsoftc_t *isp, int chan, char *msg) 381 { 382 if (IS_FC(isp)) { 383 struct isp_fc *fc = ISP_FC_PC(isp, chan); 384 if (fc->simqfrozen == 0) { 385 isp_prt(isp, ISP_LOGDEBUG0, 386 "Chan %d %s -- freeze simq (loopdown)", chan, msg); 387 fc->simqfrozen = SIMQFRZ_LOOPDOWN; 388 #if __FreeBSD_version >= 1000039 389 xpt_hold_boot(); 390 #endif 391 xpt_freeze_simq(fc->sim, 1); 392 } else { 393 isp_prt(isp, ISP_LOGDEBUG0, 394 "Chan %d %s -- mark frozen (loopdown)", chan, msg); 395 fc->simqfrozen |= SIMQFRZ_LOOPDOWN; 396 } 397 } 398 } 399 400 static void 401 isp_unfreeze_loopdown(ispsoftc_t *isp, int chan) 402 { 403 if (IS_FC(isp)) { 404 struct isp_fc *fc = ISP_FC_PC(isp, chan); 405 int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN; 406 fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN; 407 if (wasfrozen && fc->simqfrozen == 0) { 408 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d releasing simq", __func__, chan); 409 xpt_release_simq(fc->sim, 1); 410 #if __FreeBSD_version >= 1000039 411 xpt_release_boot(); 412 #endif 413 } 414 } 415 } 416 417 418 static int 419 ispioctl(struct cdev 
*dev, u_long c, caddr_t addr, int flags, struct thread *td) 420 { 421 ispsoftc_t *isp; 422 int nr, chan, retval = ENOTTY; 423 424 isp = dev->si_drv1; 425 426 switch (c) { 427 case ISP_SDBLEV: 428 { 429 int olddblev = isp->isp_dblev; 430 isp->isp_dblev = *(int *)addr; 431 *(int *)addr = olddblev; 432 retval = 0; 433 break; 434 } 435 case ISP_GETROLE: 436 chan = *(int *)addr; 437 if (chan < 0 || chan >= isp->isp_nchan) { 438 retval = -ENXIO; 439 break; 440 } 441 if (IS_FC(isp)) { 442 *(int *)addr = FCPARAM(isp, chan)->role; 443 } else { 444 *(int *)addr = SDPARAM(isp, chan)->role; 445 } 446 retval = 0; 447 break; 448 case ISP_SETROLE: 449 nr = *(int *)addr; 450 chan = nr >> 8; 451 if (chan < 0 || chan >= isp->isp_nchan) { 452 retval = -ENXIO; 453 break; 454 } 455 nr &= 0xff; 456 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 457 retval = EINVAL; 458 break; 459 } 460 ISP_LOCK(isp); 461 if (IS_FC(isp)) 462 *(int *)addr = FCPARAM(isp, chan)->role; 463 else 464 *(int *)addr = SDPARAM(isp, chan)->role; 465 retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr); 466 ISP_UNLOCK(isp); 467 retval = 0; 468 break; 469 470 case ISP_RESETHBA: 471 ISP_LOCK(isp); 472 isp_reinit(isp, 0); 473 ISP_UNLOCK(isp); 474 retval = 0; 475 break; 476 477 case ISP_RESCAN: 478 if (IS_FC(isp)) { 479 chan = *(int *)addr; 480 if (chan < 0 || chan >= isp->isp_nchan) { 481 retval = -ENXIO; 482 break; 483 } 484 ISP_LOCK(isp); 485 if (isp_fc_runstate(isp, chan, 5 * 1000000)) { 486 retval = EIO; 487 } else { 488 retval = 0; 489 } 490 ISP_UNLOCK(isp); 491 } 492 break; 493 494 case ISP_FC_LIP: 495 if (IS_FC(isp)) { 496 chan = *(int *)addr; 497 if (chan < 0 || chan >= isp->isp_nchan) { 498 retval = -ENXIO; 499 break; 500 } 501 ISP_LOCK(isp); 502 if (isp_control(isp, ISPCTL_SEND_LIP, chan)) { 503 retval = EIO; 504 } else { 505 retval = 0; 506 } 507 ISP_UNLOCK(isp); 508 } 509 break; 510 case ISP_FC_GETDINFO: 511 { 512 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 513 fcportdb_t *lp; 514 515 if (IS_SCSI(isp)) { 516 break; 517 } 518 if (ifc->loopid >= MAX_FC_TARG) { 519 retval = EINVAL; 520 break; 521 } 522 lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid]; 523 if (lp->state != FC_PORTDB_STATE_NIL) { 524 ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT; 525 ifc->loopid = lp->handle; 526 ifc->portid = lp->portid; 527 ifc->node_wwn = lp->node_wwn; 528 ifc->port_wwn = lp->port_wwn; 529 retval = 0; 530 } else { 531 retval = ENODEV; 532 } 533 break; 534 } 535 case ISP_GET_STATS: 536 { 537 isp_stats_t *sp = (isp_stats_t *) addr; 538 539 ISP_MEMZERO(sp, sizeof (*sp)); 540 sp->isp_stat_version = ISP_STATS_VERSION; 541 sp->isp_type = isp->isp_type; 542 sp->isp_revision = isp->isp_revision; 543 ISP_LOCK(isp); 544 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 545 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 546 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 547 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 548 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 549 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 550 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 551 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 552 ISP_UNLOCK(isp); 553 retval = 0; 554 break; 555 } 556 case ISP_CLR_STATS: 557 ISP_LOCK(isp); 558 isp->isp_intcnt = 0; 559 isp->isp_intbogus = 0; 560 isp->isp_intmboxc = 0; 561 isp->isp_intoasync = 0; 562 isp->isp_rsltccmplt = 0; 563 isp->isp_fphccmplt = 0; 564 isp->isp_rscchiwater = 0; 565 isp->isp_fpcchiwater = 0; 566 ISP_UNLOCK(isp); 567 retval = 0; 568 break; 569 case 
ISP_FC_GETHINFO: 570 { 571 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 572 int chan = hba->fc_channel; 573 574 if (chan < 0 || chan >= isp->isp_nchan) { 575 retval = ENXIO; 576 break; 577 } 578 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 579 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 580 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 581 hba->fc_nchannels = isp->isp_nchan; 582 if (IS_FC(isp)) { 583 hba->fc_nports = MAX_FC_TARG; 584 hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed; 585 hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1; 586 hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid; 587 hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram; 588 hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram; 589 hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn; 590 hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn; 591 } else { 592 hba->fc_nports = MAX_TARGETS; 593 hba->fc_speed = 0; 594 hba->fc_topology = 0; 595 hba->nvram_node_wwn = 0ull; 596 hba->nvram_port_wwn = 0ull; 597 hba->active_node_wwn = 0ull; 598 hba->active_port_wwn = 0ull; 599 } 600 retval = 0; 601 break; 602 } 603 case ISP_TSK_MGMT: 604 { 605 int needmarker; 606 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; 607 uint16_t loopid; 608 mbreg_t mbs; 609 610 if (IS_SCSI(isp)) { 611 break; 612 } 613 614 chan = fct->chan; 615 if (chan < 0 || chan >= isp->isp_nchan) { 616 retval = -ENXIO; 617 break; 618 } 619 620 needmarker = retval = 0; 621 loopid = fct->loopid; 622 ISP_LOCK(isp); 623 if (IS_24XX(isp)) { 624 uint8_t local[QENTRY_LEN]; 625 isp24xx_tmf_t *tmf; 626 isp24xx_statusreq_t *sp; 627 fcparam *fcp = FCPARAM(isp, chan); 628 fcportdb_t *lp; 629 int i; 630 631 for (i = 0; i < MAX_FC_TARG; i++) { 632 lp = &fcp->portdb[i]; 633 if (lp->handle == loopid) { 634 break; 635 } 636 } 637 if (i == MAX_FC_TARG) { 638 retval = ENXIO; 639 ISP_UNLOCK(isp); 640 break; 641 } 642 /* XXX VALIDATE LP XXX */ 643 tmf = (isp24xx_tmf_t *) local; 644 ISP_MEMZERO(tmf, QENTRY_LEN); 645 tmf->tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; 646 tmf->tmf_header.rqs_entry_count = 1; 647 tmf->tmf_nphdl = lp->handle; 648 tmf->tmf_delay = 2; 649 tmf->tmf_timeout = 2; 650 tmf->tmf_tidlo = lp->portid; 651 tmf->tmf_tidhi = lp->portid >> 16; 652 tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan); 653 tmf->tmf_lun[1] = fct->lun & 0xff; 654 if (fct->lun >= 256) { 655 tmf->tmf_lun[0] = 0x40 | (fct->lun >> 8); 656 } 657 switch (fct->action) { 658 case IPT_CLEAR_ACA: 659 tmf->tmf_flags = ISP24XX_TMF_CLEAR_ACA; 660 break; 661 case IPT_TARGET_RESET: 662 tmf->tmf_flags = ISP24XX_TMF_TARGET_RESET; 663 needmarker = 1; 664 break; 665 case IPT_LUN_RESET: 666 tmf->tmf_flags = ISP24XX_TMF_LUN_RESET; 667 needmarker = 1; 668 break; 669 case IPT_CLEAR_TASK_SET: 670 tmf->tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET; 671 needmarker = 1; 672 break; 673 case IPT_ABORT_TASK_SET: 674 tmf->tmf_flags = ISP24XX_TMF_ABORT_TASK_SET; 675 needmarker = 1; 676 break; 677 default: 678 retval = EINVAL; 679 break; 680 } 681 if (retval) { 682 ISP_UNLOCK(isp); 683 break; 684 } 685 MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000); 686 mbs.param[1] = QENTRY_LEN; 687 mbs.param[2] = DMA_WD1(fcp->isp_scdma); 688 mbs.param[3] = DMA_WD0(fcp->isp_scdma); 689 mbs.param[6] = DMA_WD3(fcp->isp_scdma); 690 mbs.param[7] = DMA_WD2(fcp->isp_scdma); 691 692 if (FC_SCRATCH_ACQUIRE(isp, chan)) { 693 ISP_UNLOCK(isp); 694 retval = ENOMEM; 695 break; 696 } 697 isp_put_24xx_tmf(isp, tmf, fcp->isp_scratch); 698 MEMORYBARRIER(isp, SYNC_SFORDEV, 0, QENTRY_LEN, chan); 
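/*
 * The TMF IOCB now sits in the DMA scratch area that the
 * MBOX_EXEC_COMMAND_IOCB_A64 mailbox command above points at.  The local
 * status IOCB is pre-set to a non-zero completion status so that a mailbox
 * failure still reads as an error below, and the response the firmware
 * writes into the second QENTRY_LEN slot of the scratch area is then
 * copied back out.
 */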
699 sp = (isp24xx_statusreq_t *) local; 700 sp->req_completion_status = 1; 701 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); 702 MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN, chan); 703 isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)fcp->isp_scratch)[1], sp); 704 FC_SCRATCH_RELEASE(isp, chan); 705 if (retval || sp->req_completion_status != 0) { 706 FC_SCRATCH_RELEASE(isp, chan); 707 retval = EIO; 708 } 709 if (retval == 0) { 710 if (needmarker) { 711 fcp->sendmarker = 1; 712 } 713 } 714 } else { 715 MBSINIT(&mbs, 0, MBLOGALL, 0); 716 if (ISP_CAP_2KLOGIN(isp) == 0) { 717 loopid <<= 8; 718 } 719 switch (fct->action) { 720 case IPT_CLEAR_ACA: 721 mbs.param[0] = MBOX_CLEAR_ACA; 722 mbs.param[1] = loopid; 723 mbs.param[2] = fct->lun; 724 break; 725 case IPT_TARGET_RESET: 726 mbs.param[0] = MBOX_TARGET_RESET; 727 mbs.param[1] = loopid; 728 needmarker = 1; 729 break; 730 case IPT_LUN_RESET: 731 mbs.param[0] = MBOX_LUN_RESET; 732 mbs.param[1] = loopid; 733 mbs.param[2] = fct->lun; 734 needmarker = 1; 735 break; 736 case IPT_CLEAR_TASK_SET: 737 mbs.param[0] = MBOX_CLEAR_TASK_SET; 738 mbs.param[1] = loopid; 739 mbs.param[2] = fct->lun; 740 needmarker = 1; 741 break; 742 case IPT_ABORT_TASK_SET: 743 mbs.param[0] = MBOX_ABORT_TASK_SET; 744 mbs.param[1] = loopid; 745 mbs.param[2] = fct->lun; 746 needmarker = 1; 747 break; 748 default: 749 retval = EINVAL; 750 break; 751 } 752 if (retval == 0) { 753 if (needmarker) { 754 FCPARAM(isp, chan)->sendmarker = 1; 755 } 756 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); 757 if (retval) { 758 retval = EIO; 759 } 760 } 761 } 762 ISP_UNLOCK(isp); 763 break; 764 } 765 default: 766 break; 767 } 768 return (retval); 769 } 770 771 static void 772 isp_intr_enable(void *arg) 773 { 774 int chan; 775 ispsoftc_t *isp = arg; 776 ISP_LOCK(isp); 777 for (chan = 0; chan < isp->isp_nchan; chan++) { 778 if (IS_FC(isp)) { 779 if (FCPARAM(isp, chan)->role != ISP_ROLE_NONE) { 780 ISP_ENABLE_INTS(isp); 781 break; 782 } 783 } else { 784 if (SDPARAM(isp, chan)->role != ISP_ROLE_NONE) { 785 ISP_ENABLE_INTS(isp); 786 break; 787 } 788 } 789 } 790 isp->isp_osinfo.ehook_active = 0; 791 ISP_UNLOCK(isp); 792 /* Release our hook so that the boot can continue. 
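   The hook was established in isp_attach() with ehook_active already set, so by the time we get here interrupts have been enabled (if any channel has an active role) and the kernel can finish its cold-boot probe.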
*/ 793 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 794 } 795 796 /* 797 * Local Inlines 798 */ 799 800 static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *); 801 static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *); 802 803 static ISP_INLINE int 804 isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb) 805 { 806 ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free; 807 if (ISP_PCMD(ccb) == NULL) { 808 return (-1); 809 } 810 isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next; 811 return (0); 812 } 813 814 static ISP_INLINE void 815 isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb) 816 { 817 if (ISP_PCMD(ccb)) { 818 #ifdef ISP_TARGET_MODE 819 PISP_PCMD(ccb)->datalen = 0; 820 PISP_PCMD(ccb)->totslen = 0; 821 PISP_PCMD(ccb)->cumslen = 0; 822 PISP_PCMD(ccb)->crn = 0; 823 #endif 824 PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free; 825 isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb); 826 ISP_PCMD(ccb) = NULL; 827 } 828 } 829 830 /* 831 * Put the target mode functions here, because some are inlines 832 */ 833 #ifdef ISP_TARGET_MODE 834 static ISP_INLINE void isp_tmlock(ispsoftc_t *, const char *); 835 static ISP_INLINE void isp_tmunlk(ispsoftc_t *); 836 static ISP_INLINE int is_any_lun_enabled(ispsoftc_t *, int); 837 static ISP_INLINE int is_lun_enabled(ispsoftc_t *, int, lun_id_t); 838 static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); 839 static ISP_INLINE tstate_t *get_lun_statep_from_tag(ispsoftc_t *, int, uint32_t); 840 static ISP_INLINE void rls_lun_statep(ispsoftc_t *, tstate_t *); 841 static ISP_INLINE inot_private_data_t *get_ntp_from_tagdata(ispsoftc_t *, uint32_t, uint32_t, tstate_t **); 842 static ISP_INLINE atio_private_data_t *isp_get_atpd(ispsoftc_t *, tstate_t *, uint32_t); 843 static ISP_INLINE atio_private_data_t *isp_find_atpd(ispsoftc_t *, tstate_t *, uint32_t); 844 static ISP_INLINE void isp_put_atpd(ispsoftc_t *, tstate_t *, atio_private_data_t *); 845 static ISP_INLINE inot_private_data_t *isp_get_ntpd(ispsoftc_t *, tstate_t *); 846 static ISP_INLINE inot_private_data_t *isp_find_ntpd(ispsoftc_t *, tstate_t *, uint32_t, uint32_t); 847 static ISP_INLINE void isp_put_ntpd(ispsoftc_t *, tstate_t *, inot_private_data_t *); 848 static cam_status create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); 849 static void destroy_lun_state(ispsoftc_t *, tstate_t *); 850 static void isp_enable_lun(ispsoftc_t *, union ccb *); 851 static cam_status isp_enable_deferred_luns(ispsoftc_t *, int); 852 static cam_status isp_enable_deferred(ispsoftc_t *, int, lun_id_t); 853 static void isp_disable_lun(ispsoftc_t *, union ccb *); 854 static int isp_enable_target_mode(ispsoftc_t *, int); 855 static int isp_disable_target_mode(ispsoftc_t *, int); 856 static void isp_ledone(ispsoftc_t *, lun_entry_t *); 857 static timeout_t isp_refire_putback_atio; 858 static timeout_t isp_refire_notify_ack; 859 static void isp_complete_ctio(union ccb *); 860 static void isp_target_putback_atio(union ccb *); 861 enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE }; 862 static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How); 863 static void isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); 864 static void isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); 865 static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *); 866 static void isp_handle_platform_ctio(ispsoftc_t *, void *); 867 static void isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *); 868 static void 
isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *); 869 static void isp_handle_platform_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *); 870 static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *); 871 static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *); 872 static void isp_target_mark_aborted(ispsoftc_t *, union ccb *); 873 static void isp_target_mark_aborted_early(ispsoftc_t *, tstate_t *, uint32_t); 874 875 static ISP_INLINE void 876 isp_tmlock(ispsoftc_t *isp, const char *msg) 877 { 878 while (isp->isp_osinfo.tmbusy) { 879 isp->isp_osinfo.tmwanted = 1; 880 mtx_sleep(isp, &isp->isp_lock, PRIBIO, msg, 0); 881 } 882 isp->isp_osinfo.tmbusy = 1; 883 } 884 885 static ISP_INLINE void 886 isp_tmunlk(ispsoftc_t *isp) 887 { 888 isp->isp_osinfo.tmbusy = 0; 889 if (isp->isp_osinfo.tmwanted) { 890 isp->isp_osinfo.tmwanted = 0; 891 wakeup(isp); 892 } 893 } 894 895 static ISP_INLINE int 896 is_any_lun_enabled(ispsoftc_t *isp, int bus) 897 { 898 struct tslist *lhp; 899 int i; 900 901 for (i = 0; i < LUN_HASH_SIZE; i++) { 902 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); 903 if (SLIST_FIRST(lhp)) 904 return (1); 905 } 906 return (0); 907 } 908 909 static ISP_INLINE int 910 is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun) 911 { 912 tstate_t *tptr; 913 struct tslist *lhp; 914 915 ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); 916 SLIST_FOREACH(tptr, lhp, next) { 917 if (tptr->ts_lun == lun) { 918 return (1); 919 } 920 } 921 return (0); 922 } 923 924 static void 925 dump_tstates(ispsoftc_t *isp, int bus) 926 { 927 int i, j; 928 struct tslist *lhp; 929 tstate_t *tptr = NULL; 930 931 if (bus >= isp->isp_nchan) { 932 return; 933 } 934 for (i = 0; i < LUN_HASH_SIZE; i++) { 935 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); 936 j = 0; 937 SLIST_FOREACH(tptr, lhp, next) { 938 xpt_print(tptr->owner, "[%d, %d] atio_cnt=%d inot_cnt=%d\n", i, j, tptr->atio_count, tptr->inot_count); 939 j++; 940 } 941 } 942 } 943 944 static ISP_INLINE tstate_t * 945 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) 946 { 947 tstate_t *tptr = NULL; 948 struct tslist *lhp; 949 950 if (bus < isp->isp_nchan) { 951 ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); 952 SLIST_FOREACH(tptr, lhp, next) { 953 if (tptr->ts_lun == lun) { 954 tptr->hold++; 955 return (tptr); 956 } 957 } 958 } 959 return (NULL); 960 } 961 962 static ISP_INLINE tstate_t * 963 get_lun_statep_from_tag(ispsoftc_t *isp, int bus, uint32_t tagval) 964 { 965 tstate_t *tptr = NULL; 966 atio_private_data_t *atp; 967 struct tslist *lhp; 968 int i; 969 970 if (bus < isp->isp_nchan && tagval != 0) { 971 for (i = 0; i < LUN_HASH_SIZE; i++) { 972 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); 973 SLIST_FOREACH(tptr, lhp, next) { 974 atp = isp_find_atpd(isp, tptr, tagval); 975 if (atp) { 976 tptr->hold++; 977 return (tptr); 978 } 979 } 980 } 981 } 982 return (NULL); 983 } 984 985 static ISP_INLINE inot_private_data_t * 986 get_ntp_from_tagdata(ispsoftc_t *isp, uint32_t tag_id, uint32_t seq_id, tstate_t **rslt) 987 { 988 inot_private_data_t *ntp; 989 tstate_t *tptr; 990 struct tslist *lhp; 991 int bus, i; 992 993 for (bus = 0; bus < isp->isp_nchan; bus++) { 994 for (i = 0; i < LUN_HASH_SIZE; i++) { 995 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); 996 SLIST_FOREACH(tptr, lhp, next) { 997 ntp = isp_find_ntpd(isp, tptr, tag_id, seq_id); 998 if (ntp) { 999 *rslt = tptr; 1000 tptr->hold++; 1001 return (ntp); 1002 } 1003 } 1004 } 1005 } 1006 return (NULL); 1007 } 1008 1009 static ISP_INLINE void 1010 
rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr) 1011 { 1012 KASSERT((tptr->hold), ("tptr not held")); 1013 tptr->hold--; 1014 } 1015 1016 static void 1017 isp_tmcmd_restart(ispsoftc_t *isp) 1018 { 1019 inot_private_data_t *ntp; 1020 inot_private_data_t *restart_queue; 1021 tstate_t *tptr; 1022 union ccb *ccb; 1023 struct tslist *lhp; 1024 int bus, i; 1025 1026 for (bus = 0; bus < isp->isp_nchan; bus++) { 1027 for (i = 0; i < LUN_HASH_SIZE; i++) { 1028 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); 1029 SLIST_FOREACH(tptr, lhp, next) { 1030 if ((restart_queue = tptr->restart_queue) != NULL) 1031 tptr->restart_queue = NULL; 1032 while (restart_queue) { 1033 ntp = restart_queue; 1034 restart_queue = ntp->rd.nt.nt_hba; 1035 if (IS_24XX(isp)) { 1036 isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at7_entry_t *)ntp->rd.data)->at_rxid); 1037 isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->rd.data); 1038 } else { 1039 isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at2_entry_t *)ntp->rd.data)->at_rxid); 1040 isp_handle_platform_atio2(isp, (at2_entry_t *) ntp->rd.data); 1041 } 1042 isp_put_ntpd(isp, tptr, ntp); 1043 if (tptr->restart_queue && restart_queue != NULL) { 1044 ntp = tptr->restart_queue; 1045 tptr->restart_queue = restart_queue; 1046 while (restart_queue->rd.nt.nt_hba) { 1047 restart_queue = restart_queue->rd.nt.nt_hba; 1048 } 1049 restart_queue->rd.nt.nt_hba = ntp; 1050 break; 1051 } 1052 } 1053 /* 1054 * We only need to do this once per tptr 1055 */ 1056 if (!TAILQ_EMPTY(&tptr->waitq)) { 1057 ccb = (union ccb *)TAILQ_LAST(&tptr->waitq, isp_ccbq); 1058 TAILQ_REMOVE(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); 1059 isp_target_start_ctio(isp, ccb, FROM_TIMER); 1060 } 1061 } 1062 } 1063 } 1064 } 1065 1066 static ISP_INLINE atio_private_data_t * 1067 isp_get_atpd(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag) 1068 { 1069 atio_private_data_t *atp; 1070 1071 atp = LIST_FIRST(&tptr->atfree); 1072 if (atp) { 1073 LIST_REMOVE(atp, next); 1074 atp->tag = tag; 1075 LIST_INSERT_HEAD(&tptr->atused[ATPDPHASH(tag)], atp, next); 1076 } 1077 return (atp); 1078 } 1079 1080 static ISP_INLINE atio_private_data_t * 1081 isp_find_atpd(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag) 1082 { 1083 atio_private_data_t *atp; 1084 1085 LIST_FOREACH(atp, &tptr->atused[ATPDPHASH(tag)], next) { 1086 if (atp->tag == tag) 1087 return (atp); 1088 } 1089 return (NULL); 1090 } 1091 1092 static ISP_INLINE void 1093 isp_put_atpd(ispsoftc_t *isp, tstate_t *tptr, atio_private_data_t *atp) 1094 { 1095 if (atp->ests) { 1096 isp_put_ecmd(isp, atp->ests); 1097 } 1098 LIST_REMOVE(atp, next); 1099 memset(atp, 0, sizeof (*atp)); 1100 LIST_INSERT_HEAD(&tptr->atfree, atp, next); 1101 } 1102 1103 static void 1104 isp_dump_atpd(ispsoftc_t *isp, tstate_t *tptr) 1105 { 1106 atio_private_data_t *atp; 1107 const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" }; 1108 1109 for (atp = tptr->atpool; atp < &tptr->atpool[ATPDPSIZE]; atp++) { 1110 xpt_print(tptr->owner, "ATP: [0x%x] origdlen %u bytes_xfrd %u lun %u nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s\n", 1111 atp->tag, atp->orig_datalen, atp->bytes_xfered, atp->lun, atp->nphdl, atp->sid, atp->portid, atp->oxid, states[atp->state & 0x7]); 1112 } 1113 } 1114 1115 1116 static ISP_INLINE inot_private_data_t * 1117 isp_get_ntpd(ispsoftc_t *isp, tstate_t *tptr) 1118 { 1119 inot_private_data_t *ntp; 1120 ntp = tptr->ntfree; 1121 if (ntp) { 1122 tptr->ntfree = ntp->next; 1123 } 1124 return 
(ntp); 1125 } 1126 1127 static ISP_INLINE inot_private_data_t * 1128 isp_find_ntpd(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag_id, uint32_t seq_id) 1129 { 1130 inot_private_data_t *ntp; 1131 for (ntp = tptr->ntpool; ntp < &tptr->ntpool[ATPDPSIZE]; ntp++) { 1132 if (ntp->rd.tag_id == tag_id && ntp->rd.seq_id == seq_id) { 1133 return (ntp); 1134 } 1135 } 1136 return (NULL); 1137 } 1138 1139 static ISP_INLINE void 1140 isp_put_ntpd(ispsoftc_t *isp, tstate_t *tptr, inot_private_data_t *ntp) 1141 { 1142 ntp->rd.tag_id = ntp->rd.seq_id = 0; 1143 ntp->next = tptr->ntfree; 1144 tptr->ntfree = ntp; 1145 } 1146 1147 static cam_status 1148 create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path, tstate_t **rslt) 1149 { 1150 cam_status status; 1151 lun_id_t lun; 1152 struct tslist *lhp; 1153 tstate_t *tptr; 1154 int i; 1155 1156 lun = xpt_path_lun_id(path); 1157 if (lun != CAM_LUN_WILDCARD) { 1158 if (ISP_MAX_LUNS(isp) > 0 && lun >= ISP_MAX_LUNS(isp)) { 1159 return (CAM_LUN_INVALID); 1160 } 1161 } 1162 if (is_lun_enabled(isp, bus, lun)) { 1163 return (CAM_LUN_ALRDY_ENA); 1164 } 1165 tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 1166 if (tptr == NULL) { 1167 return (CAM_RESRC_UNAVAIL); 1168 } 1169 tptr->ts_lun = lun; 1170 status = xpt_create_path(&tptr->owner, NULL, xpt_path_path_id(path), xpt_path_target_id(path), lun); 1171 if (status != CAM_REQ_CMP) { 1172 free(tptr, M_DEVBUF); 1173 return (status); 1174 } 1175 SLIST_INIT(&tptr->atios); 1176 SLIST_INIT(&tptr->inots); 1177 TAILQ_INIT(&tptr->waitq); 1178 LIST_INIT(&tptr->atfree); 1179 for (i = ATPDPSIZE-1; i >= 0; i--) 1180 LIST_INSERT_HEAD(&tptr->atfree, &tptr->atpool[i], next); 1181 for (i = 0; i < ATPDPHASHSIZE; i++) 1182 LIST_INIT(&tptr->atused[i]); 1183 for (i = 0; i < ATPDPSIZE-1; i++) 1184 tptr->ntpool[i].next = &tptr->ntpool[i+1]; 1185 tptr->ntfree = tptr->ntpool; 1186 tptr->hold = 1; 1187 ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); 1188 SLIST_INSERT_HEAD(lhp, tptr, next); 1189 *rslt = tptr; 1190 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n"); 1191 return (CAM_REQ_CMP); 1192 } 1193 1194 static ISP_INLINE void 1195 destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr) 1196 { 1197 union ccb *ccb; 1198 struct tslist *lhp; 1199 1200 KASSERT((tptr->hold != 0), ("tptr is not held")); 1201 KASSERT((tptr->hold == 1), ("tptr still held (%d)", tptr->hold)); 1202 do { 1203 ccb = (union ccb *)SLIST_FIRST(&tptr->atios); 1204 if (ccb) { 1205 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1206 ccb->ccb_h.status = CAM_REQ_ABORTED; 1207 xpt_done(ccb); 1208 } 1209 } while (ccb); 1210 do { 1211 ccb = (union ccb *)SLIST_FIRST(&tptr->inots); 1212 if (ccb) { 1213 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); 1214 ccb->ccb_h.status = CAM_REQ_ABORTED; 1215 xpt_done(ccb); 1216 } 1217 } while (ccb); 1218 ISP_GET_PC_ADDR(isp, cam_sim_bus(xpt_path_sim(tptr->owner)), lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], lhp); 1219 SLIST_REMOVE(lhp, tptr, tstate, next); 1220 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "destroyed tstate\n"); 1221 xpt_free_path(tptr->owner); 1222 free(tptr, M_DEVBUF); 1223 } 1224 1225 /* 1226 * Enable a lun. 
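 *
 * Reached from isp_action() when CAM hands us an XPT_EN_LUN CCB.  In rough
 * order: validate the target/lun wildcard combination, serialize with other
 * enables via isp_tmlock(), for FC cards make sure the firmware can do
 * (SCCLUN) target mode, create the per-lun tstate, switch the bus into
 * target mode if our role calls for it, and then either enable the lun now
 * or defer it until a later role change finishes the job.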
1227 */ 1228 static void 1229 isp_enable_lun(ispsoftc_t *isp, union ccb *ccb) 1230 { 1231 tstate_t *tptr = NULL; 1232 int bus, tm_enabled, target_role; 1233 target_id_t target; 1234 lun_id_t lun; 1235 1236 1237 /* 1238 * We only support either a wildcard target/lun or a target ID of zero and a non-wildcard lun 1239 */ 1240 bus = XS_CHANNEL(ccb); 1241 target = ccb->ccb_h.target_id; 1242 lun = ccb->ccb_h.target_lun; 1243 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, 1244 "enabling lun %jx\n", (uintmax_t)lun); 1245 if (target == CAM_TARGET_WILDCARD && lun != CAM_LUN_WILDCARD) { 1246 ccb->ccb_h.status = CAM_LUN_INVALID; 1247 xpt_done(ccb); 1248 return; 1249 } 1250 1251 if (target != CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 1252 ccb->ccb_h.status = CAM_LUN_INVALID; 1253 xpt_done(ccb); 1254 return; 1255 } 1256 if (isp->isp_dblev & ISP_LOGTDEBUG0) { 1257 xpt_print(ccb->ccb_h.path, 1258 "enabling lun 0x%jx on channel %d\n", (uintmax_t)lun, bus); 1259 } 1260 1261 /* 1262 * Wait until we're not busy with the lun enables subsystem 1263 */ 1264 isp_tmlock(isp, "isp_enable_lun"); 1265 1266 /* 1267 * This is as a good a place as any to check f/w capabilities. 1268 */ 1269 1270 if (IS_FC(isp)) { 1271 if (ISP_CAP_TMODE(isp) == 0) { 1272 xpt_print(ccb->ccb_h.path, "firmware does not support target mode\n"); 1273 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1274 goto done; 1275 } 1276 /* 1277 * We *could* handle non-SCCLUN f/w, but we'd have to 1278 * dork with our already fragile enable/disable code. 1279 */ 1280 if (ISP_CAP_SCCFW(isp) == 0) { 1281 xpt_print(ccb->ccb_h.path, "firmware not SCCLUN capable\n"); 1282 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 1283 goto done; 1284 } 1285 1286 target_role = (FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) != 0; 1287 1288 } else { 1289 target_role = (SDPARAM(isp, bus)->role & ISP_ROLE_TARGET) != 0; 1290 } 1291 1292 /* 1293 * Create the state pointer. 1294 * It should not already exist. 1295 */ 1296 tptr = get_lun_statep(isp, bus, lun); 1297 if (tptr) { 1298 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 1299 goto done; 1300 } 1301 ccb->ccb_h.status = create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 1302 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1303 goto done; 1304 } 1305 1306 /* 1307 * We have a tricky maneuver to perform here. 1308 * 1309 * If target mode isn't already enabled here, 1310 * *and* our current role includes target mode, 1311 * we enable target mode here. 1312 * 1313 */ 1314 ISP_GET_PC(isp, bus, tm_enabled, tm_enabled); 1315 if (tm_enabled == 0 && target_role != 0) { 1316 if (isp_enable_target_mode(isp, bus)) { 1317 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1318 destroy_lun_state(isp, tptr); 1319 tptr = NULL; 1320 goto done; 1321 } 1322 tm_enabled = 1; 1323 } 1324 1325 /* 1326 * Now check to see whether this bus is in target mode already. 1327 * 1328 * If not, a later role change into target mode will finish the job. 1329 */ 1330 if (tm_enabled == 0) { 1331 ISP_SET_PC(isp, bus, tm_enable_defer, 1); 1332 ccb->ccb_h.status = CAM_REQ_CMP; 1333 xpt_print(ccb->ccb_h.path, "Target Mode not enabled yet- lun enable deferred\n"); 1334 goto done1; 1335 } 1336 1337 /* 1338 * Enable the lun. 1339 */ 1340 ccb->ccb_h.status = isp_enable_deferred(isp, bus, lun); 1341 1342 done: 1343 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1344 if (tptr) { 1345 destroy_lun_state(isp, tptr); 1346 tptr = NULL; 1347 } 1348 } else { 1349 tptr->enabled = 1; 1350 } 1351 done1: 1352 if (tptr) { 1353 rls_lun_statep(isp, tptr); 1354 } 1355 1356 /* 1357 * And we're outta here.... 
1358 */ 1359 isp_tmunlk(isp); 1360 xpt_done(ccb); 1361 } 1362 1363 static cam_status 1364 isp_enable_deferred_luns(ispsoftc_t *isp, int bus) 1365 { 1366 tstate_t *tptr = NULL; 1367 struct tslist *lhp; 1368 int i, n; 1369 1370 1371 ISP_GET_PC(isp, bus, tm_enabled, i); 1372 if (i == 1) { 1373 return (CAM_REQ_CMP); 1374 } 1375 ISP_GET_PC(isp, bus, tm_enable_defer, i); 1376 if (i == 0) { 1377 return (CAM_REQ_CMP); 1378 } 1379 /* 1380 * If this succeeds, it will set tm_enable 1381 */ 1382 if (isp_enable_target_mode(isp, bus)) { 1383 return (CAM_REQ_CMP_ERR); 1384 } 1385 isp_tmlock(isp, "isp_enable_deferred_luns"); 1386 for (n = i = 0; i < LUN_HASH_SIZE; i++) { 1387 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); 1388 SLIST_FOREACH(tptr, lhp, next) { 1389 tptr->hold++; 1390 if (tptr->enabled == 0) { 1391 if (isp_enable_deferred(isp, bus, tptr->ts_lun) == CAM_REQ_CMP) { 1392 tptr->enabled = 1; 1393 n++; 1394 } 1395 } else { 1396 n++; 1397 } 1398 tptr->hold--; 1399 } 1400 } 1401 isp_tmunlk(isp); 1402 if (n == 0) { 1403 return (CAM_REQ_CMP_ERR); 1404 } 1405 ISP_SET_PC(isp, bus, tm_enable_defer, 0); 1406 return (CAM_REQ_CMP); 1407 } 1408 1409 static cam_status 1410 isp_enable_deferred(ispsoftc_t *isp, int bus, lun_id_t lun) 1411 { 1412 cam_status status; 1413 int luns_already_enabled; 1414 1415 ISP_GET_PC(isp, bus, tm_luns_enabled, luns_already_enabled); 1416 isp_prt(isp, ISP_LOGTINFO, "%s: bus %d lun %jx luns_enabled %d", __func__, bus, (uintmax_t)lun, luns_already_enabled); 1417 if (IS_24XX(isp) || (IS_FC(isp) && luns_already_enabled)) { 1418 status = CAM_REQ_CMP; 1419 } else { 1420 int cmd_cnt, not_cnt; 1421 1422 if (IS_23XX(isp)) { 1423 cmd_cnt = DFLT_CMND_CNT; 1424 not_cnt = DFLT_INOT_CNT; 1425 } else { 1426 cmd_cnt = 64; 1427 not_cnt = 8; 1428 } 1429 status = CAM_REQ_INPROG; 1430 isp->isp_osinfo.rptr = &status; 1431 if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, lun == CAM_LUN_WILDCARD? 0 : lun, cmd_cnt, not_cnt)) { 1432 status = CAM_RESRC_UNAVAIL; 1433 } else { 1434 mtx_sleep(&status, &isp->isp_lock, PRIBIO, "isp_enable_deferred", 0); 1435 } 1436 isp->isp_osinfo.rptr = NULL; 1437 } 1438 if (status == CAM_REQ_CMP) { 1439 ISP_SET_PC(isp, bus, tm_luns_enabled, 1); 1440 isp_prt(isp, ISP_LOGCONFIG|ISP_LOGTINFO, "bus %d lun %jx now enabled for target mode", bus, (uintmax_t)lun); 1441 } 1442 return (status); 1443 } 1444 1445 static void 1446 isp_disable_lun(ispsoftc_t *isp, union ccb *ccb) 1447 { 1448 tstate_t *tptr = NULL; 1449 int bus; 1450 cam_status status; 1451 target_id_t target; 1452 lun_id_t lun; 1453 1454 bus = XS_CHANNEL(ccb); 1455 target = ccb->ccb_h.target_id; 1456 lun = ccb->ccb_h.target_lun; 1457 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, 1458 "disabling lun %jx\n", (uintmax_t)lun); 1459 if (target == CAM_TARGET_WILDCARD && lun != CAM_LUN_WILDCARD) { 1460 ccb->ccb_h.status = CAM_LUN_INVALID; 1461 xpt_done(ccb); 1462 return; 1463 } 1464 1465 if (target != CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 1466 ccb->ccb_h.status = CAM_LUN_INVALID; 1467 xpt_done(ccb); 1468 return; 1469 } 1470 1471 /* 1472 * See if we're busy disabling a lun now. 1473 */ 1474 isp_tmlock(isp, "isp_disable_lun"); 1475 status = CAM_REQ_INPROG; 1476 1477 /* 1478 * Find the state pointer. 1479 */ 1480 if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) { 1481 status = CAM_PATH_INVALID; 1482 goto done; 1483 } 1484 1485 /* 1486 * If we're a 24XX card, we're done. 
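 * (We never issue ENABLE/MODIFY LUN IOCBs to the 2400 series -- compare
 *  isp_enable_deferred() above -- so tearing down our own bookkeeping is
 *  all that disabling a lun amounts to there.)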
1487 */ 1488 if (IS_24XX(isp)) { 1489 status = CAM_REQ_CMP; 1490 goto done; 1491 } 1492 1493 /* 1494 * For SCC FW, we only deal with lun zero. 1495 */ 1496 if (IS_FC(isp) && lun > 0) { 1497 status = CAM_REQ_CMP; 1498 goto done; 1499 } 1500 isp->isp_osinfo.rptr = &status; 1501 if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, lun, 0, 0)) { 1502 status = CAM_RESRC_UNAVAIL; 1503 } else { 1504 mtx_sleep(ccb, &isp->isp_lock, PRIBIO, "isp_disable_lun", 0); 1505 } 1506 isp->isp_osinfo.rptr = NULL; 1507 done: 1508 if (status == CAM_REQ_CMP) { 1509 tptr->enabled = 0; 1510 if (is_any_lun_enabled(isp, bus) == 0) { 1511 if (isp_disable_target_mode(isp, bus)) { 1512 status = CAM_REQ_CMP_ERR; 1513 } 1514 } 1515 } 1516 ccb->ccb_h.status = status; 1517 if (status == CAM_REQ_CMP) { 1518 destroy_lun_state(isp, tptr); 1519 xpt_print(ccb->ccb_h.path, "lun now disabled for target mode\n"); 1520 } else { 1521 if (tptr) 1522 rls_lun_statep(isp, tptr); 1523 } 1524 isp_tmunlk(isp); 1525 xpt_done(ccb); 1526 } 1527 1528 static int 1529 isp_enable_target_mode(ispsoftc_t *isp, int bus) 1530 { 1531 int tm_enabled; 1532 1533 ISP_GET_PC(isp, bus, tm_enabled, tm_enabled); 1534 if (tm_enabled != 0) { 1535 return (0); 1536 } 1537 if (IS_SCSI(isp)) { 1538 mbreg_t mbs; 1539 MBSINIT(&mbs, MBOX_ENABLE_TARGET_MODE, MBLOGALL, 0); 1540 mbs.param[0] = MBOX_ENABLE_TARGET_MODE; 1541 mbs.param[1] = ENABLE_TARGET_FLAG|ENABLE_TQING_FLAG; 1542 mbs.param[2] = bus << 7; 1543 if (isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs) < 0 || mbs.param[0] != MBOX_COMMAND_COMPLETE) { 1544 isp_prt(isp, ISP_LOGERR, "Unable to enable Target Role on Bus %d", bus); 1545 return (EIO); 1546 } 1547 } 1548 ISP_SET_PC(isp, bus, tm_enabled, 1); 1549 isp_prt(isp, ISP_LOGINFO, "Target Role enabled on Bus %d", bus); 1550 return (0); 1551 } 1552 1553 static int 1554 isp_disable_target_mode(ispsoftc_t *isp, int bus) 1555 { 1556 int tm_enabled; 1557 1558 ISP_GET_PC(isp, bus, tm_enabled, tm_enabled); 1559 if (tm_enabled == 0) { 1560 return (0); 1561 } 1562 if (IS_SCSI(isp)) { 1563 mbreg_t mbs; 1564 MBSINIT(&mbs, MBOX_ENABLE_TARGET_MODE, MBLOGALL, 0); 1565 mbs.param[2] = bus << 7; 1566 if (isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs) < 0 || mbs.param[0] != MBOX_COMMAND_COMPLETE) { 1567 isp_prt(isp, ISP_LOGERR, "Unable to disable Target Role on Bus %d", bus); 1568 return (EIO); 1569 } 1570 } 1571 ISP_SET_PC(isp, bus, tm_enabled, 0); 1572 isp_prt(isp, ISP_LOGINFO, "Target Role disabled on Bus %d", bus); 1573 return (0); 1574 } 1575 1576 static void 1577 isp_ledone(ispsoftc_t *isp, lun_entry_t *lep) 1578 { 1579 uint32_t *rptr; 1580 1581 rptr = isp->isp_osinfo.rptr; 1582 if (lep->le_status != LUN_OK) { 1583 isp_prt(isp, ISP_LOGERR, "ENABLE/MODIFY LUN returned 0x%x", lep->le_status); 1584 if (rptr) { 1585 *rptr = CAM_REQ_CMP_ERR; 1586 wakeup_one(rptr); 1587 } 1588 } else { 1589 if (rptr) { 1590 *rptr = CAM_REQ_CMP; 1591 wakeup_one(rptr); 1592 } 1593 } 1594 } 1595 1596 static void 1597 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how) 1598 { 1599 int fctape, sendstatus, resid; 1600 tstate_t *tptr; 1601 fcparam *fcp; 1602 atio_private_data_t *atp; 1603 struct ccb_scsiio *cso; 1604 uint32_t dmaresult, handle, xfrlen, sense_length, tmp; 1605 uint8_t local[QENTRY_LEN]; 1606 1607 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); 1608 if (tptr == NULL) { 1609 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), CAM_LUN_WILDCARD); 1610 if (tptr == NULL) { 1611 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find tstate pointer", __func__, ccb->csio.tag_id); 1612 
ccb->ccb_h.status = CAM_DEV_NOT_THERE; 1613 xpt_done(ccb); 1614 return; 1615 } 1616 } 1617 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len, 1618 (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? ccb->csio.sense_len : 0)); 1619 1620 switch (how) { 1621 case FROM_TIMER: 1622 case FROM_CAM: 1623 /* 1624 * Insert at the tail of the list, if any, waiting CTIO CCBs 1625 */ 1626 TAILQ_INSERT_TAIL(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); 1627 break; 1628 case FROM_SRR: 1629 case FROM_CTIO_DONE: 1630 TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); 1631 break; 1632 } 1633 1634 while (TAILQ_FIRST(&tptr->waitq) != NULL) { 1635 ccb = (union ccb *) TAILQ_FIRST(&tptr->waitq); 1636 TAILQ_REMOVE(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); 1637 1638 cso = &ccb->csio; 1639 xfrlen = cso->dxfer_len; 1640 if (xfrlen == 0) { 1641 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 1642 ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n"); 1643 ccb->ccb_h.status = CAM_REQ_INVALID; 1644 xpt_done(ccb); 1645 continue; 1646 } 1647 } 1648 1649 atp = isp_find_atpd(isp, tptr, cso->tag_id); 1650 if (atp == NULL) { 1651 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__); 1652 isp_dump_atpd(isp, tptr); 1653 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1654 xpt_done(ccb); 1655 continue; 1656 } 1657 1658 /* 1659 * Is this command a dead duck? 1660 */ 1661 if (atp->dead) { 1662 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id); 1663 ccb->ccb_h.status = CAM_REQ_ABORTED; 1664 xpt_done(ccb); 1665 continue; 1666 } 1667 1668 /* 1669 * Check to make sure we're still in target mode. 1670 */ 1671 fcp = FCPARAM(isp, XS_CHANNEL(ccb)); 1672 if ((fcp->role & ISP_ROLE_TARGET) == 0) { 1673 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id); 1674 ccb->ccb_h.status = CAM_PROVIDE_FAIL; 1675 xpt_done(ccb); 1676 continue; 1677 } 1678 1679 /* 1680 * We're only handling ATPD_CCB_OUTSTANDING outstanding CCB at a time (one of which 1681 * could be split into two CTIOs to split data and status). 1682 */ 1683 if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) { 1684 isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags); 1685 TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); 1686 break; 1687 } 1688 1689 /* 1690 * Does the initiator expect FC-Tape style responses? 1691 */ 1692 if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) { 1693 fctape = 1; 1694 } else { 1695 fctape = 0; 1696 } 1697 1698 /* 1699 * If we already did the data xfer portion of a CTIO that sends data 1700 * and status, don't do it again and do the status portion now. 
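 * (atp->sendst gets set further down whenever a data-bearing CTIO could
 *  not also carry status; when that CCB comes back through here we zero
 *  xfrlen so that only a status CTIO is built this time.)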
1701 */ 1702 if (atp->sendst) { 1703 isp_prt(isp, ISP_LOGTINFO, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u", 1704 cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit); 1705 xfrlen = 0; /* we already did the data transfer */ 1706 atp->sendst = 0; 1707 } 1708 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1709 sendstatus = 1; 1710 } else { 1711 sendstatus = 0; 1712 } 1713 1714 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 1715 KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?")); 1716 /* 1717 * Sense length is not the entire sense data structure size. Periph 1718 * drivers don't seem to be setting sense_len to reflect the actual 1719 * size. We'll peek inside to get the right amount. 1720 */ 1721 sense_length = cso->sense_len; 1722 1723 /* 1724 * This 'cannot' happen 1725 */ 1726 if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) { 1727 sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE; 1728 } 1729 } else { 1730 sense_length = 0; 1731 } 1732 1733 memset(local, 0, QENTRY_LEN); 1734 1735 /* 1736 * Check for overflow 1737 */ 1738 tmp = atp->bytes_xfered + atp->bytes_in_transit + xfrlen; 1739 if (tmp > atp->orig_datalen) { 1740 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] data overflow by %u bytes", __func__, cso->tag_id, tmp - atp->orig_datalen); 1741 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1742 xpt_done(ccb); 1743 continue; 1744 } 1745 1746 if (IS_24XX(isp)) { 1747 ct7_entry_t *cto = (ct7_entry_t *) local; 1748 1749 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; 1750 cto->ct_header.rqs_entry_count = 1; 1751 cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; 1752 ATPD_SET_SEQNO(cto, atp); 1753 cto->ct_nphdl = atp->nphdl; 1754 cto->ct_rxid = atp->tag; 1755 cto->ct_iid_lo = atp->portid; 1756 cto->ct_iid_hi = atp->portid >> 16; 1757 cto->ct_oxid = atp->oxid; 1758 cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb)); 1759 cto->ct_timeout = 120; 1760 cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT; 1761 1762 /* 1763 * Mode 1, status, no data. Only possible when we are sending status, have 1764 * no data to transfer, and any sense data can fit into a ct7_entry_t. 1765 * 1766 * Mode 2, status, no data. We have to use this in the case that 1767 * the sense data won't fit into a ct7_entry_t. 
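 *
 * In Mode 1 up to MAXRESPLEN_24XX bytes of sense ride inside the CTIO
 * itself; in Mode 2 we build a complete FCP response IU in an external
 * ecmd buffer and hand the chip its DMA address instead.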
1768 * 1769 */ 1770 if (sendstatus && xfrlen == 0) { 1771 cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA; 1772 resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; 1773 if (sense_length <= MAXRESPLEN_24XX) { 1774 if (resid < 0) { 1775 cto->ct_resid = -resid; 1776 } else if (resid > 0) { 1777 cto->ct_resid = resid; 1778 } 1779 cto->ct_flags |= CT7_FLAG_MODE1; 1780 cto->ct_scsi_status = cso->scsi_status; 1781 if (resid < 0) { 1782 cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8); 1783 } else if (resid > 0) { 1784 cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8); 1785 } 1786 if (fctape) { 1787 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; 1788 } 1789 if (sense_length) { 1790 cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8); 1791 cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length; 1792 memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length); 1793 } 1794 } else { 1795 bus_addr_t addr; 1796 char buf[XCMD_SIZE]; 1797 fcp_rsp_iu_t *rp; 1798 1799 if (atp->ests == NULL) { 1800 atp->ests = isp_get_ecmd(isp); 1801 if (atp->ests == NULL) { 1802 TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); 1803 break; 1804 } 1805 } 1806 memset(buf, 0, sizeof (buf)); 1807 rp = (fcp_rsp_iu_t *)buf; 1808 if (fctape) { 1809 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; 1810 rp->fcp_rsp_bits |= FCP_CONF_REQ; 1811 } 1812 cto->ct_flags |= CT7_FLAG_MODE2; 1813 rp->fcp_rsp_scsi_status = cso->scsi_status; 1814 if (resid < 0) { 1815 rp->fcp_rsp_resid = -resid; 1816 rp->fcp_rsp_bits |= FCP_RESID_OVERFLOW; 1817 } else if (resid > 0) { 1818 rp->fcp_rsp_resid = resid; 1819 rp->fcp_rsp_bits |= FCP_RESID_UNDERFLOW; 1820 } 1821 if (sense_length) { 1822 rp->fcp_rsp_snslen = sense_length; 1823 cto->ct_senselen = sense_length; 1824 rp->fcp_rsp_bits |= FCP_SNSLEN_VALID; 1825 isp_put_fcp_rsp_iu(isp, rp, atp->ests); 1826 memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length); 1827 } else { 1828 isp_put_fcp_rsp_iu(isp, rp, atp->ests); 1829 } 1830 if (isp->isp_dblev & ISP_LOGTDEBUG1) { 1831 isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests); 1832 } 1833 addr = isp->isp_osinfo.ecmd_dma; 1834 addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE); 1835 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests, 1836 (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length); 1837 cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length; 1838 cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr); 1839 cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr); 1840 cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; 1841 } 1842 if (sense_length) { 1843 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__, 1844 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length, 1845 cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]); 1846 } else { 1847 isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__, 1848 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid); 1849 } 1850 atp->state = ATPD_STATE_LAST_CTIO; 1851 } 1852 1853 /* 
1854 * Mode 0 data transfers, *possibly* with status. 1855 */ 1856 if (xfrlen != 0) { 1857 cto->ct_flags |= CT7_FLAG_MODE0; 1858 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1859 cto->ct_flags |= CT7_DATA_IN; 1860 } else { 1861 cto->ct_flags |= CT7_DATA_OUT; 1862 } 1863 1864 cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit; 1865 cto->rsp.m0.ct_xfrlen = xfrlen; 1866 1867 #ifdef DEBUG 1868 if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) { 1869 isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2)); 1870 ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0; 1871 cto->rsp.m0.ct_xfrlen -= xfrlen >> 2; 1872 } 1873 #endif 1874 if (sendstatus) { 1875 resid = atp->orig_datalen - atp->bytes_xfered - xfrlen; 1876 if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) { 1877 cto->ct_flags |= CT7_SENDSTATUS; 1878 atp->state = ATPD_STATE_LAST_CTIO; 1879 if (fctape) { 1880 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; 1881 } 1882 } else { 1883 atp->sendst = 1; /* send status later */ 1884 cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM; 1885 atp->state = ATPD_STATE_CTIO; 1886 } 1887 } else { 1888 atp->state = ATPD_STATE_CTIO; 1889 } 1890 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__, 1891 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered); 1892 } 1893 } else if (IS_FC(isp)) { 1894 ct2_entry_t *cto = (ct2_entry_t *) local; 1895 1896 if (isp->isp_osinfo.sixtyfourbit) 1897 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3; 1898 else 1899 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1900 cto->ct_header.rqs_entry_count = 1; 1901 cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; 1902 ATPD_SET_SEQNO(cto, atp); 1903 if (ISP_CAP_2KLOGIN(isp)) { 1904 ((ct2e_entry_t *)cto)->ct_iid = atp->nphdl; 1905 } else { 1906 cto->ct_iid = atp->nphdl; 1907 if (ISP_CAP_SCCFW(isp) == 0) { 1908 cto->ct_lun = ccb->ccb_h.target_lun; 1909 } 1910 } 1911 cto->ct_timeout = 10; 1912 cto->ct_rxid = cso->tag_id; 1913 1914 /* 1915 * Mode 1, status, no data. Only possible when we are sending status, have 1916 * no data to transfer, and the sense length can fit in the ct7_entry. 1917 * 1918 * Mode 2, status, no data. We have to use this in the case the response 1919 * length won't fit into a ct2_entry_t. 1920 * 1921 * We'll fill out this structure with information as if this were a 1922 * Mode 1. The hardware layer will create the Mode 2 FCP RSP IU as 1923 * needed based upon this. 
1924 */ 1925 if (sendstatus && xfrlen == 0) { 1926 cto->ct_flags |= CT2_SENDSTATUS | CT2_NO_DATA; 1927 resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; 1928 if (sense_length <= MAXRESPLEN) { 1929 if (resid < 0) { 1930 cto->ct_resid = -resid; 1931 } else if (resid > 0) { 1932 cto->ct_resid = resid; 1933 } 1934 cto->ct_flags |= CT2_FLAG_MODE1; 1935 cto->rsp.m1.ct_scsi_status = cso->scsi_status; 1936 if (resid < 0) { 1937 cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER; 1938 } else if (resid > 0) { 1939 cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER; 1940 } 1941 if (fctape) { 1942 cto->ct_flags |= CT2_CONFIRM; 1943 } 1944 if (sense_length) { 1945 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; 1946 cto->rsp.m1.ct_resplen = cto->rsp.m1.ct_senselen = sense_length; 1947 memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length); 1948 } 1949 } else { 1950 bus_addr_t addr; 1951 char buf[XCMD_SIZE]; 1952 fcp_rsp_iu_t *rp; 1953 1954 if (atp->ests == NULL) { 1955 atp->ests = isp_get_ecmd(isp); 1956 if (atp->ests == NULL) { 1957 TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); 1958 break; 1959 } 1960 } 1961 memset(buf, 0, sizeof (buf)); 1962 rp = (fcp_rsp_iu_t *)buf; 1963 if (fctape) { 1964 cto->ct_flags |= CT2_CONFIRM; 1965 rp->fcp_rsp_bits |= FCP_CONF_REQ; 1966 } 1967 cto->ct_flags |= CT2_FLAG_MODE2; 1968 rp->fcp_rsp_scsi_status = cso->scsi_status; 1969 if (resid < 0) { 1970 rp->fcp_rsp_resid = -resid; 1971 rp->fcp_rsp_bits |= FCP_RESID_OVERFLOW; 1972 } else if (resid > 0) { 1973 rp->fcp_rsp_resid = resid; 1974 rp->fcp_rsp_bits |= FCP_RESID_UNDERFLOW; 1975 } 1976 if (sense_length) { 1977 rp->fcp_rsp_snslen = sense_length; 1978 rp->fcp_rsp_bits |= FCP_SNSLEN_VALID; 1979 isp_put_fcp_rsp_iu(isp, rp, atp->ests); 1980 memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length); 1981 } else { 1982 isp_put_fcp_rsp_iu(isp, rp, atp->ests); 1983 } 1984 if (isp->isp_dblev & ISP_LOGTDEBUG1) { 1985 isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests); 1986 } 1987 addr = isp->isp_osinfo.ecmd_dma; 1988 addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE); 1989 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests, 1990 (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length); 1991 cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length; 1992 if (isp->isp_osinfo.sixtyfourbit) { 1993 cto->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base = DMA_LO32(addr); 1994 cto->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi = DMA_HI32(addr); 1995 cto->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; 1996 } else { 1997 cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base = DMA_LO32(addr); 1998 cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; 1999 } 2000 } 2001 if (sense_length) { 2002 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d sense: %x %x/%x/%x", __func__, 2003 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid, 2004 cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]); 2005 } else { 2006 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__, cto->ct_rxid, 2007 ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), 
atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid); 2008 } 2009 atp->state = ATPD_STATE_LAST_CTIO; 2010 } 2011 2012 if (xfrlen != 0) { 2013 cto->ct_flags |= CT2_FLAG_MODE0; 2014 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2015 cto->ct_flags |= CT2_DATA_IN; 2016 } else { 2017 cto->ct_flags |= CT2_DATA_OUT; 2018 } 2019 2020 cto->ct_reloff = atp->bytes_xfered + atp->bytes_in_transit; 2021 cto->rsp.m0.ct_xfrlen = xfrlen; 2022 2023 if (sendstatus) { 2024 resid = atp->orig_datalen - atp->bytes_xfered - xfrlen; 2025 if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /*&& fctape == 0*/) { 2026 cto->ct_flags |= CT2_SENDSTATUS; 2027 atp->state = ATPD_STATE_LAST_CTIO; 2028 if (fctape) { 2029 cto->ct_flags |= CT2_CONFIRM; 2030 } 2031 } else { 2032 atp->sendst = 1; /* send status later */ 2033 cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM; 2034 atp->state = ATPD_STATE_CTIO; 2035 } 2036 } else { 2037 atp->state = ATPD_STATE_CTIO; 2038 } 2039 } 2040 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[%x] seq %u nc %d CDB0=%x scsi status %x flags %x resid %d xfrlen %u offset %u", __func__, cto->ct_rxid, 2041 ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid, cso->dxfer_len, atp->bytes_xfered); 2042 } else { 2043 ct_entry_t *cto = (ct_entry_t *) local; 2044 2045 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 2046 cto->ct_header.rqs_entry_count = 1; 2047 cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; 2048 ATPD_SET_SEQNO(cto, atp); 2049 cto->ct_iid = cso->init_id; 2050 cto->ct_iid |= XS_CHANNEL(ccb) << 7; 2051 cto->ct_tgt = ccb->ccb_h.target_id; 2052 cto->ct_lun = ccb->ccb_h.target_lun; 2053 cto->ct_fwhandle = cso->tag_id; 2054 if (atp->rxid) { 2055 cto->ct_tag_val = atp->rxid; 2056 cto->ct_flags |= CT_TQAE; 2057 } 2058 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 2059 cto->ct_flags |= CT_NODISC; 2060 } 2061 if (cso->dxfer_len == 0) { 2062 cto->ct_flags |= CT_NO_DATA; 2063 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2064 cto->ct_flags |= CT_DATA_IN; 2065 } else { 2066 cto->ct_flags |= CT_DATA_OUT; 2067 } 2068 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 2069 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; 2070 cto->ct_scsi_status = cso->scsi_status; 2071 cto->ct_resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit - xfrlen; 2072 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO[%x] seq %u nc %d scsi status %x resid %d tag_id %x", __func__, 2073 cto->ct_fwhandle, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), cso->scsi_status, cso->resid, cso->tag_id); 2074 } 2075 ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 2076 cto->ct_timeout = 10; 2077 } 2078 2079 if (isp_get_pcmd(isp, ccb)) { 2080 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n"); 2081 TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); 2082 break; 2083 } 2084 if (isp_allocate_xs_tgt(isp, ccb, &handle)) { 2085 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__); 2086 TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); 2087 isp_free_pcmd(isp, ccb); 2088 break; 2089 } 2090 atp->bytes_in_transit += xfrlen; 2091 PISP_PCMD(ccb)->datalen = xfrlen; 2092 2093 2094 /* 2095 * Call the dma setup routines for this entry (and any subsequent 2096 * CTIOs) if there's data to move, and then tell the f/w it's got 2097 * new things to play with. As with isp_start's usage of DMA setup, 2098 * any swizzling is done in the machine dependent layer. 
Because 2099 * of this, we put the request onto the queue area first in native 2100 * format. 2101 */ 2102 2103 if (IS_24XX(isp)) { 2104 ct7_entry_t *cto = (ct7_entry_t *) local; 2105 cto->ct_syshandle = handle; 2106 } else if (IS_FC(isp)) { 2107 ct2_entry_t *cto = (ct2_entry_t *) local; 2108 cto->ct_syshandle = handle; 2109 } else { 2110 ct_entry_t *cto = (ct_entry_t *) local; 2111 cto->ct_syshandle = handle; 2112 } 2113 2114 dmaresult = ISP_DMASETUP(isp, cso, (ispreq_t *) local); 2115 if (dmaresult != CMD_QUEUED) { 2116 isp_destroy_tgt_handle(isp, handle); 2117 isp_free_pcmd(isp, ccb); 2118 if (dmaresult == CMD_EAGAIN) { 2119 TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); 2120 break; 2121 } 2122 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2123 xpt_done(ccb); 2124 continue; 2125 } 2126 isp->isp_nactive++; 2127 ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED; 2128 if (xfrlen) { 2129 ccb->ccb_h.spriv_field0 = atp->bytes_xfered; 2130 } else { 2131 ccb->ccb_h.spriv_field0 = ~0; 2132 } 2133 atp->ctcnt++; 2134 atp->seqno++; 2135 } 2136 rls_lun_statep(isp, tptr); 2137 } 2138 2139 static void 2140 isp_refire_putback_atio(void *arg) 2141 { 2142 union ccb *ccb = arg; 2143 2144 ISP_ASSERT_LOCKED((ispsoftc_t *)XS_ISP(ccb)); 2145 isp_target_putback_atio(ccb); 2146 } 2147 2148 static void 2149 isp_refire_notify_ack(void *arg) 2150 { 2151 isp_tna_t *tp = arg; 2152 ispsoftc_t *isp = tp->isp; 2153 2154 ISP_ASSERT_LOCKED(isp); 2155 if (isp_notify_ack(isp, tp->not)) { 2156 callout_schedule(&tp->timer, 5); 2157 } else { 2158 free(tp, M_DEVBUF); 2159 } 2160 } 2161 2162 2163 static void 2164 isp_target_putback_atio(union ccb *ccb) 2165 { 2166 ispsoftc_t *isp; 2167 struct ccb_scsiio *cso; 2168 void *qe; 2169 2170 isp = XS_ISP(ccb); 2171 2172 qe = isp_getrqentry(isp); 2173 if (qe == NULL) { 2174 xpt_print(ccb->ccb_h.path, 2175 "%s: Request Queue Overflow\n", __func__); 2176 callout_reset(&PISP_PCMD(ccb)->wdog, 10, 2177 isp_refire_putback_atio, ccb); 2178 return; 2179 } 2180 memset(qe, 0, QENTRY_LEN); 2181 cso = &ccb->csio; 2182 if (IS_FC(isp)) { 2183 at2_entry_t local, *at = &local; 2184 ISP_MEMZERO(at, sizeof (at2_entry_t)); 2185 at->at_header.rqs_entry_type = RQSTYPE_ATIO2; 2186 at->at_header.rqs_entry_count = 1; 2187 if (ISP_CAP_SCCFW(isp)) { 2188 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; 2189 #if __FreeBSD_version < 1000700 2190 if (at->at_scclun >= 256) 2191 at->at_scclun |= 0x4000; 2192 #endif 2193 } else { 2194 at->at_lun = (uint8_t) ccb->ccb_h.target_lun; 2195 } 2196 at->at_status = CT_OK; 2197 at->at_rxid = cso->tag_id; 2198 at->at_iid = cso->ccb_h.target_id; 2199 isp_put_atio2(isp, at, qe); 2200 } else { 2201 at_entry_t local, *at = &local; 2202 ISP_MEMZERO(at, sizeof (at_entry_t)); 2203 at->at_header.rqs_entry_type = RQSTYPE_ATIO; 2204 at->at_header.rqs_entry_count = 1; 2205 at->at_iid = cso->init_id; 2206 at->at_iid |= XS_CHANNEL(ccb) << 7; 2207 at->at_tgt = cso->ccb_h.target_id; 2208 at->at_lun = cso->ccb_h.target_lun; 2209 at->at_status = CT_OK; 2210 at->at_tag_val = AT_GET_TAG(cso->tag_id); 2211 at->at_handle = AT_GET_HANDLE(cso->tag_id); 2212 isp_put_atio(isp, at, qe); 2213 } 2214 ISP_TDQE(isp, "isp_target_putback_atio", isp->isp_reqidx, qe); 2215 ISP_SYNC_REQUEST(isp); 2216 isp_complete_ctio(ccb); 2217 } 2218 2219 static void 2220 isp_complete_ctio(union ccb *ccb) 2221 { 2222 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2223 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2224 xpt_done(ccb); 2225 } 2226 } 2227 2228 /* 2229 * Handle ATIO stuff that the generic code 
can't. 2230 * This means handling CDBs. 2231 */ 2232 2233 static void 2234 isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep) 2235 { 2236 tstate_t *tptr; 2237 int status, bus; 2238 struct ccb_accept_tio *atiop; 2239 atio_private_data_t *atp; 2240 2241 /* 2242 * The firmware status (except for the QLTM_SVALID bit) 2243 * indicates why this ATIO was sent to us. 2244 * 2245 * If QLTM_SVALID is set, the firmware has recommended Sense Data. 2246 * 2247 * If the DISCONNECTS DISABLED bit is set in the flags field, 2248 * we're still connected on the SCSI bus. 2249 */ 2250 status = aep->at_status; 2251 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { 2252 /* 2253 * Bus Phase Sequence error. We should have sense data 2254 * suggested by the f/w. I'm not sure quite yet what 2255 * to do about this for CAM. 2256 */ 2257 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 2258 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 2259 return; 2260 } 2261 if ((status & ~QLTM_SVALID) != AT_CDB) { 2262 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", status); 2263 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 2264 return; 2265 } 2266 2267 bus = GET_BUS_VAL(aep->at_iid); 2268 tptr = get_lun_statep(isp, bus, aep->at_lun); 2269 if (tptr == NULL) { 2270 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 2271 if (tptr == NULL) { 2272 /* 2273 * Because we can't autofeed sense data back with 2274 * a command for parallel SCSI, we can't give back 2275 * a CHECK CONDITION. We'll give back a BUSY status 2276 * instead. This works out okay because the only 2277 * time we should, in fact, get this, is in the 2278 * case that somebody configured us without the 2279 * blackhole driver, so they get what they deserve. 2280 */ 2281 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 2282 return; 2283 } 2284 } 2285 2286 atp = isp_get_atpd(isp, tptr, aep->at_handle); 2287 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 2288 if (atiop == NULL || atp == NULL) { 2289 /* 2290 * Because we can't autofeed sense data back with 2291 * a command for parallel SCSI, we can't give back 2292 * a CHECK CONDITION. We'll give back a QUEUE FULL status 2293 * instead. This works out okay because the only time we 2294 * should, in fact, get this, is in the case that we've 2295 * run out of ATIOS. 2296 */ 2297 xpt_print(tptr->owner, "no %s for lun %d from initiator %d\n", (atp == NULL && atiop == NULL)? "ATIOs *or* ATPS" : 2298 ((atp == NULL)? 
"ATPs" : "ATIOs"), aep->at_lun, aep->at_iid); 2299 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 2300 if (atp) { 2301 isp_put_atpd(isp, tptr, atp); 2302 } 2303 rls_lun_statep(isp, tptr); 2304 return; 2305 } 2306 atp->rxid = aep->at_tag_val; 2307 atp->state = ATPD_STATE_ATIO; 2308 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 2309 tptr->atio_count--; 2310 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO count now %d\n", tptr->atio_count); 2311 atiop->ccb_h.target_id = aep->at_tgt; 2312 atiop->ccb_h.target_lun = aep->at_lun; 2313 if (aep->at_flags & AT_NODISC) { 2314 atiop->ccb_h.flags |= CAM_DIS_DISCONNECT; 2315 } else { 2316 atiop->ccb_h.flags &= ~CAM_DIS_DISCONNECT; 2317 } 2318 2319 if (status & QLTM_SVALID) { 2320 size_t amt = ISP_MIN(QLTM_SENSELEN, sizeof (atiop->sense_data)); 2321 atiop->sense_len = amt; 2322 ISP_MEMCPY(&atiop->sense_data, aep->at_sense, amt); 2323 } else { 2324 atiop->sense_len = 0; 2325 } 2326 2327 atiop->init_id = GET_IID_VAL(aep->at_iid); 2328 atiop->cdb_len = aep->at_cdblen; 2329 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 2330 atiop->ccb_h.status = CAM_CDB_RECVD; 2331 /* 2332 * Construct a tag 'id' based upon tag value (which may be 0..255) 2333 * and the handle (which we have to preserve). 2334 */ 2335 atiop->tag_id = atp->tag; 2336 if (aep->at_flags & AT_TQAE) { 2337 atiop->tag_action = aep->at_tag_type; 2338 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 2339 } 2340 atp->orig_datalen = 0; 2341 atp->bytes_xfered = 0; 2342 atp->lun = aep->at_lun; 2343 atp->nphdl = aep->at_iid; 2344 atp->portid = PORT_NONE; 2345 atp->oxid = 0; 2346 atp->cdb0 = atiop->cdb_io.cdb_bytes[0]; 2347 atp->tattr = aep->at_tag_type; 2348 atp->state = ATPD_STATE_CAM; 2349 isp_prt(isp, ISP_LOGTDEBUG0, "ATIO[0x%x] CDB=0x%x lun %d", aep->at_tag_val, atp->cdb0, atp->lun); 2350 rls_lun_statep(isp, tptr); 2351 } 2352 2353 static void 2354 isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) 2355 { 2356 lun_id_t lun; 2357 fcportdb_t *lp; 2358 tstate_t *tptr; 2359 struct ccb_accept_tio *atiop; 2360 uint16_t nphdl; 2361 atio_private_data_t *atp; 2362 inot_private_data_t *ntp; 2363 2364 /* 2365 * The firmware status (except for the QLTM_SVALID bit) 2366 * indicates why this ATIO was sent to us. 2367 * 2368 * If QLTM_SVALID is set, the firmware has recommended Sense Data. 2369 */ 2370 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 2371 isp_prt(isp, ISP_LOGWARN, "bogus atio (0x%x) leaked to platform", aep->at_status); 2372 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 2373 return; 2374 } 2375 2376 if (ISP_CAP_SCCFW(isp)) { 2377 lun = aep->at_scclun; 2378 #if __FreeBSD_version < 1000700 2379 lun &= 0x3fff; 2380 #endif 2381 } else { 2382 lun = aep->at_lun; 2383 } 2384 if (ISP_CAP_2KLOGIN(isp)) { 2385 nphdl = ((at2e_entry_t *)aep)->at_iid; 2386 } else { 2387 nphdl = aep->at_iid; 2388 } 2389 tptr = get_lun_statep(isp, 0, lun); 2390 if (tptr == NULL) { 2391 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 2392 if (tptr == NULL) { 2393 isp_prt(isp, ISP_LOGWARN, "%s: [0x%x] no state pointer for lun %jx or wildcard", __func__, aep->at_rxid, (uintmax_t)lun); 2394 if (lun == 0) { 2395 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 2396 } else { 2397 isp_endcmd(isp, aep, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); 2398 } 2399 return; 2400 } 2401 } 2402 2403 /* 2404 * Start any commands pending resources first. 
2405 */ 2406 if (tptr->restart_queue) { 2407 inot_private_data_t *restart_queue = tptr->restart_queue; 2408 tptr->restart_queue = NULL; 2409 while (restart_queue) { 2410 ntp = restart_queue; 2411 restart_queue = ntp->rd.nt.nt_hba; 2412 isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at2_entry_t *)ntp->rd.data)->at_rxid); 2413 isp_handle_platform_atio2(isp, (at2_entry_t *) ntp->rd.data); 2414 isp_put_ntpd(isp, tptr, ntp); 2415 /* 2416 * If a recursion caused the restart queue to start to fill again, 2417 * stop and splice the new list on top of the old list and restore 2418 * it and go to noresrc. 2419 */ 2420 if (tptr->restart_queue) { 2421 ntp = tptr->restart_queue; 2422 tptr->restart_queue = restart_queue; 2423 while (restart_queue->rd.nt.nt_hba) { 2424 restart_queue = restart_queue->rd.nt.nt_hba; 2425 } 2426 restart_queue->rd.nt.nt_hba = ntp; 2427 goto noresrc; 2428 } 2429 } 2430 } 2431 2432 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 2433 if (atiop == NULL) { 2434 goto noresrc; 2435 } 2436 2437 atp = isp_get_atpd(isp, tptr, aep->at_rxid); 2438 if (atp == NULL) { 2439 goto noresrc; 2440 } 2441 2442 atp->state = ATPD_STATE_ATIO; 2443 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 2444 tptr->atio_count--; 2445 isp_prt(isp, ISP_LOGTDEBUG2, "Take FREE ATIO count now %d", tptr->atio_count); 2446 atiop->ccb_h.target_id = FCPARAM(isp, 0)->isp_loopid; 2447 atiop->ccb_h.target_lun = lun; 2448 2449 /* 2450 * We don't get 'suggested' sense data as we do with SCSI cards. 2451 */ 2452 atiop->sense_len = 0; 2453 2454 /* 2455 * If we're not in the port database, add ourselves. 2456 */ 2457 if (IS_2100(isp)) 2458 atiop->init_id = nphdl; 2459 else { 2460 if ((isp_find_pdb_by_handle(isp, 0, nphdl, &lp) == 0 || 2461 lp->state == FC_PORTDB_STATE_ZOMBIE)) { 2462 uint64_t wwpn = 2463 (((uint64_t) aep->at_wwpn[0]) << 48) | 2464 (((uint64_t) aep->at_wwpn[1]) << 32) | 2465 (((uint64_t) aep->at_wwpn[2]) << 16) | 2466 (((uint64_t) aep->at_wwpn[3]) << 0); 2467 isp_add_wwn_entry(isp, 0, wwpn, INI_NONE, 2468 nphdl, PORT_ANY, 0); 2469 isp_find_pdb_by_handle(isp, 0, nphdl, &lp); 2470 } 2471 atiop->init_id = FC_PORTDB_TGT(isp, 0, lp); 2472 } 2473 atiop->cdb_len = ATIO2_CDBLEN; 2474 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 2475 atiop->ccb_h.status = CAM_CDB_RECVD; 2476 atiop->tag_id = atp->tag; 2477 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 2478 case ATIO2_TC_ATTR_SIMPLEQ: 2479 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 2480 atiop->tag_action = MSG_SIMPLE_Q_TAG; 2481 break; 2482 case ATIO2_TC_ATTR_HEADOFQ: 2483 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 2484 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 2485 break; 2486 case ATIO2_TC_ATTR_ORDERED: 2487 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 2488 atiop->tag_action = MSG_ORDERED_Q_TAG; 2489 break; 2490 case ATIO2_TC_ATTR_ACAQ: /* ?? 
*/
2491 case ATIO2_TC_ATTR_UNTAGGED:
2492 default:
2493 atiop->tag_action = 0;
2494 break;
2495 }
2496
2497 atp->orig_datalen = aep->at_datalen;
2498 atp->bytes_xfered = 0;
2499 atp->lun = lun;
2500 atp->nphdl = nphdl;
2501 atp->sid = PORT_ANY;
2502 atp->oxid = aep->at_oxid;
2503 atp->cdb0 = aep->at_cdb[0];
2504 atp->tattr = aep->at_taskflags & ATIO2_TC_ATTR_MASK;
2505 atp->state = ATPD_STATE_CAM;
2506 xpt_done((union ccb *)atiop);
2507 isp_prt(isp, ISP_LOGTDEBUG0, "ATIO2[0x%x] CDB=0x%x lun %jx datalen %u", aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen);
2508 rls_lun_statep(isp, tptr);
2509 return;
2510 noresrc:
2511 ntp = isp_get_ntpd(isp, tptr);
2512 if (ntp == NULL) {
2513 rls_lun_statep(isp, tptr);
2514 isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_BUSY, 0);
2515 return;
2516 }
2517 memcpy(ntp->rd.data, aep, QENTRY_LEN);
2518 ntp->rd.nt.nt_hba = tptr->restart_queue;
2519 tptr->restart_queue = ntp;
2520 rls_lun_statep(isp, tptr);
2521 }
2522
2523 static void
2524 isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
2525 {
2526 int cdbxlen;
2527 lun_id_t lun;
2528 uint16_t chan, nphdl = NIL_HANDLE;
2529 uint32_t did, sid;
2530 fcportdb_t *lp;
2531 tstate_t *tptr;
2532 struct ccb_accept_tio *atiop;
2533 atio_private_data_t *atp = NULL;
2534 atio_private_data_t *oatp;
2535 inot_private_data_t *ntp;
2536
2537 did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2];
2538 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
2539 #if __FreeBSD_version >= 1000700
2540 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun));
2541 #else
2542 lun = ((aep->at_cmnd.fcp_cmnd_lun[0] & 0x3f) << 8) |
2543 aep->at_cmnd.fcp_cmnd_lun[1];
2544 #endif
2545
2546 /*
2547 * Find the N-port handle and Virtual Port Index for this command.
2548 *
2549 * If we can't, we're somewhat in trouble because we can't actually respond w/o that information.
2550 * We also, as a matter of course, need to know the WWN of the initiator too.
2551 */
2552 if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) {
2553 /*
2554 * Find the right channel based upon D_ID
2555 */
2556 isp_find_chan_by_did(isp, did, &chan);
2557
2558 if (chan == ISP_NOCHAN) {
2559 NANOTIME_T now;
2560
2561 /*
2562 * If we don't recognize our own D_ID, terminate the exchange unless we're within 2 seconds of startup.
2563 * It's a bit tricky here as we need to stash this command *somewhere*.
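 *
 * "Somewhere" is the restart queue of whatever tstate we can find (lun 0
 * or the wildcard lun); within the first couple of seconds of startup the
 * channel mapping may simply not have settled yet, so we defer the command
 * via noresrc instead of terminating the exchange outright.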
2564 */ 2565 GET_NANOTIME(&now); 2566 if (NANOTIME_SUB(&isp->isp_init_time, &now) > 2000000000ULL) { 2567 isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel- dropping", __func__, aep->at_rxid, did); 2568 isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0); 2569 return; 2570 } 2571 tptr = get_lun_statep(isp, 0, 0); 2572 if (tptr == NULL) { 2573 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 2574 if (tptr == NULL) { 2575 isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel and no tptr- dropping", __func__, aep->at_rxid, did); 2576 isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0); 2577 return; 2578 } 2579 } 2580 isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel- deferring", __func__, aep->at_rxid, did); 2581 goto noresrc; 2582 } 2583 isp_prt(isp, ISP_LOGTDEBUG0, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x", __func__, aep->at_rxid, did, chan, sid); 2584 } else { 2585 chan = 0; 2586 } 2587 2588 /* 2589 * Find the PDB entry for this initiator 2590 */ 2591 if (isp_find_pdb_by_sid(isp, chan, sid, &lp) == 0) { 2592 /* 2593 * If we're not in the port database terminate the exchange. 2594 */ 2595 isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already", 2596 __func__, aep->at_rxid, did, chan, sid); 2597 isp_dump_portdb(isp, chan); 2598 isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0); 2599 return; 2600 } 2601 nphdl = lp->handle; 2602 2603 /* 2604 * Get the tstate pointer 2605 */ 2606 tptr = get_lun_statep(isp, chan, lun); 2607 if (tptr == NULL) { 2608 tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD); 2609 if (tptr == NULL) { 2610 isp_prt(isp, ISP_LOGWARN, 2611 "%s: [0x%x] no state pointer for lun %jx or wildcard", 2612 __func__, aep->at_rxid, (uintmax_t)lun); 2613 if (lun == 0) { 2614 isp_endcmd(isp, aep, nphdl, SCSI_STATUS_BUSY, 0); 2615 } else { 2616 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); 2617 } 2618 return; 2619 } 2620 } 2621 2622 /* 2623 * Start any commands pending resources first. 2624 */ 2625 if (tptr->restart_queue) { 2626 inot_private_data_t *restart_queue = tptr->restart_queue; 2627 tptr->restart_queue = NULL; 2628 while (restart_queue) { 2629 ntp = restart_queue; 2630 restart_queue = ntp->rd.nt.nt_hba; 2631 isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at7_entry_t *)ntp->rd.data)->at_rxid); 2632 isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->rd.data); 2633 isp_put_ntpd(isp, tptr, ntp); 2634 /* 2635 * If a recursion caused the restart queue to start to fill again, 2636 * stop and splice the new list on top of the old list and restore 2637 * it and go to noresrc. 2638 */ 2639 if (tptr->restart_queue) { 2640 isp_prt(isp, ISP_LOGTDEBUG0, "%s: restart queue refilling", __func__); 2641 if (restart_queue) { 2642 ntp = tptr->restart_queue; 2643 tptr->restart_queue = restart_queue; 2644 while (restart_queue->rd.nt.nt_hba) { 2645 restart_queue = restart_queue->rd.nt.nt_hba; 2646 } 2647 restart_queue->rd.nt.nt_hba = ntp; 2648 } 2649 goto noresrc; 2650 } 2651 } 2652 } 2653 2654 /* 2655 * If the f/w is out of resources, just send a BUSY status back. 2656 */ 2657 if (aep->at_rxid == AT7_NORESRC_RXID) { 2658 rls_lun_statep(isp, tptr); 2659 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0); 2660 return; 2661 } 2662 2663 /* 2664 * If we're out of resources, just send a BUSY status back. 
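 *
 * More precisely: if we cannot get an ATIO or an atpd below, we go to
 * noresrc, which parks the command on the restart queue when an inot
 * private structure can be allocated and only falls back to returning
 * SCSI BUSY when even that allocation fails.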
2665 */ 2666 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 2667 if (atiop == NULL) { 2668 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid); 2669 goto noresrc; 2670 } 2671 2672 oatp = isp_find_atpd(isp, tptr, aep->at_rxid); 2673 if (oatp) { 2674 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] tag wraparound in isp_handle_platforms_atio7 (N-Port Handle 0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d", 2675 aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state); 2676 /* 2677 * It's not a "no resource" condition- but we can treat it like one 2678 */ 2679 goto noresrc; 2680 } 2681 atp = isp_get_atpd(isp, tptr, aep->at_rxid); 2682 if (atp == NULL) { 2683 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid); 2684 goto noresrc; 2685 } 2686 atp->word3 = lp->prli_word3; 2687 atp->state = ATPD_STATE_ATIO; 2688 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 2689 tptr->atio_count--; 2690 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO count now %d\n", tptr->atio_count); 2691 atiop->init_id = FC_PORTDB_TGT(isp, chan, lp); 2692 atiop->ccb_h.target_id = FCPARAM(isp, chan)->isp_loopid; 2693 atiop->ccb_h.target_lun = lun; 2694 atiop->sense_len = 0; 2695 cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT; 2696 if (cdbxlen) { 2697 isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored"); 2698 } 2699 cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb); 2700 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen); 2701 atiop->cdb_len = cdbxlen; 2702 atiop->ccb_h.status = CAM_CDB_RECVD; 2703 atiop->tag_id = atp->tag; 2704 switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) { 2705 case FCP_CMND_TASK_ATTR_SIMPLE: 2706 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 2707 atiop->tag_action = MSG_SIMPLE_Q_TAG; 2708 break; 2709 case FCP_CMND_TASK_ATTR_HEAD: 2710 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 2711 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 2712 break; 2713 case FCP_CMND_TASK_ATTR_ORDERED: 2714 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 2715 atiop->tag_action = MSG_ORDERED_Q_TAG; 2716 break; 2717 default: 2718 /* FALLTHROUGH */ 2719 case FCP_CMND_TASK_ATTR_ACA: 2720 case FCP_CMND_TASK_ATTR_UNTAGGED: 2721 atiop->tag_action = 0; 2722 break; 2723 } 2724 atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl; 2725 atp->bytes_xfered = 0; 2726 atp->lun = lun; 2727 atp->nphdl = nphdl; 2728 atp->portid = sid; 2729 atp->oxid = aep->at_hdr.ox_id; 2730 atp->rxid = aep->at_hdr.rx_id; 2731 atp->cdb0 = atiop->cdb_io.cdb_bytes[0]; 2732 atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK; 2733 atp->state = ATPD_STATE_CAM; 2734 isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u", 2735 aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen); 2736 xpt_done((union ccb *)atiop); 2737 rls_lun_statep(isp, tptr); 2738 return; 2739 noresrc: 2740 if (atp) { 2741 isp_put_atpd(isp, tptr, atp); 2742 } 2743 ntp = isp_get_ntpd(isp, tptr); 2744 if (ntp == NULL) { 2745 rls_lun_statep(isp, tptr); 2746 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); 2747 return; 2748 } 2749 memcpy(ntp->rd.data, aep, QENTRY_LEN); 2750 ntp->rd.nt.nt_hba = tptr->restart_queue; 2751 tptr->restart_queue = ntp; 2752 rls_lun_statep(isp, tptr); 2753 } 2754 2755 2756 /* 2757 * Handle starting an SRR (sequence retransmit request) 2758 * We get here when we've gotten the immediate notify 2759 * and the return of all outstanding CTIOs for this 2760 * transaction. 
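 *
 * The relative offset the initiator wants us to restart from arrives
 * split across two 16-bit fields in the immediate notify:
 *
 *	srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16);
 *
 * Depending on in_srr_iu we either rewind a data transfer, resend the
 * FCP RSP (status), or drop the exchange.  A data rewind is only honored
 * when the offset is covered by the CCB we were handed back; otherwise we
 * hand CAM a MODIFY DATA POINTER message via the mdp label below.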
2761 */ 2762 static void 2763 isp_handle_srr_start(ispsoftc_t *isp, tstate_t *tptr, atio_private_data_t *atp) 2764 { 2765 in_fcentry_24xx_t *inot; 2766 uint32_t srr_off, ccb_off, ccb_len, ccb_end; 2767 union ccb *ccb; 2768 2769 inot = (in_fcentry_24xx_t *)atp->srr; 2770 srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16); 2771 ccb = atp->srr_ccb; 2772 atp->srr_ccb = NULL; 2773 atp->nsrr++; 2774 if (ccb == NULL) { 2775 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag); 2776 goto fail; 2777 } 2778 2779 ccb_off = ccb->ccb_h.spriv_field0; 2780 ccb_len = ccb->csio.dxfer_len; 2781 ccb_end = (ccb_off == ~0)? ~0 : ccb_off + ccb_len; 2782 2783 switch (inot->in_srr_iu) { 2784 case R_CTL_INFO_SOLICITED_DATA: 2785 /* 2786 * We have to restart a FCP_DATA data out transaction 2787 */ 2788 atp->sendst = 0; 2789 atp->bytes_xfered = srr_off; 2790 if (ccb_len == 0) { 2791 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off); 2792 goto mdp; 2793 } 2794 if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) { 2795 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); 2796 goto mdp; 2797 } 2798 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); 2799 break; 2800 case R_CTL_INFO_COMMAND_STATUS: 2801 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag); 2802 atp->sendst = 1; 2803 /* 2804 * We have to restart a FCP_RSP IU transaction 2805 */ 2806 break; 2807 case R_CTL_INFO_DATA_DESCRIPTOR: 2808 /* 2809 * We have to restart an FCP DATA in transaction 2810 */ 2811 isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping"); 2812 goto fail; 2813 2814 default: 2815 isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu); 2816 goto fail; 2817 } 2818 2819 /* 2820 * We can't do anything until this is acked, so we might as well start it now. 2821 * We aren't going to do the usual asynchronous ack issue because we need 2822 * to make sure this gets on the wire first. 
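 *
 * isp_notify_ack() is therefore called synchronously here; if it cannot
 * queue the ack we take the failure path below, which falls back to the
 * asynchronous ack mechanism and completes the CCB with CAM_REQ_CMP_ERR.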
2823 */ 2824 if (isp_notify_ack(isp, inot)) { 2825 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); 2826 goto fail; 2827 } 2828 isp_target_start_ctio(isp, ccb, FROM_SRR); 2829 return; 2830 fail: 2831 inot->in_reserved = 1; 2832 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 2833 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2834 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 2835 isp_complete_ctio(ccb); 2836 return; 2837 mdp: 2838 if (isp_notify_ack(isp, inot)) { 2839 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); 2840 goto fail; 2841 } 2842 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2843 ccb->ccb_h.status = CAM_MESSAGE_RECV; 2844 /* 2845 * This is not a strict interpretation of MDP, but it's close 2846 */ 2847 ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16]; 2848 ccb->csio.msg_len = 7; 2849 ccb->csio.msg_ptr[0] = MSG_EXTENDED; 2850 ccb->csio.msg_ptr[1] = 5; 2851 ccb->csio.msg_ptr[2] = 0; /* modify data pointer */ 2852 ccb->csio.msg_ptr[3] = srr_off >> 24; 2853 ccb->csio.msg_ptr[4] = srr_off >> 16; 2854 ccb->csio.msg_ptr[5] = srr_off >> 8; 2855 ccb->csio.msg_ptr[6] = srr_off; 2856 isp_complete_ctio(ccb); 2857 } 2858 2859 2860 static void 2861 isp_handle_srr_notify(ispsoftc_t *isp, void *inot_raw) 2862 { 2863 tstate_t *tptr; 2864 in_fcentry_24xx_t *inot = inot_raw; 2865 atio_private_data_t *atp; 2866 uint32_t tag = inot->in_rxid; 2867 uint32_t bus = inot->in_vpidx; 2868 2869 if (!IS_24XX(isp)) { 2870 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot_raw); 2871 return; 2872 } 2873 2874 tptr = get_lun_statep_from_tag(isp, bus, tag); 2875 if (tptr == NULL) { 2876 isp_prt(isp, ISP_LOGERR, "%s: cannot find tptr for tag %x in SRR Notify", __func__, tag); 2877 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 2878 return; 2879 } 2880 atp = isp_find_atpd(isp, tptr, tag); 2881 if (atp == NULL) { 2882 rls_lun_statep(isp, tptr); 2883 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify", __func__, tag); 2884 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 2885 return; 2886 } 2887 atp->srr_notify_rcvd = 1; 2888 memcpy(atp->srr, inot, sizeof (atp->srr)); 2889 isp_prt(isp, ISP_LOGTINFO /* ISP_LOGTDEBUG0 */, "SRR[0x%x] inot->in_rxid flags 0x%x srr_iu=%x reloff 0x%x", inot->in_rxid, inot->in_flags, inot->in_srr_iu, 2890 inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16)); 2891 if (atp->srr_ccb) 2892 isp_handle_srr_start(isp, tptr, atp); 2893 rls_lun_statep(isp, tptr); 2894 } 2895 2896 static void 2897 isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) 2898 { 2899 union ccb *ccb; 2900 int sentstatus = 0, ok = 0, notify_cam = 0, resid = 0, failure = 0; 2901 tstate_t *tptr = NULL; 2902 atio_private_data_t *atp = NULL; 2903 int bus; 2904 uint32_t handle, moved_data = 0, data_requested; 2905 2906 /* 2907 * CTIO handles are 16 bits. 2908 * CTIO2 and CTIO7 are 32 bits. 
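 *
 * Either way, ct_syshandle is the handle we stashed in the outgoing CTIO,
 * so it maps straight back to the originating CCB:
 *
 *	handle = ((ct2_entry_t *)arg)->ct_syshandle;
 *	ccb = isp_find_xs_tgt(isp, handle);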
2909 */ 2910 2911 if (IS_SCSI(isp)) { 2912 handle = ((ct_entry_t *)arg)->ct_syshandle; 2913 } else { 2914 handle = ((ct2_entry_t *)arg)->ct_syshandle; 2915 } 2916 ccb = isp_find_xs_tgt(isp, handle); 2917 if (ccb == NULL) { 2918 isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, arg); 2919 return; 2920 } 2921 isp_destroy_tgt_handle(isp, handle); 2922 data_requested = PISP_PCMD(ccb)->datalen; 2923 isp_free_pcmd(isp, ccb); 2924 if (isp->isp_nactive) { 2925 isp->isp_nactive--; 2926 } 2927 2928 bus = XS_CHANNEL(ccb); 2929 tptr = get_lun_statep(isp, bus, XS_LUN(ccb)); 2930 if (tptr == NULL) { 2931 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 2932 } 2933 if (tptr == NULL) { 2934 isp_prt(isp, ISP_LOGERR, "%s: cannot find tptr for tag %x after I/O", __func__, ccb->csio.tag_id); 2935 return; 2936 } 2937 2938 if (IS_24XX(isp)) { 2939 atp = isp_find_atpd(isp, tptr, ((ct7_entry_t *)arg)->ct_rxid); 2940 } else if (IS_FC(isp)) { 2941 atp = isp_find_atpd(isp, tptr, ((ct2_entry_t *)arg)->ct_rxid); 2942 } else { 2943 atp = isp_find_atpd(isp, tptr, ((ct_entry_t *)arg)->ct_fwhandle); 2944 } 2945 if (atp == NULL) { 2946 /* 2947 * XXX: isp_clear_commands() generates fake CTIO with zero 2948 * ct_rxid value, filling only ct_syshandle. Workaround 2949 * that using tag_id from the CCB, pointed by ct_syshandle. 2950 */ 2951 atp = isp_find_atpd(isp, tptr, ccb->csio.tag_id); 2952 } 2953 if (atp == NULL) { 2954 rls_lun_statep(isp, tptr); 2955 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id); 2956 return; 2957 } 2958 KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero")); 2959 atp->bytes_in_transit -= data_requested; 2960 atp->ctcnt -= 1; 2961 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2962 2963 if (IS_24XX(isp)) { 2964 ct7_entry_t *ct = arg; 2965 2966 if (ct->ct_nphdl == CT7_SRR) { 2967 atp->srr_ccb = ccb; 2968 if (atp->srr_notify_rcvd) 2969 isp_handle_srr_start(isp, tptr, atp); 2970 rls_lun_statep(isp, tptr); 2971 return; 2972 } 2973 if (ct->ct_nphdl == CT_HBA_RESET) { 2974 failure = CAM_UNREC_HBA_ERROR; 2975 } else { 2976 sentstatus = ct->ct_flags & CT7_SENDSTATUS; 2977 ok = (ct->ct_nphdl == CT7_OK); 2978 notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; 2979 if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) { 2980 resid = ct->ct_resid; 2981 moved_data = data_requested - resid; 2982 } 2983 } 2984 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct), 2985 notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID"); 2986 } else if (IS_FC(isp)) { 2987 ct2_entry_t *ct = arg; 2988 if (ct->ct_status == CT_SRR) { 2989 atp->srr_ccb = ccb; 2990 if (atp->srr_notify_rcvd) 2991 isp_handle_srr_start(isp, tptr, atp); 2992 rls_lun_statep(isp, tptr); 2993 isp_target_putback_atio(ccb); 2994 return; 2995 } 2996 if (ct->ct_status == CT_HBA_RESET) { 2997 failure = CAM_UNREC_HBA_ERROR; 2998 } else { 2999 sentstatus = ct->ct_flags & CT2_SENDSTATUS; 3000 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 3001 notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; 3002 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { 3003 resid = ct->ct_resid; 3004 moved_data = data_requested - resid; 3005 } 3006 } 3007 isp_prt(isp, ok? 
ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO2[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct), 3008 notify_cam, ct->ct_status, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID"); 3009 } else { 3010 ct_entry_t *ct = arg; 3011 3012 if (ct->ct_status == (CT_HBA_RESET & 0xff)) { 3013 failure = CAM_UNREC_HBA_ERROR; 3014 } else { 3015 sentstatus = ct->ct_flags & CT_SENDSTATUS; 3016 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; 3017 notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; 3018 } 3019 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { 3020 resid = ct->ct_resid; 3021 moved_data = data_requested - resid; 3022 } 3023 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO[%x] seq %u nc %d tag %x S_ID 0x%x lun %x sts %x flg %x resid %d %s", __func__, ct->ct_fwhandle, ATPD_GET_SEQNO(ct), 3024 notify_cam, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, ct->ct_status, ct->ct_flags, resid, sentstatus? "FIN" : "MID"); 3025 } 3026 if (ok) { 3027 if (moved_data) { 3028 atp->bytes_xfered += moved_data; 3029 ccb->csio.resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; 3030 } 3031 if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { 3032 ccb->ccb_h.status |= CAM_SENT_SENSE; 3033 } 3034 ccb->ccb_h.status |= CAM_REQ_CMP; 3035 } else { 3036 notify_cam = 1; 3037 if (failure == CAM_UNREC_HBA_ERROR) 3038 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; 3039 else 3040 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 3041 } 3042 atp->state = ATPD_STATE_PDON; 3043 rls_lun_statep(isp, tptr); 3044 3045 /* 3046 * We never *not* notify CAM when there has been any error (ok == 0), 3047 * so we never need to do an ATIO putback if we're not notifying CAM. 3048 */ 3049 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)", 3050 (sentstatus)? " FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0); 3051 if (notify_cam == 0) { 3052 if (atp->sendst) { 3053 isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE); 3054 } 3055 return; 3056 } 3057 3058 /* 3059 * We're telling CAM we're done with this CTIO transaction. 3060 * 3061 * 24XX cards never need an ATIO put back. 3062 * 3063 * Other cards need one put back only on error. 3064 * In the latter case, a timeout will re-fire 3065 * and try again in case we didn't have 3066 * queue resources to do so at first. In any case, 3067 * once the putback is done we do the completion 3068 * call. 
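 *
 * The putback path is isp_target_putback_atio(): it rebuilds an ATIO (or
 * ATIO2) request queue entry for the firmware, re-arms a callout
 * (isp_refire_putback_atio) to retry if the request queue is full, and
 * calls isp_complete_ctio() once the entry has been queued.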
3069 */ 3070 if (ok || IS_24XX(isp)) { 3071 isp_complete_ctio(ccb); 3072 } else { 3073 isp_target_putback_atio(ccb); 3074 } 3075 } 3076 3077 static void 3078 isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inot) 3079 { 3080 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 3081 } 3082 3083 static void 3084 isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp) 3085 { 3086 int needack = 1; 3087 switch (inp->in_status) { 3088 case IN_PORT_LOGOUT: 3089 /* 3090 * XXX: Need to delete this initiator's WWN from the database 3091 * XXX: Need to send this LOGOUT upstream 3092 */ 3093 isp_prt(isp, ISP_LOGWARN, "port logout of S_ID 0x%x", inp->in_iid); 3094 break; 3095 case IN_PORT_CHANGED: 3096 isp_prt(isp, ISP_LOGWARN, "port changed for S_ID 0x%x", inp->in_iid); 3097 break; 3098 case IN_GLOBAL_LOGO: 3099 isp_del_all_wwn_entries(isp, 0); 3100 isp_prt(isp, ISP_LOGINFO, "all ports logged out"); 3101 break; 3102 case IN_ABORT_TASK: 3103 { 3104 tstate_t *tptr; 3105 uint16_t lun; 3106 uint32_t loopid, sid; 3107 uint64_t wwn; 3108 atio_private_data_t *atp; 3109 fcportdb_t *lp; 3110 struct ccb_immediate_notify *inot = NULL; 3111 3112 if (ISP_CAP_SCCFW(isp)) { 3113 lun = inp->in_scclun; 3114 #if __FreeBSD_version < 1000700 3115 lun &= 0x3fff; 3116 #endif 3117 } else { 3118 lun = inp->in_lun; 3119 } 3120 if (ISP_CAP_2KLOGIN(isp)) { 3121 loopid = ((in_fcentry_e_t *)inp)->in_iid; 3122 } else { 3123 loopid = inp->in_iid; 3124 } 3125 if (isp_find_pdb_by_handle(isp, 0, loopid, &lp)) { 3126 wwn = lp->port_wwn; 3127 sid = lp->portid; 3128 } else { 3129 wwn = INI_ANY; 3130 sid = PORT_ANY; 3131 } 3132 tptr = get_lun_statep(isp, 0, lun); 3133 if (tptr == NULL) { 3134 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 3135 if (tptr == NULL) { 3136 isp_prt(isp, ISP_LOGWARN, "ABORT TASK for lun %u- but no tstate", lun); 3137 return; 3138 } 3139 } 3140 atp = isp_find_atpd(isp, tptr, inp->in_seqid); 3141 3142 if (atp) { 3143 inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots); 3144 isp_prt(isp, ISP_LOGTDEBUG0, "ABORT TASK RX_ID %x WWN 0x%016llx state %d", inp->in_seqid, (unsigned long long) wwn, atp->state); 3145 if (inot) { 3146 tptr->inot_count--; 3147 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); 3148 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "%s: Take FREE INOT count now %d\n", __func__, tptr->inot_count); 3149 } else { 3150 ISP_PATH_PRT(isp, ISP_LOGWARN, tptr->owner, "out of INOT structures\n"); 3151 } 3152 } else { 3153 ISP_PATH_PRT(isp, ISP_LOGWARN, tptr->owner, "abort task RX_ID %x from wwn 0x%016llx, state unknown\n", inp->in_seqid, wwn); 3154 } 3155 if (inot) { 3156 isp_notify_t tmp, *nt = &tmp; 3157 ISP_MEMZERO(nt, sizeof (isp_notify_t)); 3158 nt->nt_hba = isp; 3159 nt->nt_tgt = FCPARAM(isp, 0)->isp_wwpn; 3160 nt->nt_wwn = wwn; 3161 nt->nt_nphdl = loopid; 3162 nt->nt_sid = sid; 3163 nt->nt_did = PORT_ANY; 3164 nt->nt_lun = lun; 3165 nt->nt_need_ack = 1; 3166 nt->nt_channel = 0; 3167 nt->nt_ncode = NT_ABORT_TASK; 3168 nt->nt_lreserved = inot; 3169 isp_handle_platform_target_tmf(isp, nt); 3170 needack = 0; 3171 } 3172 rls_lun_statep(isp, tptr); 3173 break; 3174 } 3175 default: 3176 break; 3177 } 3178 if (needack) { 3179 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inp); 3180 } 3181 } 3182 3183 static void 3184 isp_handle_platform_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *inot) 3185 { 3186 uint16_t nphdl; 3187 uint16_t prli_options = 0; 3188 uint32_t portid; 3189 fcportdb_t *lp; 3190 char *msg = NULL; 3191 uint8_t *ptr = (uint8_t *)inot; 3192 uint64_t wwpn = 
INI_NONE, wwnn = INI_NONE; 3193 3194 nphdl = inot->in_nphdl; 3195 if (nphdl != NIL_HANDLE) { 3196 portid = inot->in_portid_hi << 16 | inot->in_portid_lo; 3197 } else { 3198 portid = PORT_ANY; 3199 } 3200 3201 switch (inot->in_status) { 3202 case IN24XX_ELS_RCVD: 3203 { 3204 char buf[16]; 3205 int chan = ISP_GET_VPIDX(isp, inot->in_vpidx); 3206 3207 /* 3208 * Note that we're just getting notification that an ELS was received 3209 * (possibly with some associated information sent upstream). This is 3210 * *not* the same as being given the ELS frame to accept or reject. 3211 */ 3212 switch (inot->in_status_subcode) { 3213 case LOGO: 3214 msg = "LOGO"; 3215 wwpn = be64dec(&ptr[IN24XX_PLOGI_WWPN_OFF]); 3216 isp_del_wwn_entry(isp, chan, wwpn, nphdl, portid); 3217 break; 3218 case PRLO: 3219 msg = "PRLO"; 3220 break; 3221 case PLOGI: 3222 msg = "PLOGI"; 3223 wwnn = be64dec(&ptr[IN24XX_PLOGI_WWNN_OFF]); 3224 wwpn = be64dec(&ptr[IN24XX_PLOGI_WWPN_OFF]); 3225 isp_add_wwn_entry(isp, chan, wwpn, wwnn, 3226 nphdl, portid, prli_options); 3227 break; 3228 case PRLI: 3229 msg = "PRLI"; 3230 prli_options = inot->in_prli_options; 3231 if (inot->in_flags & IN24XX_FLAG_PN_NN_VALID) 3232 wwnn = be64dec(&ptr[IN24XX_PRLI_WWNN_OFF]); 3233 wwpn = be64dec(&ptr[IN24XX_PRLI_WWPN_OFF]); 3234 isp_add_wwn_entry(isp, chan, wwpn, wwnn, 3235 nphdl, portid, prli_options); 3236 break; 3237 case PDISC: 3238 msg = "PDISC"; 3239 break; 3240 case ADISC: 3241 msg = "ADISC"; 3242 break; 3243 default: 3244 ISP_SNPRINTF(buf, sizeof (buf), "ELS 0x%x", inot->in_status_subcode); 3245 msg = buf; 3246 break; 3247 } 3248 if (inot->in_flags & IN24XX_FLAG_PUREX_IOCB) { 3249 isp_prt(isp, ISP_LOGERR, "%s Chan %d ELS N-port handle %x PortID 0x%06x marked as needing a PUREX response", msg, chan, nphdl, portid); 3250 break; 3251 } 3252 isp_prt(isp, ISP_LOGTDEBUG0, "%s Chan %d ELS N-port handle %x PortID 0x%06x RX_ID 0x%x OX_ID 0x%x", msg, chan, nphdl, portid, 3253 inot->in_rxid, inot->in_oxid); 3254 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 3255 break; 3256 } 3257 3258 case IN24XX_PORT_LOGOUT: 3259 msg = "PORT LOGOUT"; 3260 if (isp_find_pdb_by_handle(isp, ISP_GET_VPIDX(isp, inot->in_vpidx), nphdl, &lp)) { 3261 isp_del_wwn_entry(isp, ISP_GET_VPIDX(isp, inot->in_vpidx), lp->port_wwn, nphdl, lp->portid); 3262 } 3263 /* FALLTHROUGH */ 3264 case IN24XX_PORT_CHANGED: 3265 if (msg == NULL) 3266 msg = "PORT CHANGED"; 3267 /* FALLTHROUGH */ 3268 case IN24XX_LIP_RESET: 3269 if (msg == NULL) 3270 msg = "LIP RESET"; 3271 isp_prt(isp, ISP_LOGINFO, "Chan %d %s (sub-status 0x%x) for N-port handle 0x%x", ISP_GET_VPIDX(isp, inot->in_vpidx), msg, inot->in_status_subcode, nphdl); 3272 3273 /* 3274 * All subcodes here are irrelevant. What is relevant 3275 * is that we need to terminate all active commands from 3276 * this initiator (known by N-port handle). 
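 *
 * That termination is not actually implemented yet (see the XXX just
 * below); for now we only acknowledge the notify.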
3277 */ 3278 /* XXX IMPLEMENT XXX */ 3279 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 3280 break; 3281 3282 case IN24XX_SRR_RCVD: 3283 #ifdef ISP_TARGET_MODE 3284 isp_handle_srr_notify(isp, inot); 3285 break; 3286 #else 3287 if (msg == NULL) 3288 msg = "SRR RCVD"; 3289 /* FALLTHROUGH */ 3290 #endif 3291 case IN24XX_LINK_RESET: 3292 if (msg == NULL) 3293 msg = "LINK RESET"; 3294 case IN24XX_LINK_FAILED: 3295 if (msg == NULL) 3296 msg = "LINK FAILED"; 3297 default: 3298 isp_prt(isp, ISP_LOGWARN, "Chan %d %s", ISP_GET_VPIDX(isp, inot->in_vpidx), msg); 3299 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 3300 break; 3301 } 3302 } 3303 3304 static int 3305 isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp) 3306 { 3307 3308 if (isp->isp_state != ISP_RUNSTATE) { 3309 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL); 3310 return (0); 3311 } 3312 3313 /* 3314 * This case is for a Task Management Function, which shows up as an ATIO7 entry. 3315 */ 3316 if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) { 3317 ct7_entry_t local, *cto = &local; 3318 at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved; 3319 fcportdb_t *lp; 3320 uint32_t sid; 3321 uint16_t nphdl; 3322 3323 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; 3324 if (isp_find_pdb_by_sid(isp, mp->nt_channel, sid, &lp)) { 3325 nphdl = lp->handle; 3326 } else { 3327 nphdl = NIL_HANDLE; 3328 } 3329 ISP_MEMZERO(&local, sizeof (local)); 3330 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; 3331 cto->ct_header.rqs_entry_count = 1; 3332 cto->ct_nphdl = nphdl; 3333 cto->ct_rxid = aep->at_rxid; 3334 cto->ct_vpidx = mp->nt_channel; 3335 cto->ct_iid_lo = sid; 3336 cto->ct_iid_hi = sid >> 16; 3337 cto->ct_oxid = aep->at_hdr.ox_id; 3338 cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1; 3339 cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT; 3340 return (isp_target_put_entry(isp, &local)); 3341 } 3342 3343 /* 3344 * This case is for a responding to an ABTS frame 3345 */ 3346 if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { 3347 3348 /* 3349 * Overload nt_need_ack here to mark whether we've terminated the associated command. 
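 *
 * Responding to an ABTS is a two step affair: first queue a CTIO7 with
 * CT7_TERMINATE to kill the exchange named by abts_rxid_task, then send
 * the ABTS acknowledgement itself via isp_acknak_abts().  nt_need_ack is
 * cleared once the terminate has been queued so that a retry after an
 * ENOMEM return doesn't terminate the exchange a second time.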
3350 */ 3351 if (mp->nt_need_ack) { 3352 uint8_t storage[QENTRY_LEN]; 3353 ct7_entry_t *cto = (ct7_entry_t *) storage; 3354 abts_t *abts = (abts_t *)mp->nt_lreserved; 3355 3356 ISP_MEMZERO(cto, sizeof (ct7_entry_t)); 3357 isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task); 3358 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; 3359 cto->ct_header.rqs_entry_count = 1; 3360 cto->ct_nphdl = mp->nt_nphdl; 3361 cto->ct_rxid = abts->abts_rxid_task; 3362 cto->ct_iid_lo = mp->nt_sid; 3363 cto->ct_iid_hi = mp->nt_sid >> 16; 3364 cto->ct_oxid = abts->abts_ox_id; 3365 cto->ct_vpidx = mp->nt_channel; 3366 cto->ct_flags = CT7_NOACK|CT7_TERMINATE; 3367 if (isp_target_put_entry(isp, cto)) { 3368 return (ENOMEM); 3369 } 3370 mp->nt_need_ack = 0; 3371 } 3372 if (isp_acknak_abts(isp, mp->nt_lreserved, 0) == ENOMEM) { 3373 return (ENOMEM); 3374 } else { 3375 return (0); 3376 } 3377 } 3378 3379 /* 3380 * Handle logout cases here 3381 */ 3382 if (mp->nt_ncode == NT_GLOBAL_LOGOUT) { 3383 isp_del_all_wwn_entries(isp, mp->nt_channel); 3384 } 3385 3386 if (mp->nt_ncode == NT_LOGOUT) { 3387 if (!IS_2100(isp) && IS_FC(isp)) { 3388 isp_del_wwn_entries(isp, mp); 3389 } 3390 } 3391 3392 /* 3393 * General purpose acknowledgement 3394 */ 3395 if (mp->nt_need_ack) { 3396 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL); 3397 /* 3398 * Don't need to use the guaranteed send because the caller can retry 3399 */ 3400 return (isp_notify_ack(isp, mp->nt_lreserved)); 3401 } 3402 return (0); 3403 } 3404 3405 /* 3406 * Handle task management functions. 3407 * 3408 * We show up here with a notify structure filled out. 3409 * 3410 * The nt_lreserved tag points to the original queue entry 3411 */ 3412 static void 3413 isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify) 3414 { 3415 tstate_t *tptr; 3416 fcportdb_t *lp; 3417 struct ccb_immediate_notify *inot; 3418 inot_private_data_t *ntp = NULL; 3419 lun_id_t lun; 3420 3421 isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun 0x%x", __func__, notify->nt_ncode, 3422 notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun); 3423 /* 3424 * NB: This assignment is necessary because of tricky type conversion. 3425 * XXX: This is tricky and I need to check this. If the lun isn't known 3426 * XXX: for the task management function, it does not of necessity follow 3427 * XXX: that it should go up stream to the wildcard listener. 
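 *
 * As coded, a TMF with nt_lun == LUN_ANY is presented on CAM_LUN_WILDCARD,
 * and if no tstate exists for the specific lun we fall back to the
 * wildcard tstate anyway; only if that also fails do we take the bad path
 * and just ack the original notify.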
3428 */ 3429 if (notify->nt_lun == LUN_ANY) { 3430 lun = CAM_LUN_WILDCARD; 3431 } else { 3432 lun = notify->nt_lun; 3433 } 3434 tptr = get_lun_statep(isp, notify->nt_channel, lun); 3435 if (tptr == NULL) { 3436 tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD); 3437 if (tptr == NULL) { 3438 isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); 3439 goto bad; 3440 } 3441 } 3442 inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots); 3443 if (inot == NULL) { 3444 isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); 3445 goto bad; 3446 } 3447 3448 if (isp_find_pdb_by_sid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 && 3449 isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) { 3450 inot->initiator_id = CAM_TARGET_WILDCARD; 3451 } else { 3452 inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp); 3453 } 3454 inot->seq_id = notify->nt_tagval; 3455 inot->tag_id = notify->nt_tagval >> 32; 3456 3457 switch (notify->nt_ncode) { 3458 case NT_ABORT_TASK: 3459 isp_target_mark_aborted_early(isp, tptr, inot->tag_id); 3460 inot->arg = MSG_ABORT_TASK; 3461 break; 3462 case NT_ABORT_TASK_SET: 3463 isp_target_mark_aborted_early(isp, tptr, TAG_ANY); 3464 inot->arg = MSG_ABORT_TASK_SET; 3465 break; 3466 case NT_CLEAR_ACA: 3467 inot->arg = MSG_CLEAR_ACA; 3468 break; 3469 case NT_CLEAR_TASK_SET: 3470 inot->arg = MSG_CLEAR_TASK_SET; 3471 break; 3472 case NT_LUN_RESET: 3473 inot->arg = MSG_LOGICAL_UNIT_RESET; 3474 break; 3475 case NT_TARGET_RESET: 3476 inot->arg = MSG_TARGET_RESET; 3477 break; 3478 case NT_QUERY_TASK_SET: 3479 inot->arg = MSG_QUERY_TASK_SET; 3480 break; 3481 case NT_QUERY_ASYNC_EVENT: 3482 inot->arg = MSG_QUERY_ASYNC_EVENT; 3483 break; 3484 default: 3485 isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun); 3486 goto bad; 3487 } 3488 3489 ntp = isp_get_ntpd(isp, tptr); 3490 if (ntp == NULL) { 3491 isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__); 3492 goto bad; 3493 } 3494 ISP_MEMCPY(&ntp->rd.nt, notify, sizeof (isp_notify_t)); 3495 if (notify->nt_lreserved) { 3496 ISP_MEMCPY(&ntp->rd.data, notify->nt_lreserved, QENTRY_LEN); 3497 ntp->rd.nt.nt_lreserved = &ntp->rd.data; 3498 } 3499 ntp->rd.seq_id = notify->nt_tagval; 3500 ntp->rd.tag_id = notify->nt_tagval >> 32; 3501 3502 tptr->inot_count--; 3503 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); 3504 rls_lun_statep(isp, tptr); 3505 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "%s: Take FREE INOT count now %d\n", __func__, tptr->inot_count); 3506 inot->ccb_h.status = CAM_MESSAGE_RECV; 3507 xpt_done((union ccb *)inot); 3508 return; 3509 bad: 3510 if (tptr) { 3511 rls_lun_statep(isp, tptr); 3512 } 3513 if (notify->nt_need_ack && notify->nt_lreserved) { 3514 if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { 3515 if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) { 3516 isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK"); 3517 } 3518 } else { 3519 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved); 3520 } 3521 } 3522 } 3523 3524 /* 3525 * Find the associated private data and mark it as dead so 3526 * we don't try to work on it any further. 
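 *
 * We look the command up by the tag carried in the CCB being aborted
 * (falling back to the wildcard lun's tstate), flag the adjunct dead and
 * return CAM_REQ_CMP; if no adjunct can be found the abort request itself
 * fails with CAM_REQ_INVALID.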
3527 */ 3528 static void 3529 isp_target_mark_aborted(ispsoftc_t *isp, union ccb *ccb) 3530 { 3531 tstate_t *tptr; 3532 atio_private_data_t *atp; 3533 union ccb *accb = ccb->cab.abort_ccb; 3534 3535 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb)); 3536 if (tptr == NULL) { 3537 tptr = get_lun_statep(isp, XS_CHANNEL(accb), CAM_LUN_WILDCARD); 3538 if (tptr == NULL) { 3539 ccb->ccb_h.status = CAM_REQ_INVALID; 3540 return; 3541 } 3542 } 3543 3544 atp = isp_find_atpd(isp, tptr, accb->atio.tag_id); 3545 if (atp == NULL) { 3546 ccb->ccb_h.status = CAM_REQ_INVALID; 3547 } else { 3548 atp->dead = 1; 3549 ccb->ccb_h.status = CAM_REQ_CMP; 3550 } 3551 rls_lun_statep(isp, tptr); 3552 } 3553 3554 static void 3555 isp_target_mark_aborted_early(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag_id) 3556 { 3557 atio_private_data_t *atp; 3558 inot_private_data_t *restart_queue = tptr->restart_queue; 3559 3560 /* 3561 * First, clean any commands pending restart 3562 */ 3563 tptr->restart_queue = NULL; 3564 while (restart_queue) { 3565 uint32_t this_tag_id; 3566 inot_private_data_t *ntp = restart_queue; 3567 3568 restart_queue = ntp->rd.nt.nt_hba; 3569 3570 if (IS_24XX(isp)) { 3571 this_tag_id = ((at7_entry_t *)ntp->rd.data)->at_rxid; 3572 } else { 3573 this_tag_id = ((at2_entry_t *)ntp->rd.data)->at_rxid; 3574 } 3575 if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) { 3576 isp_put_ntpd(isp, tptr, ntp); 3577 } else { 3578 ntp->rd.nt.nt_hba = tptr->restart_queue; 3579 tptr->restart_queue = ntp; 3580 } 3581 } 3582 3583 /* 3584 * Now mark other ones dead as well. 3585 */ 3586 for (atp = tptr->atpool; atp < &tptr->atpool[ATPDPSIZE]; atp++) { 3587 if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id) { 3588 atp->dead = 1; 3589 } 3590 } 3591 } 3592 #endif 3593 3594 static void 3595 isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) 3596 { 3597 struct cam_sim *sim; 3598 int bus, tgt; 3599 ispsoftc_t *isp; 3600 3601 sim = (struct cam_sim *)cbarg; 3602 isp = (ispsoftc_t *) cam_sim_softc(sim); 3603 bus = cam_sim_bus(sim); 3604 tgt = xpt_path_target_id(path); 3605 3606 switch (code) { 3607 case AC_LOST_DEVICE: 3608 if (IS_SCSI(isp)) { 3609 uint16_t oflags, nflags; 3610 sdparam *sdp = SDPARAM(isp, bus); 3611 3612 if (tgt >= 0) { 3613 nflags = sdp->isp_devparam[tgt].nvrm_flags; 3614 #ifndef ISP_TARGET_MODE 3615 nflags &= DPARM_SAFE_DFLT; 3616 if (isp->isp_loaded_fw) { 3617 nflags |= DPARM_NARROW | DPARM_ASYNC; 3618 } 3619 #else 3620 nflags = DPARM_DEFAULT; 3621 #endif 3622 oflags = sdp->isp_devparam[tgt].goal_flags; 3623 sdp->isp_devparam[tgt].goal_flags = nflags; 3624 sdp->isp_devparam[tgt].dev_update = 1; 3625 sdp->update = 1; 3626 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus); 3627 sdp->isp_devparam[tgt].goal_flags = oflags; 3628 } 3629 } 3630 break; 3631 default: 3632 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); 3633 break; 3634 } 3635 } 3636 3637 static void 3638 isp_poll(struct cam_sim *sim) 3639 { 3640 ispsoftc_t *isp = cam_sim_softc(sim); 3641 uint16_t isr, sema, info; 3642 3643 if (ISP_READ_ISR(isp, &isr, &sema, &info)) 3644 isp_intr(isp, isr, sema, info); 3645 } 3646 3647 3648 static void 3649 isp_watchdog(void *arg) 3650 { 3651 struct ccb_scsiio *xs = arg; 3652 ispsoftc_t *isp; 3653 uint32_t ohandle = ISP_HANDLE_FREE, handle; 3654 3655 isp = XS_ISP(xs); 3656 3657 handle = isp_find_handle(isp, xs); 3658 3659 /* 3660 * Hand crank the interrupt code just to be sure the command isn't stuck somewhere. 
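 *
 * That is, poll the chip once and run the normal completion path, then
 * check whether the handle is still outstanding before deciding the
 * command is actually stuck:
 *
 *	if (ISP_READ_ISR(isp, &isr, &sema, &info) != 0)
 *		isp_intr(isp, isr, sema, info);
 *	handle = isp_find_handle(isp, xs);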
3661 */ 3662 if (handle != ISP_HANDLE_FREE) { 3663 uint16_t isr, sema, info; 3664 if (ISP_READ_ISR(isp, &isr, &sema, &info) != 0) 3665 isp_intr(isp, isr, sema, info); 3666 ohandle = handle; 3667 handle = isp_find_handle(isp, xs); 3668 } 3669 if (handle != ISP_HANDLE_FREE) { 3670 /* 3671 * Try and make sure the command is really dead before 3672 * we release the handle (and DMA resources) for reuse. 3673 * 3674 * If we are successful in aborting the command then 3675 * we're done here because we'll get the command returned 3676 * back separately. 3677 */ 3678 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) { 3679 return; 3680 } 3681 3682 /* 3683 * Note that after calling the above, the command may in 3684 * fact have been completed. 3685 */ 3686 xs = isp_find_xs(isp, handle); 3687 3688 /* 3689 * If the command no longer exists, then we won't 3690 * be able to find the xs again with this handle. 3691 */ 3692 if (xs == NULL) { 3693 return; 3694 } 3695 3696 /* 3697 * After this point, the command is really dead. 3698 */ 3699 if (XS_XFRLEN(xs)) { 3700 ISP_DMAFREE(isp, xs, handle); 3701 } 3702 isp_destroy_handle(isp, handle); 3703 isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle); 3704 xs->ccb_h.status &= ~CAM_STATUS_MASK; 3705 xs->ccb_h.status |= CAM_CMD_TIMEOUT; 3706 isp_prt_endcmd(isp, xs); 3707 isp_done(xs); 3708 } else { 3709 if (ohandle != ISP_HANDLE_FREE) { 3710 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle); 3711 } else { 3712 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__); 3713 } 3714 } 3715 } 3716 3717 static void 3718 isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) 3719 { 3720 union ccb *ccb; 3721 struct isp_fc *fc = ISP_FC_PC(isp, chan); 3722 3723 /* 3724 * Allocate a CCB, create a wildcard path for this target and schedule a rescan. 3725 */ 3726 ccb = xpt_alloc_ccb_nowait(); 3727 if (ccb == NULL) { 3728 isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan); 3729 return; 3730 } 3731 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim), 3732 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 3733 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); 3734 xpt_free_ccb(ccb); 3735 return; 3736 } 3737 xpt_rescan(ccb); 3738 } 3739 3740 static void 3741 isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) 3742 { 3743 struct cam_path *tp; 3744 struct isp_fc *fc = ISP_FC_PC(isp, chan); 3745 3746 if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 3747 xpt_async(AC_LOST_DEVICE, tp, NULL); 3748 xpt_free_path(tp); 3749 } 3750 } 3751 3752 /* 3753 * Gone Device Timer Function- when we have decided that a device has gone 3754 * away, we wait a specific period of time prior to telling the OS it has 3755 * gone away. 3756 * 3757 * This timer function fires once a second and then scans the port database 3758 * for devices that are marked dead but still have a virtual target assigned. 3759 * We decrement a counter for that port database entry, and when it hits zero, 3760 * we tell the OS the device has gone away. 
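 * The callout keeps being re-armed only while some zombie entry still has
 * a nonzero gone_timer; once the last one has been reported, the timer is
 * left idle.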
 */
static void
isp_gdt(void *arg)
{
	struct isp_fc *fc = arg;
	taskqueue_enqueue(taskqueue_thread, &fc->gtask);
}

static void
isp_gdt_task(void *arg, int pending)
{
	struct isp_fc *fc = arg;
	ispsoftc_t *isp = fc->isp;
	int chan = fc - isp->isp_osinfo.pc.fc;
	fcportdb_t *lp;
	struct ac_contract ac;
	struct ac_device_changed *adc;
	int dbidx, more_to_do = 0;

	ISP_LOCK(isp);
	isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan);
	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
		lp = &FCPARAM(isp, chan)->portdb[dbidx];

		if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
			continue;
		}
		if (lp->gone_timer != 0) {
			lp->gone_timer -= 1;
			more_to_do++;
			continue;
		}
		isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout");
		if (lp->is_target) {
			lp->is_target = 0;
			isp_make_gone(isp, lp, chan, dbidx);
		}
		if (lp->is_initiator) {
			lp->is_initiator = 0;
			ac.contract_number = AC_CONTRACT_DEV_CHG;
			adc = (struct ac_device_changed *) ac.contract_data;
			adc->wwpn = lp->port_wwn;
			adc->port = lp->portid;
			adc->target = dbidx;
			adc->arrived = 0;
			xpt_async(AC_CONTRACT, fc->path, &ac);
		}
		lp->state = FC_PORTDB_STATE_NIL;
	}
	if (fc->ready) {
		if (more_to_do) {
			callout_reset(&fc->gdt, hz, isp_gdt, fc);
		} else {
			callout_deactivate(&fc->gdt);
			isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime);
		}
	}
	ISP_UNLOCK(isp);
}

/*
 * Loop Down Timer Function - when the loop goes down, a timer is started,
 * and after it expires we come here and take all probational devices that
 * the OS knows about and tell the OS that they've gone away.
 *
 * We don't clear the devices out of our port database because, when the
 * loop comes back up, we have to do some actual cleanup with the chip at
 * that point (implicit PLOGO, e.g., to get the chip's port database state
 * right).
 */
static void
isp_ldt(void *arg)
{
	struct isp_fc *fc = arg;
	taskqueue_enqueue(taskqueue_thread, &fc->ltask);
}

static void
isp_ldt_task(void *arg, int pending)
{
	struct isp_fc *fc = arg;
	ispsoftc_t *isp = fc->isp;
	int chan = fc - isp->isp_osinfo.pc.fc;
	fcportdb_t *lp;
	struct ac_contract ac;
	struct ac_device_changed *adc;
	int dbidx, i;

	ISP_LOCK(isp);
	isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop Down Timer expired @ %lu", chan, (unsigned long) time_uptime);
	callout_deactivate(&fc->ldt);

	/*
	 * Notify the OS of all targets that we now consider to have departed.
	 */
	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
		lp = &FCPARAM(isp, chan)->portdb[dbidx];

		if (lp->state == FC_PORTDB_STATE_NIL)
			continue;

		/*
		 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
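		 * (As written, the loop below only logs any command handles
		 * still outstanding for the departing target; it does not
		 * actually abort or complete them.)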
3863 */ 3864 for (i = 0; i < isp->isp_maxcmds; i++) { 3865 struct ccb_scsiio *xs; 3866 3867 if (!ISP_VALID_HANDLE(isp, isp->isp_xflist[i].handle)) { 3868 continue; 3869 } 3870 if ((xs = isp->isp_xflist[i].cmd) == NULL) { 3871 continue; 3872 } 3873 if (dbidx != XS_TGT(xs)) { 3874 continue; 3875 } 3876 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout", 3877 isp->isp_xflist[i].handle, chan, XS_TGT(xs), 3878 (uintmax_t)XS_LUN(xs)); 3879 } 3880 3881 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout"); 3882 if (lp->is_target) { 3883 lp->is_target = 0; 3884 isp_make_gone(isp, lp, chan, dbidx); 3885 } 3886 if (lp->is_initiator) { 3887 lp->is_initiator = 0; 3888 ac.contract_number = AC_CONTRACT_DEV_CHG; 3889 adc = (struct ac_device_changed *) ac.contract_data; 3890 adc->wwpn = lp->port_wwn; 3891 adc->port = lp->portid; 3892 adc->target = dbidx; 3893 adc->arrived = 0; 3894 xpt_async(AC_CONTRACT, fc->path, &ac); 3895 } 3896 } 3897 3898 isp_unfreeze_loopdown(isp, chan); 3899 /* 3900 * The loop down timer has expired. Wake up the kthread 3901 * to notice that fact (or make it false). 3902 */ 3903 fc->loop_dead = 1; 3904 fc->loop_down_time = fc->loop_down_limit+1; 3905 wakeup(fc); 3906 ISP_UNLOCK(isp); 3907 } 3908 3909 static void 3910 isp_kthread(void *arg) 3911 { 3912 struct isp_fc *fc = arg; 3913 ispsoftc_t *isp = fc->isp; 3914 int chan = fc - isp->isp_osinfo.pc.fc; 3915 int slp = 0; 3916 3917 mtx_lock(&isp->isp_osinfo.lock); 3918 3919 while (isp->isp_osinfo.is_exiting == 0) { 3920 int lb, lim; 3921 3922 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d checking FC state", __func__, chan); 3923 lb = isp_fc_runstate(isp, chan, 250000); 3924 3925 /* 3926 * Our action is different based upon whether we're supporting 3927 * Initiator mode or not. If we are, we might freeze the simq 3928 * when loop is down and set all sorts of different delays to 3929 * check again. 3930 * 3931 * If not, we simply just wait for loop to come up. 3932 */ 3933 if (lb && (FCPARAM(isp, chan)->role & ISP_ROLE_INITIATOR)) { 3934 /* 3935 * Increment loop down time by the last sleep interval 3936 */ 3937 fc->loop_down_time += slp; 3938 3939 if (lb < 0) { 3940 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC loop not up (down count %d)", __func__, chan, fc->loop_down_time); 3941 } else { 3942 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC got to %d (down count %d)", __func__, chan, lb, fc->loop_down_time); 3943 } 3944 3945 /* 3946 * If we've never seen loop up and we've waited longer 3947 * than quickboot time, or we've seen loop up but we've 3948 * waited longer than loop_down_limit, give up and go 3949 * to sleep until loop comes up. 
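			 * Otherwise the sleep interval below backs off with
			 * accumulated downtime: 1s under 10s of downtime,
			 * then 5s, 10s, 20s and finally 30s.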
3950 */ 3951 if (FCPARAM(isp, chan)->loop_seen_once == 0) { 3952 lim = isp_quickboot_time; 3953 } else { 3954 lim = fc->loop_down_limit; 3955 } 3956 if (fc->loop_down_time >= lim) { 3957 isp_freeze_loopdown(isp, chan, "loop limit hit"); 3958 slp = 0; 3959 } else if (fc->loop_down_time < 10) { 3960 slp = 1; 3961 } else if (fc->loop_down_time < 30) { 3962 slp = 5; 3963 } else if (fc->loop_down_time < 60) { 3964 slp = 10; 3965 } else if (fc->loop_down_time < 120) { 3966 slp = 20; 3967 } else { 3968 slp = 30; 3969 } 3970 3971 } else if (lb) { 3972 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC Loop Down", __func__, chan); 3973 fc->loop_down_time += slp; 3974 if (fc->loop_down_time > 300) 3975 slp = 0; 3976 else 3977 slp = 60; 3978 } else { 3979 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC state OK", __func__, chan); 3980 fc->loop_down_time = 0; 3981 slp = 0; 3982 } 3983 3984 3985 /* 3986 * If this is past the first loop up or the loop is dead and if we'd frozen the simq, unfreeze it 3987 * now so that CAM can start sending us commands. 3988 * 3989 * If the FC state isn't okay yet, they'll hit that in isp_start which will freeze the queue again 3990 * or kill the commands, as appropriate. 3991 */ 3992 3993 if (FCPARAM(isp, chan)->loop_seen_once || fc->loop_dead) { 3994 isp_unfreeze_loopdown(isp, chan); 3995 } 3996 3997 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d sleep time %d", __func__, chan, slp); 3998 3999 msleep(fc, &isp->isp_osinfo.lock, PRIBIO, "ispf", slp * hz); 4000 4001 /* 4002 * If slp is zero, we're waking up for the first time after 4003 * things have been okay. In this case, we set a deferral state 4004 * for all commands and delay hysteresis seconds before starting 4005 * the FC state evaluation. This gives the loop/fabric a chance 4006 * to settle. 4007 */ 4008 if (slp == 0 && fc->hysteresis) { 4009 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d sleep hysteresis ticks %d", __func__, chan, fc->hysteresis * hz); 4010 mtx_unlock(&isp->isp_osinfo.lock); 4011 pause("ispt", fc->hysteresis * hz); 4012 mtx_lock(&isp->isp_osinfo.lock); 4013 } 4014 } 4015 fc->num_threads -= 1; 4016 mtx_unlock(&isp->isp_osinfo.lock); 4017 kthread_exit(); 4018 } 4019 4020 static void 4021 isp_action(struct cam_sim *sim, union ccb *ccb) 4022 { 4023 int bus, tgt, ts, error, lim; 4024 ispsoftc_t *isp; 4025 struct ccb_trans_settings *cts; 4026 4027 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 4028 4029 isp = (ispsoftc_t *)cam_sim_softc(sim); 4030 mtx_assert(&isp->isp_lock, MA_OWNED); 4031 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 4032 ISP_PCMD(ccb) = NULL; 4033 4034 switch (ccb->ccb_h.func_code) { 4035 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 4036 bus = XS_CHANNEL(ccb); 4037 /* 4038 * Do a couple of preliminary checks... 
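		 * (A CDB passed by physical address is not supported, so such
		 * requests are rejected right away with CAM_REQ_INVALID.)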
4039 */ 4040 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 4041 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 4042 ccb->ccb_h.status = CAM_REQ_INVALID; 4043 isp_done((struct ccb_scsiio *) ccb); 4044 break; 4045 } 4046 } 4047 ccb->csio.req_map = NULL; 4048 #ifdef DIAGNOSTIC 4049 if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) { 4050 xpt_print(ccb->ccb_h.path, "invalid target\n"); 4051 ccb->ccb_h.status = CAM_PATH_INVALID; 4052 } else if (ISP_MAX_LUNS(isp) > 0 && 4053 ccb->ccb_h.target_lun >= ISP_MAX_LUNS(isp)) { 4054 xpt_print(ccb->ccb_h.path, "invalid lun\n"); 4055 ccb->ccb_h.status = CAM_PATH_INVALID; 4056 } 4057 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 4058 xpt_done(ccb); 4059 break; 4060 } 4061 #endif 4062 ccb->csio.scsi_status = SCSI_STATUS_OK; 4063 if (isp_get_pcmd(isp, ccb)) { 4064 isp_prt(isp, ISP_LOGWARN, "out of PCMDs"); 4065 cam_freeze_devq(ccb->ccb_h.path); 4066 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); 4067 ccb->ccb_h.status = CAM_REQUEUE_REQ; 4068 xpt_done(ccb); 4069 break; 4070 } 4071 error = isp_start((XS_T *) ccb); 4072 switch (error) { 4073 case CMD_QUEUED: 4074 ccb->ccb_h.status |= CAM_SIM_QUEUED; 4075 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) { 4076 break; 4077 } 4078 ts = ccb->ccb_h.timeout; 4079 if (ts == CAM_TIME_DEFAULT) { 4080 ts = 60*1000; 4081 } 4082 ts = isp_mstohz(ts); 4083 callout_reset(&PISP_PCMD(ccb)->wdog, ts, isp_watchdog, ccb); 4084 break; 4085 case CMD_RQLATER: 4086 /* 4087 * We get this result for FC devices if the loop state isn't ready yet 4088 * or if the device in question has gone zombie on us. 4089 * 4090 * If we've never seen Loop UP at all, we requeue this request and wait 4091 * for the initial loop up delay to expire. 4092 */ 4093 lim = ISP_FC_PC(isp, bus)->loop_down_limit; 4094 if (FCPARAM(isp, bus)->loop_seen_once == 0 || ISP_FC_PC(isp, bus)->loop_down_time >= lim) { 4095 if (FCPARAM(isp, bus)->loop_seen_once == 0) { 4096 isp_prt(isp, ISP_LOGDEBUG0, 4097 "%d.%jx loop not seen yet @ %lu", 4098 XS_TGT(ccb), (uintmax_t)XS_LUN(ccb), 4099 (unsigned long) time_uptime); 4100 } else { 4101 isp_prt(isp, ISP_LOGDEBUG0, 4102 "%d.%jx downtime (%d) > lim (%d)", 4103 XS_TGT(ccb), (uintmax_t)XS_LUN(ccb), 4104 ISP_FC_PC(isp, bus)->loop_down_time, 4105 lim); 4106 } 4107 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 4108 isp_done((struct ccb_scsiio *) ccb); 4109 break; 4110 } 4111 isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later", 4112 XS_TGT(ccb), (uintmax_t)XS_LUN(ccb)); 4113 cam_freeze_devq(ccb->ccb_h.path); 4114 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); 4115 ccb->ccb_h.status = CAM_REQUEUE_REQ; 4116 isp_free_pcmd(isp, ccb); 4117 xpt_done(ccb); 4118 break; 4119 case CMD_EAGAIN: 4120 isp_free_pcmd(isp, ccb); 4121 cam_freeze_devq(ccb->ccb_h.path); 4122 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0); 4123 ccb->ccb_h.status = CAM_REQUEUE_REQ; 4124 xpt_done(ccb); 4125 break; 4126 case CMD_COMPLETE: 4127 isp_done((struct ccb_scsiio *) ccb); 4128 break; 4129 default: 4130 isp_prt(isp, ISP_LOGERR, "What's this? 
0x%x at %d in file %s", error, __LINE__, __FILE__); 4131 ccb->ccb_h.status = CAM_REQUEUE_REQ; 4132 isp_free_pcmd(isp, ccb); 4133 xpt_done(ccb); 4134 } 4135 break; 4136 4137 #ifdef ISP_TARGET_MODE 4138 case XPT_EN_LUN: /* Enable/Disable LUN as a target */ 4139 if (ccb->cel.enable) { 4140 isp_enable_lun(isp, ccb); 4141 } else { 4142 isp_disable_lun(isp, ccb); 4143 } 4144 break; 4145 case XPT_IMMED_NOTIFY: 4146 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ 4147 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 4148 { 4149 tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 4150 if (tptr == NULL) { 4151 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), CAM_LUN_WILDCARD); 4152 } 4153 if (tptr == NULL) { 4154 const char *str; 4155 uint32_t tag; 4156 4157 if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { 4158 str = "XPT_IMMEDIATE_NOTIFY"; 4159 tag = ccb->cin1.seq_id; 4160 } else { 4161 tag = ccb->atio.tag_id; 4162 str = "XPT_ACCEPT_TARGET_IO"; 4163 } 4164 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] no state pointer found for %s\n", __func__, tag, str); 4165 dump_tstates(isp, XS_CHANNEL(ccb)); 4166 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 4167 break; 4168 } 4169 ccb->ccb_h.spriv_field0 = 0; 4170 ccb->ccb_h.spriv_ptr1 = isp; 4171 4172 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4173 if (ccb->atio.tag_id) { 4174 atio_private_data_t *atp = isp_find_atpd(isp, tptr, ccb->atio.tag_id); 4175 if (atp) { 4176 isp_put_atpd(isp, tptr, atp); 4177 } 4178 } 4179 tptr->atio_count++; 4180 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle); 4181 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, "Put FREE ATIO (tag id 0x%x), count now %d\n", 4182 ccb->atio.tag_id, tptr->atio_count); 4183 ccb->atio.tag_id = 0; 4184 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { 4185 if (ccb->cin1.tag_id) { 4186 inot_private_data_t *ntp = isp_find_ntpd(isp, tptr, ccb->cin1.tag_id, ccb->cin1.seq_id); 4187 if (ntp) { 4188 isp_put_ntpd(isp, tptr, ntp); 4189 } 4190 } 4191 tptr->inot_count++; 4192 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle); 4193 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, "Put FREE INOT, (seq id 0x%x) count now %d\n", 4194 ccb->cin1.seq_id, tptr->inot_count); 4195 ccb->cin1.seq_id = 0; 4196 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 4197 tptr->inot_count++; 4198 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle); 4199 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, "Put FREE INOT, (seq id 0x%x) count now %d\n", 4200 ccb->cin1.seq_id, tptr->inot_count); 4201 ccb->cin1.seq_id = 0; 4202 } 4203 rls_lun_statep(isp, tptr); 4204 ccb->ccb_h.status = CAM_REQ_INPROG; 4205 break; 4206 } 4207 case XPT_NOTIFY_ACK: 4208 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 4209 break; 4210 case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */ 4211 { 4212 tstate_t *tptr; 4213 inot_private_data_t *ntp; 4214 4215 /* 4216 * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb 4217 * XXX: matches that for the immediate notify, we have to *search* for the notify structure 4218 */ 4219 /* 4220 * All the relevant path information is in the associated immediate notify 4221 */ 4222 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); 4223 ntp = get_ntp_from_tagdata(isp, ccb->cna2.tag_id, ccb->cna2.seq_id, &tptr); 4224 if (ntp == NULL) { 4225 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] 
XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__, 4226 ccb->cna2.tag_id, ccb->cna2.seq_id); 4227 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 4228 xpt_done(ccb); 4229 break; 4230 } 4231 if (isp_handle_platform_target_notify_ack(isp, &ntp->rd.nt)) { 4232 rls_lun_statep(isp, tptr); 4233 cam_freeze_devq(ccb->ccb_h.path); 4234 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); 4235 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 4236 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 4237 break; 4238 } 4239 isp_put_ntpd(isp, tptr, ntp); 4240 rls_lun_statep(isp, tptr); 4241 ccb->ccb_h.status = CAM_REQ_CMP; 4242 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); 4243 xpt_done(ccb); 4244 break; 4245 } 4246 case XPT_CONT_TARGET_IO: 4247 isp_target_start_ctio(isp, ccb, FROM_CAM); 4248 break; 4249 #endif 4250 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 4251 { 4252 struct isp_fc *fc; 4253 4254 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 4255 tgt = ccb->ccb_h.target_id; 4256 tgt |= (bus << 16); 4257 if (IS_FC(isp)) 4258 fc = ISP_FC_PC(isp, bus); 4259 else 4260 fc = NULL; 4261 4262 error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt); 4263 if (error) { 4264 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 4265 } else { 4266 /* 4267 * If we have a FC device, reset the Command 4268 * Reference Number, because the target will expect 4269 * that we re-start the CRN at 1 after a reset. 4270 */ 4271 if (fc != NULL) 4272 isp_fcp_reset_crn(fc, tgt, /*tgt_set*/ 1); 4273 4274 ccb->ccb_h.status = CAM_REQ_CMP; 4275 } 4276 xpt_done(ccb); 4277 break; 4278 } 4279 case XPT_ABORT: /* Abort the specified CCB */ 4280 { 4281 union ccb *accb = ccb->cab.abort_ccb; 4282 switch (accb->ccb_h.func_code) { 4283 #ifdef ISP_TARGET_MODE 4284 case XPT_ACCEPT_TARGET_IO: 4285 isp_target_mark_aborted(isp, ccb); 4286 break; 4287 #endif 4288 case XPT_SCSI_IO: 4289 error = isp_control(isp, ISPCTL_ABORT_CMD, accb); 4290 if (error) { 4291 ccb->ccb_h.status = CAM_UA_ABORT; 4292 } else { 4293 ccb->ccb_h.status = CAM_REQ_CMP; 4294 } 4295 break; 4296 default: 4297 ccb->ccb_h.status = CAM_REQ_INVALID; 4298 break; 4299 } 4300 /* 4301 * This is not a queued CCB, so the caller expects it to be 4302 * complete when control is returned. 4303 */ 4304 break; 4305 } 4306 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 4307 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 4308 cts = &ccb->cts; 4309 if (!IS_CURRENT_SETTINGS(cts)) { 4310 ccb->ccb_h.status = CAM_REQ_INVALID; 4311 xpt_done(ccb); 4312 break; 4313 } 4314 tgt = cts->ccb_h.target_id; 4315 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 4316 if (IS_SCSI(isp)) { 4317 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 4318 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 4319 sdparam *sdp = SDPARAM(isp, bus); 4320 uint16_t *dptr; 4321 4322 if (spi->valid == 0 && scsi->valid == 0) { 4323 ccb->ccb_h.status = CAM_REQ_CMP; 4324 xpt_done(ccb); 4325 break; 4326 } 4327 4328 /* 4329 * We always update (internally) from goal_flags 4330 * so any request to change settings just gets 4331 * vectored to that location. 
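			 * Setting dev_update and sdp->update below only stages
			 * the change; the chip is reprogrammed when
			 * ISPCTL_UPDATE_PARAMS is next issued for this bus.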
4332 */ 4333 dptr = &sdp->isp_devparam[tgt].goal_flags; 4334 4335 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 4336 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 4337 *dptr |= DPARM_DISC; 4338 else 4339 *dptr &= ~DPARM_DISC; 4340 } 4341 4342 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 4343 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 4344 *dptr |= DPARM_TQING; 4345 else 4346 *dptr &= ~DPARM_TQING; 4347 } 4348 4349 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 4350 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 4351 *dptr |= DPARM_WIDE; 4352 else 4353 *dptr &= ~DPARM_WIDE; 4354 } 4355 4356 /* 4357 * XXX: FIX ME 4358 */ 4359 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && (spi->valid & CTS_SPI_VALID_SYNC_RATE) && (spi->sync_period && spi->sync_offset)) { 4360 *dptr |= DPARM_SYNC; 4361 /* 4362 * XXX: CHECK FOR LEGALITY 4363 */ 4364 sdp->isp_devparam[tgt].goal_period = spi->sync_period; 4365 sdp->isp_devparam[tgt].goal_offset = spi->sync_offset; 4366 } else { 4367 *dptr &= ~DPARM_SYNC; 4368 } 4369 isp_prt(isp, ISP_LOGDEBUG0, "SET (%d.%d.%jx) to flags %x off %x per %x", bus, tgt, (uintmax_t)cts->ccb_h.target_lun, sdp->isp_devparam[tgt].goal_flags, 4370 sdp->isp_devparam[tgt].goal_offset, sdp->isp_devparam[tgt].goal_period); 4371 sdp->isp_devparam[tgt].dev_update = 1; 4372 sdp->update = 1; 4373 } 4374 ccb->ccb_h.status = CAM_REQ_CMP; 4375 xpt_done(ccb); 4376 break; 4377 case XPT_GET_TRAN_SETTINGS: 4378 cts = &ccb->cts; 4379 tgt = cts->ccb_h.target_id; 4380 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 4381 if (IS_FC(isp)) { 4382 fcparam *fcp = FCPARAM(isp, bus); 4383 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 4384 struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; 4385 4386 cts->protocol = PROTO_SCSI; 4387 cts->protocol_version = SCSI_REV_2; 4388 cts->transport = XPORT_FC; 4389 cts->transport_version = 0; 4390 4391 scsi->valid = CTS_SCSI_VALID_TQ; 4392 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 4393 fc->valid = CTS_FC_VALID_SPEED; 4394 fc->bitrate = 100000; 4395 fc->bitrate *= fcp->isp_gbspeed; 4396 if (tgt < MAX_FC_TARG) { 4397 fcportdb_t *lp = &fcp->portdb[tgt]; 4398 fc->wwnn = lp->node_wwn; 4399 fc->wwpn = lp->port_wwn; 4400 fc->port = lp->portid; 4401 fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 4402 } 4403 } else { 4404 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 4405 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 4406 sdparam *sdp = SDPARAM(isp, bus); 4407 uint16_t dval, pval, oval; 4408 4409 if (IS_CURRENT_SETTINGS(cts)) { 4410 sdp->isp_devparam[tgt].dev_refresh = 1; 4411 sdp->update = 1; 4412 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus); 4413 dval = sdp->isp_devparam[tgt].actv_flags; 4414 oval = sdp->isp_devparam[tgt].actv_offset; 4415 pval = sdp->isp_devparam[tgt].actv_period; 4416 } else { 4417 dval = sdp->isp_devparam[tgt].nvrm_flags; 4418 oval = sdp->isp_devparam[tgt].nvrm_offset; 4419 pval = sdp->isp_devparam[tgt].nvrm_period; 4420 } 4421 4422 cts->protocol = PROTO_SCSI; 4423 cts->protocol_version = SCSI_REV_2; 4424 cts->transport = XPORT_SPI; 4425 cts->transport_version = 2; 4426 4427 spi->valid = 0; 4428 scsi->valid = 0; 4429 spi->flags = 0; 4430 scsi->flags = 0; 4431 if (dval & DPARM_DISC) { 4432 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 4433 } 4434 if ((dval & DPARM_SYNC) && oval && pval) { 4435 spi->sync_offset = oval; 4436 spi->sync_period = pval; 4437 } else { 4438 spi->sync_offset = 0; 4439 spi->sync_period = 0; 4440 } 4441 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 
4442 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 4443 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 4444 if (dval & DPARM_WIDE) { 4445 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 4446 } else { 4447 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 4448 } 4449 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 4450 scsi->valid = CTS_SCSI_VALID_TQ; 4451 if (dval & DPARM_TQING) { 4452 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 4453 } 4454 spi->valid |= CTS_SPI_VALID_DISC; 4455 } 4456 isp_prt(isp, ISP_LOGDEBUG0, "GET %s (%d.%d.%jx) to flags %x off %x per %x", IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 4457 bus, tgt, (uintmax_t)cts->ccb_h.target_lun, dval, oval, pval); 4458 } 4459 ccb->ccb_h.status = CAM_REQ_CMP; 4460 xpt_done(ccb); 4461 break; 4462 4463 case XPT_CALC_GEOMETRY: 4464 cam_calc_geometry(&ccb->ccg, 1); 4465 xpt_done(ccb); 4466 break; 4467 4468 case XPT_RESET_BUS: /* Reset the specified bus */ 4469 bus = cam_sim_bus(sim); 4470 error = isp_control(isp, ISPCTL_RESET_BUS, bus); 4471 if (error) { 4472 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 4473 xpt_done(ccb); 4474 break; 4475 } 4476 if (bootverbose) { 4477 xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus); 4478 } 4479 if (IS_FC(isp)) { 4480 xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0); 4481 } else { 4482 xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, 0); 4483 } 4484 ccb->ccb_h.status = CAM_REQ_CMP; 4485 xpt_done(ccb); 4486 break; 4487 4488 case XPT_TERM_IO: /* Terminate the I/O process */ 4489 ccb->ccb_h.status = CAM_REQ_INVALID; 4490 xpt_done(ccb); 4491 break; 4492 4493 case XPT_SET_SIM_KNOB: /* Set SIM knobs */ 4494 { 4495 struct ccb_sim_knob *kp = &ccb->knob; 4496 fcparam *fcp; 4497 4498 if (!IS_FC(isp)) { 4499 ccb->ccb_h.status = CAM_REQ_INVALID; 4500 xpt_done(ccb); 4501 break; 4502 } 4503 4504 bus = cam_sim_bus(xpt_path_sim(kp->ccb_h.path)); 4505 fcp = FCPARAM(isp, bus); 4506 4507 if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) { 4508 fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn; 4509 fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn; 4510 isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn); 4511 } 4512 ccb->ccb_h.status = CAM_REQ_CMP; 4513 if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) { 4514 int rchange = 0; 4515 int newrole = 0; 4516 4517 switch (kp->xport_specific.fc.role) { 4518 case KNOB_ROLE_NONE: 4519 if (fcp->role != ISP_ROLE_NONE) { 4520 rchange = 1; 4521 newrole = ISP_ROLE_NONE; 4522 } 4523 break; 4524 case KNOB_ROLE_TARGET: 4525 if (fcp->role != ISP_ROLE_TARGET) { 4526 rchange = 1; 4527 newrole = ISP_ROLE_TARGET; 4528 } 4529 break; 4530 case KNOB_ROLE_INITIATOR: 4531 if (fcp->role != ISP_ROLE_INITIATOR) { 4532 rchange = 1; 4533 newrole = ISP_ROLE_INITIATOR; 4534 } 4535 break; 4536 case KNOB_ROLE_BOTH: 4537 if (fcp->role != ISP_ROLE_BOTH) { 4538 rchange = 1; 4539 newrole = ISP_ROLE_BOTH; 4540 } 4541 break; 4542 } 4543 if (rchange) { 4544 ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role on from %d to %d\n", fcp->role, newrole); 4545 #ifdef ISP_TARGET_MODE 4546 ISP_SET_PC(isp, bus, tm_enabled, 0); 4547 ISP_SET_PC(isp, bus, tm_luns_enabled, 0); 4548 #endif 4549 if (isp_control(isp, ISPCTL_CHANGE_ROLE, 4550 bus, newrole) != 0) { 4551 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 4552 xpt_done(ccb); 4553 break; 4554 } 4555 #ifdef ISP_TARGET_MODE 4556 if (newrole == ISP_ROLE_TARGET || newrole == ISP_ROLE_BOTH) { 4557 /* 4558 * Give the new role a chance to complain and settle 4559 */ 4560 msleep(isp, 
&isp->isp_lock, PRIBIO, "taking a breather", 2); 4561 ccb->ccb_h.status = isp_enable_deferred_luns(isp, bus); 4562 } 4563 #endif 4564 } 4565 } 4566 xpt_done(ccb); 4567 break; 4568 } 4569 case XPT_GET_SIM_KNOB: /* Get SIM knobs */ 4570 { 4571 struct ccb_sim_knob *kp = &ccb->knob; 4572 4573 if (IS_FC(isp)) { 4574 fcparam *fcp; 4575 4576 bus = cam_sim_bus(xpt_path_sim(kp->ccb_h.path)); 4577 fcp = FCPARAM(isp, bus); 4578 4579 kp->xport_specific.fc.wwnn = fcp->isp_wwnn; 4580 kp->xport_specific.fc.wwpn = fcp->isp_wwpn; 4581 switch (fcp->role) { 4582 case ISP_ROLE_NONE: 4583 kp->xport_specific.fc.role = KNOB_ROLE_NONE; 4584 break; 4585 case ISP_ROLE_TARGET: 4586 kp->xport_specific.fc.role = KNOB_ROLE_TARGET; 4587 break; 4588 case ISP_ROLE_INITIATOR: 4589 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; 4590 break; 4591 case ISP_ROLE_BOTH: 4592 kp->xport_specific.fc.role = KNOB_ROLE_BOTH; 4593 break; 4594 } 4595 kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; 4596 ccb->ccb_h.status = CAM_REQ_CMP; 4597 } else { 4598 ccb->ccb_h.status = CAM_REQ_INVALID; 4599 } 4600 xpt_done(ccb); 4601 break; 4602 } 4603 case XPT_PATH_INQ: /* Path routing inquiry */ 4604 { 4605 struct ccb_pathinq *cpi = &ccb->cpi; 4606 4607 cpi->version_num = 1; 4608 #ifdef ISP_TARGET_MODE 4609 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 4610 #else 4611 cpi->target_sprt = 0; 4612 #endif 4613 cpi->hba_eng_cnt = 0; 4614 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 4615 cpi->max_lun = ISP_MAX_LUNS(isp) == 0 ? 4616 255 : ISP_MAX_LUNS(isp) - 1; 4617 cpi->bus_id = cam_sim_bus(sim); 4618 if (isp->isp_osinfo.sixtyfourbit) 4619 cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE; 4620 else 4621 cpi->maxio = (ISP_NSEG_MAX - 1) * PAGE_SIZE; 4622 4623 bus = cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 4624 if (IS_FC(isp)) { 4625 fcparam *fcp = FCPARAM(isp, bus); 4626 4627 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; 4628 #if __FreeBSD_version >= 1000700 4629 cpi->hba_misc |= PIM_EXTLUNS; 4630 #endif 4631 #if __FreeBSD_version >= 1000039 4632 cpi->hba_misc |= PIM_NOSCAN; 4633 #endif 4634 4635 /* 4636 * Because our loop ID can shift from time to time, 4637 * make our initiator ID out of range of our bus. 4638 */ 4639 cpi->initiator_id = cpi->max_target + 1; 4640 4641 /* 4642 * Set base transfer capabilities for Fibre Channel, for this HBA. 
4643 */ 4644 if (IS_25XX(isp)) { 4645 cpi->base_transfer_speed = 8000000; 4646 } else if (IS_24XX(isp)) { 4647 cpi->base_transfer_speed = 4000000; 4648 } else if (IS_23XX(isp)) { 4649 cpi->base_transfer_speed = 2000000; 4650 } else { 4651 cpi->base_transfer_speed = 1000000; 4652 } 4653 cpi->hba_inquiry = PI_TAG_ABLE; 4654 cpi->transport = XPORT_FC; 4655 cpi->transport_version = 0; 4656 cpi->xport_specific.fc.wwnn = fcp->isp_wwnn; 4657 cpi->xport_specific.fc.wwpn = fcp->isp_wwpn; 4658 cpi->xport_specific.fc.port = fcp->isp_portid; 4659 cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000; 4660 } else { 4661 sdparam *sdp = SDPARAM(isp, bus); 4662 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 4663 cpi->hba_misc = PIM_UNMAPPED; 4664 cpi->initiator_id = sdp->isp_initiator_id; 4665 cpi->base_transfer_speed = 3300; 4666 cpi->transport = XPORT_SPI; 4667 cpi->transport_version = 2; 4668 } 4669 cpi->protocol = PROTO_SCSI; 4670 cpi->protocol_version = SCSI_REV_2; 4671 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 4672 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 4673 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 4674 cpi->unit_number = cam_sim_unit(sim); 4675 cpi->ccb_h.status = CAM_REQ_CMP; 4676 xpt_done(ccb); 4677 break; 4678 } 4679 default: 4680 ccb->ccb_h.status = CAM_REQ_INVALID; 4681 xpt_done(ccb); 4682 break; 4683 } 4684 } 4685 4686 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 4687 4688 void 4689 isp_done(XS_T *sccb) 4690 { 4691 ispsoftc_t *isp = XS_ISP(sccb); 4692 uint32_t status; 4693 4694 if (XS_NOERR(sccb)) 4695 XS_SETERR(sccb, CAM_REQ_CMP); 4696 4697 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) { 4698 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 4699 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 4700 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 4701 } else { 4702 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 4703 } 4704 } 4705 4706 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4707 status = sccb->ccb_h.status & CAM_STATUS_MASK; 4708 if (status != CAM_REQ_CMP) { 4709 if (status != CAM_SEL_TIMEOUT) 4710 isp_prt(isp, ISP_LOGDEBUG0, 4711 "target %d lun %jx CAM status 0x%x SCSI status 0x%x", 4712 XS_TGT(sccb), (uintmax_t)XS_LUN(sccb), 4713 sccb->ccb_h.status, sccb->scsi_status); 4714 else if ((IS_FC(isp)) 4715 && (XS_TGT(sccb) < MAX_FC_TARG)) { 4716 fcparam *fcp; 4717 4718 fcp = FCPARAM(isp, XS_CHANNEL(sccb)); 4719 fcp->portdb[XS_TGT(sccb)].is_target = 0; 4720 } 4721 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 4722 sccb->ccb_h.status |= CAM_DEV_QFRZN; 4723 xpt_freeze_devq(sccb->ccb_h.path, 1); 4724 } 4725 } 4726 4727 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 4728 xpt_print(sccb->ccb_h.path, "cam completion status 0x%x\n", sccb->ccb_h.status); 4729 } 4730 4731 if (ISP_PCMD(sccb)) { 4732 if (callout_active(&PISP_PCMD(sccb)->wdog)) 4733 callout_stop(&PISP_PCMD(sccb)->wdog); 4734 isp_free_pcmd(isp, (union ccb *) sccb); 4735 } 4736 xpt_done((union ccb *) sccb); 4737 } 4738 4739 void 4740 isp_async(ispsoftc_t *isp, ispasync_t cmd, ...) 
4741 { 4742 int bus; 4743 static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s"; 4744 char buf[64]; 4745 char *msg = NULL; 4746 target_id_t tgt; 4747 fcportdb_t *lp; 4748 struct isp_fc *fc; 4749 struct cam_path *tmppath; 4750 struct ac_contract ac; 4751 struct ac_device_changed *adc; 4752 va_list ap; 4753 4754 switch (cmd) { 4755 case ISPASYNC_NEW_TGT_PARAMS: 4756 { 4757 struct ccb_trans_settings_scsi *scsi; 4758 struct ccb_trans_settings_spi *spi; 4759 int flags, tgt; 4760 sdparam *sdp; 4761 struct ccb_trans_settings cts; 4762 4763 memset(&cts, 0, sizeof (struct ccb_trans_settings)); 4764 4765 va_start(ap, cmd); 4766 bus = va_arg(ap, int); 4767 tgt = va_arg(ap, int); 4768 va_end(ap); 4769 sdp = SDPARAM(isp, bus); 4770 4771 if (xpt_create_path(&tmppath, NULL, cam_sim_path(ISP_SPI_PC(isp, bus)->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 4772 isp_prt(isp, ISP_LOGWARN, "isp_async cannot make temp path for %d.%d", tgt, bus); 4773 break; 4774 } 4775 flags = sdp->isp_devparam[tgt].actv_flags; 4776 cts.type = CTS_TYPE_CURRENT_SETTINGS; 4777 cts.protocol = PROTO_SCSI; 4778 cts.transport = XPORT_SPI; 4779 4780 scsi = &cts.proto_specific.scsi; 4781 spi = &cts.xport_specific.spi; 4782 4783 if (flags & DPARM_TQING) { 4784 scsi->valid |= CTS_SCSI_VALID_TQ; 4785 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 4786 } 4787 4788 if (flags & DPARM_DISC) { 4789 spi->valid |= CTS_SPI_VALID_DISC; 4790 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 4791 } 4792 spi->flags |= CTS_SPI_VALID_BUS_WIDTH; 4793 if (flags & DPARM_WIDE) { 4794 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 4795 } else { 4796 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 4797 } 4798 if (flags & DPARM_SYNC) { 4799 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 4800 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 4801 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 4802 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 4803 } 4804 isp_prt(isp, ISP_LOGDEBUG2, "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", bus, tgt, sdp->isp_devparam[tgt].actv_period, sdp->isp_devparam[tgt].actv_offset, flags); 4805 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 4806 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 4807 xpt_free_path(tmppath); 4808 break; 4809 } 4810 case ISPASYNC_BUS_RESET: 4811 { 4812 va_start(ap, cmd); 4813 bus = va_arg(ap, int); 4814 va_end(ap); 4815 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", bus); 4816 if (IS_FC(isp)) { 4817 xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, NULL); 4818 } else { 4819 xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, NULL); 4820 } 4821 break; 4822 } 4823 case ISPASYNC_LIP: 4824 if (msg == NULL) 4825 msg = "LIP Received"; 4826 /* FALLTHROUGH */ 4827 case ISPASYNC_LOOP_RESET: 4828 if (msg == NULL) 4829 msg = "LOOP Reset"; 4830 /* FALLTHROUGH */ 4831 case ISPASYNC_LOOP_DOWN: 4832 { 4833 if (msg == NULL) 4834 msg = "LOOP Down"; 4835 va_start(ap, cmd); 4836 bus = va_arg(ap, int); 4837 va_end(ap); 4838 4839 FCPARAM(isp, bus)->isp_linkstate = 0; 4840 4841 fc = ISP_FC_PC(isp, bus); 4842 if (cmd == ISPASYNC_LOOP_DOWN && fc->ready) { 4843 /* 4844 * We don't do any simq freezing if we are only in target mode 4845 */ 4846 if (FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) { 4847 if (fc->path) { 4848 isp_freeze_loopdown(isp, bus, msg); 4849 } 4850 } 4851 if (!callout_active(&fc->ldt)) { 4852 callout_reset(&fc->ldt, fc->loop_down_limit * hz, isp_ldt, fc); 4853 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Starting Loop Down Timer @ %lu", (unsigned long) time_uptime); 4854 } 4855 } 4856 
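		/*
		 * After a LIP, loop reset or loop down, command reference
		 * numbers must start over, so reset the CRN for every nexus
		 * on this channel.
		 */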
isp_fcp_reset_crn(fc, /*tgt*/0, /*tgt_set*/ 0); 4857 4858 isp_prt(isp, ISP_LOGINFO, "Chan %d: %s", bus, msg); 4859 break; 4860 } 4861 case ISPASYNC_LOOP_UP: 4862 va_start(ap, cmd); 4863 bus = va_arg(ap, int); 4864 va_end(ap); 4865 fc = ISP_FC_PC(isp, bus); 4866 /* 4867 * Now we just note that Loop has come up. We don't 4868 * actually do anything because we're waiting for a 4869 * Change Notify before activating the FC cleanup 4870 * thread to look at the state of the loop again. 4871 */ 4872 FCPARAM(isp, bus)->isp_linkstate = 1; 4873 fc->loop_dead = 0; 4874 fc->loop_down_time = 0; 4875 isp_prt(isp, ISP_LOGINFO, "Chan %d Loop UP", bus); 4876 break; 4877 case ISPASYNC_DEV_ARRIVED: 4878 va_start(ap, cmd); 4879 bus = va_arg(ap, int); 4880 lp = va_arg(ap, fcportdb_t *); 4881 va_end(ap); 4882 fc = ISP_FC_PC(isp, bus); 4883 tgt = FC_PORTDB_TGT(isp, bus, lp); 4884 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); 4885 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived"); 4886 if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && 4887 (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) { 4888 lp->is_target = 1; 4889 isp_fcp_reset_crn(fc, tgt, /*tgt_set*/ 1); 4890 isp_make_here(isp, lp, bus, tgt); 4891 } 4892 if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && 4893 (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) { 4894 lp->is_initiator = 1; 4895 ac.contract_number = AC_CONTRACT_DEV_CHG; 4896 adc = (struct ac_device_changed *) ac.contract_data; 4897 adc->wwpn = lp->port_wwn; 4898 adc->port = lp->portid; 4899 adc->target = tgt; 4900 adc->arrived = 1; 4901 xpt_async(AC_CONTRACT, fc->path, &ac); 4902 } 4903 break; 4904 case ISPASYNC_DEV_CHANGED: 4905 va_start(ap, cmd); 4906 bus = va_arg(ap, int); 4907 lp = va_arg(ap, fcportdb_t *); 4908 va_end(ap); 4909 fc = ISP_FC_PC(isp, bus); 4910 tgt = FC_PORTDB_TGT(isp, bus, lp); 4911 isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3); 4912 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, lp->handle, buf, "changed"); 4913 changed: 4914 if (lp->is_target != 4915 ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && 4916 (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) { 4917 lp->is_target = !lp->is_target; 4918 if (lp->is_target) { 4919 isp_fcp_reset_crn(fc, tgt, /*tgt_set*/ 1); 4920 isp_make_here(isp, lp, bus, tgt); 4921 } else { 4922 isp_make_gone(isp, lp, bus, tgt); 4923 isp_fcp_reset_crn(fc, tgt, /*tgt_set*/ 1); 4924 } 4925 } 4926 if (lp->is_initiator != 4927 ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && 4928 (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) { 4929 lp->is_initiator = !lp->is_initiator; 4930 ac.contract_number = AC_CONTRACT_DEV_CHG; 4931 adc = (struct ac_device_changed *) ac.contract_data; 4932 adc->wwpn = lp->port_wwn; 4933 adc->port = lp->portid; 4934 adc->target = tgt; 4935 adc->arrived = lp->is_initiator; 4936 xpt_async(AC_CONTRACT, fc->path, &ac); 4937 } 4938 break; 4939 case ISPASYNC_DEV_STAYED: 4940 va_start(ap, cmd); 4941 bus = va_arg(ap, int); 4942 lp = va_arg(ap, fcportdb_t *); 4943 va_end(ap); 4944 fc = ISP_FC_PC(isp, bus); 4945 tgt = FC_PORTDB_TGT(isp, bus, lp); 4946 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); 4947 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed"); 4948 goto changed; 4949 case ISPASYNC_DEV_GONE: 4950 va_start(ap, cmd); 4951 bus = va_arg(ap, int); 4952 lp = va_arg(ap, fcportdb_t *); 4953 va_end(ap); 4954 fc = ISP_FC_PC(isp, bus); 4955 tgt = FC_PORTDB_TGT(isp, bus, lp); 4956 /* 4957 * If this 
has a virtual target or initiator set the isp_gdt 4958 * timer running on it to delay its departure. 4959 */ 4960 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); 4961 if (lp->is_target || lp->is_initiator) { 4962 lp->state = FC_PORTDB_STATE_ZOMBIE; 4963 lp->gone_timer = fc->gone_device_time; 4964 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie"); 4965 if (fc->ready && !callout_active(&fc->gdt)) { 4966 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime); 4967 callout_reset(&fc->gdt, hz, isp_gdt, fc); 4968 } 4969 break; 4970 } 4971 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone"); 4972 break; 4973 case ISPASYNC_CHANGE_NOTIFY: 4974 { 4975 char *msg; 4976 int evt, nphdl, nlstate, reason; 4977 4978 va_start(ap, cmd); 4979 bus = va_arg(ap, int); 4980 evt = va_arg(ap, int); 4981 if (IS_24XX(isp) && evt == ISPASYNC_CHANGE_PDB) { 4982 nphdl = va_arg(ap, int); 4983 nlstate = va_arg(ap, int); 4984 reason = va_arg(ap, int); 4985 } else { 4986 nphdl = NIL_HANDLE; 4987 nlstate = reason = 0; 4988 } 4989 va_end(ap); 4990 fc = ISP_FC_PC(isp, bus); 4991 4992 if (evt == ISPASYNC_CHANGE_PDB) { 4993 msg = "Port Database Changed"; 4994 } else if (evt == ISPASYNC_CHANGE_SNS) { 4995 msg = "Name Server Database Changed"; 4996 } else { 4997 msg = "Other Change Notify"; 4998 } 4999 5000 /* 5001 * If the loop down timer is running, cancel it. 5002 */ 5003 if (fc->ready && callout_active(&fc->ldt)) { 5004 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Stopping Loop Down Timer @ %lu", (unsigned long) time_uptime); 5005 callout_stop(&fc->ldt); 5006 } 5007 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg); 5008 if (FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) { 5009 isp_freeze_loopdown(isp, bus, msg); 5010 } 5011 wakeup(fc); 5012 break; 5013 } 5014 #ifdef ISP_TARGET_MODE 5015 case ISPASYNC_TARGET_NOTIFY: 5016 { 5017 isp_notify_t *notify; 5018 va_start(ap, cmd); 5019 notify = va_arg(ap, isp_notify_t *); 5020 va_end(ap); 5021 switch (notify->nt_ncode) { 5022 case NT_ABORT_TASK: 5023 case NT_ABORT_TASK_SET: 5024 case NT_CLEAR_ACA: 5025 case NT_CLEAR_TASK_SET: 5026 case NT_LUN_RESET: 5027 case NT_TARGET_RESET: 5028 case NT_QUERY_TASK_SET: 5029 case NT_QUERY_ASYNC_EVENT: 5030 /* 5031 * These are task management functions. 5032 */ 5033 isp_handle_platform_target_tmf(isp, notify); 5034 break; 5035 case NT_BUS_RESET: 5036 case NT_LIP_RESET: 5037 case NT_LINK_UP: 5038 case NT_LINK_DOWN: 5039 case NT_HBA_RESET: 5040 /* 5041 * No action need be taken here. 
5042 */ 5043 break; 5044 case NT_GLOBAL_LOGOUT: 5045 case NT_LOGOUT: 5046 /* 5047 * This is device arrival/departure notification 5048 */ 5049 isp_handle_platform_target_notify_ack(isp, notify); 5050 break; 5051 default: 5052 isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode); 5053 isp_handle_platform_target_notify_ack(isp, notify); 5054 break; 5055 } 5056 break; 5057 } 5058 case ISPASYNC_TARGET_NOTIFY_ACK: 5059 { 5060 void *inot; 5061 va_start(ap, cmd); 5062 inot = va_arg(ap, void *); 5063 va_end(ap); 5064 if (isp_notify_ack(isp, inot)) { 5065 isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT); 5066 if (tp) { 5067 tp->isp = isp; 5068 if (inot) { 5069 memcpy(tp->data, inot, sizeof (tp->data)); 5070 tp->not = tp->data; 5071 } else { 5072 tp->not = NULL; 5073 } 5074 callout_init_mtx(&tp->timer, &isp->isp_lock, 0); 5075 callout_reset(&tp->timer, 5, 5076 isp_refire_notify_ack, tp); 5077 } else { 5078 isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire"); 5079 } 5080 } 5081 break; 5082 } 5083 case ISPASYNC_TARGET_ACTION: 5084 { 5085 isphdr_t *hp; 5086 5087 va_start(ap, cmd); 5088 hp = va_arg(ap, isphdr_t *); 5089 va_end(ap); 5090 switch (hp->rqs_entry_type) { 5091 default: 5092 isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x", __func__, hp->rqs_entry_type); 5093 break; 5094 case RQSTYPE_NOTIFY: 5095 if (IS_SCSI(isp)) { 5096 isp_handle_platform_notify_scsi(isp, (in_entry_t *) hp); 5097 } else if (IS_24XX(isp)) { 5098 isp_handle_platform_notify_24xx(isp, (in_fcentry_24xx_t *) hp); 5099 } else { 5100 isp_handle_platform_notify_fc(isp, (in_fcentry_t *) hp); 5101 } 5102 break; 5103 case RQSTYPE_ATIO: 5104 if (IS_24XX(isp)) { 5105 isp_handle_platform_atio7(isp, (at7_entry_t *) hp); 5106 } else { 5107 isp_handle_platform_atio(isp, (at_entry_t *) hp); 5108 } 5109 break; 5110 case RQSTYPE_ATIO2: 5111 isp_handle_platform_atio2(isp, (at2_entry_t *) hp); 5112 break; 5113 case RQSTYPE_CTIO7: 5114 case RQSTYPE_CTIO3: 5115 case RQSTYPE_CTIO2: 5116 case RQSTYPE_CTIO: 5117 isp_handle_platform_ctio(isp, hp); 5118 break; 5119 case RQSTYPE_ABTS_RCVD: 5120 { 5121 abts_t *abts = (abts_t *)hp; 5122 isp_notify_t notify, *nt = ¬ify; 5123 tstate_t *tptr; 5124 fcportdb_t *lp; 5125 uint16_t chan; 5126 uint32_t sid, did; 5127 5128 did = (abts->abts_did_hi << 16) | abts->abts_did_lo; 5129 sid = (abts->abts_sid_hi << 16) | abts->abts_sid_lo; 5130 ISP_MEMZERO(nt, sizeof (isp_notify_t)); 5131 5132 nt->nt_hba = isp; 5133 nt->nt_did = did; 5134 nt->nt_nphdl = abts->abts_nphdl; 5135 nt->nt_sid = sid; 5136 isp_find_chan_by_did(isp, did, &chan); 5137 if (chan == ISP_NOCHAN) { 5138 nt->nt_tgt = TGT_ANY; 5139 } else { 5140 nt->nt_tgt = FCPARAM(isp, chan)->isp_wwpn; 5141 if (isp_find_pdb_by_handle(isp, chan, abts->abts_nphdl, &lp)) { 5142 nt->nt_wwn = lp->port_wwn; 5143 } else { 5144 nt->nt_wwn = INI_ANY; 5145 } 5146 } 5147 /* 5148 * Try hard to find the lun for this command. 
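			 * (The only key available is the task RX_ID carried
			 * in the ABTS, which is what the lookup below uses.)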
5149 */ 5150 tptr = get_lun_statep_from_tag(isp, chan, abts->abts_rxid_task); 5151 if (tptr) { 5152 nt->nt_lun = tptr->ts_lun; 5153 rls_lun_statep(isp, tptr); 5154 } else { 5155 nt->nt_lun = LUN_ANY; 5156 } 5157 nt->nt_need_ack = 1; 5158 nt->nt_tagval = abts->abts_rxid_task; 5159 nt->nt_tagval |= (((uint64_t) abts->abts_rxid_abts) << 32); 5160 if (abts->abts_rxid_task == ISP24XX_NO_TASK) { 5161 isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS from N-Port handle 0x%x Port 0x%06x has no task id (rx_id 0x%04x ox_id 0x%04x)", 5162 abts->abts_rxid_abts, abts->abts_nphdl, sid, abts->abts_rx_id, abts->abts_ox_id); 5163 } else { 5164 isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS from N-Port handle 0x%x Port 0x%06x for task 0x%x (rx_id 0x%04x ox_id 0x%04x)", 5165 abts->abts_rxid_abts, abts->abts_nphdl, sid, abts->abts_rxid_task, abts->abts_rx_id, abts->abts_ox_id); 5166 } 5167 nt->nt_channel = chan; 5168 nt->nt_ncode = NT_ABORT_TASK; 5169 nt->nt_lreserved = hp; 5170 isp_handle_platform_target_tmf(isp, nt); 5171 break; 5172 } 5173 case RQSTYPE_ENABLE_LUN: 5174 case RQSTYPE_MODIFY_LUN: 5175 isp_ledone(isp, (lun_entry_t *) hp); 5176 break; 5177 } 5178 break; 5179 } 5180 #endif 5181 case ISPASYNC_FW_CRASH: 5182 { 5183 uint16_t mbox1, mbox6; 5184 mbox1 = ISP_READ(isp, OUTMAILBOX1); 5185 if (IS_DUALBUS(isp)) { 5186 mbox6 = ISP_READ(isp, OUTMAILBOX6); 5187 } else { 5188 mbox6 = 0; 5189 } 5190 isp_prt(isp, ISP_LOGERR, "Internal Firmware Error on bus %d @ RISC Address 0x%x", mbox6, mbox1); 5191 mbox1 = isp->isp_osinfo.mbox_sleep_ok; 5192 isp->isp_osinfo.mbox_sleep_ok = 0; 5193 isp_reinit(isp, 1); 5194 isp->isp_osinfo.mbox_sleep_ok = mbox1; 5195 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 5196 break; 5197 } 5198 default: 5199 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 5200 break; 5201 } 5202 } 5203 5204 5205 /* 5206 * Locks are held before coming here. 5207 */ 5208 void 5209 isp_uninit(ispsoftc_t *isp) 5210 { 5211 if (IS_24XX(isp)) { 5212 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); 5213 } else { 5214 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 5215 } 5216 ISP_DISABLE_INTS(isp); 5217 } 5218 5219 /* 5220 * When we want to get the 'default' WWNs (when lacking NVRAM), we pick them 5221 * up from our platform default (defww{p|n}n) and morph them based upon 5222 * channel. 5223 * 5224 * When we want to get the 'active' WWNs, we get NVRAM WWNs and then morph them 5225 * based upon channel. 5226 */ 5227 5228 uint64_t 5229 isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn) 5230 { 5231 uint64_t seed; 5232 struct isp_fc *fc = ISP_FC_PC(isp, chan); 5233 5234 /* 5235 * If we're asking for a active WWN, the default overrides get 5236 * returned, otherwise the NVRAM value is picked. 5237 * 5238 * If we're asking for a default WWN, we just pick the default override. 5239 */ 5240 if (isactive) { 5241 seed = iswwnn ? fc->def_wwnn : fc->def_wwpn; 5242 if (seed) { 5243 return (seed); 5244 } 5245 seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram : FCPARAM(isp, chan)->isp_wwpn_nvram; 5246 if (seed) { 5247 return (seed); 5248 } 5249 return (0x400000007F000009ull); 5250 } 5251 5252 seed = iswwnn ? fc->def_wwnn : fc->def_wwpn; 5253 5254 /* 5255 * For channel zero just return what we have. For either ACTIVE or 5256 * DEFAULT cases, we depend on default override of NVRAM values for 5257 * channel zero. 5258 */ 5259 if (chan == 0) { 5260 return (seed); 5261 } 5262 5263 /* 5264 * For other channels, we are doing one of three things: 5265 * 5266 * 1. If what we have now is non-zero, return it. 
Otherwise we morph
 * values from channel 0.
 *
 * 2. If we're here for a WWPN we synthesize it if Channel 0's wwpn has a
 * type 2 NAA.
 *
 * 3. If we're here for a WWNN we synthesize it if Channel 0's wwnn has a
 * type 2 NAA.
 */

	if (seed) {
		return (seed);
	}
	seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn : ISP_FC_PC(isp, 0)->def_wwpn;
	if (seed == 0)
		seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram : FCPARAM(isp, 0)->isp_wwpn_nvram;

	if (((seed >> 60) & 0xf) == 2) {
		/*
		 * The type 2 NAA fields for QLogic cards appear to be laid
		 * out thusly:
		 *
		 *   bits 63..60	NAA == 2
		 *   bits 59..57	unused/zero
		 *   bit 56		port (1) or node (0) WWN distinguisher
		 *   bit 48		physical port on dual-port chips (23XX/24XX)
		 *
		 * This is somewhat nutty, particularly since bit 48 is
		 * irrelevant as they assign separate serial numbers to
		 * different physical ports anyway.
		 *
		 * We'll stick our channel number plus one first into bits
		 * 57..59 and thence into bits 52..55 which allows for 8 bits
		 * of channel which is comfortably more than our maximum
		 * (126) now.
		 */
		seed &= ~0x0FF0000000000000ULL;
		if (iswwnn == 0) {
			seed |= ((uint64_t) (chan + 1) & 0xf) << 56;
			seed |= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52;
		}
	} else {
		seed = 0;
	}
	return (seed);
}

void
isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
{
	int loc;
	char lbuf[200];
	va_list ap;

	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev));
	loc = strlen(lbuf);
	va_start(ap, fmt);
	vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap);
	va_end(ap);
	printf("%s\n", lbuf);
}

void
isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...)
5328 { 5329 va_list ap; 5330 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 5331 return; 5332 } 5333 xpt_print_path(xs->ccb_h.path); 5334 va_start(ap, fmt); 5335 vprintf(fmt, ap); 5336 va_end(ap); 5337 printf("\n"); 5338 } 5339 5340 uint64_t 5341 isp_nanotime_sub(struct timespec *b, struct timespec *a) 5342 { 5343 uint64_t elapsed; 5344 struct timespec x = *b; 5345 timespecsub(&x, a); 5346 elapsed = GET_NANOSEC(&x); 5347 if (elapsed == 0) 5348 elapsed++; 5349 return (elapsed); 5350 } 5351 5352 int 5353 isp_mbox_acquire(ispsoftc_t *isp) 5354 { 5355 if (isp->isp_osinfo.mboxbsy) { 5356 return (1); 5357 } else { 5358 isp->isp_osinfo.mboxcmd_done = 0; 5359 isp->isp_osinfo.mboxbsy = 1; 5360 return (0); 5361 } 5362 } 5363 5364 void 5365 isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) 5366 { 5367 unsigned int usecs = mbp->timeout; 5368 unsigned int max, olim, ilim; 5369 5370 if (usecs == 0) { 5371 usecs = MBCMD_DEFAULT_TIMEOUT; 5372 } 5373 max = isp->isp_mbxwrk0 + 1; 5374 5375 if (isp->isp_osinfo.mbox_sleep_ok) { 5376 unsigned int ms = (usecs + 999) / 1000; 5377 5378 isp->isp_osinfo.mbox_sleep_ok = 0; 5379 isp->isp_osinfo.mbox_sleeping = 1; 5380 for (olim = 0; olim < max; olim++) { 5381 msleep(&isp->isp_mbxworkp, &isp->isp_osinfo.lock, PRIBIO, "ispmbx_sleep", isp_mstohz(ms)); 5382 if (isp->isp_osinfo.mboxcmd_done) { 5383 break; 5384 } 5385 } 5386 isp->isp_osinfo.mbox_sleep_ok = 1; 5387 isp->isp_osinfo.mbox_sleeping = 0; 5388 } else { 5389 for (olim = 0; olim < max; olim++) { 5390 for (ilim = 0; ilim < usecs; ilim += 100) { 5391 uint16_t isr, sema, info; 5392 if (isp->isp_osinfo.mboxcmd_done) { 5393 break; 5394 } 5395 if (ISP_READ_ISR(isp, &isr, &sema, &info)) { 5396 isp_intr(isp, isr, sema, info); 5397 if (isp->isp_osinfo.mboxcmd_done) { 5398 break; 5399 } 5400 } 5401 ISP_DELAY(100); 5402 } 5403 if (isp->isp_osinfo.mboxcmd_done) { 5404 break; 5405 } 5406 } 5407 } 5408 if (isp->isp_osinfo.mboxcmd_done == 0) { 5409 isp_prt(isp, ISP_LOGWARN, "%s Mailbox Command (0x%x) Timeout (%uus) (started @ %s:%d)", 5410 isp->isp_osinfo.mbox_sleep_ok? 
"Interrupting" : "Polled", isp->isp_lastmbxcmd, usecs, mbp->func, mbp->lineno); 5411 mbp->param[0] = MBOX_TIMEOUT; 5412 isp->isp_osinfo.mboxcmd_done = 1; 5413 } 5414 } 5415 5416 void 5417 isp_mbox_notify_done(ispsoftc_t *isp) 5418 { 5419 if (isp->isp_osinfo.mbox_sleeping) { 5420 wakeup(&isp->isp_mbxworkp); 5421 } 5422 isp->isp_osinfo.mboxcmd_done = 1; 5423 } 5424 5425 void 5426 isp_mbox_release(ispsoftc_t *isp) 5427 { 5428 isp->isp_osinfo.mboxbsy = 0; 5429 } 5430 5431 int 5432 isp_fc_scratch_acquire(ispsoftc_t *isp, int chan) 5433 { 5434 int ret = 0; 5435 if (isp->isp_osinfo.pc.fc[chan].fcbsy) { 5436 ret = -1; 5437 } else { 5438 isp->isp_osinfo.pc.fc[chan].fcbsy = 1; 5439 } 5440 return (ret); 5441 } 5442 5443 int 5444 isp_mstohz(int ms) 5445 { 5446 int hz; 5447 struct timeval t; 5448 t.tv_sec = ms / 1000; 5449 t.tv_usec = (ms % 1000) * 1000; 5450 hz = tvtohz(&t); 5451 if (hz < 0) { 5452 hz = 0x7fffffff; 5453 } 5454 if (hz == 0) { 5455 hz = 1; 5456 } 5457 return (hz); 5458 } 5459 5460 void 5461 isp_platform_intr(void *arg) 5462 { 5463 ispsoftc_t *isp = arg; 5464 uint16_t isr, sema, info; 5465 5466 ISP_LOCK(isp); 5467 isp->isp_intcnt++; 5468 if (ISP_READ_ISR(isp, &isr, &sema, &info)) 5469 isp_intr(isp, isr, sema, info); 5470 else 5471 isp->isp_intbogus++; 5472 ISP_UNLOCK(isp); 5473 } 5474 5475 void 5476 isp_common_dmateardown(ispsoftc_t *isp, struct ccb_scsiio *csio, uint32_t hdl) 5477 { 5478 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 5479 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTREAD); 5480 } else { 5481 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTWRITE); 5482 } 5483 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); 5484 } 5485 5486 /* 5487 * Reset the command reference number for all LUNs on a specific target 5488 * (needed when a target arrives again) or for all targets on a port 5489 * (needed for events like a LIP). 
 */
void
isp_fcp_reset_crn(struct isp_fc *fc, uint32_t tgt, int tgt_set)
{
	int i;
	struct isp_nexus *nxp;

	if (tgt_set == 0)
		isp_prt(fc->isp, ISP_LOG_SANCFG, "resetting CRN on all targets");
	else
		isp_prt(fc->isp, ISP_LOG_SANCFG, "resetting CRN target %u", tgt);

	for (i = 0; i < NEXUS_HASH_WIDTH; i++) {
		nxp = fc->nexus_hash[i];
		while (nxp) {
			/*
			 * Reset every nexus when tgt_set is zero, otherwise
			 * only those belonging to the named target.
			 */
			if (tgt_set == 0 || tgt == nxp->tgt)
				nxp->crnseed = 0;

			nxp = nxp->next;
		}
	}
}

int
isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd)
{
	lun_id_t lun;
	uint32_t chan, tgt;
	struct isp_fc *fc;
	struct isp_nexus *nxp;
	int idx;

	if (isp->isp_type < ISP_HA_FC_2300)
		return (0);

	chan = XS_CHANNEL(cmd);
	tgt = XS_TGT(cmd);
	lun = XS_LUN(cmd);
	fc = &isp->isp_osinfo.pc.fc[chan];
	idx = NEXUS_HASH(tgt, lun);
	nxp = fc->nexus_hash[idx];

	while (nxp) {
		if (nxp->tgt == tgt && nxp->lun == lun)
			break;
		nxp = nxp->next;
	}
	if (nxp == NULL) {
		nxp = fc->nexus_free_list;
		if (nxp == NULL) {
			nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT);
			if (nxp == NULL) {
				return (-1);
			}
		} else {
			fc->nexus_free_list = nxp->next;
		}
		nxp->tgt = tgt;
		nxp->lun = lun;
		nxp->next = fc->nexus_hash[idx];
		fc->nexus_hash[idx] = nxp;
	}
	if (nxp) {
		if (nxp->crnseed == 0)
			nxp->crnseed = 1;
		if (cmd)
			PISP_PCMD(cmd)->crn = nxp->crnseed;
		*crnp = nxp->crnseed++;
		return (0);
	}
	return (-1);
}

/*
 * We enter with the lock held.
 */
void
isp_timer(void *arg)
{
	ispsoftc_t *isp = arg;
#ifdef ISP_TARGET_MODE
	isp_tmcmd_restart(isp);
#endif
	callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
}

isp_ecmd_t *
isp_get_ecmd(ispsoftc_t *isp)
{
	isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free;
	if (ecmd) {
		isp->isp_osinfo.ecmd_free = ecmd->next;
	}
	return (ecmd);
}

void
isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd)
{
	ecmd->next = isp->isp_osinfo.ecmd_free;
	isp->isp_osinfo.ecmd_free = ecmd;
}