1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2009-2020 Alexander Motin <mav@FreeBSD.org> 5 * Copyright (c) 1997-2009 by Matthew Jacob 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice immediately at the beginning of the file, without modification, 13 * this list of conditions, and the following disclaimer. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 /* 31 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 32 */ 33 #include <sys/cdefs.h> 34 #include <dev/isp/isp_freebsd.h> 35 #include <sys/unistd.h> 36 #include <sys/kthread.h> 37 #include <sys/conf.h> 38 #include <sys/module.h> 39 #include <sys/ioccom.h> 40 #include <dev/isp/isp_ioctl.h> 41 #include <sys/devicestat.h> 42 #include <cam/cam_periph.h> 43 #include <cam/cam_xpt_periph.h> 44 45 MODULE_VERSION(isp, 1); 46 MODULE_DEPEND(isp, cam, 1, 1, 1); 47 int isp_announced = 0; 48 int isp_loop_down_limit = 60; /* default loop down limit */ 49 int isp_quickboot_time = 7; /* don't wait more than N secs for loop up */ 50 int isp_gone_device_time = 30; /* grace time before reporting device lost */ 51 static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s"; 52 53 static void isp_freeze_loopdown(ispsoftc_t *, int); 54 static void isp_loop_changed(ispsoftc_t *isp, int chan); 55 static void isp_rq_check_above(ispsoftc_t *); 56 static void isp_rq_check_below(ispsoftc_t *); 57 static d_ioctl_t ispioctl; 58 static void isp_poll(struct cam_sim *); 59 static callout_func_t isp_watchdog; 60 static callout_func_t isp_gdt; 61 static task_fn_t isp_gdt_task; 62 static void isp_kthread(void *); 63 static void isp_action(struct cam_sim *, union ccb *); 64 static int isp_timer_count; 65 static void isp_timer(void *); 66 67 static struct cdevsw isp_cdevsw = { 68 .d_version = D_VERSION, 69 .d_ioctl = ispioctl, 70 .d_name = "isp", 71 }; 72 73 static int 74 isp_role_sysctl(SYSCTL_HANDLER_ARGS) 75 { 76 ispsoftc_t *isp = (ispsoftc_t *)arg1; 77 int chan = arg2; 78 int error, old, value; 79 80 value = FCPARAM(isp, chan)->role; 81 82 error = sysctl_handle_int(oidp, &value, 0, req); 83 if ((error != 0) || (req->newptr == NULL)) 84 return (error); 85 86 if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH) 87 return (EINVAL); 88 89 ISP_LOCK(isp); 90 old = FCPARAM(isp, chan)->role; 91 92 /* We don't allow target mode switch from here. 
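 * Keep the current target bit and take only the initiator bit from the
 * value written.  The handler is registered below as the per-channel
 * "role" sysctl node (e.g. "sysctl dev.isp.0.role" for channel 0 of
 * unit 0; virtual channels get a chanN subtree).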
*/ 93 value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR); 94 95 /* If nothing has changed -- we are done. */ 96 if (value == old) { 97 ISP_UNLOCK(isp); 98 return (0); 99 } 100 101 /* Actually change the role. */ 102 error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value); 103 ISP_UNLOCK(isp); 104 return (error); 105 } 106 107 static int 108 isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan) 109 { 110 fcparam *fcp = FCPARAM(isp, chan); 111 struct isp_fc *fc = ISP_FC_PC(isp, chan); 112 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev); 113 struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); 114 char name[16]; 115 struct cam_sim *sim; 116 struct cam_path *path; 117 #ifdef ISP_TARGET_MODE 118 int i; 119 #endif 120 121 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 122 device_get_unit(isp->isp_dev), &isp->isp_lock, 123 isp->isp_maxcmds, isp->isp_maxcmds, devq); 124 if (sim == NULL) 125 return (ENOMEM); 126 127 if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) { 128 cam_sim_free(sim, FALSE); 129 return (EIO); 130 } 131 if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 132 xpt_bus_deregister(cam_sim_path(sim)); 133 cam_sim_free(sim, FALSE); 134 return (ENXIO); 135 } 136 137 ISP_LOCK(isp); 138 fc->sim = sim; 139 fc->path = path; 140 fc->isp = isp; 141 fc->ready = 1; 142 fcp->isp_use_gft_id = 1; 143 fcp->isp_use_gff_id = 1; 144 145 callout_init_mtx(&fc->gdt, &isp->isp_lock, 0); 146 TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc); 147 #ifdef ISP_TARGET_MODE 148 TAILQ_INIT(&fc->waitq); 149 STAILQ_INIT(&fc->ntfree); 150 for (i = 0; i < ATPDPSIZE; i++) 151 STAILQ_INSERT_TAIL(&fc->ntfree, &fc->ntpool[i], next); 152 LIST_INIT(&fc->atfree); 153 for (i = ATPDPSIZE-1; i >= 0; i--) 154 LIST_INSERT_HEAD(&fc->atfree, &fc->atpool[i], next); 155 for (i = 0; i < ATPDPHASHSIZE; i++) 156 LIST_INIT(&fc->atused[i]); 157 #endif 158 isp_loop_changed(isp, chan); 159 ISP_UNLOCK(isp); 160 if (kproc_create(isp_kthread, fc, &fc->kproc, 0, 0, 161 "%s_%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) { 162 xpt_free_path(fc->path); 163 xpt_bus_deregister(cam_sim_path(fc->sim)); 164 cam_sim_free(fc->sim, FALSE); 165 return (ENOMEM); 166 } 167 fc->num_threads += 1; 168 if (chan > 0) { 169 snprintf(name, sizeof(name), "chan%d", chan); 170 tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), 171 OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 172 "Virtual channel"); 173 } 174 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 175 "wwnn", CTLFLAG_RD, &fcp->isp_wwnn, 176 "World Wide Node Name"); 177 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 178 "wwpn", CTLFLAG_RD, &fcp->isp_wwpn, 179 "World Wide Port Name"); 180 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 181 "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0, 182 "Loop Down Limit"); 183 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 184 "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0, 185 "Gone Device Time"); 186 #if defined(ISP_TARGET_MODE) && defined(DEBUG) 187 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 188 "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0, 189 "Cause a Lost Frame on a Read"); 190 #endif 191 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 192 "role", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 193 isp, chan, isp_role_sysctl, "I", "Current role"); 194 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 195 "speed", CTLFLAG_RD, 
&fcp->isp_gbspeed, 0, 196 "Connection speed in gigabits"); 197 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 198 "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0, 199 "Link state"); 200 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 201 "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0, 202 "Firmware state"); 203 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 204 "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0, 205 "Loop state"); 206 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 207 "topo", CTLFLAG_RD, &fcp->isp_topo, 0, 208 "Connection topology"); 209 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 210 "use_gft_id", CTLFLAG_RWTUN, &fcp->isp_use_gft_id, 0, 211 "Use GFT_ID during fabric scan"); 212 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 213 "use_gff_id", CTLFLAG_RWTUN, &fcp->isp_use_gff_id, 0, 214 "Use GFF_ID during fabric scan"); 215 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 216 "fw_version_flash", CTLFLAG_RD, fcp->fw_version_flash, 0, 217 "Firmware version in (active) flash region"); 218 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 219 "fw_version_ispfw", CTLFLAG_RD, fcp->fw_version_ispfw, 0, 220 "Firmware version loaded from ispfw(4)"); 221 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 222 "fw_version_run", CTLFLAG_RD, fcp->fw_version_run, 0, 223 "Firmware version currently running"); 224 return (0); 225 } 226 227 static void 228 isp_detach_chan(ispsoftc_t *isp, int chan) 229 { 230 struct isp_fc *fc = ISP_FC_PC(isp, chan); 231 232 xpt_free_path(fc->path); 233 xpt_bus_deregister(cam_sim_path(fc->sim)); 234 cam_sim_free(fc->sim, FALSE); 235 236 /* Wait for the channel's spawned threads to exit. */ 237 wakeup(fc); 238 while (fc->num_threads != 0) 239 mtx_sleep(&fc->num_threads, &isp->isp_lock, PRIBIO, "isp_reap", 0); 240 } 241 242 int 243 isp_attach(ispsoftc_t *isp) 244 { 245 const char *nu = device_get_nameunit(isp->isp_osinfo.dev); 246 int du = device_get_unit(isp->isp_dev); 247 int chan; 248 249 /* 250 * Create the device queue for our SIM(s). 251 */ 252 isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds); 253 if (isp->isp_osinfo.devq == NULL) { 254 return (EIO); 255 } 256 257 for (chan = 0; chan < isp->isp_nchan; chan++) { 258 if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) { 259 goto unwind; 260 } 261 } 262 263 callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_lock, 0); 264 isp_timer_count = hz >> 2; 265 callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); 266 267 isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu); 268 if (isp->isp_osinfo.cdev) { 269 isp->isp_osinfo.cdev->si_drv1 = isp; 270 } 271 return (0); 272 273 unwind: 274 ISP_LOCK(isp); 275 isp->isp_osinfo.is_exiting = 1; 276 while (--chan >= 0) 277 isp_detach_chan(isp, chan); 278 ISP_UNLOCK(isp); 279 cam_simq_free(isp->isp_osinfo.devq); 280 isp->isp_osinfo.devq = NULL; 281 return (-1); 282 } 283 284 int 285 isp_detach(ispsoftc_t *isp) 286 { 287 int chan; 288 289 if (isp->isp_osinfo.cdev) { 290 destroy_dev(isp->isp_osinfo.cdev); 291 isp->isp_osinfo.cdev = NULL; 292 } 293 ISP_LOCK(isp); 294 /* Tell spawned threads that we're exiting. 
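 * The per-channel worker threads notice this flag and terminate; each
 * isp_detach_chan() call below then waits for fc->num_threads to drop
 * to zero.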
*/ 295 isp->isp_osinfo.is_exiting = 1; 296 for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) 297 isp_detach_chan(isp, chan); 298 ISP_UNLOCK(isp); 299 callout_drain(&isp->isp_osinfo.tmo); 300 cam_simq_free(isp->isp_osinfo.devq); 301 return (0); 302 } 303 304 static void 305 isp_freeze_loopdown(ispsoftc_t *isp, int chan) 306 { 307 struct isp_fc *fc = ISP_FC_PC(isp, chan); 308 309 if (fc->sim == NULL) 310 return; 311 if (fc->simqfrozen == 0) { 312 isp_prt(isp, ISP_LOGDEBUG0, 313 "Chan %d Freeze simq (loopdown)", chan); 314 fc->simqfrozen = SIMQFRZ_LOOPDOWN; 315 xpt_hold_boot(); 316 xpt_freeze_simq(fc->sim, 1); 317 } else { 318 isp_prt(isp, ISP_LOGDEBUG0, 319 "Chan %d Mark simq frozen (loopdown)", chan); 320 fc->simqfrozen |= SIMQFRZ_LOOPDOWN; 321 } 322 } 323 324 static void 325 isp_unfreeze_loopdown(ispsoftc_t *isp, int chan) 326 { 327 struct isp_fc *fc = ISP_FC_PC(isp, chan); 328 329 if (fc->sim == NULL) 330 return; 331 int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN; 332 fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN; 333 if (wasfrozen && fc->simqfrozen == 0) { 334 isp_prt(isp, ISP_LOGDEBUG0, 335 "Chan %d Release simq", chan); 336 xpt_release_simq(fc->sim, 1); 337 xpt_release_boot(); 338 } 339 } 340 341 /* 342 * Functions to protect from request queue overflow by freezing SIM queue. 343 * XXX: freezing only one arbitrary SIM, since they all share the queue. 344 */ 345 static void 346 isp_rq_check_above(ispsoftc_t *isp) 347 { 348 struct isp_fc *fc = ISP_FC_PC(isp, 0); 349 350 if (isp->isp_rqovf || fc->sim == NULL) 351 return; 352 if (!isp_rqentry_avail(isp, QENTRY_MAX)) { 353 xpt_freeze_simq(fc->sim, 1); 354 isp->isp_rqovf = 1; 355 } 356 } 357 358 static void 359 isp_rq_check_below(ispsoftc_t *isp) 360 { 361 struct isp_fc *fc = ISP_FC_PC(isp, 0); 362 363 if (!isp->isp_rqovf || fc->sim == NULL) 364 return; 365 if (isp_rqentry_avail(isp, QENTRY_MAX)) { 366 xpt_release_simq(fc->sim, 0); 367 isp->isp_rqovf = 0; 368 } 369 } 370 371 static int 372 ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td) 373 { 374 ispsoftc_t *isp; 375 int nr, chan, retval = ENOTTY; 376 377 isp = dev->si_drv1; 378 379 switch (c) { 380 case ISP_SDBLEV: 381 { 382 int olddblev = isp->isp_dblev; 383 isp->isp_dblev = *(int *)addr; 384 *(int *)addr = olddblev; 385 retval = 0; 386 break; 387 } 388 case ISP_GETROLE: 389 chan = *(int *)addr; 390 if (chan < 0 || chan >= isp->isp_nchan) { 391 retval = -ENXIO; 392 break; 393 } 394 *(int *)addr = FCPARAM(isp, chan)->role; 395 retval = 0; 396 break; 397 case ISP_SETROLE: 398 nr = *(int *)addr; 399 chan = nr >> 8; 400 if (chan < 0 || chan >= isp->isp_nchan) { 401 retval = -ENXIO; 402 break; 403 } 404 nr &= 0xff; 405 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 406 retval = EINVAL; 407 break; 408 } 409 ISP_LOCK(isp); 410 *(int *)addr = FCPARAM(isp, chan)->role; 411 retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr); 412 ISP_UNLOCK(isp); 413 break; 414 415 case ISP_RESETHBA: 416 ISP_LOCK(isp); 417 isp_reinit(isp, 0); 418 ISP_UNLOCK(isp); 419 retval = 0; 420 break; 421 422 case ISP_RESCAN: 423 chan = *(intptr_t *)addr; 424 if (chan < 0 || chan >= isp->isp_nchan) { 425 retval = -ENXIO; 426 break; 427 } 428 ISP_LOCK(isp); 429 if (isp_fc_runstate(isp, chan, 5 * 1000000) != LOOP_READY) { 430 retval = EIO; 431 } else { 432 retval = 0; 433 } 434 ISP_UNLOCK(isp); 435 break; 436 437 case ISP_FC_LIP: 438 chan = *(intptr_t *)addr; 439 if (chan < 0 || chan >= isp->isp_nchan) { 440 retval = -ENXIO; 441 break; 442 } 443 ISP_LOCK(isp); 444 if (isp_control(isp, 
ISPCTL_SEND_LIP, chan)) { 445 retval = EIO; 446 } else { 447 retval = 0; 448 } 449 ISP_UNLOCK(isp); 450 break; 451 case ISP_FC_GETDINFO: 452 { 453 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 454 fcportdb_t *lp; 455 456 if (ifc->loopid >= MAX_FC_TARG) { 457 retval = EINVAL; 458 break; 459 } 460 lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid]; 461 if (lp->state != FC_PORTDB_STATE_NIL) { 462 ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT; 463 ifc->loopid = lp->handle; 464 ifc->portid = lp->portid; 465 ifc->node_wwn = lp->node_wwn; 466 ifc->port_wwn = lp->port_wwn; 467 retval = 0; 468 } else { 469 retval = ENODEV; 470 } 471 break; 472 } 473 case ISP_FC_GETHINFO: 474 { 475 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 476 int chan = hba->fc_channel; 477 478 if (chan < 0 || chan >= isp->isp_nchan) { 479 retval = ENXIO; 480 break; 481 } 482 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 483 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 484 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 485 hba->fc_nchannels = isp->isp_nchan; 486 hba->fc_nports = MAX_FC_TARG; 487 hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed; 488 hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1; 489 hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid; 490 hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram; 491 hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram; 492 hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn; 493 hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn; 494 retval = 0; 495 break; 496 } 497 case ISP_TSK_MGMT: 498 { 499 int needmarker; 500 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; 501 uint16_t nphdl; 502 isp24xx_tmf_t tmf; 503 isp24xx_statusreq_t sp; 504 fcparam *fcp; 505 fcportdb_t *lp; 506 int i; 507 508 chan = fct->chan; 509 if (chan < 0 || chan >= isp->isp_nchan) { 510 retval = -ENXIO; 511 break; 512 } 513 514 needmarker = retval = 0; 515 nphdl = fct->loopid; 516 ISP_LOCK(isp); 517 fcp = FCPARAM(isp, chan); 518 519 for (i = 0; i < MAX_FC_TARG; i++) { 520 lp = &fcp->portdb[i]; 521 if (lp->handle == nphdl) { 522 break; 523 } 524 } 525 if (i == MAX_FC_TARG) { 526 retval = ENXIO; 527 ISP_UNLOCK(isp); 528 break; 529 } 530 ISP_MEMZERO(&tmf, sizeof(tmf)); 531 tmf.tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; 532 tmf.tmf_header.rqs_entry_count = 1; 533 tmf.tmf_nphdl = lp->handle; 534 tmf.tmf_delay = 2; 535 tmf.tmf_timeout = 4; 536 tmf.tmf_tidlo = lp->portid; 537 tmf.tmf_tidhi = lp->portid >> 16; 538 tmf.tmf_vpidx = ISP_GET_VPIDX(isp, chan); 539 tmf.tmf_lun[1] = fct->lun & 0xff; 540 if (fct->lun >= 256) { 541 tmf.tmf_lun[0] = 0x40 | (fct->lun >> 8); 542 } 543 switch (fct->action) { 544 case IPT_CLEAR_ACA: 545 tmf.tmf_flags = ISP24XX_TMF_CLEAR_ACA; 546 break; 547 case IPT_TARGET_RESET: 548 tmf.tmf_flags = ISP24XX_TMF_TARGET_RESET; 549 needmarker = 1; 550 break; 551 case IPT_LUN_RESET: 552 tmf.tmf_flags = ISP24XX_TMF_LUN_RESET; 553 needmarker = 1; 554 break; 555 case IPT_CLEAR_TASK_SET: 556 tmf.tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET; 557 needmarker = 1; 558 break; 559 case IPT_ABORT_TASK_SET: 560 tmf.tmf_flags = ISP24XX_TMF_ABORT_TASK_SET; 561 needmarker = 1; 562 break; 563 default: 564 retval = EINVAL; 565 break; 566 } 567 if (retval) { 568 ISP_UNLOCK(isp); 569 break; 570 } 571 572 retval = isp_exec_entry_queue(isp, &tmf, &sp, 5); 573 if (retval != 0) { 574 isp_prt(isp, ISP_LOGERR, "%s: TMF of chan %d error %d", 575 __func__, chan, retval); 576 ISP_UNLOCK(isp); 577 break; 578 } 579 580 if (sp.req_completion_status 
!= 0) 581 retval = EIO; 582 else if (needmarker) 583 fcp->sendmarker = 1; 584 ISP_UNLOCK(isp); 585 break; 586 } 587 default: 588 break; 589 } 590 return (retval); 591 } 592 593 /* 594 * Local Inlines 595 */ 596 597 static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *); 598 static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *); 599 600 static ISP_INLINE int 601 isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb) 602 { 603 ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free; 604 if (ISP_PCMD(ccb) == NULL) { 605 return (-1); 606 } 607 isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next; 608 return (0); 609 } 610 611 static ISP_INLINE void 612 isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb) 613 { 614 if (ISP_PCMD(ccb)) { 615 #ifdef ISP_TARGET_MODE 616 PISP_PCMD(ccb)->datalen = 0; 617 #endif 618 PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free; 619 isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb); 620 ISP_PCMD(ccb) = NULL; 621 } 622 } 623 624 /* 625 * Put the target mode functions here, because some are inlines 626 */ 627 #ifdef ISP_TARGET_MODE 628 static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); 629 static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t, void *); 630 static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t); 631 static atio_private_data_t *isp_find_atpd_ccb(ispsoftc_t *, int, uint32_t, void *); 632 static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *); 633 static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int); 634 static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t); 635 static void isp_put_ntpd(ispsoftc_t *, int, inot_private_data_t *); 636 static tstate_t *create_lun_state(ispsoftc_t *, int, struct cam_path *); 637 static void destroy_lun_state(ispsoftc_t *, int, tstate_t *); 638 static void isp_enable_lun(ispsoftc_t *, union ccb *); 639 static void isp_disable_lun(ispsoftc_t *, union ccb *); 640 static callout_func_t isp_refire_notify_ack; 641 static void isp_complete_ctio(ispsoftc_t *isp, union ccb *); 642 enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE }; 643 static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How); 644 static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *); 645 static void isp_handle_platform_ctio(ispsoftc_t *, ct7_entry_t *); 646 static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp); 647 static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *); 648 static void isp_target_mark_aborted_early(ispsoftc_t *, int chan, tstate_t *, uint32_t); 649 650 static ISP_INLINE tstate_t * 651 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) 652 { 653 struct isp_fc *fc = ISP_FC_PC(isp, bus); 654 tstate_t *tptr; 655 656 SLIST_FOREACH(tptr, &fc->lun_hash[LUN_HASH_FUNC(lun)], next) { 657 if (tptr->ts_lun == lun) 658 return (tptr); 659 } 660 return (NULL); 661 } 662 663 static int 664 isp_atio_restart(ispsoftc_t *isp, int bus, tstate_t *tptr) 665 { 666 inot_private_data_t *ntp; 667 struct ntpdlist rq; 668 669 if (STAILQ_EMPTY(&tptr->restart_queue)) 670 return (0); 671 STAILQ_INIT(&rq); 672 STAILQ_CONCAT(&rq, &tptr->restart_queue); 673 while ((ntp = STAILQ_FIRST(&rq)) != NULL) { 674 STAILQ_REMOVE_HEAD(&rq, next); 675 isp_prt(isp, ISP_LOGTDEBUG0, 676 "%s: restarting resrc deprived %x", __func__, 677 ((at7_entry_t *)ntp->data)->at_rxid); 678 isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->data); 679 isp_put_ntpd(isp, bus, ntp); 680 if 
(!STAILQ_EMPTY(&tptr->restart_queue)) 681 break; 682 } 683 if (!STAILQ_EMPTY(&rq)) { 684 STAILQ_CONCAT(&rq, &tptr->restart_queue); 685 STAILQ_CONCAT(&tptr->restart_queue, &rq); 686 } 687 return (!STAILQ_EMPTY(&tptr->restart_queue)); 688 } 689 690 static void 691 isp_tmcmd_restart(ispsoftc_t *isp) 692 { 693 struct isp_fc *fc; 694 tstate_t *tptr; 695 union ccb *ccb; 696 int bus, i; 697 698 for (bus = 0; bus < isp->isp_nchan; bus++) { 699 fc = ISP_FC_PC(isp, bus); 700 for (i = 0; i < LUN_HASH_SIZE; i++) { 701 SLIST_FOREACH(tptr, &fc->lun_hash[i], next) 702 isp_atio_restart(isp, bus, tptr); 703 } 704 705 /* 706 * We only need to do this once per channel. 707 */ 708 ccb = (union ccb *)TAILQ_FIRST(&fc->waitq); 709 if (ccb != NULL) { 710 TAILQ_REMOVE(&fc->waitq, &ccb->ccb_h, sim_links.tqe); 711 isp_target_start_ctio(isp, ccb, FROM_TIMER); 712 } 713 } 714 isp_rq_check_above(isp); 715 isp_rq_check_below(isp); 716 } 717 718 static atio_private_data_t * 719 isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag, void *ccb) 720 { 721 struct isp_fc *fc = ISP_FC_PC(isp, chan); 722 atio_private_data_t *atp; 723 724 atp = LIST_FIRST(&fc->atfree); 725 if (atp) { 726 LIST_REMOVE(atp, next); 727 atp->ccb = ccb; 728 atp->tag = tag; 729 LIST_INSERT_HEAD(&fc->atused[ATPDPHASH(tag)], atp, next); 730 } 731 return (atp); 732 } 733 734 static atio_private_data_t * 735 isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag) 736 { 737 struct isp_fc *fc = ISP_FC_PC(isp, chan); 738 atio_private_data_t *atp; 739 740 LIST_FOREACH(atp, &fc->atused[ATPDPHASH(tag)], next) { 741 if (atp->tag == tag) 742 return (atp); 743 } 744 return (NULL); 745 } 746 747 /* 748 * Similar to above, but in addition to tag searches for opaque CCB pointer, 749 * It can be used in situations when the tag alone may already be reused. 
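 * (e.g. after a tag wraparound, when the firmware has already handed the
 * same RX_ID to a new exchange while the old CCB is still completing).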
750 */ 751 static atio_private_data_t * 752 isp_find_atpd_ccb(ispsoftc_t *isp, int chan, uint32_t tag, void *ccb) 753 { 754 struct isp_fc *fc = ISP_FC_PC(isp, chan); 755 atio_private_data_t *atp; 756 757 LIST_FOREACH(atp, &fc->atused[ATPDPHASH(tag)], next) { 758 if (atp->tag == tag && atp->ccb == ccb) 759 return (atp); 760 } 761 return (NULL); 762 } 763 764 static void 765 isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp) 766 { 767 struct isp_fc *fc = ISP_FC_PC(isp, chan); 768 769 if (atp->ests) 770 isp_put_ecmd(isp, atp->ests); 771 LIST_REMOVE(atp, next); 772 memset(atp, 0, sizeof (*atp)); 773 LIST_INSERT_HEAD(&fc->atfree, atp, next); 774 } 775 776 static void 777 isp_dump_atpd(ispsoftc_t *isp, int chan) 778 { 779 struct isp_fc *fc = ISP_FC_PC(isp, chan); 780 atio_private_data_t *atp; 781 const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" }; 782 783 for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) { 784 if (atp->state == ATPD_STATE_FREE) 785 continue; 786 isp_prt(isp, ISP_LOGALL, "Chan %d ATP [0x%x] origdlen %u bytes_xfrd %u lun %jx nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s", 787 chan, atp->tag, atp->orig_datalen, atp->bytes_xfered, (uintmax_t)atp->lun, atp->nphdl, atp->sid, atp->did, atp->oxid, states[atp->state & 0x7]); 788 } 789 } 790 791 static inot_private_data_t * 792 isp_get_ntpd(ispsoftc_t *isp, int chan) 793 { 794 struct isp_fc *fc = ISP_FC_PC(isp, chan); 795 inot_private_data_t *ntp; 796 797 ntp = STAILQ_FIRST(&fc->ntfree); 798 if (ntp) 799 STAILQ_REMOVE_HEAD(&fc->ntfree, next); 800 return (ntp); 801 } 802 803 static inot_private_data_t * 804 isp_find_ntpd(ispsoftc_t *isp, int chan, uint32_t tag_id, uint32_t seq_id) 805 { 806 struct isp_fc *fc = ISP_FC_PC(isp, chan); 807 inot_private_data_t *ntp; 808 809 for (ntp = fc->ntpool; ntp < &fc->ntpool[ATPDPSIZE]; ntp++) { 810 if (ntp->tag_id == tag_id && ntp->seq_id == seq_id) 811 return (ntp); 812 } 813 return (NULL); 814 } 815 816 static void 817 isp_put_ntpd(ispsoftc_t *isp, int chan, inot_private_data_t *ntp) 818 { 819 struct isp_fc *fc = ISP_FC_PC(isp, chan); 820 821 ntp->tag_id = ntp->seq_id = 0; 822 STAILQ_INSERT_HEAD(&fc->ntfree, ntp, next); 823 } 824 825 tstate_t * 826 create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path) 827 { 828 struct isp_fc *fc = ISP_FC_PC(isp, bus); 829 lun_id_t lun; 830 tstate_t *tptr; 831 832 lun = xpt_path_lun_id(path); 833 tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 834 if (tptr == NULL) 835 return (NULL); 836 tptr->ts_lun = lun; 837 SLIST_INIT(&tptr->atios); 838 SLIST_INIT(&tptr->inots); 839 STAILQ_INIT(&tptr->restart_queue); 840 SLIST_INSERT_HEAD(&fc->lun_hash[LUN_HASH_FUNC(lun)], tptr, next); 841 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n"); 842 return (tptr); 843 } 844 845 static void 846 destroy_lun_state(ispsoftc_t *isp, int bus, tstate_t *tptr) 847 { 848 struct isp_fc *fc = ISP_FC_PC(isp, bus); 849 union ccb *ccb; 850 inot_private_data_t *ntp; 851 852 while ((ccb = (union ccb *)SLIST_FIRST(&tptr->atios)) != NULL) { 853 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 854 ccb->ccb_h.status = CAM_REQ_ABORTED; 855 xpt_done(ccb); 856 }; 857 while ((ccb = (union ccb *)SLIST_FIRST(&tptr->inots)) != NULL) { 858 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); 859 ccb->ccb_h.status = CAM_REQ_ABORTED; 860 xpt_done(ccb); 861 } 862 while ((ntp = STAILQ_FIRST(&tptr->restart_queue)) != NULL) { 863 isp_endcmd(isp, ntp->data, NIL_HANDLE, bus, SCSI_STATUS_BUSY, 0); 864 
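		/* The queued ATIO never made it up to CAM; it was answered with BUSY above, so just recycle the notify element. */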
STAILQ_REMOVE_HEAD(&tptr->restart_queue, next); 865 isp_put_ntpd(isp, bus, ntp); 866 } 867 SLIST_REMOVE(&fc->lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], tptr, tstate, next); 868 free(tptr, M_DEVBUF); 869 } 870 871 static void 872 isp_enable_lun(ispsoftc_t *isp, union ccb *ccb) 873 { 874 tstate_t *tptr; 875 int bus = XS_CHANNEL(ccb); 876 target_id_t target = ccb->ccb_h.target_id; 877 lun_id_t lun = ccb->ccb_h.target_lun; 878 879 /* 880 * We only support either target and lun both wildcard 881 * or target and lun both non-wildcard. 882 */ 883 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, 884 "enabling lun %jx\n", (uintmax_t)lun); 885 if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) { 886 ccb->ccb_h.status = CAM_LUN_INVALID; 887 xpt_done(ccb); 888 return; 889 } 890 891 /* Create the state pointer. It should not already exist. */ 892 tptr = get_lun_statep(isp, bus, lun); 893 if (tptr) { 894 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 895 xpt_done(ccb); 896 return; 897 } 898 tptr = create_lun_state(isp, bus, ccb->ccb_h.path); 899 if (tptr == NULL) { 900 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 901 xpt_done(ccb); 902 return; 903 } 904 905 ccb->ccb_h.status = CAM_REQ_CMP; 906 xpt_done(ccb); 907 } 908 909 static void 910 isp_disable_lun(ispsoftc_t *isp, union ccb *ccb) 911 { 912 tstate_t *tptr; 913 int bus = XS_CHANNEL(ccb); 914 target_id_t target = ccb->ccb_h.target_id; 915 lun_id_t lun = ccb->ccb_h.target_lun; 916 917 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, 918 "disabling lun %jx\n", (uintmax_t)lun); 919 if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) { 920 ccb->ccb_h.status = CAM_LUN_INVALID; 921 xpt_done(ccb); 922 return; 923 } 924 925 /* Find the state pointer. */ 926 if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) { 927 ccb->ccb_h.status = CAM_PATH_INVALID; 928 xpt_done(ccb); 929 return; 930 } 931 932 destroy_lun_state(isp, bus, tptr); 933 ccb->ccb_h.status = CAM_REQ_CMP; 934 xpt_done(ccb); 935 } 936 937 static void 938 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how) 939 { 940 int fctape, sendstatus, resid; 941 fcparam *fcp; 942 atio_private_data_t *atp; 943 struct ccb_scsiio *cso; 944 struct isp_ccbq *waitq; 945 uint32_t dmaresult, handle, xfrlen, sense_length, tmp; 946 ct7_entry_t local, *cto = &local; 947 948 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len, 949 (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? 
ccb->csio.sense_len : 0)); 950 951 waitq = &ISP_FC_PC(isp, XS_CHANNEL(ccb))->waitq; 952 switch (how) { 953 case FROM_CAM: 954 /* 955 * Insert at the tail of the list, if any, waiting CTIO CCBs 956 */ 957 TAILQ_INSERT_TAIL(waitq, &ccb->ccb_h, sim_links.tqe); 958 break; 959 case FROM_TIMER: 960 case FROM_SRR: 961 case FROM_CTIO_DONE: 962 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 963 break; 964 } 965 966 while ((ccb = (union ccb *) TAILQ_FIRST(waitq)) != NULL) { 967 TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe); 968 969 cso = &ccb->csio; 970 xfrlen = cso->dxfer_len; 971 if (xfrlen == 0) { 972 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 973 ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n"); 974 ccb->ccb_h.status = CAM_REQ_INVALID; 975 xpt_done(ccb); 976 continue; 977 } 978 } 979 980 atp = isp_find_atpd(isp, XS_CHANNEL(ccb), cso->tag_id); 981 if (atp == NULL) { 982 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__); 983 isp_dump_atpd(isp, XS_CHANNEL(ccb)); 984 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 985 xpt_done(ccb); 986 continue; 987 } 988 989 /* 990 * Check to make sure we're still in target mode. 991 */ 992 fcp = FCPARAM(isp, XS_CHANNEL(ccb)); 993 if ((fcp->role & ISP_ROLE_TARGET) == 0) { 994 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id); 995 ccb->ccb_h.status = CAM_PROVIDE_FAIL; 996 xpt_done(ccb); 997 continue; 998 } 999 1000 /* 1001 * We're only handling ATPD_CCB_OUTSTANDING outstanding CCB at a time (one of which 1002 * could be split into two CTIOs to split data and status). 1003 */ 1004 if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) { 1005 isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags); 1006 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 1007 break; 1008 } 1009 1010 /* 1011 * Does the initiator expect FC-Tape style responses? 1012 */ 1013 if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) { 1014 fctape = 1; 1015 } else { 1016 fctape = 0; 1017 } 1018 1019 /* 1020 * If we already did the data xfer portion of a CTIO that sends data 1021 * and status, don't do it again and do the status portion now. 1022 */ 1023 if (atp->sendst) { 1024 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u", 1025 cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit); 1026 xfrlen = 0; /* we already did the data transfer */ 1027 atp->sendst = 0; 1028 } 1029 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1030 sendstatus = 1; 1031 } else { 1032 sendstatus = 0; 1033 } 1034 1035 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 1036 KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?")); 1037 /* 1038 * Sense length is not the entire sense data structure size. Periph 1039 * drivers don't seem to be setting sense_len to reflect the actual 1040 * size. We'll peek inside to get the right amount. 
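 * Whatever length we end up with is clamped just below so that the
 * response always fits in an extended command buffer
 * (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE).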
1041 */ 1042 sense_length = cso->sense_len; 1043 1044 /* 1045 * This 'cannot' happen 1046 */ 1047 if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) { 1048 sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE; 1049 } 1050 } else { 1051 sense_length = 0; 1052 } 1053 1054 /* 1055 * Check for overflow 1056 */ 1057 tmp = atp->bytes_xfered + atp->bytes_in_transit; 1058 if (xfrlen > 0 && tmp > atp->orig_datalen) { 1059 isp_prt(isp, ISP_LOGERR, 1060 "%s: [0x%x] data overflow by %u bytes", __func__, 1061 cso->tag_id, tmp + xfrlen - atp->orig_datalen); 1062 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1063 xpt_done(ccb); 1064 continue; 1065 } 1066 if (xfrlen > atp->orig_datalen - tmp) { 1067 xfrlen = atp->orig_datalen - tmp; 1068 if (xfrlen == 0 && !sendstatus) { 1069 cso->resid = cso->dxfer_len; 1070 ccb->ccb_h.status = CAM_REQ_CMP; 1071 xpt_done(ccb); 1072 continue; 1073 } 1074 } 1075 1076 memset(cto, 0, QENTRY_LEN); 1077 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; 1078 cto->ct_header.rqs_entry_count = 1; 1079 cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; 1080 ATPD_SET_SEQNO(cto, atp); 1081 cto->ct_nphdl = atp->nphdl; 1082 cto->ct_rxid = atp->tag; 1083 cto->ct_iid_lo = atp->sid; 1084 cto->ct_iid_hi = atp->sid >> 16; 1085 cto->ct_oxid = atp->oxid; 1086 cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb)); 1087 cto->ct_timeout = XS_TIME(ccb); 1088 cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT; 1089 1090 /* 1091 * Mode 1, status, no data. Only possible when we are sending status, have 1092 * no data to transfer, and any sense data can fit into a ct7_entry_t. 1093 * 1094 * Mode 2, status, no data. We have to use this in the case that 1095 * the sense data won't fit into a ct7_entry_t. 1096 * 1097 */ 1098 if (sendstatus && xfrlen == 0) { 1099 cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA; 1100 resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; 1101 if (sense_length <= MAXRESPLEN_24XX) { 1102 cto->ct_flags |= CT7_FLAG_MODE1; 1103 cto->ct_scsi_status = cso->scsi_status; 1104 if (resid < 0) { 1105 cto->ct_resid = -resid; 1106 cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8); 1107 } else if (resid > 0) { 1108 cto->ct_resid = resid; 1109 cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8); 1110 } 1111 if (fctape) { 1112 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; 1113 } 1114 if (sense_length) { 1115 cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8); 1116 cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length; 1117 memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length); 1118 } 1119 } else { 1120 bus_addr_t addr; 1121 fcp_rsp_iu_t rp; 1122 1123 if (atp->ests == NULL) { 1124 atp->ests = isp_get_ecmd(isp); 1125 if (atp->ests == NULL) { 1126 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 1127 break; 1128 } 1129 } 1130 memset(&rp, 0, sizeof(rp)); 1131 if (fctape) { 1132 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; 1133 rp.fcp_rsp_bits |= FCP_CONF_REQ; 1134 } 1135 cto->ct_flags |= CT7_FLAG_MODE2; 1136 rp.fcp_rsp_scsi_status = cso->scsi_status; 1137 if (resid < 0) { 1138 rp.fcp_rsp_resid = -resid; 1139 rp.fcp_rsp_bits |= FCP_RESID_OVERFLOW; 1140 } else if (resid > 0) { 1141 rp.fcp_rsp_resid = resid; 1142 rp.fcp_rsp_bits |= FCP_RESID_UNDERFLOW; 1143 } 1144 if (sense_length) { 1145 rp.fcp_rsp_snslen = sense_length; 1146 cto->ct_senselen = sense_length; 1147 rp.fcp_rsp_bits |= FCP_SNSLEN_VALID; 1148 isp_put_fcp_rsp_iu(isp, &rp, atp->ests); 1149 memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length); 1150 } else { 1151 isp_put_fcp_rsp_iu(isp, &rp, 
atp->ests); 1152 } 1153 if (isp->isp_dblev & ISP_LOGTDEBUG1) { 1154 isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests); 1155 } 1156 bus_dmamap_sync(isp->isp_osinfo.ecmd_dmat, isp->isp_osinfo.ecmd_map, BUS_DMASYNC_PREWRITE); 1157 addr = isp->isp_osinfo.ecmd_dma; 1158 addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE); 1159 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests, 1160 (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length); 1161 cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length; 1162 cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr); 1163 cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr); 1164 cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; 1165 } 1166 if (sense_length) { 1167 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__, 1168 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length, 1169 cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]); 1170 } else { 1171 isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__, 1172 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid); 1173 } 1174 atp->state = ATPD_STATE_LAST_CTIO; 1175 } 1176 1177 /* 1178 * Mode 0 data transfers, *possibly* with status. 1179 */ 1180 if (xfrlen != 0) { 1181 cto->ct_flags |= CT7_FLAG_MODE0; 1182 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1183 cto->ct_flags |= CT7_DATA_IN; 1184 } else { 1185 cto->ct_flags |= CT7_DATA_OUT; 1186 } 1187 1188 cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit; 1189 cto->rsp.m0.ct_xfrlen = xfrlen; 1190 1191 #ifdef DEBUG 1192 if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) { 1193 isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2)); 1194 ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0; 1195 cto->rsp.m0.ct_xfrlen -= xfrlen >> 2; 1196 } 1197 #endif 1198 if (sendstatus) { 1199 resid = atp->orig_datalen - atp->bytes_xfered - xfrlen; 1200 if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) { 1201 cto->ct_flags |= CT7_SENDSTATUS; 1202 atp->state = ATPD_STATE_LAST_CTIO; 1203 if (fctape) { 1204 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; 1205 } 1206 } else { 1207 atp->sendst = 1; /* send status later */ 1208 cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM; 1209 atp->state = ATPD_STATE_CTIO; 1210 } 1211 } else { 1212 atp->state = ATPD_STATE_CTIO; 1213 } 1214 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__, 1215 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered); 1216 } 1217 1218 if (isp_get_pcmd(isp, ccb)) { 1219 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n"); 1220 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 1221 break; 1222 } 1223 handle = isp_allocate_handle(isp, ccb, ISP_HANDLE_TARGET); 1224 if (handle == 0) { 
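			/* Out of command handles; park the CCB back on the wait queue so the timer path (isp_tmcmd_restart) can retry it. */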
1225 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__); 1226 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 1227 isp_free_pcmd(isp, ccb); 1228 break; 1229 } 1230 atp->bytes_in_transit += xfrlen; 1231 PISP_PCMD(ccb)->datalen = xfrlen; 1232 1233 /* 1234 * Call the dma setup routines for this entry (and any subsequent 1235 * CTIOs) if there's data to move, and then tell the f/w it's got 1236 * new things to play with. As with isp_start's usage of DMA setup, 1237 * any swizzling is done in the machine dependent layer. Because 1238 * of this, we put the request onto the queue area first in native 1239 * format. 1240 */ 1241 cto->ct_syshandle = handle; 1242 dmaresult = ISP_DMASETUP(isp, cso, cto); 1243 if (dmaresult != 0) { 1244 isp_destroy_handle(isp, handle); 1245 isp_free_pcmd(isp, ccb); 1246 if (dmaresult == CMD_EAGAIN) { 1247 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 1248 break; 1249 } 1250 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1251 xpt_done(ccb); 1252 continue; 1253 } 1254 ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED; 1255 if (xfrlen) { 1256 ccb->ccb_h.spriv_field0 = atp->bytes_xfered; 1257 } else { 1258 ccb->ccb_h.spriv_field0 = ~0; 1259 } 1260 atp->ctcnt++; 1261 atp->seqno++; 1262 } 1263 } 1264 1265 static void 1266 isp_refire_notify_ack(void *arg) 1267 { 1268 isp_tna_t *tp = arg; 1269 ispsoftc_t *isp = tp->isp; 1270 1271 ISP_ASSERT_LOCKED(isp); 1272 if (isp_notify_ack(isp, tp->not)) { 1273 callout_schedule(&tp->timer, 5); 1274 } else { 1275 free(tp, M_DEVBUF); 1276 } 1277 } 1278 1279 1280 static void 1281 isp_complete_ctio(ispsoftc_t *isp, union ccb *ccb) 1282 { 1283 1284 isp_rq_check_below(isp); 1285 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1286 xpt_done(ccb); 1287 } 1288 1289 static void 1290 isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep) 1291 { 1292 int cdbxlen; 1293 lun_id_t lun; 1294 uint16_t chan, nphdl = NIL_HANDLE; 1295 uint32_t did, sid; 1296 fcportdb_t *lp; 1297 tstate_t *tptr; 1298 struct ccb_accept_tio *atiop; 1299 atio_private_data_t *atp = NULL; 1300 atio_private_data_t *oatp; 1301 inot_private_data_t *ntp; 1302 1303 did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2]; 1304 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; 1305 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun)); 1306 1307 if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) { 1308 /* Channel has to be derived from D_ID */ 1309 isp_find_chan_by_did(isp, did, &chan); 1310 if (chan == ISP_NOCHAN) { 1311 isp_prt(isp, ISP_LOGWARN, 1312 "%s: [RX_ID 0x%x] D_ID %x not found on any channel", 1313 __func__, aep->at_rxid, did); 1314 isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, 1315 ECMD_TERMINATE, 0); 1316 return; 1317 } 1318 } else { 1319 chan = 0; 1320 } 1321 1322 /* 1323 * Find the PDB entry for this initiator 1324 */ 1325 if (isp_find_pdb_by_portid(isp, chan, sid, &lp) == 0) { 1326 /* 1327 * If we're not in the port database terminate the exchange. 
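 * Without a port database entry we have no N-Port handle with which to
 * address the initiator, so the command cannot be completed normally.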
1328 */ 1329 isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already", 1330 __func__, aep->at_rxid, did, chan, sid); 1331 isp_dump_portdb(isp, chan); 1332 isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0); 1333 return; 1334 } 1335 nphdl = lp->handle; 1336 1337 /* 1338 * Get the tstate pointer 1339 */ 1340 tptr = get_lun_statep(isp, chan, lun); 1341 if (tptr == NULL) { 1342 tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD); 1343 if (tptr == NULL) { 1344 isp_prt(isp, ISP_LOGWARN, 1345 "%s: [0x%x] no state pointer for lun %jx or wildcard", 1346 __func__, aep->at_rxid, (uintmax_t)lun); 1347 if (lun == 0) { 1348 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); 1349 } else { 1350 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); 1351 } 1352 return; 1353 } 1354 } 1355 1356 /* 1357 * Start any commands pending resources first. 1358 */ 1359 if (isp_atio_restart(isp, chan, tptr)) 1360 goto noresrc; 1361 1362 /* 1363 * If the f/w is out of resources, just send a BUSY status back. 1364 */ 1365 if (aep->at_rxid == AT7_NORESRC_RXID) { 1366 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0); 1367 return; 1368 } 1369 1370 /* 1371 * If we're out of resources, just send a BUSY status back. 1372 */ 1373 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1374 if (atiop == NULL) { 1375 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid); 1376 goto noresrc; 1377 } 1378 1379 oatp = isp_find_atpd(isp, chan, aep->at_rxid); 1380 if (oatp) { 1381 isp_prt(isp, oatp->state == ATPD_STATE_LAST_CTIO ? ISP_LOGTDEBUG0 : 1382 ISP_LOGWARN, "[0x%x] tag wraparound (N-Port Handle " 1383 "0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d", 1384 aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state); 1385 /* 1386 * It's not a "no resource" condition- but we can treat it like one 1387 */ 1388 goto noresrc; 1389 } 1390 atp = isp_get_atpd(isp, chan, aep->at_rxid, atiop); 1391 if (atp == NULL) { 1392 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid); 1393 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0); 1394 return; 1395 } 1396 atp->word3 = lp->prli_word3; 1397 atp->state = ATPD_STATE_ATIO; 1398 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1399 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n"); 1400 atiop->init_id = FC_PORTDB_TGT(isp, chan, lp); 1401 atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp); 1402 atiop->ccb_h.target_lun = lun; 1403 atiop->sense_len = 0; 1404 cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT; 1405 if (cdbxlen) { 1406 isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored"); 1407 } 1408 cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb); 1409 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen); 1410 atiop->cdb_len = cdbxlen; 1411 atiop->ccb_h.status = CAM_CDB_RECVD; 1412 atiop->tag_id = atp->tag; 1413 switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) { 1414 case FCP_CMND_TASK_ATTR_SIMPLE: 1415 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 1416 atiop->tag_action = MSG_SIMPLE_TASK; 1417 break; 1418 case FCP_CMND_TASK_ATTR_HEAD: 1419 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 1420 atiop->tag_action = MSG_HEAD_OF_QUEUE_TASK; 1421 break; 1422 case FCP_CMND_TASK_ATTR_ORDERED: 1423 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 1424 atiop->tag_action = MSG_ORDERED_TASK; 1425 break; 1426 case FCP_CMND_TASK_ATTR_ACA: 1427 atiop->ccb_h.flags |= 
CAM_TAG_ACTION_VALID; 1428 atiop->tag_action = MSG_ACA_TASK; 1429 break; 1430 case FCP_CMND_TASK_ATTR_UNTAGGED: 1431 default: 1432 atiop->tag_action = 0; 1433 break; 1434 } 1435 atiop->priority = (aep->at_cmnd.fcp_cmnd_task_attribute & 1436 FCP_CMND_PRIO_MASK) >> FCP_CMND_PRIO_SHIFT; 1437 atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl; 1438 atp->bytes_xfered = 0; 1439 atp->lun = lun; 1440 atp->nphdl = nphdl; 1441 atp->sid = sid; 1442 atp->did = did; 1443 atp->oxid = aep->at_hdr.ox_id; 1444 atp->rxid = aep->at_hdr.rx_id; 1445 atp->cdb0 = atiop->cdb_io.cdb_bytes[0]; 1446 atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK; 1447 atp->state = ATPD_STATE_CAM; 1448 isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u", 1449 aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen); 1450 xpt_done((union ccb *)atiop); 1451 return; 1452 noresrc: 1453 KASSERT(atp == NULL, ("%s: atp is not NULL on noresrc!\n", __func__)); 1454 ntp = isp_get_ntpd(isp, chan); 1455 if (ntp == NULL) { 1456 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); 1457 return; 1458 } 1459 memcpy(ntp->data, aep, QENTRY_LEN); 1460 STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next); 1461 } 1462 1463 1464 /* 1465 * Handle starting an SRR (sequence retransmit request) 1466 * We get here when we've gotten the immediate notify 1467 * and the return of all outstanding CTIOs for this 1468 * transaction. 1469 */ 1470 static void 1471 isp_handle_srr_start(ispsoftc_t *isp, atio_private_data_t *atp) 1472 { 1473 in_fcentry_24xx_t *inot; 1474 uint32_t srr_off, ccb_off, ccb_len, ccb_end; 1475 union ccb *ccb; 1476 1477 inot = (in_fcentry_24xx_t *)atp->srr; 1478 srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16); 1479 ccb = atp->srr_ccb; 1480 atp->srr_ccb = NULL; 1481 atp->nsrr++; 1482 if (ccb == NULL) { 1483 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag); 1484 goto fail; 1485 } 1486 1487 ccb_off = ccb->ccb_h.spriv_field0; 1488 ccb_len = ccb->csio.dxfer_len; 1489 ccb_end = (ccb_off == ~0)? ~0 : ccb_off + ccb_len; 1490 1491 switch (inot->in_srr_iu) { 1492 case R_CTL_INFO_SOLICITED_DATA: 1493 /* 1494 * We have to restart a FCP_DATA data out transaction 1495 */ 1496 atp->sendst = 0; 1497 atp->bytes_xfered = srr_off; 1498 if (ccb_len == 0) { 1499 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off); 1500 goto mdp; 1501 } 1502 if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) { 1503 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); 1504 goto mdp; 1505 } 1506 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); 1507 break; 1508 case R_CTL_INFO_COMMAND_STATUS: 1509 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag); 1510 atp->sendst = 1; 1511 /* 1512 * We have to restart a FCP_RSP IU transaction 1513 */ 1514 break; 1515 case R_CTL_INFO_DATA_DESCRIPTOR: 1516 /* 1517 * We have to restart an FCP DATA in transaction 1518 */ 1519 isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping"); 1520 goto fail; 1521 1522 default: 1523 isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu); 1524 goto fail; 1525 } 1526 1527 /* 1528 * We can't do anything until this is acked, so we might as well start it now. 
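 * If the ack cannot be pushed, the whole exchange is failed (see the
 * 'fail' path below).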
1529 * We aren't going to do the usual asynchronous ack issue because we need 1530 * to make sure this gets on the wire first. 1531 */ 1532 if (isp_notify_ack(isp, inot)) { 1533 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); 1534 goto fail; 1535 } 1536 isp_target_start_ctio(isp, ccb, FROM_SRR); 1537 return; 1538 fail: 1539 inot->in_reserved = 1; 1540 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 1541 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1542 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 1543 isp_complete_ctio(isp, ccb); 1544 return; 1545 mdp: 1546 if (isp_notify_ack(isp, inot)) { 1547 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); 1548 goto fail; 1549 } 1550 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1551 ccb->ccb_h.status |= CAM_MESSAGE_RECV; 1552 /* 1553 * This is not a strict interpretation of MDP, but it's close 1554 */ 1555 ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16]; 1556 ccb->csio.msg_len = 7; 1557 ccb->csio.msg_ptr[0] = MSG_EXTENDED; 1558 ccb->csio.msg_ptr[1] = 5; 1559 ccb->csio.msg_ptr[2] = 0; /* modify data pointer */ 1560 ccb->csio.msg_ptr[3] = srr_off >> 24; 1561 ccb->csio.msg_ptr[4] = srr_off >> 16; 1562 ccb->csio.msg_ptr[5] = srr_off >> 8; 1563 ccb->csio.msg_ptr[6] = srr_off; 1564 isp_complete_ctio(isp, ccb); 1565 } 1566 1567 1568 static void 1569 isp_handle_platform_srr(ispsoftc_t *isp, isp_notify_t *notify) 1570 { 1571 in_fcentry_24xx_t *inot = notify->nt_lreserved; 1572 atio_private_data_t *atp; 1573 uint32_t tag = notify->nt_tagval & 0xffffffff; 1574 1575 atp = isp_find_atpd(isp, notify->nt_channel, tag); 1576 if (atp == NULL) { 1577 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify", 1578 __func__, tag); 1579 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 1580 return; 1581 } 1582 atp->srr_notify_rcvd = 1; 1583 memcpy(atp->srr, inot, sizeof (atp->srr)); 1584 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] flags 0x%x srr_iu %x reloff 0x%x", 1585 inot->in_rxid, inot->in_flags, inot->in_srr_iu, 1586 ((uint32_t)inot->in_srr_reloff_hi << 16) | inot->in_srr_reloff_lo); 1587 if (atp->srr_ccb) 1588 isp_handle_srr_start(isp, atp); 1589 } 1590 1591 static void 1592 isp_handle_platform_ctio(ispsoftc_t *isp, ct7_entry_t *ct) 1593 { 1594 union ccb *ccb; 1595 int sentstatus = 0, ok = 0, notify_cam = 0, failure = 0; 1596 atio_private_data_t *atp = NULL; 1597 int bus; 1598 uint32_t handle, data_requested, resid; 1599 1600 handle = ct->ct_syshandle; 1601 ccb = isp_find_xs(isp, handle); 1602 if (ccb == NULL) { 1603 isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, ct); 1604 return; 1605 } 1606 isp_destroy_handle(isp, handle); 1607 resid = data_requested = PISP_PCMD(ccb)->datalen; 1608 isp_free_pcmd(isp, ccb); 1609 1610 bus = XS_CHANNEL(ccb); 1611 atp = isp_find_atpd(isp, bus, ct->ct_rxid); 1612 if (atp == NULL) { 1613 /* 1614 * XXX: isp_clear_commands() generates fake CTIO with zero 1615 * ct_rxid value, filling only ct_syshandle. Workaround 1616 * that using tag_id from the CCB, pointed by ct_syshandle. 
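 * Since the CCB itself was recovered from ct_syshandle above, its
 * csio.tag_id still identifies the correct adjunct.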
1617 */ 1618 atp = isp_find_atpd(isp, bus, ccb->csio.tag_id); 1619 } 1620 if (atp == NULL) { 1621 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id); 1622 return; 1623 } 1624 KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero")); 1625 atp->bytes_in_transit -= data_requested; 1626 atp->ctcnt -= 1; 1627 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1628 1629 if (ct->ct_nphdl == CT7_SRR) { 1630 atp->srr_ccb = ccb; 1631 if (atp->srr_notify_rcvd) 1632 isp_handle_srr_start(isp, atp); 1633 return; 1634 } 1635 if (ct->ct_nphdl == CT_HBA_RESET) { 1636 sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) && 1637 (atp->sendst == 0); 1638 failure = CAM_UNREC_HBA_ERROR; 1639 } else { 1640 sentstatus = ct->ct_flags & CT7_SENDSTATUS; 1641 ok = (ct->ct_nphdl == CT7_OK); 1642 notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; 1643 if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) 1644 resid = ct->ct_resid; 1645 } 1646 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct), 1647 notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID"); 1648 if (ok) { 1649 if (data_requested > 0) { 1650 atp->bytes_xfered += data_requested - resid; 1651 ccb->csio.resid = ccb->csio.dxfer_len - 1652 (data_requested - resid); 1653 } 1654 if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) 1655 ccb->ccb_h.status |= CAM_SENT_SENSE; 1656 ccb->ccb_h.status |= CAM_REQ_CMP; 1657 } else { 1658 notify_cam = 1; 1659 if (failure == CAM_UNREC_HBA_ERROR) 1660 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; 1661 else 1662 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 1663 } 1664 atp->state = ATPD_STATE_PDON; 1665 1666 /* 1667 * We never *not* notify CAM when there has been any error (ok == 0), 1668 * so we never need to do an ATIO putback if we're not notifying CAM. 1669 */ 1670 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)", 1671 (sentstatus)? " FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0); 1672 if (notify_cam == 0) { 1673 if (atp->sendst) { 1674 isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE); 1675 } 1676 return; 1677 } 1678 1679 /* 1680 * We are done with this ATIO if we successfully sent status. 1681 * In all other cases expect either another CTIO or XPT_ABORT. 1682 */ 1683 if (ok && sentstatus) 1684 isp_put_atpd(isp, bus, atp); 1685 1686 /* 1687 * We're telling CAM we're done with this CTIO transaction. 1688 * 1689 * 24XX cards never need an ATIO put back. 1690 */ 1691 isp_complete_ctio(isp, ccb); 1692 } 1693 1694 static int 1695 isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp) 1696 { 1697 ct7_entry_t local, *cto = &local; 1698 1699 if (isp->isp_state != ISP_RUNSTATE) { 1700 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL); 1701 return (0); 1702 } 1703 1704 /* 1705 * This case is for a Task Management Function, which shows up as an ATIO7 entry. 
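 * It is answered directly with a mode-1 CTIO whose 4-byte response-info
 * field carries the completion code passed in 'rsp'.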
1706 */ 1707 if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) { 1708 at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved; 1709 fcportdb_t *lp; 1710 uint32_t sid; 1711 uint16_t nphdl; 1712 1713 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; 1714 if (isp_find_pdb_by_portid(isp, mp->nt_channel, sid, &lp)) { 1715 nphdl = lp->handle; 1716 } else { 1717 nphdl = NIL_HANDLE; 1718 } 1719 ISP_MEMZERO(cto, sizeof (ct7_entry_t)); 1720 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; 1721 cto->ct_header.rqs_entry_count = 1; 1722 cto->ct_nphdl = nphdl; 1723 cto->ct_rxid = aep->at_rxid; 1724 cto->ct_vpidx = mp->nt_channel; 1725 cto->ct_iid_lo = sid; 1726 cto->ct_iid_hi = sid >> 16; 1727 cto->ct_oxid = aep->at_hdr.ox_id; 1728 cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1; 1729 cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT; 1730 if (rsp != 0) { 1731 cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8); 1732 cto->rsp.m1.ct_resplen = 4; 1733 ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp)); 1734 cto->rsp.m1.ct_resp[0] = rsp & 0xff; 1735 cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff; 1736 cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff; 1737 cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff; 1738 } 1739 return (isp_send_entry(isp, cto)); 1740 } 1741 1742 /* 1743 * This case is for a responding to an ABTS frame 1744 */ 1745 if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) 1746 return (isp_acknak_abts(isp, mp->nt_lreserved, (rsp == 0) ? 0 : EINVAL)); 1747 1748 /* 1749 * General purpose acknowledgement 1750 */ 1751 if (mp->nt_need_ack) { 1752 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL); 1753 /* 1754 * Don't need to use the guaranteed send because the caller can retry 1755 */ 1756 return (isp_notify_ack(isp, mp->nt_lreserved)); 1757 } 1758 return (0); 1759 } 1760 1761 /* 1762 * Handle task management functions. 1763 * 1764 * We show up here with a notify structure filled out. 1765 * 1766 * The nt_lreserved tag points to the original queue entry 1767 */ 1768 static void 1769 isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify) 1770 { 1771 tstate_t *tptr; 1772 fcportdb_t *lp; 1773 struct ccb_immediate_notify *inot; 1774 inot_private_data_t *ntp = NULL; 1775 atio_private_data_t *atp; 1776 lun_id_t lun; 1777 1778 isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun %jx", __func__, notify->nt_ncode, 1779 notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun); 1780 if (notify->nt_lun == LUN_ANY) { 1781 if (notify->nt_tagval == TAG_ANY) { 1782 lun = CAM_LUN_WILDCARD; 1783 } else { 1784 atp = isp_find_atpd(isp, notify->nt_channel, 1785 notify->nt_tagval & 0xffffffff); 1786 lun = atp ? 
atp->lun : CAM_LUN_WILDCARD; 1787 } 1788 } else { 1789 lun = notify->nt_lun; 1790 } 1791 tptr = get_lun_statep(isp, notify->nt_channel, lun); 1792 if (tptr == NULL) { 1793 tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD); 1794 if (tptr == NULL) { 1795 isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); 1796 goto bad; 1797 } 1798 } 1799 inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots); 1800 if (inot == NULL) { 1801 isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); 1802 goto bad; 1803 } 1804 1805 inot->ccb_h.target_id = ISP_MAX_TARGETS(isp); 1806 inot->ccb_h.target_lun = lun; 1807 if (isp_find_pdb_by_portid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 && 1808 isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) { 1809 inot->initiator_id = CAM_TARGET_WILDCARD; 1810 } else { 1811 inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp); 1812 } 1813 inot->seq_id = notify->nt_tagval; 1814 inot->tag_id = notify->nt_tagval >> 32; 1815 1816 switch (notify->nt_ncode) { 1817 case NT_ABORT_TASK: 1818 isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, inot->tag_id); 1819 inot->arg = MSG_ABORT_TASK; 1820 break; 1821 case NT_ABORT_TASK_SET: 1822 isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, TAG_ANY); 1823 inot->arg = MSG_ABORT_TASK_SET; 1824 break; 1825 case NT_CLEAR_ACA: 1826 inot->arg = MSG_CLEAR_ACA; 1827 break; 1828 case NT_CLEAR_TASK_SET: 1829 inot->arg = MSG_CLEAR_TASK_SET; 1830 break; 1831 case NT_LUN_RESET: 1832 inot->arg = MSG_LOGICAL_UNIT_RESET; 1833 break; 1834 case NT_TARGET_RESET: 1835 inot->arg = MSG_TARGET_RESET; 1836 break; 1837 case NT_QUERY_TASK_SET: 1838 inot->arg = MSG_QUERY_TASK_SET; 1839 break; 1840 case NT_QUERY_ASYNC_EVENT: 1841 inot->arg = MSG_QUERY_ASYNC_EVENT; 1842 break; 1843 default: 1844 isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun); 1845 goto bad; 1846 } 1847 1848 ntp = isp_get_ntpd(isp, notify->nt_channel); 1849 if (ntp == NULL) { 1850 isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__); 1851 goto bad; 1852 } 1853 ISP_MEMCPY(&ntp->nt, notify, sizeof (isp_notify_t)); 1854 if (notify->nt_lreserved) { 1855 ISP_MEMCPY(&ntp->data, notify->nt_lreserved, QENTRY_LEN); 1856 ntp->nt.nt_lreserved = &ntp->data; 1857 } 1858 ntp->seq_id = notify->nt_tagval; 1859 ntp->tag_id = notify->nt_tagval >> 32; 1860 1861 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); 1862 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "Take FREE INOT\n"); 1863 inot->ccb_h.status = CAM_MESSAGE_RECV; 1864 xpt_done((union ccb *)inot); 1865 return; 1866 bad: 1867 if (notify->nt_need_ack) { 1868 if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { 1869 if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) { 1870 isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK"); 1871 } 1872 } else { 1873 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved); 1874 } 1875 } 1876 } 1877 1878 /* 1879 * Clean aborted commands pending restart 1880 */ 1881 static void 1882 isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id) 1883 { 1884 inot_private_data_t *ntp, *tmp; 1885 uint32_t this_tag_id; 1886 1887 STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) { 1888 this_tag_id = 
((at7_entry_t *)ntp->data)->at_rxid; 1889 if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) { 1890 STAILQ_REMOVE(&tptr->restart_queue, ntp, 1891 inot_private_data, next); 1892 isp_endcmd(isp, ntp->data, NIL_HANDLE, chan, 1893 ECMD_TERMINATE, 0); 1894 isp_put_ntpd(isp, chan, ntp); 1895 } 1896 } 1897 } 1898 #endif 1899 1900 static void 1901 isp_poll(struct cam_sim *sim) 1902 { 1903 ispsoftc_t *isp = cam_sim_softc(sim); 1904 1905 ISP_RUN_ISR(isp); 1906 } 1907 1908 1909 static void 1910 isp_watchdog(void *arg) 1911 { 1912 struct ccb_scsiio *xs = arg; 1913 ispsoftc_t *isp; 1914 uint32_t ohandle = ISP_HANDLE_FREE, handle; 1915 1916 isp = XS_ISP(xs); 1917 1918 handle = isp_find_handle(isp, xs); 1919 1920 /* 1921 * Hand crank the interrupt code just to be sure the command isn't stuck somewhere. 1922 */ 1923 if (handle != ISP_HANDLE_FREE) { 1924 ISP_RUN_ISR(isp); 1925 ohandle = handle; 1926 handle = isp_find_handle(isp, xs); 1927 } 1928 if (handle != ISP_HANDLE_FREE) { 1929 /* 1930 * Try and make sure the command is really dead before 1931 * we release the handle (and DMA resources) for reuse. 1932 * 1933 * If we are successful in aborting the command then 1934 * we're done here because we'll get the command returned 1935 * back separately. 1936 */ 1937 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) { 1938 return; 1939 } 1940 1941 /* 1942 * Note that after calling the above, the command may in 1943 * fact have been completed. 1944 */ 1945 xs = isp_find_xs(isp, handle); 1946 1947 /* 1948 * If the command no longer exists, then we won't 1949 * be able to find the xs again with this handle. 1950 */ 1951 if (xs == NULL) { 1952 return; 1953 } 1954 1955 /* 1956 * After this point, the command is really dead. 1957 */ 1958 ISP_DMAFREE(isp, xs); 1959 isp_destroy_handle(isp, handle); 1960 isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle); 1961 XS_SETERR(xs, CAM_CMD_TIMEOUT); 1962 isp_done(xs); 1963 } else { 1964 if (ohandle != ISP_HANDLE_FREE) { 1965 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle); 1966 } else { 1967 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__); 1968 } 1969 } 1970 } 1971 1972 static void 1973 isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) 1974 { 1975 union ccb *ccb; 1976 struct isp_fc *fc = ISP_FC_PC(isp, chan); 1977 1978 /* 1979 * Allocate a CCB, create a wildcard path for this target and schedule a rescan. 1980 */ 1981 ccb = xpt_alloc_ccb_nowait(); 1982 if (ccb == NULL) { 1983 isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan); 1984 return; 1985 } 1986 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim), 1987 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1988 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); 1989 xpt_free_ccb(ccb); 1990 return; 1991 } 1992 xpt_rescan(ccb); 1993 } 1994 1995 static void 1996 isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) 1997 { 1998 struct cam_path *tp; 1999 struct isp_fc *fc = ISP_FC_PC(isp, chan); 2000 2001 if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 2002 xpt_async(AC_LOST_DEVICE, tp, NULL); 2003 xpt_free_path(tp); 2004 } 2005 } 2006 2007 /* 2008 * Gone Device Timer Function- when we have decided that a device has gone 2009 * away, we wait a specific period of time prior to telling the OS it has 2010 * gone away. 
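* The length of that grace period comes from the per-channel gone_device_time value, which is copied into the port database entry's gone_timer when the device is first marked as a zombie.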
2011 * 2012 * This timer function fires once a second and then scans the port database 2013 * for devices that are marked dead but still have a virtual target assigned. 2014 * We decrement a counter for that port database entry, and when it hits zero, 2015 * we tell the OS the device has gone away. 2016 */ 2017 static void 2018 isp_gdt(void *arg) 2019 { 2020 struct isp_fc *fc = arg; 2021 taskqueue_enqueue(taskqueue_thread, &fc->gtask); 2022 } 2023 2024 static void 2025 isp_gdt_task(void *arg, int pending) 2026 { 2027 struct isp_fc *fc = arg; 2028 ispsoftc_t *isp = fc->isp; 2029 int chan = fc - ISP_FC_PC(isp, 0); 2030 fcportdb_t *lp; 2031 struct ac_contract ac; 2032 struct ac_device_changed *adc; 2033 int dbidx, more_to_do = 0; 2034 2035 ISP_LOCK(isp); 2036 isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan); 2037 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2038 lp = &FCPARAM(isp, chan)->portdb[dbidx]; 2039 2040 if (lp->state != FC_PORTDB_STATE_ZOMBIE) { 2041 continue; 2042 } 2043 if (lp->gone_timer != 0) { 2044 lp->gone_timer -= 1; 2045 more_to_do++; 2046 continue; 2047 } 2048 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout"); 2049 if (lp->is_target) { 2050 lp->is_target = 0; 2051 isp_make_gone(isp, lp, chan, dbidx); 2052 } 2053 if (lp->is_initiator) { 2054 lp->is_initiator = 0; 2055 ac.contract_number = AC_CONTRACT_DEV_CHG; 2056 adc = (struct ac_device_changed *) ac.contract_data; 2057 adc->wwpn = lp->port_wwn; 2058 adc->port = lp->portid; 2059 adc->target = dbidx; 2060 adc->arrived = 0; 2061 xpt_async(AC_CONTRACT, fc->path, &ac); 2062 } 2063 lp->state = FC_PORTDB_STATE_NIL; 2064 } 2065 if (fc->ready) { 2066 if (more_to_do) { 2067 callout_reset(&fc->gdt, hz, isp_gdt, fc); 2068 } else { 2069 callout_deactivate(&fc->gdt); 2070 isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime); 2071 } 2072 } 2073 ISP_UNLOCK(isp); 2074 } 2075 2076 /* 2077 * When the loop goes down, we remember the time and freeze the CAM command queue. 2078 * For some period of time we try to reprobe the loop, but if we 2079 * fail, we tell the OS that devices have gone away and drop the freeze. 2080 * 2081 * We don't clear the devices out of our port database because, when the loop 2082 * comes back up, we have to do some actual cleanup with the chip at that 2083 * point (implicit PLOGO, e.g., to get the chip's port database state right).
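* isp_loop_changed() below just records when the loop went down and wakes the per-channel kthread; it is the kthread that later decides whether to call isp_loop_up() or isp_loop_dead().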
2084 */ 2085 static void 2086 isp_loop_changed(ispsoftc_t *isp, int chan) 2087 { 2088 fcparam *fcp = FCPARAM(isp, chan); 2089 struct isp_fc *fc = ISP_FC_PC(isp, chan); 2090 2091 if (fc->loop_down_time) 2092 return; 2093 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop changed", chan); 2094 if (fcp->role & ISP_ROLE_INITIATOR) 2095 isp_freeze_loopdown(isp, chan); 2096 fc->loop_down_time = time_uptime; 2097 wakeup(fc); 2098 } 2099 2100 static void 2101 isp_loop_up(ispsoftc_t *isp, int chan) 2102 { 2103 struct isp_fc *fc = ISP_FC_PC(isp, chan); 2104 2105 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is up", chan); 2106 fc->loop_seen_once = 1; 2107 fc->loop_down_time = 0; 2108 isp_unfreeze_loopdown(isp, chan); 2109 } 2110 2111 static void 2112 isp_loop_dead(ispsoftc_t *isp, int chan) 2113 { 2114 fcparam *fcp = FCPARAM(isp, chan); 2115 struct isp_fc *fc = ISP_FC_PC(isp, chan); 2116 fcportdb_t *lp; 2117 struct ac_contract ac; 2118 struct ac_device_changed *adc; 2119 int dbidx, i; 2120 2121 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is dead", chan); 2122 2123 /* 2124 * Notify the OS of all targets that we now consider to have departed. 2125 */ 2126 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2127 lp = &fcp->portdb[dbidx]; 2128 2129 if (lp->state == FC_PORTDB_STATE_NIL) 2130 continue; 2131 2132 for (i = 0; i < ISP_HANDLE_NUM(isp); i++) { 2133 struct ccb_scsiio *xs; 2134 2135 if (ISP_H2HT(isp->isp_xflist[i].handle) != ISP_HANDLE_INITIATOR) { 2136 continue; 2137 } 2138 if ((xs = isp->isp_xflist[i].cmd) == NULL) { 2139 continue; 2140 } 2141 if (dbidx != XS_TGT(xs)) { 2142 continue; 2143 } 2144 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout", 2145 isp->isp_xflist[i].handle, chan, XS_TGT(xs), 2146 (uintmax_t)XS_LUN(xs)); 2147 2148 /* 2149 * Just like in isp_watchdog, abort the outstanding 2150 * command or immediately free its resources if it is 2151 * not active 2152 */ 2153 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) { 2154 continue; 2155 } 2156 2157 ISP_DMAFREE(isp, xs); 2158 isp_destroy_handle(isp, isp->isp_xflist[i].handle); 2159 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed", 2160 isp->isp_xflist[i].handle, chan, XS_TGT(xs), 2161 (uintmax_t)XS_LUN(xs)); 2162 XS_SETERR(xs, HBA_BUSRESET); 2163 isp_done(xs); 2164 } 2165 2166 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout"); 2167 if (lp->is_target) { 2168 lp->is_target = 0; 2169 isp_make_gone(isp, lp, chan, dbidx); 2170 } 2171 if (lp->is_initiator) { 2172 lp->is_initiator = 0; 2173 ac.contract_number = AC_CONTRACT_DEV_CHG; 2174 adc = (struct ac_device_changed *) ac.contract_data; 2175 adc->wwpn = lp->port_wwn; 2176 adc->port = lp->portid; 2177 adc->target = dbidx; 2178 adc->arrived = 0; 2179 xpt_async(AC_CONTRACT, fc->path, &ac); 2180 } 2181 } 2182 2183 isp_unfreeze_loopdown(isp, chan); 2184 fc->loop_down_time = 0; 2185 } 2186 2187 static void 2188 isp_kthread(void *arg) 2189 { 2190 struct isp_fc *fc = arg; 2191 ispsoftc_t *isp = fc->isp; 2192 int chan = fc - ISP_FC_PC(isp, 0); 2193 int slp = 0, d; 2194 int lb, lim; 2195 2196 ISP_LOCK(isp); 2197 while (isp->isp_osinfo.is_exiting == 0) { 2198 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, 2199 "Chan %d Checking FC state", chan); 2200 lb = isp_fc_runstate(isp, chan, 250000); 2201 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, 2202 "Chan %d FC got to %s state", chan, 2203 isp_fc_loop_statename(lb)); 2204 2205 /* 2206 * Our action is different
based upon whether we're supporting 2207 * Initiator mode or not. If we are, we might freeze the simq 2208 * when the loop is down and set all sorts of different delays to 2209 * check again. 2210 * 2211 * If not, we simply wait for the loop to come up. 2212 */ 2213 if (lb == LOOP_READY || lb < 0) { 2214 slp = 0; 2215 } else { 2216 /* 2217 * If we've never seen loop up and we've waited longer 2218 * than quickboot time, or we've seen loop up but we've 2219 * waited longer than loop_down_limit, give up and go 2220 * to sleep until loop comes up. 2221 */ 2222 if (fc->loop_seen_once == 0) 2223 lim = isp_quickboot_time; 2224 else 2225 lim = fc->loop_down_limit; 2226 d = time_uptime - fc->loop_down_time; 2227 if (d >= lim) 2228 slp = 0; 2229 else if (d < 10) 2230 slp = 1; 2231 else if (d < 30) 2232 slp = 5; 2233 else if (d < 60) 2234 slp = 10; 2235 else if (d < 120) 2236 slp = 20; 2237 else 2238 slp = 30; 2239 } 2240 2241 if (slp == 0) { 2242 if (lb == LOOP_READY) 2243 isp_loop_up(isp, chan); 2244 else 2245 isp_loop_dead(isp, chan); 2246 } 2247 2248 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, 2249 "Chan %d sleep for %d seconds", chan, slp); 2250 msleep(fc, &isp->isp_lock, PRIBIO, "ispf", slp * hz); 2251 } 2252 fc->num_threads -= 1; 2253 wakeup(&fc->num_threads); 2254 ISP_UNLOCK(isp); 2255 kthread_exit(); 2256 } 2257 2258 #ifdef ISP_TARGET_MODE 2259 static int 2260 isp_abort_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp) 2261 { 2262 uint8_t storage[QENTRY_LEN]; 2263 ct7_entry_t *cto = (ct7_entry_t *) storage; 2264 2265 ISP_MEMZERO(cto, sizeof (ct7_entry_t)); 2266 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; 2267 cto->ct_header.rqs_entry_count = 1; 2268 cto->ct_nphdl = atp->nphdl; 2269 cto->ct_vpidx = chan; 2270 cto->ct_iid_lo = atp->sid; 2271 cto->ct_iid_hi = atp->sid >> 16; 2272 cto->ct_rxid = atp->tag; 2273 cto->ct_flags = CT7_NOACK|CT7_TERMINATE; 2274 cto->ct_oxid = atp->oxid; 2275 return (isp_send_entry(isp, cto)); 2276 } 2277 2278 static void 2279 isp_abort_atio(ispsoftc_t *isp, union ccb *ccb) 2280 { 2281 atio_private_data_t *atp; 2282 union ccb *accb = ccb->cab.abort_ccb; 2283 struct ccb_hdr *sccb; 2284 tstate_t *tptr; 2285 2286 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb)); 2287 if (tptr != NULL) { 2288 /* Search for the ATIO among queued. */ 2289 SLIST_FOREACH(sccb, &tptr->atios, sim_links.sle) { 2290 if (sccb != &accb->ccb_h) 2291 continue; 2292 SLIST_REMOVE(&tptr->atios, sccb, ccb_hdr, sim_links.sle); 2293 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path, 2294 "Abort FREE ATIO\n"); 2295 accb->ccb_h.status = CAM_REQ_ABORTED; 2296 xpt_done(accb); 2297 ccb->ccb_h.status = CAM_REQ_CMP; 2298 return; 2299 } 2300 } 2301 2302 /* Search for the ATIO among running. */ 2303 atp = isp_find_atpd_ccb(isp, XS_CHANNEL(accb), accb->atio.tag_id, accb); 2304 if (atp != NULL) { 2305 if (isp_abort_atpd(isp, XS_CHANNEL(accb), atp)) { 2306 ccb->ccb_h.status = CAM_UA_ABORT; 2307 return; 2308 } 2309 isp_put_atpd(isp, XS_CHANNEL(accb), atp); 2310 } 2311 2312 ccb->ccb_h.status = CAM_REQ_CMP; 2313 } 2314 2315 static void 2316 isp_abort_inot(ispsoftc_t *isp, union ccb *ccb) 2317 { 2318 inot_private_data_t *ntp; 2319 union ccb *accb = ccb->cab.abort_ccb; 2320 struct ccb_hdr *sccb; 2321 tstate_t *tptr; 2322 2323 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb)); 2324 if (tptr != NULL) { 2325 /* Search for the INOT among queued.
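(i.e. those still sitting unused on the tptr->inots list).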
*/ 2326 SLIST_FOREACH(sccb, &tptr->inots, sim_links.sle) { 2327 if (sccb != &accb->ccb_h) 2328 continue; 2329 SLIST_REMOVE(&tptr->inots, sccb, ccb_hdr, sim_links.sle); 2330 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path, 2331 "Abort FREE INOT\n"); 2332 accb->ccb_h.status = CAM_REQ_ABORTED; 2333 xpt_done(accb); 2334 ccb->ccb_h.status = CAM_REQ_CMP; 2335 return; 2336 } 2337 } 2338 2339 /* Search for the INOT among running. */ 2340 ntp = isp_find_ntpd(isp, XS_CHANNEL(accb), accb->cin1.tag_id, accb->cin1.seq_id); 2341 if (ntp != NULL) { 2342 if (ntp->nt.nt_need_ack) { 2343 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, 2344 ntp->nt.nt_lreserved); 2345 } 2346 isp_put_ntpd(isp, XS_CHANNEL(accb), ntp); 2347 ccb->ccb_h.status = CAM_REQ_CMP; 2348 } else { 2349 ccb->ccb_h.status = CAM_UA_ABORT; 2350 return; 2351 } 2352 } 2353 #endif 2354 2355 static void 2356 isp_action(struct cam_sim *sim, union ccb *ccb) 2357 { 2358 int bus, tgt, error; 2359 ispsoftc_t *isp; 2360 fcparam *fcp; 2361 struct ccb_trans_settings *cts; 2362 sbintime_t ts; 2363 2364 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2365 2366 isp = (ispsoftc_t *)cam_sim_softc(sim); 2367 ISP_ASSERT_LOCKED(isp); 2368 bus = cam_sim_bus(sim); 2369 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2370 ISP_PCMD(ccb) = NULL; 2371 2372 switch (ccb->ccb_h.func_code) { 2373 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2374 /* 2375 * Do a couple of preliminary checks... 2376 */ 2377 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2378 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2379 ccb->ccb_h.status = CAM_REQ_INVALID; 2380 isp_done((struct ccb_scsiio *) ccb); 2381 break; 2382 } 2383 } 2384 #ifdef DIAGNOSTIC 2385 if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) { 2386 xpt_print(ccb->ccb_h.path, "invalid target\n"); 2387 ccb->ccb_h.status = CAM_PATH_INVALID; 2388 } 2389 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2390 xpt_done(ccb); 2391 break; 2392 } 2393 #endif 2394 ccb->csio.scsi_status = SCSI_STATUS_OK; 2395 if (isp_get_pcmd(isp, ccb)) { 2396 isp_prt(isp, ISP_LOGWARN, "out of PCMDs"); 2397 cam_freeze_devq(ccb->ccb_h.path); 2398 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); 2399 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2400 xpt_done(ccb); 2401 break; 2402 } 2403 error = isp_start((XS_T *) ccb); 2404 isp_rq_check_above(isp); 2405 switch (error) { 2406 case 0: 2407 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2408 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) 2409 break; 2410 /* Give firmware extra 10s to handle timeout. */ 2411 ts = SBT_1MS * ccb->ccb_h.timeout + 10 * SBT_1S; 2412 callout_reset_sbt(&PISP_PCMD(ccb)->wdog, ts, 0, 2413 isp_watchdog, ccb, 0); 2414 break; 2415 case CMD_RQLATER: 2416 isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later", 2417 XS_TGT(ccb), (uintmax_t)XS_LUN(ccb)); 2418 cam_freeze_devq(ccb->ccb_h.path); 2419 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); 2420 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2421 isp_free_pcmd(isp, ccb); 2422 xpt_done(ccb); 2423 break; 2424 case CMD_EAGAIN: 2425 isp_free_pcmd(isp, ccb); 2426 cam_freeze_devq(ccb->ccb_h.path); 2427 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0); 2428 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2429 xpt_done(ccb); 2430 break; 2431 case CMD_COMPLETE: 2432 isp_done((struct ccb_scsiio *) ccb); 2433 break; 2434 default: 2435 isp_prt(isp, ISP_LOGERR, "What's this? 
0x%x at %d in file %s", error, __LINE__, __FILE__); 2436 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2437 isp_free_pcmd(isp, ccb); 2438 xpt_done(ccb); 2439 } 2440 break; 2441 2442 #ifdef ISP_TARGET_MODE 2443 case XPT_EN_LUN: /* Enable/Disable LUN as a target */ 2444 if (ccb->cel.enable) { 2445 isp_enable_lun(isp, ccb); 2446 } else { 2447 isp_disable_lun(isp, ccb); 2448 } 2449 break; 2450 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ 2451 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2452 { 2453 tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2454 if (tptr == NULL) { 2455 const char *str; 2456 2457 if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) 2458 str = "XPT_IMMEDIATE_NOTIFY"; 2459 else 2460 str = "XPT_ACCEPT_TARGET_IO"; 2461 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, 2462 "%s: no state pointer found for %s\n", 2463 __func__, str); 2464 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2465 xpt_done(ccb); 2466 break; 2467 } 2468 2469 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2470 ccb->atio.tag_id = 0; 2471 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle); 2472 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, 2473 "Put FREE ATIO\n"); 2474 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { 2475 ccb->cin1.seq_id = ccb->cin1.tag_id = 0; 2476 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle); 2477 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, 2478 "Put FREE INOT\n"); 2479 } 2480 ccb->ccb_h.status = CAM_REQ_INPROG; 2481 break; 2482 } 2483 case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */ 2484 { 2485 atio_private_data_t *atp; 2486 inot_private_data_t *ntp; 2487 2488 /* 2489 * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb 2490 * XXX: matches that for the immediate notify, we have to *search* for the notify structure 2491 */ 2492 /* 2493 * All the relevant path information is in the associated immediate notify 2494 */ 2495 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); 2496 ntp = isp_find_ntpd(isp, XS_CHANNEL(ccb), ccb->cna2.tag_id, ccb->cna2.seq_id); 2497 if (ntp == NULL) { 2498 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__, 2499 ccb->cna2.tag_id, ccb->cna2.seq_id); 2500 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2501 xpt_done(ccb); 2502 break; 2503 } 2504 2505 /* 2506 * Target should abort all affected CCBs before ACK-ing INOT, 2507 * but if/since it doesn't, add this hack to allow tag reuse. 2508 */ 2509 uint32_t rsp = (ccb->ccb_h.flags & CAM_SEND_STATUS) ? 
ccb->cna2.arg : 0; 2510 if (ntp->nt.nt_ncode == NT_ABORT_TASK && (rsp & 0xff) == 0 && 2511 (atp = isp_find_atpd(isp, XS_CHANNEL(ccb), ccb->cna2.seq_id)) != NULL) { 2512 if (isp_abort_atpd(isp, XS_CHANNEL(ccb), atp) == 0) 2513 isp_put_atpd(isp, XS_CHANNEL(ccb), atp); 2514 } 2515 2516 if (isp_handle_platform_target_notify_ack(isp, &ntp->nt, rsp)) { 2517 cam_freeze_devq(ccb->ccb_h.path); 2518 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0); 2519 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2520 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2521 break; 2522 } 2523 isp_put_ntpd(isp, XS_CHANNEL(ccb), ntp); 2524 ccb->ccb_h.status = CAM_REQ_CMP; 2525 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); 2526 xpt_done(ccb); 2527 break; 2528 } 2529 case XPT_CONT_TARGET_IO: 2530 isp_target_start_ctio(isp, ccb, FROM_CAM); 2531 isp_rq_check_above(isp); 2532 break; 2533 #endif 2534 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2535 tgt = ccb->ccb_h.target_id; 2536 tgt |= (bus << 16); 2537 2538 error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt); 2539 if (error) { 2540 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2541 } else { 2542 /* 2543 * If we have a FC device, reset the Command 2544 * Reference Number, because the target will expect 2545 * that we re-start the CRN at 1 after a reset. 2546 */ 2547 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); 2548 2549 ccb->ccb_h.status = CAM_REQ_CMP; 2550 } 2551 xpt_done(ccb); 2552 break; 2553 case XPT_ABORT: /* Abort the specified CCB */ 2554 { 2555 union ccb *accb = ccb->cab.abort_ccb; 2556 switch (accb->ccb_h.func_code) { 2557 #ifdef ISP_TARGET_MODE 2558 case XPT_ACCEPT_TARGET_IO: 2559 isp_abort_atio(isp, ccb); 2560 break; 2561 case XPT_IMMEDIATE_NOTIFY: 2562 isp_abort_inot(isp, ccb); 2563 break; 2564 #endif 2565 case XPT_SCSI_IO: 2566 error = isp_control(isp, ISPCTL_ABORT_CMD, accb); 2567 if (error) { 2568 ccb->ccb_h.status = CAM_UA_ABORT; 2569 } else { 2570 ccb->ccb_h.status = CAM_REQ_CMP; 2571 } 2572 break; 2573 default: 2574 ccb->ccb_h.status = CAM_REQ_INVALID; 2575 break; 2576 } 2577 /* 2578 * This is not a queued CCB, so the caller expects it to be 2579 * complete when control is returned. 
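* That is why the abort paths above only set ccb_h.status on this CCB and never call xpt_done() for it.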
2580 */ 2581 break; 2582 } 2583 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2584 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2585 cts = &ccb->cts; 2586 if (!IS_CURRENT_SETTINGS(cts)) { 2587 ccb->ccb_h.status = CAM_REQ_INVALID; 2588 xpt_done(ccb); 2589 break; 2590 } 2591 ccb->ccb_h.status = CAM_REQ_CMP; 2592 xpt_done(ccb); 2593 break; 2594 case XPT_GET_TRAN_SETTINGS: 2595 { 2596 struct ccb_trans_settings_scsi *scsi; 2597 struct ccb_trans_settings_fc *fc; 2598 2599 cts = &ccb->cts; 2600 scsi = &cts->proto_specific.scsi; 2601 fc = &cts->xport_specific.fc; 2602 tgt = cts->ccb_h.target_id; 2603 fcp = FCPARAM(isp, bus); 2604 2605 cts->protocol = PROTO_SCSI; 2606 cts->protocol_version = SCSI_REV_2; 2607 cts->transport = XPORT_FC; 2608 cts->transport_version = 0; 2609 2610 scsi->valid = CTS_SCSI_VALID_TQ; 2611 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 2612 fc->valid = CTS_FC_VALID_SPEED; 2613 fc->bitrate = fcp->isp_gbspeed * 100000; 2614 if (tgt < MAX_FC_TARG) { 2615 fcportdb_t *lp = &fcp->portdb[tgt]; 2616 fc->wwnn = lp->node_wwn; 2617 fc->wwpn = lp->port_wwn; 2618 fc->port = lp->portid; 2619 fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2620 } 2621 ccb->ccb_h.status = CAM_REQ_CMP; 2622 xpt_done(ccb); 2623 break; 2624 } 2625 case XPT_CALC_GEOMETRY: 2626 cam_calc_geometry(&ccb->ccg, 1); 2627 xpt_done(ccb); 2628 break; 2629 2630 case XPT_RESET_BUS: /* Reset the specified bus */ 2631 error = isp_control(isp, ISPCTL_RESET_BUS, bus); 2632 if (error) { 2633 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2634 xpt_done(ccb); 2635 break; 2636 } 2637 if (bootverbose) { 2638 xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus); 2639 } 2640 xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0); 2641 ccb->ccb_h.status = CAM_REQ_CMP; 2642 xpt_done(ccb); 2643 break; 2644 2645 case XPT_TERM_IO: /* Terminate the I/O process */ 2646 ccb->ccb_h.status = CAM_REQ_INVALID; 2647 xpt_done(ccb); 2648 break; 2649 2650 case XPT_SET_SIM_KNOB: /* Set SIM knobs */ 2651 { 2652 struct ccb_sim_knob *kp = &ccb->knob; 2653 fcparam *fcp = FCPARAM(isp, bus); 2654 2655 if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) { 2656 fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn; 2657 fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn; 2658 isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn); 2659 } 2660 ccb->ccb_h.status = CAM_REQ_CMP; 2661 if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) { 2662 int rchange = 0; 2663 int newrole = 0; 2664 2665 switch (kp->xport_specific.fc.role) { 2666 case KNOB_ROLE_NONE: 2667 if (fcp->role != ISP_ROLE_NONE) { 2668 rchange = 1; 2669 newrole = ISP_ROLE_NONE; 2670 } 2671 break; 2672 case KNOB_ROLE_TARGET: 2673 if (fcp->role != ISP_ROLE_TARGET) { 2674 rchange = 1; 2675 newrole = ISP_ROLE_TARGET; 2676 } 2677 break; 2678 case KNOB_ROLE_INITIATOR: 2679 if (fcp->role != ISP_ROLE_INITIATOR) { 2680 rchange = 1; 2681 newrole = ISP_ROLE_INITIATOR; 2682 } 2683 break; 2684 case KNOB_ROLE_BOTH: 2685 if (fcp->role != ISP_ROLE_BOTH) { 2686 rchange = 1; 2687 newrole = ISP_ROLE_BOTH; 2688 } 2689 break; 2690 } 2691 if (rchange) { 2692 ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role on from %d to %d\n", fcp->role, newrole); 2693 if (isp_control(isp, ISPCTL_CHANGE_ROLE, 2694 bus, newrole) != 0) { 2695 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2696 xpt_done(ccb); 2697 break; 2698 } 2699 } 2700 } 2701 xpt_done(ccb); 2702 break; 2703 } 2704 case 
XPT_GET_SIM_KNOB_OLD: /* Get SIM knobs -- compat value */ 2705 case XPT_GET_SIM_KNOB: /* Get SIM knobs */ 2706 { 2707 struct ccb_sim_knob *kp = &ccb->knob; 2708 fcparam *fcp = FCPARAM(isp, bus); 2709 2710 kp->xport_specific.fc.wwnn = fcp->isp_wwnn; 2711 kp->xport_specific.fc.wwpn = fcp->isp_wwpn; 2712 switch (fcp->role) { 2713 case ISP_ROLE_NONE: 2714 kp->xport_specific.fc.role = KNOB_ROLE_NONE; 2715 break; 2716 case ISP_ROLE_TARGET: 2717 kp->xport_specific.fc.role = KNOB_ROLE_TARGET; 2718 break; 2719 case ISP_ROLE_INITIATOR: 2720 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; 2721 break; 2722 case ISP_ROLE_BOTH: 2723 kp->xport_specific.fc.role = KNOB_ROLE_BOTH; 2724 break; 2725 } 2726 kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; 2727 ccb->ccb_h.status = CAM_REQ_CMP; 2728 xpt_done(ccb); 2729 break; 2730 } 2731 case XPT_PATH_INQ: /* Path routing inquiry */ 2732 { 2733 struct ccb_pathinq *cpi = &ccb->cpi; 2734 2735 cpi->version_num = 1; 2736 #ifdef ISP_TARGET_MODE 2737 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2738 #else 2739 cpi->target_sprt = 0; 2740 #endif 2741 cpi->hba_eng_cnt = 0; 2742 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2743 cpi->max_lun = 255; 2744 cpi->bus_id = cam_sim_bus(sim); 2745 cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE; 2746 2747 fcp = FCPARAM(isp, bus); 2748 2749 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; 2750 cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN; 2751 2752 /* 2753 * Because our loop ID can shift from time to time, 2754 * make our initiator ID out of range of our bus. 2755 */ 2756 cpi->initiator_id = cpi->max_target + 1; 2757 2758 /* 2759 * Set base transfer capabilities for Fibre Channel, for this HBA. 2760 */ 2761 if (IS_25XX(isp)) 2762 cpi->base_transfer_speed = 8000000; 2763 else 2764 cpi->base_transfer_speed = 4000000; 2765 cpi->hba_inquiry = PI_TAG_ABLE; 2766 cpi->transport = XPORT_FC; 2767 cpi->transport_version = 0; 2768 cpi->xport_specific.fc.wwnn = fcp->isp_wwnn; 2769 cpi->xport_specific.fc.wwpn = fcp->isp_wwpn; 2770 cpi->xport_specific.fc.port = fcp->isp_portid; 2771 cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000; 2772 cpi->protocol = PROTO_SCSI; 2773 cpi->protocol_version = SCSI_REV_2; 2774 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2775 strlcpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2776 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2777 cpi->unit_number = cam_sim_unit(sim); 2778 cpi->ccb_h.status = CAM_REQ_CMP; 2779 xpt_done(ccb); 2780 break; 2781 } 2782 default: 2783 ccb->ccb_h.status = CAM_REQ_INVALID; 2784 xpt_done(ccb); 2785 break; 2786 } 2787 } 2788 2789 void 2790 isp_done(XS_T *sccb) 2791 { 2792 ispsoftc_t *isp = XS_ISP(sccb); 2793 uint32_t status; 2794 2795 if (XS_NOERR(sccb)) 2796 XS_SETERR(sccb, CAM_REQ_CMP); 2797 2798 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) { 2799 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2800 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2801 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2802 } else { 2803 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2804 } 2805 } 2806 2807 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2808 status = sccb->ccb_h.status & CAM_STATUS_MASK; 2809 if (status != CAM_REQ_CMP && 2810 (sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2811 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2812 xpt_freeze_devq(sccb->ccb_h.path, 1); 2813 } 2814 2815 if (ISP_PCMD(sccb)) { 2816 if (callout_active(&PISP_PCMD(sccb)->wdog)) 2817 
callout_stop(&PISP_PCMD(sccb)->wdog); 2818 isp_free_pcmd(isp, (union ccb *) sccb); 2819 } 2820 isp_rq_check_below(isp); 2821 xpt_done((union ccb *) sccb); 2822 } 2823 2824 void 2825 isp_async(ispsoftc_t *isp, ispasync_t cmd, ...) 2826 { 2827 int bus; 2828 static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s"; 2829 char buf[64]; 2830 char *msg = NULL; 2831 target_id_t tgt = 0; 2832 fcportdb_t *lp; 2833 struct isp_fc *fc; 2834 struct ac_contract ac; 2835 struct ac_device_changed *adc; 2836 va_list ap; 2837 2838 switch (cmd) { 2839 case ISPASYNC_LOOP_RESET: 2840 { 2841 uint16_t lipp; 2842 fcparam *fcp; 2843 va_start(ap, cmd); 2844 bus = va_arg(ap, int); 2845 va_end(ap); 2846 2847 lipp = ISP_READ(isp, OUTMAILBOX1); 2848 fcp = FCPARAM(isp, bus); 2849 2850 isp_prt(isp, ISP_LOGINFO, "Chan %d LOOP Reset, LIP primitive %x", bus, lipp); 2851 /* 2852 * Per FCP-4, a Reset LIP should result in a CRN reset. Other 2853 * LIPs and loop up/down events should never reset the CRN. For 2854 * an as of yet unknown reason, 24xx series cards (and 2855 * potentially others) can interrupt with a LIP Reset status 2856 * when no LIP reset came down the wire. Additionally, the LIP 2857 * primitive accompanying this status would not be a valid LIP 2858 * Reset primitive, but some variation of an invalid AL_PA 2859 * LIP. As a result, we have to verify the AL_PD in the LIP 2860 * addresses our port before blindly resetting. 2861 */ 2862 if (FCP_IS_DEST_ALPD(fcp, (lipp & 0x00FF))) 2863 isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0); 2864 isp_loop_changed(isp, bus); 2865 break; 2866 } 2867 case ISPASYNC_LIP: 2868 if (msg == NULL) 2869 msg = "LIP Received"; 2870 /* FALLTHROUGH */ 2871 case ISPASYNC_LOOP_DOWN: 2872 if (msg == NULL) 2873 msg = "LOOP Down"; 2874 /* FALLTHROUGH */ 2875 case ISPASYNC_LOOP_UP: 2876 if (msg == NULL) 2877 msg = "LOOP Up"; 2878 va_start(ap, cmd); 2879 bus = va_arg(ap, int); 2880 va_end(ap); 2881 isp_loop_changed(isp, bus); 2882 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg); 2883 break; 2884 case ISPASYNC_DEV_ARRIVED: 2885 va_start(ap, cmd); 2886 bus = va_arg(ap, int); 2887 lp = va_arg(ap, fcportdb_t *); 2888 va_end(ap); 2889 fc = ISP_FC_PC(isp, bus); 2890 tgt = FC_PORTDB_TGT(isp, bus, lp); 2891 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); 2892 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived"); 2893 if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && 2894 (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) { 2895 lp->is_target = 1; 2896 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); 2897 isp_make_here(isp, lp, bus, tgt); 2898 } 2899 if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && 2900 (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) { 2901 lp->is_initiator = 1; 2902 ac.contract_number = AC_CONTRACT_DEV_CHG; 2903 adc = (struct ac_device_changed *) ac.contract_data; 2904 adc->wwpn = lp->port_wwn; 2905 adc->port = lp->portid; 2906 adc->target = tgt; 2907 adc->arrived = 1; 2908 xpt_async(AC_CONTRACT, fc->path, &ac); 2909 } 2910 break; 2911 case ISPASYNC_DEV_CHANGED: 2912 case ISPASYNC_DEV_STAYED: 2913 { 2914 int crn_reset_done; 2915 2916 crn_reset_done = 0; 2917 va_start(ap, cmd); 2918 bus = va_arg(ap, int); 2919 lp = va_arg(ap, fcportdb_t *); 2920 va_end(ap); 2921 fc = ISP_FC_PC(isp, bus); 2922 tgt = FC_PORTDB_TGT(isp, bus, lp); 2923 isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3); 2924 if (cmd == ISPASYNC_DEV_CHANGED) 2925 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, 
lp->handle, buf, "changed"); 2926 else 2927 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed"); 2928 2929 if (lp->is_target != 2930 ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && 2931 (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) { 2932 lp->is_target = !lp->is_target; 2933 if (lp->is_target) { 2934 if (cmd == ISPASYNC_DEV_CHANGED) { 2935 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); 2936 crn_reset_done = 1; 2937 } 2938 isp_make_here(isp, lp, bus, tgt); 2939 } else { 2940 isp_make_gone(isp, lp, bus, tgt); 2941 if (cmd == ISPASYNC_DEV_CHANGED) { 2942 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); 2943 crn_reset_done = 1; 2944 } 2945 } 2946 } 2947 if (lp->is_initiator != 2948 ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && 2949 (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) { 2950 lp->is_initiator = !lp->is_initiator; 2951 ac.contract_number = AC_CONTRACT_DEV_CHG; 2952 adc = (struct ac_device_changed *) ac.contract_data; 2953 adc->wwpn = lp->port_wwn; 2954 adc->port = lp->portid; 2955 adc->target = tgt; 2956 adc->arrived = lp->is_initiator; 2957 xpt_async(AC_CONTRACT, fc->path, &ac); 2958 } 2959 2960 if ((cmd == ISPASYNC_DEV_CHANGED) && 2961 (crn_reset_done == 0)) 2962 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); 2963 2964 break; 2965 } 2966 case ISPASYNC_DEV_GONE: 2967 va_start(ap, cmd); 2968 bus = va_arg(ap, int); 2969 lp = va_arg(ap, fcportdb_t *); 2970 va_end(ap); 2971 fc = ISP_FC_PC(isp, bus); 2972 tgt = FC_PORTDB_TGT(isp, bus, lp); 2973 /* 2974 * If this has a virtual target or initiator set the isp_gdt 2975 * timer running on it to delay its departure. 2976 */ 2977 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); 2978 if (lp->is_target || lp->is_initiator) { 2979 lp->state = FC_PORTDB_STATE_ZOMBIE; 2980 lp->gone_timer = fc->gone_device_time; 2981 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie"); 2982 if (fc->ready && !callout_active(&fc->gdt)) { 2983 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime); 2984 callout_reset(&fc->gdt, hz, isp_gdt, fc); 2985 } 2986 break; 2987 } 2988 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone"); 2989 break; 2990 case ISPASYNC_CHANGE_NOTIFY: 2991 { 2992 char *msg; 2993 int evt, nphdl, nlstate, portid, reason; 2994 2995 va_start(ap, cmd); 2996 bus = va_arg(ap, int); 2997 evt = va_arg(ap, int); 2998 if (evt == ISPASYNC_CHANGE_PDB) { 2999 nphdl = va_arg(ap, int); 3000 nlstate = va_arg(ap, int); 3001 reason = va_arg(ap, int); 3002 } else if (evt == ISPASYNC_CHANGE_SNS) { 3003 portid = va_arg(ap, int); 3004 } else { 3005 nphdl = NIL_HANDLE; 3006 nlstate = reason = 0; 3007 } 3008 va_end(ap); 3009 3010 if (evt == ISPASYNC_CHANGE_PDB) { 3011 int tgt_set = 0; 3012 msg = "Port Database Changed"; 3013 isp_prt(isp, ISP_LOGINFO, 3014 "Chan %d %s (nphdl 0x%x state 0x%x reason 0x%x)", 3015 bus, msg, nphdl, nlstate, reason); 3016 /* 3017 * Port database syncs are not sufficient for 3018 * determining that logins or logouts are done on the 3019 * loop, but this information is directly available from 3020 * the reason code from the incoming mbox. 
We must reset 3021 * the fcp crn on these events according to FCP-4 3022 */ 3023 switch (reason) { 3024 case PDB24XX_AE_IMPL_LOGO_1: 3025 case PDB24XX_AE_IMPL_LOGO_2: 3026 case PDB24XX_AE_IMPL_LOGO_3: 3027 case PDB24XX_AE_PLOGI_RCVD: 3028 case PDB24XX_AE_PRLI_RCVD: 3029 case PDB24XX_AE_PRLO_RCVD: 3030 case PDB24XX_AE_LOGO_RCVD: 3031 case PDB24XX_AE_PLOGI_DONE: 3032 case PDB24XX_AE_PRLI_DONE: 3033 /* 3034 * If the event is not global, twiddle tgt and 3035 * tgt_set to nominate only the target 3036 * associated with the nphdl. 3037 */ 3038 if (nphdl != PDB24XX_AE_GLOBAL) { 3039 /* Break if we don't yet have the pdb */ 3040 if (!isp_find_pdb_by_handle(isp, bus, nphdl, &lp)) 3041 break; 3042 tgt = FC_PORTDB_TGT(isp, bus, lp); 3043 tgt_set = 1; 3044 } 3045 isp_fcp_reset_crn(isp, bus, tgt, tgt_set); 3046 break; 3047 default: 3048 break; /* NOP */ 3049 } 3050 } else if (evt == ISPASYNC_CHANGE_SNS) { 3051 msg = "Name Server Database Changed"; 3052 isp_prt(isp, ISP_LOGINFO, "Chan %d %s (PortID 0x%06x)", 3053 bus, msg, portid); 3054 } else { 3055 msg = "Other Change Notify"; 3056 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg); 3057 } 3058 isp_loop_changed(isp, bus); 3059 break; 3060 } 3061 #ifdef ISP_TARGET_MODE 3062 case ISPASYNC_TARGET_NOTIFY: 3063 { 3064 isp_notify_t *notify; 3065 va_start(ap, cmd); 3066 notify = va_arg(ap, isp_notify_t *); 3067 va_end(ap); 3068 switch (notify->nt_ncode) { 3069 case NT_ABORT_TASK: 3070 case NT_ABORT_TASK_SET: 3071 case NT_CLEAR_ACA: 3072 case NT_CLEAR_TASK_SET: 3073 case NT_LUN_RESET: 3074 case NT_TARGET_RESET: 3075 case NT_QUERY_TASK_SET: 3076 case NT_QUERY_ASYNC_EVENT: 3077 /* 3078 * These are task management functions. 3079 */ 3080 isp_handle_platform_target_tmf(isp, notify); 3081 break; 3082 case NT_LIP_RESET: 3083 case NT_LINK_UP: 3084 case NT_LINK_DOWN: 3085 case NT_HBA_RESET: 3086 /* 3087 * No action need be taken here. 
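* (loop state changes are reported separately via the ISPASYNC_LIP/LOOP_UP/LOOP_DOWN cases handled earlier in this function)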
3088 */ 3089 break; 3090 case NT_SRR: 3091 isp_handle_platform_srr(isp, notify); 3092 break; 3093 default: 3094 isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode); 3095 isp_handle_platform_target_notify_ack(isp, notify, 0); 3096 break; 3097 } 3098 break; 3099 } 3100 case ISPASYNC_TARGET_NOTIFY_ACK: 3101 { 3102 void *inot; 3103 va_start(ap, cmd); 3104 inot = va_arg(ap, void *); 3105 va_end(ap); 3106 if (isp_notify_ack(isp, inot)) { 3107 isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT); 3108 if (tp) { 3109 tp->isp = isp; 3110 memcpy(tp->data, inot, sizeof (tp->data)); 3111 tp->not = tp->data; 3112 callout_init_mtx(&tp->timer, &isp->isp_lock, 0); 3113 callout_reset(&tp->timer, 5, 3114 isp_refire_notify_ack, tp); 3115 } else { 3116 isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire"); 3117 } 3118 } 3119 break; 3120 } 3121 case ISPASYNC_TARGET_ACTION: 3122 { 3123 isphdr_t *hp; 3124 3125 va_start(ap, cmd); 3126 hp = va_arg(ap, isphdr_t *); 3127 va_end(ap); 3128 switch (hp->rqs_entry_type) { 3129 case RQSTYPE_ATIO: 3130 isp_handle_platform_atio7(isp, (at7_entry_t *)hp); 3131 break; 3132 case RQSTYPE_CTIO7: 3133 isp_handle_platform_ctio(isp, (ct7_entry_t *)hp); 3134 break; 3135 default: 3136 isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x", 3137 __func__, hp->rqs_entry_type); 3138 break; 3139 } 3140 break; 3141 } 3142 #endif 3143 case ISPASYNC_FW_CRASH: 3144 { 3145 uint16_t mbox1; 3146 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3147 isp_prt(isp, ISP_LOGERR, "Internal Firmware Error @ RISC Address 0x%x", mbox1); 3148 #if 0 3149 isp_reinit(isp, 1); 3150 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3151 #endif 3152 break; 3153 } 3154 default: 3155 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3156 break; 3157 } 3158 } 3159 3160 uint64_t 3161 isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn) 3162 { 3163 uint64_t seed; 3164 struct isp_fc *fc = ISP_FC_PC(isp, chan); 3165 3166 /* First try to use explicitly configured WWNs. */ 3167 seed = iswwnn ? fc->def_wwnn : fc->def_wwpn; 3168 if (seed) 3169 return (seed); 3170 3171 /* Otherwise try to use WWNs from NVRAM. */ 3172 if (isactive) { 3173 seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram : 3174 FCPARAM(isp, chan)->isp_wwpn_nvram; 3175 if (seed) 3176 return (seed); 3177 } 3178 3179 /* If still no WWNs, try to steal them from the first channel. */ 3180 if (chan > 0) { 3181 seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn : 3182 ISP_FC_PC(isp, 0)->def_wwpn; 3183 if (seed == 0) { 3184 seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram : 3185 FCPARAM(isp, 0)->isp_wwpn_nvram; 3186 } 3187 } 3188 3189 /* If still nothing -- improvise. */ 3190 if (seed == 0) { 3191 seed = 0x400000007F000000ull + device_get_unit(isp->isp_dev); 3192 if (!iswwnn) 3193 seed ^= 0x0100000000000000ULL; 3194 } 3195 3196 /* For additional channels we have to improvise even more. */ 3197 if (!iswwnn && chan > 0) { 3198 /* 3199 * We'll stick our channel number plus one first into bits 3200 * 56..59 and thence into bits 52..55, which allows for 8 bits 3201 * of channel, which is enough for our maximum of 255 channels. 3202 */ 3203 seed ^= 0x0100000000000000ULL; 3204 seed ^= ((uint64_t) (chan + 1) & 0xf) << 56; 3205 seed ^= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52; 3206 } 3207 return (seed); 3208 } 3209 3210 void 3211 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
3212 { 3213 int loc; 3214 char lbuf[200]; 3215 va_list ap; 3216 3217 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3218 return; 3219 } 3220 snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev)); 3221 loc = strlen(lbuf); 3222 va_start(ap, fmt); 3223 vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap); 3224 va_end(ap); 3225 printf("%s\n", lbuf); 3226 } 3227 3228 void 3229 isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...) 3230 { 3231 va_list ap; 3232 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3233 return; 3234 } 3235 xpt_print_path(xs->ccb_h.path); 3236 va_start(ap, fmt); 3237 vprintf(fmt, ap); 3238 va_end(ap); 3239 printf("\n"); 3240 } 3241 3242 uint64_t 3243 isp_nanotime_sub(struct timespec *b, struct timespec *a) 3244 { 3245 uint64_t elapsed; 3246 struct timespec x; 3247 3248 timespecsub(b, a, &x); 3249 elapsed = GET_NANOSEC(&x); 3250 if (elapsed == 0) 3251 elapsed++; 3252 return (elapsed); 3253 } 3254 3255 int 3256 isp_fc_scratch_acquire(ispsoftc_t *isp, int chan) 3257 { 3258 struct isp_fc *fc = ISP_FC_PC(isp, chan); 3259 3260 if (fc->fcbsy) 3261 return (-1); 3262 fc->fcbsy = 1; 3263 return (0); 3264 } 3265 3266 void 3267 isp_platform_intr(void *arg) 3268 { 3269 ispsoftc_t *isp = arg; 3270 3271 ISP_LOCK(isp); 3272 ISP_RUN_ISR(isp); 3273 ISP_UNLOCK(isp); 3274 } 3275 3276 void 3277 isp_platform_intr_resp(void *arg) 3278 { 3279 ispsoftc_t *isp = arg; 3280 3281 ISP_LOCK(isp); 3282 isp_intr_respq(isp); 3283 ISP_UNLOCK(isp); 3284 3285 /* We have handshake enabled, so explicitly complete interrupt */ 3286 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 3287 } 3288 3289 void 3290 isp_platform_intr_atio(void *arg) 3291 { 3292 ispsoftc_t *isp = arg; 3293 3294 ISP_LOCK(isp); 3295 #ifdef ISP_TARGET_MODE 3296 isp_intr_atioq(isp); 3297 #endif 3298 ISP_UNLOCK(isp); 3299 3300 /* We have handshake enabled, so explicitly complete interrupt */ 3301 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 3302 } 3303 3304 typedef struct { 3305 ispsoftc_t *isp; 3306 struct ccb_scsiio *csio; 3307 void *qe; 3308 int error; 3309 } mush_t; 3310 3311 static void 3312 isp_dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 3313 { 3314 mush_t *mp = (mush_t *) arg; 3315 ispsoftc_t *isp= mp->isp; 3316 struct ccb_scsiio *csio = mp->csio; 3317 bus_dmasync_op_t op; 3318 3319 if (error) { 3320 mp->error = error; 3321 return; 3322 } 3323 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^ 3324 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) 3325 op = BUS_DMASYNC_PREREAD; 3326 else 3327 op = BUS_DMASYNC_PREWRITE; 3328 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op); 3329 3330 mp->error = ISP_SEND_CMD(isp, mp->qe, dm_segs, nseg); 3331 if (mp->error) 3332 isp_dmafree(isp, csio); 3333 } 3334 3335 int 3336 isp_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *qe) 3337 { 3338 mush_t mp; 3339 int error; 3340 3341 if (XS_XFRLEN(csio)) { 3342 mp.isp = isp; 3343 mp.csio = csio; 3344 mp.qe = qe; 3345 mp.error = 0; 3346 error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, 3347 (union ccb *)csio, isp_dma2, &mp, BUS_DMA_NOWAIT); 3348 if (error == 0) 3349 error = mp.error; 3350 } else { 3351 error = ISP_SEND_CMD(isp, qe, NULL, 0); 3352 } 3353 switch (error) { 3354 case 0: 3355 case CMD_COMPLETE: 3356 case CMD_EAGAIN: 3357 case CMD_RQLATER: 3358 break; 3359 case ENOMEM: 3360 error = CMD_EAGAIN; 3361 break; 3362 case EINVAL: 3363 case EFBIG: 3364 csio->ccb_h.status = CAM_REQ_INVALID; 3365 
error = CMD_COMPLETE; 3366 break; 3367 default: 3368 csio->ccb_h.status = CAM_UNREC_HBA_ERROR; 3369 error = CMD_COMPLETE; 3370 break; 3371 } 3372 return (error); 3373 } 3374 3375 void 3376 isp_dmafree(ispsoftc_t *isp, struct ccb_scsiio *csio) 3377 { 3378 bus_dmasync_op_t op; 3379 3380 if (XS_XFRLEN(csio) == 0) 3381 return; 3382 3383 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^ 3384 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) 3385 op = BUS_DMASYNC_POSTREAD; 3386 else 3387 op = BUS_DMASYNC_POSTWRITE; 3388 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op); 3389 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); 3390 } 3391 3392 /* 3393 * Reset the command reference number for all LUNs on a specific target 3394 * (needed when a target arrives again) or for all targets on a port 3395 * (needed for events like a LIP). 3396 */ 3397 void 3398 isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set) 3399 { 3400 struct isp_fc *fc = ISP_FC_PC(isp, chan); 3401 struct isp_nexus *nxp; 3402 int i; 3403 3404 if (tgt_set == 0) 3405 isp_prt(isp, ISP_LOGDEBUG0, 3406 "Chan %d resetting CRN on all targets", chan); 3407 else 3408 isp_prt(isp, ISP_LOGDEBUG0, 3409 "Chan %d resetting CRN on target %u", chan, tgt); 3410 3411 for (i = 0; i < NEXUS_HASH_WIDTH; i++) { 3412 for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) { 3413 if (tgt_set == 0 || tgt == nxp->tgt) 3414 nxp->crnseed = 0; 3415 } 3416 } 3417 } 3418 3419 int 3420 isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd) 3421 { 3422 lun_id_t lun; 3423 uint32_t chan, tgt; 3424 struct isp_fc *fc; 3425 struct isp_nexus *nxp; 3426 int idx; 3427 3428 chan = XS_CHANNEL(cmd); 3429 tgt = XS_TGT(cmd); 3430 lun = XS_LUN(cmd); 3431 fc = ISP_FC_PC(isp, chan); 3432 idx = NEXUS_HASH(tgt, lun); 3433 nxp = fc->nexus_hash[idx]; 3434 3435 while (nxp) { 3436 if (nxp->tgt == tgt && nxp->lun == lun) 3437 break; 3438 nxp = nxp->next; 3439 } 3440 if (nxp == NULL) { 3441 nxp = fc->nexus_free_list; 3442 if (nxp == NULL) { 3443 nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT); 3444 if (nxp == NULL) { 3445 return (-1); 3446 } 3447 } else { 3448 fc->nexus_free_list = nxp->next; 3449 } 3450 nxp->tgt = tgt; 3451 nxp->lun = lun; 3452 nxp->next = fc->nexus_hash[idx]; 3453 fc->nexus_hash[idx] = nxp; 3454 } 3455 if (nxp->crnseed == 0) 3456 nxp->crnseed = 1; 3457 *crnp = nxp->crnseed++; 3458 return (0); 3459 } 3460 3461 /* 3462 * We enter with the lock held 3463 */ 3464 void 3465 isp_timer(void *arg) 3466 { 3467 ispsoftc_t *isp = arg; 3468 #ifdef ISP_TARGET_MODE 3469 isp_tmcmd_restart(isp); 3470 #endif 3471 callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); 3472 } 3473 3474 #ifdef ISP_TARGET_MODE 3475 isp_ecmd_t * 3476 isp_get_ecmd(ispsoftc_t *isp) 3477 { 3478 isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free; 3479 if (ecmd) { 3480 isp->isp_osinfo.ecmd_free = ecmd->next; 3481 } 3482 return (ecmd); 3483 } 3484 3485 void 3486 isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd) 3487 { 3488 ecmd->next = isp->isp_osinfo.ecmd_free; 3489 isp->isp_osinfo.ecmd_free = ecmd; 3490 } 3491 #endif 3492