1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2009-2020 Alexander Motin <mav@FreeBSD.org> 5 * Copyright (c) 1997-2009 by Matthew Jacob 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice immediately at the beginning of the file, without modification, 13 * this list of conditions, and the following disclaimer. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 /* 31 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 32 */ 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <dev/isp/isp_freebsd.h> 37 #include <sys/unistd.h> 38 #include <sys/kthread.h> 39 #include <sys/conf.h> 40 #include <sys/module.h> 41 #include <sys/ioccom.h> 42 #include <dev/isp/isp_ioctl.h> 43 #include <sys/devicestat.h> 44 #include <cam/cam_periph.h> 45 #include <cam/cam_xpt_periph.h> 46 47 MODULE_VERSION(isp, 1); 48 MODULE_DEPEND(isp, cam, 1, 1, 1); 49 int isp_announced = 0; 50 int isp_loop_down_limit = 60; /* default loop down limit */ 51 int isp_quickboot_time = 7; /* don't wait more than N secs for loop up */ 52 int isp_gone_device_time = 30; /* grace time before reporting device lost */ 53 static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s"; 54 55 static void isp_freeze_loopdown(ispsoftc_t *, int); 56 static void isp_loop_changed(ispsoftc_t *isp, int chan); 57 static void isp_rq_check_above(ispsoftc_t *); 58 static void isp_rq_check_below(ispsoftc_t *); 59 static d_ioctl_t ispioctl; 60 static void isp_poll(struct cam_sim *); 61 static callout_func_t isp_watchdog; 62 static callout_func_t isp_gdt; 63 static task_fn_t isp_gdt_task; 64 static void isp_kthread(void *); 65 static void isp_action(struct cam_sim *, union ccb *); 66 static int isp_timer_count; 67 static void isp_timer(void *); 68 69 static struct cdevsw isp_cdevsw = { 70 .d_version = D_VERSION, 71 .d_ioctl = ispioctl, 72 .d_name = "isp", 73 }; 74 75 static int 76 isp_role_sysctl(SYSCTL_HANDLER_ARGS) 77 { 78 ispsoftc_t *isp = (ispsoftc_t *)arg1; 79 int chan = arg2; 80 int error, old, value; 81 82 value = FCPARAM(isp, chan)->role; 83 84 error = sysctl_handle_int(oidp, &value, 0, req); 85 if ((error != 0) || (req->newptr == NULL)) 86 return (error); 87 88 if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH) 89 return (EINVAL); 90 91 ISP_LOCK(isp); 92 old = FCPARAM(isp, chan)->role; 93 94 /* We don't allow target mode switch from here. 
*/ 95 value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR); 96 97 /* If nothing has changed -- we are done. */ 98 if (value == old) { 99 ISP_UNLOCK(isp); 100 return (0); 101 } 102 103 /* Actually change the role. */ 104 error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value); 105 ISP_UNLOCK(isp); 106 return (error); 107 } 108 109 static int 110 isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan) 111 { 112 fcparam *fcp = FCPARAM(isp, chan); 113 struct isp_fc *fc = ISP_FC_PC(isp, chan); 114 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev); 115 struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); 116 char name[16]; 117 struct cam_sim *sim; 118 struct cam_path *path; 119 #ifdef ISP_TARGET_MODE 120 int i; 121 #endif 122 123 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 124 device_get_unit(isp->isp_dev), &isp->isp_lock, 125 isp->isp_maxcmds, isp->isp_maxcmds, devq); 126 if (sim == NULL) 127 return (ENOMEM); 128 129 if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) { 130 cam_sim_free(sim, FALSE); 131 return (EIO); 132 } 133 if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 134 xpt_bus_deregister(cam_sim_path(sim)); 135 cam_sim_free(sim, FALSE); 136 return (ENXIO); 137 } 138 139 ISP_LOCK(isp); 140 fc->sim = sim; 141 fc->path = path; 142 fc->isp = isp; 143 fc->ready = 1; 144 fcp->isp_use_gft_id = 1; 145 fcp->isp_use_gff_id = 1; 146 147 callout_init_mtx(&fc->gdt, &isp->isp_lock, 0); 148 TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc); 149 #ifdef ISP_TARGET_MODE 150 TAILQ_INIT(&fc->waitq); 151 STAILQ_INIT(&fc->ntfree); 152 for (i = 0; i < ATPDPSIZE; i++) 153 STAILQ_INSERT_TAIL(&fc->ntfree, &fc->ntpool[i], next); 154 LIST_INIT(&fc->atfree); 155 for (i = ATPDPSIZE-1; i >= 0; i--) 156 LIST_INSERT_HEAD(&fc->atfree, &fc->atpool[i], next); 157 for (i = 0; i < ATPDPHASHSIZE; i++) 158 LIST_INIT(&fc->atused[i]); 159 #endif 160 isp_loop_changed(isp, chan); 161 ISP_UNLOCK(isp); 162 if (kproc_create(isp_kthread, fc, &fc->kproc, 0, 0, 163 "%s_%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) { 164 xpt_free_path(fc->path); 165 xpt_bus_deregister(cam_sim_path(fc->sim)); 166 cam_sim_free(fc->sim, FALSE); 167 return (ENOMEM); 168 } 169 fc->num_threads += 1; 170 if (chan > 0) { 171 snprintf(name, sizeof(name), "chan%d", chan); 172 tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), 173 OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 174 "Virtual channel"); 175 } 176 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 177 "wwnn", CTLFLAG_RD, &fcp->isp_wwnn, 178 "World Wide Node Name"); 179 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 180 "wwpn", CTLFLAG_RD, &fcp->isp_wwpn, 181 "World Wide Port Name"); 182 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 183 "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0, 184 "Loop Down Limit"); 185 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 186 "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0, 187 "Gone Device Time"); 188 #if defined(ISP_TARGET_MODE) && defined(DEBUG) 189 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 190 "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0, 191 "Cause a Lost Frame on a Read"); 192 #endif 193 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 194 "role", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 195 isp, chan, isp_role_sysctl, "I", "Current role"); 196 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 197 "speed", CTLFLAG_RD, 
&fcp->isp_gbspeed, 0, 198 "Connection speed in gigabits"); 199 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 200 "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0, 201 "Link state"); 202 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 203 "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0, 204 "Firmware state"); 205 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 206 "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0, 207 "Loop state"); 208 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 209 "topo", CTLFLAG_RD, &fcp->isp_topo, 0, 210 "Connection topology"); 211 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 212 "use_gft_id", CTLFLAG_RWTUN, &fcp->isp_use_gft_id, 0, 213 "Use GFT_ID during fabric scan"); 214 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 215 "use_gff_id", CTLFLAG_RWTUN, &fcp->isp_use_gff_id, 0, 216 "Use GFF_ID during fabric scan"); 217 return (0); 218 } 219 220 static void 221 isp_detach_chan(ispsoftc_t *isp, int chan) 222 { 223 struct isp_fc *fc = ISP_FC_PC(isp, chan); 224 225 xpt_free_path(fc->path); 226 xpt_bus_deregister(cam_sim_path(fc->sim)); 227 cam_sim_free(fc->sim, FALSE); 228 229 /* Wait for the channel's spawned threads to exit. */ 230 wakeup(fc); 231 while (fc->num_threads != 0) 232 mtx_sleep(&fc->num_threads, &isp->isp_lock, PRIBIO, "isp_reap", 0); 233 } 234 235 int 236 isp_attach(ispsoftc_t *isp) 237 { 238 const char *nu = device_get_nameunit(isp->isp_osinfo.dev); 239 int du = device_get_unit(isp->isp_dev); 240 int chan; 241 242 /* 243 * Create the device queue for our SIM(s). 244 */ 245 isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds); 246 if (isp->isp_osinfo.devq == NULL) { 247 return (EIO); 248 } 249 250 for (chan = 0; chan < isp->isp_nchan; chan++) { 251 if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) { 252 goto unwind; 253 } 254 } 255 256 callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_lock, 0); 257 isp_timer_count = hz >> 2; 258 callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); 259 260 isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu); 261 if (isp->isp_osinfo.cdev) { 262 isp->isp_osinfo.cdev->si_drv1 = isp; 263 } 264 return (0); 265 266 unwind: 267 ISP_LOCK(isp); 268 isp->isp_osinfo.is_exiting = 1; 269 while (--chan >= 0) 270 isp_detach_chan(isp, chan); 271 ISP_UNLOCK(isp); 272 cam_simq_free(isp->isp_osinfo.devq); 273 isp->isp_osinfo.devq = NULL; 274 return (-1); 275 } 276 277 int 278 isp_detach(ispsoftc_t *isp) 279 { 280 int chan; 281 282 if (isp->isp_osinfo.cdev) { 283 destroy_dev(isp->isp_osinfo.cdev); 284 isp->isp_osinfo.cdev = NULL; 285 } 286 ISP_LOCK(isp); 287 /* Tell spawned threads that we're exiting. 
*/ 288 isp->isp_osinfo.is_exiting = 1; 289 for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) 290 isp_detach_chan(isp, chan); 291 ISP_UNLOCK(isp); 292 callout_drain(&isp->isp_osinfo.tmo); 293 cam_simq_free(isp->isp_osinfo.devq); 294 return (0); 295 } 296 297 static void 298 isp_freeze_loopdown(ispsoftc_t *isp, int chan) 299 { 300 struct isp_fc *fc = ISP_FC_PC(isp, chan); 301 302 if (fc->sim == NULL) 303 return; 304 if (fc->simqfrozen == 0) { 305 isp_prt(isp, ISP_LOGDEBUG0, 306 "Chan %d Freeze simq (loopdown)", chan); 307 fc->simqfrozen = SIMQFRZ_LOOPDOWN; 308 xpt_hold_boot(); 309 xpt_freeze_simq(fc->sim, 1); 310 } else { 311 isp_prt(isp, ISP_LOGDEBUG0, 312 "Chan %d Mark simq frozen (loopdown)", chan); 313 fc->simqfrozen |= SIMQFRZ_LOOPDOWN; 314 } 315 } 316 317 static void 318 isp_unfreeze_loopdown(ispsoftc_t *isp, int chan) 319 { 320 struct isp_fc *fc = ISP_FC_PC(isp, chan); 321 322 if (fc->sim == NULL) 323 return; 324 int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN; 325 fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN; 326 if (wasfrozen && fc->simqfrozen == 0) { 327 isp_prt(isp, ISP_LOGDEBUG0, 328 "Chan %d Release simq", chan); 329 xpt_release_simq(fc->sim, 1); 330 xpt_release_boot(); 331 } 332 } 333 334 /* 335 * Functions to protect from request queue overflow by freezing SIM queue. 336 * XXX: freezing only one arbitrary SIM, since they all share the queue. 337 */ 338 static void 339 isp_rq_check_above(ispsoftc_t *isp) 340 { 341 struct isp_fc *fc = ISP_FC_PC(isp, 0); 342 343 if (isp->isp_rqovf || fc->sim == NULL) 344 return; 345 if (!isp_rqentry_avail(isp, QENTRY_MAX)) { 346 xpt_freeze_simq(fc->sim, 1); 347 isp->isp_rqovf = 1; 348 } 349 } 350 351 static void 352 isp_rq_check_below(ispsoftc_t *isp) 353 { 354 struct isp_fc *fc = ISP_FC_PC(isp, 0); 355 356 if (!isp->isp_rqovf || fc->sim == NULL) 357 return; 358 if (isp_rqentry_avail(isp, QENTRY_MAX)) { 359 xpt_release_simq(fc->sim, 0); 360 isp->isp_rqovf = 0; 361 } 362 } 363 364 static int 365 ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td) 366 { 367 ispsoftc_t *isp; 368 int nr, chan, retval = ENOTTY; 369 370 isp = dev->si_drv1; 371 372 switch (c) { 373 case ISP_SDBLEV: 374 { 375 int olddblev = isp->isp_dblev; 376 isp->isp_dblev = *(int *)addr; 377 *(int *)addr = olddblev; 378 retval = 0; 379 break; 380 } 381 case ISP_GETROLE: 382 chan = *(int *)addr; 383 if (chan < 0 || chan >= isp->isp_nchan) { 384 retval = -ENXIO; 385 break; 386 } 387 *(int *)addr = FCPARAM(isp, chan)->role; 388 retval = 0; 389 break; 390 case ISP_SETROLE: 391 nr = *(int *)addr; 392 chan = nr >> 8; 393 if (chan < 0 || chan >= isp->isp_nchan) { 394 retval = -ENXIO; 395 break; 396 } 397 nr &= 0xff; 398 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 399 retval = EINVAL; 400 break; 401 } 402 ISP_LOCK(isp); 403 *(int *)addr = FCPARAM(isp, chan)->role; 404 retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr); 405 ISP_UNLOCK(isp); 406 break; 407 408 case ISP_RESETHBA: 409 ISP_LOCK(isp); 410 isp_reinit(isp, 0); 411 ISP_UNLOCK(isp); 412 retval = 0; 413 break; 414 415 case ISP_RESCAN: 416 chan = *(intptr_t *)addr; 417 if (chan < 0 || chan >= isp->isp_nchan) { 418 retval = -ENXIO; 419 break; 420 } 421 ISP_LOCK(isp); 422 if (isp_fc_runstate(isp, chan, 5 * 1000000) != LOOP_READY) { 423 retval = EIO; 424 } else { 425 retval = 0; 426 } 427 ISP_UNLOCK(isp); 428 break; 429 430 case ISP_FC_LIP: 431 chan = *(intptr_t *)addr; 432 if (chan < 0 || chan >= isp->isp_nchan) { 433 retval = -ENXIO; 434 break; 435 } 436 ISP_LOCK(isp); 437 if (isp_control(isp, 
ISPCTL_SEND_LIP, chan)) { 438 retval = EIO; 439 } else { 440 retval = 0; 441 } 442 ISP_UNLOCK(isp); 443 break; 444 case ISP_FC_GETDINFO: 445 { 446 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 447 fcportdb_t *lp; 448 449 if (ifc->loopid >= MAX_FC_TARG) { 450 retval = EINVAL; 451 break; 452 } 453 lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid]; 454 if (lp->state != FC_PORTDB_STATE_NIL) { 455 ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT; 456 ifc->loopid = lp->handle; 457 ifc->portid = lp->portid; 458 ifc->node_wwn = lp->node_wwn; 459 ifc->port_wwn = lp->port_wwn; 460 retval = 0; 461 } else { 462 retval = ENODEV; 463 } 464 break; 465 } 466 case ISP_FC_GETHINFO: 467 { 468 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 469 int chan = hba->fc_channel; 470 471 if (chan < 0 || chan >= isp->isp_nchan) { 472 retval = ENXIO; 473 break; 474 } 475 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 476 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 477 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 478 hba->fc_nchannels = isp->isp_nchan; 479 hba->fc_nports = MAX_FC_TARG; 480 hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed; 481 hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1; 482 hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid; 483 hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram; 484 hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram; 485 hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn; 486 hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn; 487 retval = 0; 488 break; 489 } 490 case ISP_TSK_MGMT: 491 { 492 int needmarker; 493 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; 494 uint16_t nphdl; 495 void *reqp; 496 uint8_t resp[QENTRY_LEN]; 497 isp24xx_tmf_t tmf; 498 isp24xx_statusreq_t sp; 499 fcparam *fcp; 500 fcportdb_t *lp; 501 int i; 502 503 chan = fct->chan; 504 if (chan < 0 || chan >= isp->isp_nchan) { 505 retval = -ENXIO; 506 break; 507 } 508 509 needmarker = retval = 0; 510 nphdl = fct->loopid; 511 ISP_LOCK(isp); 512 fcp = FCPARAM(isp, chan); 513 514 for (i = 0; i < MAX_FC_TARG; i++) { 515 lp = &fcp->portdb[i]; 516 if (lp->handle == nphdl) { 517 break; 518 } 519 } 520 if (i == MAX_FC_TARG) { 521 retval = ENXIO; 522 ISP_UNLOCK(isp); 523 break; 524 } 525 ISP_MEMZERO(&tmf, sizeof(tmf)); 526 tmf.tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; 527 tmf.tmf_header.rqs_entry_count = 1; 528 tmf.tmf_nphdl = lp->handle; 529 tmf.tmf_delay = 2; 530 tmf.tmf_timeout = 4; 531 tmf.tmf_tidlo = lp->portid; 532 tmf.tmf_tidhi = lp->portid >> 16; 533 tmf.tmf_vpidx = ISP_GET_VPIDX(isp, chan); 534 tmf.tmf_lun[1] = fct->lun & 0xff; 535 if (fct->lun >= 256) { 536 tmf.tmf_lun[0] = 0x40 | (fct->lun >> 8); 537 } 538 switch (fct->action) { 539 case IPT_CLEAR_ACA: 540 tmf.tmf_flags = ISP24XX_TMF_CLEAR_ACA; 541 break; 542 case IPT_TARGET_RESET: 543 tmf.tmf_flags = ISP24XX_TMF_TARGET_RESET; 544 needmarker = 1; 545 break; 546 case IPT_LUN_RESET: 547 tmf.tmf_flags = ISP24XX_TMF_LUN_RESET; 548 needmarker = 1; 549 break; 550 case IPT_CLEAR_TASK_SET: 551 tmf.tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET; 552 needmarker = 1; 553 break; 554 case IPT_ABORT_TASK_SET: 555 tmf.tmf_flags = ISP24XX_TMF_ABORT_TASK_SET; 556 needmarker = 1; 557 break; 558 default: 559 retval = EINVAL; 560 break; 561 } 562 if (retval) { 563 ISP_UNLOCK(isp); 564 break; 565 } 566 567 /* Prepare space for response in memory */ 568 memset(resp, 0xff, sizeof(resp)); 569 tmf.tmf_handle = isp_allocate_handle(isp, resp, 570 ISP_HANDLE_CTRL); 571 if (tmf.tmf_handle == 0) { 572 
isp_prt(isp, ISP_LOGERR, 573 "%s: TMF of Chan %d out of handles", 574 __func__, chan); 575 ISP_UNLOCK(isp); 576 retval = ENOMEM; 577 break; 578 } 579 580 /* Send request and wait for response. */ 581 reqp = isp_getrqentry(isp); 582 if (reqp == NULL) { 583 isp_prt(isp, ISP_LOGERR, 584 "%s: TMF of Chan %d out of rqent", 585 __func__, chan); 586 isp_destroy_handle(isp, tmf.tmf_handle); 587 ISP_UNLOCK(isp); 588 retval = EIO; 589 break; 590 } 591 isp_put_24xx_tmf(isp, &tmf, (isp24xx_tmf_t *)reqp); 592 if (isp->isp_dblev & ISP_LOGDEBUG1) 593 isp_print_bytes(isp, "IOCB TMF", QENTRY_LEN, reqp); 594 ISP_SYNC_REQUEST(isp); 595 if (msleep(resp, &isp->isp_lock, 0, "TMF", 5*hz) == EWOULDBLOCK) { 596 isp_prt(isp, ISP_LOGERR, 597 "%s: TMF of Chan %d timed out", 598 __func__, chan); 599 isp_destroy_handle(isp, tmf.tmf_handle); 600 ISP_UNLOCK(isp); 601 retval = EIO; 602 break; 603 } 604 if (isp->isp_dblev & ISP_LOGDEBUG1) 605 isp_print_bytes(isp, "IOCB TMF response", QENTRY_LEN, resp); 606 isp_get_24xx_response(isp, (isp24xx_statusreq_t *)resp, &sp); 607 608 if (sp.req_completion_status != 0) 609 retval = EIO; 610 else if (needmarker) 611 fcp->sendmarker = 1; 612 ISP_UNLOCK(isp); 613 break; 614 } 615 default: 616 break; 617 } 618 return (retval); 619 } 620 621 /* 622 * Local Inlines 623 */ 624 625 static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *); 626 static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *); 627 628 static ISP_INLINE int 629 isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb) 630 { 631 ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free; 632 if (ISP_PCMD(ccb) == NULL) { 633 return (-1); 634 } 635 isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next; 636 return (0); 637 } 638 639 static ISP_INLINE void 640 isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb) 641 { 642 if (ISP_PCMD(ccb)) { 643 #ifdef ISP_TARGET_MODE 644 PISP_PCMD(ccb)->datalen = 0; 645 #endif 646 PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free; 647 isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb); 648 ISP_PCMD(ccb) = NULL; 649 } 650 } 651 652 /* 653 * Put the target mode functions here, because some are inlines 654 */ 655 #ifdef ISP_TARGET_MODE 656 static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); 657 static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t); 658 static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t); 659 static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *); 660 static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int); 661 static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t); 662 static void isp_put_ntpd(ispsoftc_t *, int, inot_private_data_t *); 663 static tstate_t *create_lun_state(ispsoftc_t *, int, struct cam_path *); 664 static void destroy_lun_state(ispsoftc_t *, int, tstate_t *); 665 static void isp_enable_lun(ispsoftc_t *, union ccb *); 666 static void isp_disable_lun(ispsoftc_t *, union ccb *); 667 static callout_func_t isp_refire_notify_ack; 668 static void isp_complete_ctio(ispsoftc_t *isp, union ccb *); 669 enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE }; 670 static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How); 671 static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *); 672 static void isp_handle_platform_ctio(ispsoftc_t *, ct7_entry_t *); 673 static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp); 674 static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *); 675 static void 
isp_target_mark_aborted_early(ispsoftc_t *, int chan, tstate_t *, uint32_t); 676 677 static ISP_INLINE tstate_t * 678 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) 679 { 680 struct isp_fc *fc = ISP_FC_PC(isp, bus); 681 tstate_t *tptr; 682 683 SLIST_FOREACH(tptr, &fc->lun_hash[LUN_HASH_FUNC(lun)], next) { 684 if (tptr->ts_lun == lun) 685 return (tptr); 686 } 687 return (NULL); 688 } 689 690 static int 691 isp_atio_restart(ispsoftc_t *isp, int bus, tstate_t *tptr) 692 { 693 inot_private_data_t *ntp; 694 struct ntpdlist rq; 695 696 if (STAILQ_EMPTY(&tptr->restart_queue)) 697 return (0); 698 STAILQ_INIT(&rq); 699 STAILQ_CONCAT(&rq, &tptr->restart_queue); 700 while ((ntp = STAILQ_FIRST(&rq)) != NULL) { 701 STAILQ_REMOVE_HEAD(&rq, next); 702 isp_prt(isp, ISP_LOGTDEBUG0, 703 "%s: restarting resrc deprived %x", __func__, 704 ((at7_entry_t *)ntp->data)->at_rxid); 705 isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->data); 706 isp_put_ntpd(isp, bus, ntp); 707 if (!STAILQ_EMPTY(&tptr->restart_queue)) 708 break; 709 } 710 if (!STAILQ_EMPTY(&rq)) { 711 STAILQ_CONCAT(&rq, &tptr->restart_queue); 712 STAILQ_CONCAT(&tptr->restart_queue, &rq); 713 } 714 return (!STAILQ_EMPTY(&tptr->restart_queue)); 715 } 716 717 static void 718 isp_tmcmd_restart(ispsoftc_t *isp) 719 { 720 struct isp_fc *fc; 721 tstate_t *tptr; 722 union ccb *ccb; 723 int bus, i; 724 725 for (bus = 0; bus < isp->isp_nchan; bus++) { 726 fc = ISP_FC_PC(isp, bus); 727 for (i = 0; i < LUN_HASH_SIZE; i++) { 728 SLIST_FOREACH(tptr, &fc->lun_hash[i], next) 729 isp_atio_restart(isp, bus, tptr); 730 } 731 732 /* 733 * We only need to do this once per channel. 734 */ 735 ccb = (union ccb *)TAILQ_FIRST(&fc->waitq); 736 if (ccb != NULL) { 737 TAILQ_REMOVE(&fc->waitq, &ccb->ccb_h, sim_links.tqe); 738 isp_target_start_ctio(isp, ccb, FROM_TIMER); 739 } 740 } 741 isp_rq_check_above(isp); 742 isp_rq_check_below(isp); 743 } 744 745 static atio_private_data_t * 746 isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag) 747 { 748 struct isp_fc *fc = ISP_FC_PC(isp, chan); 749 atio_private_data_t *atp; 750 751 atp = LIST_FIRST(&fc->atfree); 752 if (atp) { 753 LIST_REMOVE(atp, next); 754 atp->tag = tag; 755 LIST_INSERT_HEAD(&fc->atused[ATPDPHASH(tag)], atp, next); 756 } 757 return (atp); 758 } 759 760 static atio_private_data_t * 761 isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag) 762 { 763 struct isp_fc *fc = ISP_FC_PC(isp, chan); 764 atio_private_data_t *atp; 765 766 LIST_FOREACH(atp, &fc->atused[ATPDPHASH(tag)], next) { 767 if (atp->tag == tag) 768 return (atp); 769 } 770 return (NULL); 771 } 772 773 static void 774 isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp) 775 { 776 struct isp_fc *fc = ISP_FC_PC(isp, chan); 777 778 if (atp->ests) 779 isp_put_ecmd(isp, atp->ests); 780 LIST_REMOVE(atp, next); 781 memset(atp, 0, sizeof (*atp)); 782 LIST_INSERT_HEAD(&fc->atfree, atp, next); 783 } 784 785 static void 786 isp_dump_atpd(ispsoftc_t *isp, int chan) 787 { 788 struct isp_fc *fc = ISP_FC_PC(isp, chan); 789 atio_private_data_t *atp; 790 const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" }; 791 792 for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) { 793 if (atp->state == ATPD_STATE_FREE) 794 continue; 795 isp_prt(isp, ISP_LOGALL, "Chan %d ATP [0x%x] origdlen %u bytes_xfrd %u lun %jx nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s", 796 chan, atp->tag, atp->orig_datalen, atp->bytes_xfered, (uintmax_t)atp->lun, atp->nphdl, atp->sid, atp->did, atp->oxid, states[atp->state & 
0x7]); 797 } 798 } 799 800 static inot_private_data_t * 801 isp_get_ntpd(ispsoftc_t *isp, int chan) 802 { 803 struct isp_fc *fc = ISP_FC_PC(isp, chan); 804 inot_private_data_t *ntp; 805 806 ntp = STAILQ_FIRST(&fc->ntfree); 807 if (ntp) 808 STAILQ_REMOVE_HEAD(&fc->ntfree, next); 809 return (ntp); 810 } 811 812 static inot_private_data_t * 813 isp_find_ntpd(ispsoftc_t *isp, int chan, uint32_t tag_id, uint32_t seq_id) 814 { 815 struct isp_fc *fc = ISP_FC_PC(isp, chan); 816 inot_private_data_t *ntp; 817 818 for (ntp = fc->ntpool; ntp < &fc->ntpool[ATPDPSIZE]; ntp++) { 819 if (ntp->tag_id == tag_id && ntp->seq_id == seq_id) 820 return (ntp); 821 } 822 return (NULL); 823 } 824 825 static void 826 isp_put_ntpd(ispsoftc_t *isp, int chan, inot_private_data_t *ntp) 827 { 828 struct isp_fc *fc = ISP_FC_PC(isp, chan); 829 830 ntp->tag_id = ntp->seq_id = 0; 831 STAILQ_INSERT_HEAD(&fc->ntfree, ntp, next); 832 } 833 834 tstate_t * 835 create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path) 836 { 837 struct isp_fc *fc = ISP_FC_PC(isp, bus); 838 lun_id_t lun; 839 tstate_t *tptr; 840 841 lun = xpt_path_lun_id(path); 842 tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 843 if (tptr == NULL) 844 return (NULL); 845 tptr->ts_lun = lun; 846 SLIST_INIT(&tptr->atios); 847 SLIST_INIT(&tptr->inots); 848 STAILQ_INIT(&tptr->restart_queue); 849 SLIST_INSERT_HEAD(&fc->lun_hash[LUN_HASH_FUNC(lun)], tptr, next); 850 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n"); 851 return (tptr); 852 } 853 854 static void 855 destroy_lun_state(ispsoftc_t *isp, int bus, tstate_t *tptr) 856 { 857 struct isp_fc *fc = ISP_FC_PC(isp, bus); 858 union ccb *ccb; 859 inot_private_data_t *ntp; 860 861 while ((ccb = (union ccb *)SLIST_FIRST(&tptr->atios)) != NULL) { 862 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 863 ccb->ccb_h.status = CAM_REQ_ABORTED; 864 xpt_done(ccb); 865 }; 866 while ((ccb = (union ccb *)SLIST_FIRST(&tptr->inots)) != NULL) { 867 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); 868 ccb->ccb_h.status = CAM_REQ_ABORTED; 869 xpt_done(ccb); 870 } 871 while ((ntp = STAILQ_FIRST(&tptr->restart_queue)) != NULL) { 872 isp_endcmd(isp, ntp->data, NIL_HANDLE, bus, SCSI_STATUS_BUSY, 0); 873 STAILQ_REMOVE_HEAD(&tptr->restart_queue, next); 874 isp_put_ntpd(isp, bus, ntp); 875 } 876 SLIST_REMOVE(&fc->lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], tptr, tstate, next); 877 free(tptr, M_DEVBUF); 878 } 879 880 static void 881 isp_enable_lun(ispsoftc_t *isp, union ccb *ccb) 882 { 883 tstate_t *tptr; 884 int bus = XS_CHANNEL(ccb); 885 target_id_t target = ccb->ccb_h.target_id; 886 lun_id_t lun = ccb->ccb_h.target_lun; 887 888 /* 889 * We only support either target and lun both wildcard 890 * or target and lun both non-wildcard. 891 */ 892 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, 893 "enabling lun %jx\n", (uintmax_t)lun); 894 if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) { 895 ccb->ccb_h.status = CAM_LUN_INVALID; 896 xpt_done(ccb); 897 return; 898 } 899 900 /* Create the state pointer. It should not already exist. 
*/ 901 tptr = get_lun_statep(isp, bus, lun); 902 if (tptr) { 903 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 904 xpt_done(ccb); 905 return; 906 } 907 tptr = create_lun_state(isp, bus, ccb->ccb_h.path); 908 if (tptr == NULL) { 909 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 910 xpt_done(ccb); 911 return; 912 } 913 914 ccb->ccb_h.status = CAM_REQ_CMP; 915 xpt_done(ccb); 916 } 917 918 static void 919 isp_disable_lun(ispsoftc_t *isp, union ccb *ccb) 920 { 921 tstate_t *tptr; 922 int bus = XS_CHANNEL(ccb); 923 target_id_t target = ccb->ccb_h.target_id; 924 lun_id_t lun = ccb->ccb_h.target_lun; 925 926 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, 927 "disabling lun %jx\n", (uintmax_t)lun); 928 if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) { 929 ccb->ccb_h.status = CAM_LUN_INVALID; 930 xpt_done(ccb); 931 return; 932 } 933 934 /* Find the state pointer. */ 935 if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) { 936 ccb->ccb_h.status = CAM_PATH_INVALID; 937 xpt_done(ccb); 938 return; 939 } 940 941 destroy_lun_state(isp, bus, tptr); 942 ccb->ccb_h.status = CAM_REQ_CMP; 943 xpt_done(ccb); 944 } 945 946 static void 947 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how) 948 { 949 int fctape, sendstatus, resid; 950 fcparam *fcp; 951 atio_private_data_t *atp; 952 struct ccb_scsiio *cso; 953 struct isp_ccbq *waitq; 954 uint32_t dmaresult, handle, xfrlen, sense_length, tmp; 955 ct7_entry_t local, *cto = &local; 956 957 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len, 958 (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? ccb->csio.sense_len : 0)); 959 960 waitq = &ISP_FC_PC(isp, XS_CHANNEL(ccb))->waitq; 961 switch (how) { 962 case FROM_CAM: 963 /* 964 * Insert at the tail of the list, if any, waiting CTIO CCBs 965 */ 966 TAILQ_INSERT_TAIL(waitq, &ccb->ccb_h, sim_links.tqe); 967 break; 968 case FROM_TIMER: 969 case FROM_SRR: 970 case FROM_CTIO_DONE: 971 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 972 break; 973 } 974 975 while ((ccb = (union ccb *) TAILQ_FIRST(waitq)) != NULL) { 976 TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe); 977 978 cso = &ccb->csio; 979 xfrlen = cso->dxfer_len; 980 if (xfrlen == 0) { 981 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 982 ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n"); 983 ccb->ccb_h.status = CAM_REQ_INVALID; 984 xpt_done(ccb); 985 continue; 986 } 987 } 988 989 atp = isp_find_atpd(isp, XS_CHANNEL(ccb), cso->tag_id); 990 if (atp == NULL) { 991 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__); 992 isp_dump_atpd(isp, XS_CHANNEL(ccb)); 993 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 994 xpt_done(ccb); 995 continue; 996 } 997 998 /* 999 * Is this command a dead duck? 1000 */ 1001 if (atp->dead) { 1002 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id); 1003 ccb->ccb_h.status = CAM_REQ_ABORTED; 1004 xpt_done(ccb); 1005 continue; 1006 } 1007 1008 /* 1009 * Check to make sure we're still in target mode. 
1010 */ 1011 fcp = FCPARAM(isp, XS_CHANNEL(ccb)); 1012 if ((fcp->role & ISP_ROLE_TARGET) == 0) { 1013 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id); 1014 ccb->ccb_h.status = CAM_PROVIDE_FAIL; 1015 xpt_done(ccb); 1016 continue; 1017 } 1018 1019 /* 1020 * We're only handling ATPD_CCB_OUTSTANDING outstanding CCB at a time (one of which 1021 * could be split into two CTIOs to split data and status). 1022 */ 1023 if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) { 1024 isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags); 1025 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 1026 break; 1027 } 1028 1029 /* 1030 * Does the initiator expect FC-Tape style responses? 1031 */ 1032 if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) { 1033 fctape = 1; 1034 } else { 1035 fctape = 0; 1036 } 1037 1038 /* 1039 * If we already did the data xfer portion of a CTIO that sends data 1040 * and status, don't do it again and do the status portion now. 1041 */ 1042 if (atp->sendst) { 1043 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u", 1044 cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit); 1045 xfrlen = 0; /* we already did the data transfer */ 1046 atp->sendst = 0; 1047 } 1048 if (ccb->ccb_h.flags & CAM_SEND_STATUS) { 1049 sendstatus = 1; 1050 } else { 1051 sendstatus = 0; 1052 } 1053 1054 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 1055 KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?")); 1056 /* 1057 * Sense length is not the entire sense data structure size. Periph 1058 * drivers don't seem to be setting sense_len to reflect the actual 1059 * size. We'll peek inside to get the right amount. 1060 */ 1061 sense_length = cso->sense_len; 1062 1063 /* 1064 * This 'cannot' happen 1065 */ 1066 if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) { 1067 sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE; 1068 } 1069 } else { 1070 sense_length = 0; 1071 } 1072 1073 /* 1074 * Check for overflow 1075 */ 1076 tmp = atp->bytes_xfered + atp->bytes_in_transit; 1077 if (xfrlen > 0 && tmp > atp->orig_datalen) { 1078 isp_prt(isp, ISP_LOGERR, 1079 "%s: [0x%x] data overflow by %u bytes", __func__, 1080 cso->tag_id, tmp + xfrlen - atp->orig_datalen); 1081 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1082 xpt_done(ccb); 1083 continue; 1084 } 1085 if (xfrlen > atp->orig_datalen - tmp) { 1086 xfrlen = atp->orig_datalen - tmp; 1087 if (xfrlen == 0 && !sendstatus) { 1088 cso->resid = cso->dxfer_len; 1089 ccb->ccb_h.status = CAM_REQ_CMP; 1090 xpt_done(ccb); 1091 continue; 1092 } 1093 } 1094 1095 memset(cto, 0, QENTRY_LEN); 1096 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; 1097 cto->ct_header.rqs_entry_count = 1; 1098 cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; 1099 ATPD_SET_SEQNO(cto, atp); 1100 cto->ct_nphdl = atp->nphdl; 1101 cto->ct_rxid = atp->tag; 1102 cto->ct_iid_lo = atp->sid; 1103 cto->ct_iid_hi = atp->sid >> 16; 1104 cto->ct_oxid = atp->oxid; 1105 cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb)); 1106 cto->ct_timeout = XS_TIME(ccb); 1107 cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT; 1108 1109 /* 1110 * Mode 1, status, no data. Only possible when we are sending status, have 1111 * no data to transfer, and any sense data can fit into a ct7_entry_t. 1112 * 1113 * Mode 2, status, no data. 
We have to use this in the case that 1114 * the sense data won't fit into a ct7_entry_t. 1115 * 1116 */ 1117 if (sendstatus && xfrlen == 0) { 1118 cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA; 1119 resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; 1120 if (sense_length <= MAXRESPLEN_24XX) { 1121 cto->ct_flags |= CT7_FLAG_MODE1; 1122 cto->ct_scsi_status = cso->scsi_status; 1123 if (resid < 0) { 1124 cto->ct_resid = -resid; 1125 cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8); 1126 } else if (resid > 0) { 1127 cto->ct_resid = resid; 1128 cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8); 1129 } 1130 if (fctape) { 1131 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; 1132 } 1133 if (sense_length) { 1134 cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8); 1135 cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length; 1136 memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length); 1137 } 1138 } else { 1139 bus_addr_t addr; 1140 fcp_rsp_iu_t rp; 1141 1142 if (atp->ests == NULL) { 1143 atp->ests = isp_get_ecmd(isp); 1144 if (atp->ests == NULL) { 1145 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 1146 break; 1147 } 1148 } 1149 memset(&rp, 0, sizeof(rp)); 1150 if (fctape) { 1151 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; 1152 rp.fcp_rsp_bits |= FCP_CONF_REQ; 1153 } 1154 cto->ct_flags |= CT7_FLAG_MODE2; 1155 rp.fcp_rsp_scsi_status = cso->scsi_status; 1156 if (resid < 0) { 1157 rp.fcp_rsp_resid = -resid; 1158 rp.fcp_rsp_bits |= FCP_RESID_OVERFLOW; 1159 } else if (resid > 0) { 1160 rp.fcp_rsp_resid = resid; 1161 rp.fcp_rsp_bits |= FCP_RESID_UNDERFLOW; 1162 } 1163 if (sense_length) { 1164 rp.fcp_rsp_snslen = sense_length; 1165 cto->ct_senselen = sense_length; 1166 rp.fcp_rsp_bits |= FCP_SNSLEN_VALID; 1167 isp_put_fcp_rsp_iu(isp, &rp, atp->ests); 1168 memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length); 1169 } else { 1170 isp_put_fcp_rsp_iu(isp, &rp, atp->ests); 1171 } 1172 if (isp->isp_dblev & ISP_LOGTDEBUG1) { 1173 isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests); 1174 } 1175 bus_dmamap_sync(isp->isp_osinfo.ecmd_dmat, isp->isp_osinfo.ecmd_map, BUS_DMASYNC_PREWRITE); 1176 addr = isp->isp_osinfo.ecmd_dma; 1177 addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE); 1178 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests, 1179 (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length); 1180 cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length; 1181 cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr); 1182 cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr); 1183 cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; 1184 } 1185 if (sense_length) { 1186 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__, 1187 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length, 1188 cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]); 1189 } else { 1190 isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__, 1191 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid); 1192 } 1193 
atp->state = ATPD_STATE_LAST_CTIO; 1194 } 1195 1196 /* 1197 * Mode 0 data transfers, *possibly* with status. 1198 */ 1199 if (xfrlen != 0) { 1200 cto->ct_flags |= CT7_FLAG_MODE0; 1201 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1202 cto->ct_flags |= CT7_DATA_IN; 1203 } else { 1204 cto->ct_flags |= CT7_DATA_OUT; 1205 } 1206 1207 cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit; 1208 cto->rsp.m0.ct_xfrlen = xfrlen; 1209 1210 #ifdef DEBUG 1211 if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) { 1212 isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2)); 1213 ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0; 1214 cto->rsp.m0.ct_xfrlen -= xfrlen >> 2; 1215 } 1216 #endif 1217 if (sendstatus) { 1218 resid = atp->orig_datalen - atp->bytes_xfered - xfrlen; 1219 if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) { 1220 cto->ct_flags |= CT7_SENDSTATUS; 1221 atp->state = ATPD_STATE_LAST_CTIO; 1222 if (fctape) { 1223 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; 1224 } 1225 } else { 1226 atp->sendst = 1; /* send status later */ 1227 cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM; 1228 atp->state = ATPD_STATE_CTIO; 1229 } 1230 } else { 1231 atp->state = ATPD_STATE_CTIO; 1232 } 1233 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__, 1234 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered); 1235 } 1236 1237 if (isp_get_pcmd(isp, ccb)) { 1238 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n"); 1239 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 1240 break; 1241 } 1242 handle = isp_allocate_handle(isp, ccb, ISP_HANDLE_TARGET); 1243 if (handle == 0) { 1244 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__); 1245 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 1246 isp_free_pcmd(isp, ccb); 1247 break; 1248 } 1249 atp->bytes_in_transit += xfrlen; 1250 PISP_PCMD(ccb)->datalen = xfrlen; 1251 1252 /* 1253 * Call the dma setup routines for this entry (and any subsequent 1254 * CTIOs) if there's data to move, and then tell the f/w it's got 1255 * new things to play with. As with isp_start's usage of DMA setup, 1256 * any swizzling is done in the machine dependent layer. Because 1257 * of this, we put the request onto the queue area first in native 1258 * format. 
1259 */ 1260 cto->ct_syshandle = handle; 1261 dmaresult = ISP_DMASETUP(isp, cso, cto); 1262 if (dmaresult != 0) { 1263 isp_destroy_handle(isp, handle); 1264 isp_free_pcmd(isp, ccb); 1265 if (dmaresult == CMD_EAGAIN) { 1266 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe); 1267 break; 1268 } 1269 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1270 xpt_done(ccb); 1271 continue; 1272 } 1273 ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED; 1274 if (xfrlen) { 1275 ccb->ccb_h.spriv_field0 = atp->bytes_xfered; 1276 } else { 1277 ccb->ccb_h.spriv_field0 = ~0; 1278 } 1279 atp->ctcnt++; 1280 atp->seqno++; 1281 } 1282 } 1283 1284 static void 1285 isp_refire_notify_ack(void *arg) 1286 { 1287 isp_tna_t *tp = arg; 1288 ispsoftc_t *isp = tp->isp; 1289 1290 ISP_ASSERT_LOCKED(isp); 1291 if (isp_notify_ack(isp, tp->not)) { 1292 callout_schedule(&tp->timer, 5); 1293 } else { 1294 free(tp, M_DEVBUF); 1295 } 1296 } 1297 1298 1299 static void 1300 isp_complete_ctio(ispsoftc_t *isp, union ccb *ccb) 1301 { 1302 1303 isp_rq_check_below(isp); 1304 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1305 xpt_done(ccb); 1306 } 1307 1308 static void 1309 isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep) 1310 { 1311 int cdbxlen; 1312 lun_id_t lun; 1313 uint16_t chan, nphdl = NIL_HANDLE; 1314 uint32_t did, sid; 1315 fcportdb_t *lp; 1316 tstate_t *tptr; 1317 struct ccb_accept_tio *atiop; 1318 atio_private_data_t *atp = NULL; 1319 atio_private_data_t *oatp; 1320 inot_private_data_t *ntp; 1321 1322 did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2]; 1323 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; 1324 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun)); 1325 1326 if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) { 1327 /* Channel has to be derived from D_ID */ 1328 isp_find_chan_by_did(isp, did, &chan); 1329 if (chan == ISP_NOCHAN) { 1330 isp_prt(isp, ISP_LOGWARN, 1331 "%s: [RX_ID 0x%x] D_ID %x not found on any channel", 1332 __func__, aep->at_rxid, did); 1333 isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, 1334 ECMD_TERMINATE, 0); 1335 return; 1336 } 1337 } else { 1338 chan = 0; 1339 } 1340 1341 /* 1342 * Find the PDB entry for this initiator 1343 */ 1344 if (isp_find_pdb_by_portid(isp, chan, sid, &lp) == 0) { 1345 /* 1346 * If we're not in the port database terminate the exchange. 1347 */ 1348 isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already", 1349 __func__, aep->at_rxid, did, chan, sid); 1350 isp_dump_portdb(isp, chan); 1351 isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0); 1352 return; 1353 } 1354 nphdl = lp->handle; 1355 1356 /* 1357 * Get the tstate pointer 1358 */ 1359 tptr = get_lun_statep(isp, chan, lun); 1360 if (tptr == NULL) { 1361 tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD); 1362 if (tptr == NULL) { 1363 isp_prt(isp, ISP_LOGWARN, 1364 "%s: [0x%x] no state pointer for lun %jx or wildcard", 1365 __func__, aep->at_rxid, (uintmax_t)lun); 1366 if (lun == 0) { 1367 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); 1368 } else { 1369 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); 1370 } 1371 return; 1372 } 1373 } 1374 1375 /* 1376 * Start any commands pending resources first. 1377 */ 1378 if (isp_atio_restart(isp, chan, tptr)) 1379 goto noresrc; 1380 1381 /* 1382 * If the f/w is out of resources, just send a BUSY status back. 
1383 */ 1384 if (aep->at_rxid == AT7_NORESRC_RXID) { 1385 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0); 1386 return; 1387 } 1388 1389 /* 1390 * If we're out of resources, just send a BUSY status back. 1391 */ 1392 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1393 if (atiop == NULL) { 1394 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid); 1395 goto noresrc; 1396 } 1397 1398 oatp = isp_find_atpd(isp, chan, aep->at_rxid); 1399 if (oatp) { 1400 isp_prt(isp, oatp->state == ATPD_STATE_LAST_CTIO ? ISP_LOGTDEBUG0 : 1401 ISP_LOGWARN, "[0x%x] tag wraparound (N-Port Handle " 1402 "0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d", 1403 aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state); 1404 /* 1405 * It's not a "no resource" condition- but we can treat it like one 1406 */ 1407 goto noresrc; 1408 } 1409 atp = isp_get_atpd(isp, chan, aep->at_rxid); 1410 if (atp == NULL) { 1411 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid); 1412 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0); 1413 return; 1414 } 1415 atp->word3 = lp->prli_word3; 1416 atp->state = ATPD_STATE_ATIO; 1417 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1418 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n"); 1419 atiop->init_id = FC_PORTDB_TGT(isp, chan, lp); 1420 atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp); 1421 atiop->ccb_h.target_lun = lun; 1422 atiop->sense_len = 0; 1423 cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT; 1424 if (cdbxlen) { 1425 isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored"); 1426 } 1427 cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb); 1428 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen); 1429 atiop->cdb_len = cdbxlen; 1430 atiop->ccb_h.status = CAM_CDB_RECVD; 1431 atiop->tag_id = atp->tag; 1432 switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) { 1433 case FCP_CMND_TASK_ATTR_SIMPLE: 1434 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 1435 atiop->tag_action = MSG_SIMPLE_TASK; 1436 break; 1437 case FCP_CMND_TASK_ATTR_HEAD: 1438 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 1439 atiop->tag_action = MSG_HEAD_OF_QUEUE_TASK; 1440 break; 1441 case FCP_CMND_TASK_ATTR_ORDERED: 1442 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 1443 atiop->tag_action = MSG_ORDERED_TASK; 1444 break; 1445 case FCP_CMND_TASK_ATTR_ACA: 1446 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 1447 atiop->tag_action = MSG_ACA_TASK; 1448 break; 1449 case FCP_CMND_TASK_ATTR_UNTAGGED: 1450 default: 1451 atiop->tag_action = 0; 1452 break; 1453 } 1454 atiop->priority = (aep->at_cmnd.fcp_cmnd_task_attribute & 1455 FCP_CMND_PRIO_MASK) >> FCP_CMND_PRIO_SHIFT; 1456 atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl; 1457 atp->bytes_xfered = 0; 1458 atp->lun = lun; 1459 atp->nphdl = nphdl; 1460 atp->sid = sid; 1461 atp->did = did; 1462 atp->oxid = aep->at_hdr.ox_id; 1463 atp->rxid = aep->at_hdr.rx_id; 1464 atp->cdb0 = atiop->cdb_io.cdb_bytes[0]; 1465 atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK; 1466 atp->state = ATPD_STATE_CAM; 1467 isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u", 1468 aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen); 1469 xpt_done((union ccb *)atiop); 1470 return; 1471 noresrc: 1472 KASSERT(atp == NULL, ("%s: atp is not NULL on noresrc!\n", __func__)); 1473 ntp = isp_get_ntpd(isp, chan); 1474 if (ntp == NULL) { 1475 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); 1476 return; 1477 } 1478 
memcpy(ntp->data, aep, QENTRY_LEN); 1479 STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next); 1480 } 1481 1482 1483 /* 1484 * Handle starting an SRR (sequence retransmit request) 1485 * We get here when we've gotten the immediate notify 1486 * and the return of all outstanding CTIOs for this 1487 * transaction. 1488 */ 1489 static void 1490 isp_handle_srr_start(ispsoftc_t *isp, atio_private_data_t *atp) 1491 { 1492 in_fcentry_24xx_t *inot; 1493 uint32_t srr_off, ccb_off, ccb_len, ccb_end; 1494 union ccb *ccb; 1495 1496 inot = (in_fcentry_24xx_t *)atp->srr; 1497 srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16); 1498 ccb = atp->srr_ccb; 1499 atp->srr_ccb = NULL; 1500 atp->nsrr++; 1501 if (ccb == NULL) { 1502 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag); 1503 goto fail; 1504 } 1505 1506 ccb_off = ccb->ccb_h.spriv_field0; 1507 ccb_len = ccb->csio.dxfer_len; 1508 ccb_end = (ccb_off == ~0)? ~0 : ccb_off + ccb_len; 1509 1510 switch (inot->in_srr_iu) { 1511 case R_CTL_INFO_SOLICITED_DATA: 1512 /* 1513 * We have to restart a FCP_DATA data out transaction 1514 */ 1515 atp->sendst = 0; 1516 atp->bytes_xfered = srr_off; 1517 if (ccb_len == 0) { 1518 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off); 1519 goto mdp; 1520 } 1521 if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) { 1522 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); 1523 goto mdp; 1524 } 1525 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); 1526 break; 1527 case R_CTL_INFO_COMMAND_STATUS: 1528 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag); 1529 atp->sendst = 1; 1530 /* 1531 * We have to restart a FCP_RSP IU transaction 1532 */ 1533 break; 1534 case R_CTL_INFO_DATA_DESCRIPTOR: 1535 /* 1536 * We have to restart an FCP DATA in transaction 1537 */ 1538 isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping"); 1539 goto fail; 1540 1541 default: 1542 isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu); 1543 goto fail; 1544 } 1545 1546 /* 1547 * We can't do anything until this is acked, so we might as well start it now. 1548 * We aren't going to do the usual asynchronous ack issue because we need 1549 * to make sure this gets on the wire first. 
1550 */ 1551 if (isp_notify_ack(isp, inot)) { 1552 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); 1553 goto fail; 1554 } 1555 isp_target_start_ctio(isp, ccb, FROM_SRR); 1556 return; 1557 fail: 1558 inot->in_reserved = 1; 1559 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 1560 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1561 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 1562 isp_complete_ctio(isp, ccb); 1563 return; 1564 mdp: 1565 if (isp_notify_ack(isp, inot)) { 1566 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); 1567 goto fail; 1568 } 1569 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1570 ccb->ccb_h.status |= CAM_MESSAGE_RECV; 1571 /* 1572 * This is not a strict interpretation of MDP, but it's close 1573 */ 1574 ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16]; 1575 ccb->csio.msg_len = 7; 1576 ccb->csio.msg_ptr[0] = MSG_EXTENDED; 1577 ccb->csio.msg_ptr[1] = 5; 1578 ccb->csio.msg_ptr[2] = 0; /* modify data pointer */ 1579 ccb->csio.msg_ptr[3] = srr_off >> 24; 1580 ccb->csio.msg_ptr[4] = srr_off >> 16; 1581 ccb->csio.msg_ptr[5] = srr_off >> 8; 1582 ccb->csio.msg_ptr[6] = srr_off; 1583 isp_complete_ctio(isp, ccb); 1584 } 1585 1586 1587 static void 1588 isp_handle_platform_srr(ispsoftc_t *isp, isp_notify_t *notify) 1589 { 1590 in_fcentry_24xx_t *inot = notify->nt_lreserved; 1591 atio_private_data_t *atp; 1592 uint32_t tag = notify->nt_tagval & 0xffffffff; 1593 1594 atp = isp_find_atpd(isp, notify->nt_channel, tag); 1595 if (atp == NULL) { 1596 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify", 1597 __func__, tag); 1598 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); 1599 return; 1600 } 1601 atp->srr_notify_rcvd = 1; 1602 memcpy(atp->srr, inot, sizeof (atp->srr)); 1603 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] flags 0x%x srr_iu %x reloff 0x%x", 1604 inot->in_rxid, inot->in_flags, inot->in_srr_iu, 1605 ((uint32_t)inot->in_srr_reloff_hi << 16) | inot->in_srr_reloff_lo); 1606 if (atp->srr_ccb) 1607 isp_handle_srr_start(isp, atp); 1608 } 1609 1610 static void 1611 isp_handle_platform_ctio(ispsoftc_t *isp, ct7_entry_t *ct) 1612 { 1613 union ccb *ccb; 1614 int sentstatus = 0, ok = 0, notify_cam = 0, failure = 0; 1615 atio_private_data_t *atp = NULL; 1616 int bus; 1617 uint32_t handle, data_requested, resid; 1618 1619 handle = ct->ct_syshandle; 1620 ccb = isp_find_xs(isp, handle); 1621 if (ccb == NULL) { 1622 isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, ct); 1623 return; 1624 } 1625 isp_destroy_handle(isp, handle); 1626 resid = data_requested = PISP_PCMD(ccb)->datalen; 1627 isp_free_pcmd(isp, ccb); 1628 1629 bus = XS_CHANNEL(ccb); 1630 atp = isp_find_atpd(isp, bus, ct->ct_rxid); 1631 if (atp == NULL) { 1632 /* 1633 * XXX: isp_clear_commands() generates fake CTIO with zero 1634 * ct_rxid value, filling only ct_syshandle. Workaround 1635 * that using tag_id from the CCB, pointed by ct_syshandle. 
1636 */ 1637 atp = isp_find_atpd(isp, bus, ccb->csio.tag_id); 1638 } 1639 if (atp == NULL) { 1640 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id); 1641 return; 1642 } 1643 KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero")); 1644 atp->bytes_in_transit -= data_requested; 1645 atp->ctcnt -= 1; 1646 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1647 1648 if (ct->ct_nphdl == CT7_SRR) { 1649 atp->srr_ccb = ccb; 1650 if (atp->srr_notify_rcvd) 1651 isp_handle_srr_start(isp, atp); 1652 return; 1653 } 1654 if (ct->ct_nphdl == CT_HBA_RESET) { 1655 sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) && 1656 (atp->sendst == 0); 1657 failure = CAM_UNREC_HBA_ERROR; 1658 } else { 1659 sentstatus = ct->ct_flags & CT7_SENDSTATUS; 1660 ok = (ct->ct_nphdl == CT7_OK); 1661 notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; 1662 if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) 1663 resid = ct->ct_resid; 1664 } 1665 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct), 1666 notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID"); 1667 if (ok) { 1668 if (data_requested > 0) { 1669 atp->bytes_xfered += data_requested - resid; 1670 ccb->csio.resid = ccb->csio.dxfer_len - 1671 (data_requested - resid); 1672 } 1673 if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) 1674 ccb->ccb_h.status |= CAM_SENT_SENSE; 1675 ccb->ccb_h.status |= CAM_REQ_CMP; 1676 } else { 1677 notify_cam = 1; 1678 if (failure == CAM_UNREC_HBA_ERROR) 1679 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; 1680 else 1681 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 1682 } 1683 atp->state = ATPD_STATE_PDON; 1684 1685 /* 1686 * We never *not* notify CAM when there has been any error (ok == 0), 1687 * so we never need to do an ATIO putback if we're not notifying CAM. 1688 */ 1689 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)", 1690 (sentstatus)? " FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0); 1691 if (notify_cam == 0) { 1692 if (atp->sendst) { 1693 isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE); 1694 } 1695 return; 1696 } 1697 1698 /* 1699 * We are done with this ATIO if we successfully sent status. 1700 * In all other cases expect either another CTIO or XPT_ABORT. 1701 */ 1702 if (ok && sentstatus) 1703 isp_put_atpd(isp, bus, atp); 1704 1705 /* 1706 * We're telling CAM we're done with this CTIO transaction. 1707 * 1708 * 24XX cards never need an ATIO put back. 1709 */ 1710 isp_complete_ctio(isp, ccb); 1711 } 1712 1713 static int 1714 isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp) 1715 { 1716 ct7_entry_t local, *cto = &local; 1717 1718 if (isp->isp_state != ISP_RUNSTATE) { 1719 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL); 1720 return (0); 1721 } 1722 1723 /* 1724 * This case is for a Task Management Function, which shows up as an ATIO7 entry. 
1725 */ 1726 if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) { 1727 at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved; 1728 fcportdb_t *lp; 1729 uint32_t sid; 1730 uint16_t nphdl; 1731 1732 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; 1733 if (isp_find_pdb_by_portid(isp, mp->nt_channel, sid, &lp)) { 1734 nphdl = lp->handle; 1735 } else { 1736 nphdl = NIL_HANDLE; 1737 } 1738 ISP_MEMZERO(cto, sizeof (ct7_entry_t)); 1739 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; 1740 cto->ct_header.rqs_entry_count = 1; 1741 cto->ct_nphdl = nphdl; 1742 cto->ct_rxid = aep->at_rxid; 1743 cto->ct_vpidx = mp->nt_channel; 1744 cto->ct_iid_lo = sid; 1745 cto->ct_iid_hi = sid >> 16; 1746 cto->ct_oxid = aep->at_hdr.ox_id; 1747 cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1; 1748 cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT; 1749 if (rsp != 0) { 1750 cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8); 1751 cto->rsp.m1.ct_resplen = 4; 1752 ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp)); 1753 cto->rsp.m1.ct_resp[0] = rsp & 0xff; 1754 cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff; 1755 cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff; 1756 cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff; 1757 } 1758 return (isp_target_put_entry(isp, &cto)); 1759 } 1760 1761 /* 1762 * This case is for a responding to an ABTS frame 1763 */ 1764 if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { 1765 1766 /* 1767 * Overload nt_need_ack here to mark whether we've terminated the associated command. 1768 */ 1769 if (mp->nt_need_ack) { 1770 abts_t *abts = (abts_t *)mp->nt_lreserved; 1771 1772 ISP_MEMZERO(cto, sizeof (ct7_entry_t)); 1773 isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task); 1774 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; 1775 cto->ct_header.rqs_entry_count = 1; 1776 cto->ct_nphdl = mp->nt_nphdl; 1777 cto->ct_rxid = abts->abts_rxid_task; 1778 cto->ct_iid_lo = mp->nt_sid; 1779 cto->ct_iid_hi = mp->nt_sid >> 16; 1780 cto->ct_oxid = abts->abts_ox_id; 1781 cto->ct_vpidx = mp->nt_channel; 1782 cto->ct_flags = CT7_NOACK|CT7_TERMINATE; 1783 if (isp_target_put_entry(isp, cto)) { 1784 return (ENOMEM); 1785 } 1786 mp->nt_need_ack = 0; 1787 } 1788 return (isp_acknak_abts(isp, mp->nt_lreserved, 0)); 1789 } 1790 1791 /* 1792 * General purpose acknowledgement 1793 */ 1794 if (mp->nt_need_ack) { 1795 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL); 1796 /* 1797 * Don't need to use the guaranteed send because the caller can retry 1798 */ 1799 return (isp_notify_ack(isp, mp->nt_lreserved)); 1800 } 1801 return (0); 1802 } 1803 1804 /* 1805 * Handle task management functions. 1806 * 1807 * We show up here with a notify structure filled out. 
1808 * 1809 * The nt_lreserved tag points to the original queue entry 1810 */ 1811 static void 1812 isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify) 1813 { 1814 tstate_t *tptr; 1815 fcportdb_t *lp; 1816 struct ccb_immediate_notify *inot; 1817 inot_private_data_t *ntp = NULL; 1818 atio_private_data_t *atp; 1819 lun_id_t lun; 1820 1821 isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun %jx", __func__, notify->nt_ncode, 1822 notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun); 1823 if (notify->nt_lun == LUN_ANY) { 1824 if (notify->nt_tagval == TAG_ANY) { 1825 lun = CAM_LUN_WILDCARD; 1826 } else { 1827 atp = isp_find_atpd(isp, notify->nt_channel, 1828 notify->nt_tagval & 0xffffffff); 1829 lun = atp ? atp->lun : CAM_LUN_WILDCARD; 1830 } 1831 } else { 1832 lun = notify->nt_lun; 1833 } 1834 tptr = get_lun_statep(isp, notify->nt_channel, lun); 1835 if (tptr == NULL) { 1836 tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD); 1837 if (tptr == NULL) { 1838 isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); 1839 goto bad; 1840 } 1841 } 1842 inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots); 1843 if (inot == NULL) { 1844 isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); 1845 goto bad; 1846 } 1847 1848 inot->ccb_h.target_id = ISP_MAX_TARGETS(isp); 1849 inot->ccb_h.target_lun = lun; 1850 if (isp_find_pdb_by_portid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 && 1851 isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) { 1852 inot->initiator_id = CAM_TARGET_WILDCARD; 1853 } else { 1854 inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp); 1855 } 1856 inot->seq_id = notify->nt_tagval; 1857 inot->tag_id = notify->nt_tagval >> 32; 1858 1859 switch (notify->nt_ncode) { 1860 case NT_ABORT_TASK: 1861 isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, inot->tag_id); 1862 inot->arg = MSG_ABORT_TASK; 1863 break; 1864 case NT_ABORT_TASK_SET: 1865 isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, TAG_ANY); 1866 inot->arg = MSG_ABORT_TASK_SET; 1867 break; 1868 case NT_CLEAR_ACA: 1869 inot->arg = MSG_CLEAR_ACA; 1870 break; 1871 case NT_CLEAR_TASK_SET: 1872 inot->arg = MSG_CLEAR_TASK_SET; 1873 break; 1874 case NT_LUN_RESET: 1875 inot->arg = MSG_LOGICAL_UNIT_RESET; 1876 break; 1877 case NT_TARGET_RESET: 1878 inot->arg = MSG_TARGET_RESET; 1879 break; 1880 case NT_QUERY_TASK_SET: 1881 inot->arg = MSG_QUERY_TASK_SET; 1882 break; 1883 case NT_QUERY_ASYNC_EVENT: 1884 inot->arg = MSG_QUERY_ASYNC_EVENT; 1885 break; 1886 default: 1887 isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun); 1888 goto bad; 1889 } 1890 1891 ntp = isp_get_ntpd(isp, notify->nt_channel); 1892 if (ntp == NULL) { 1893 isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__); 1894 goto bad; 1895 } 1896 ISP_MEMCPY(&ntp->nt, notify, sizeof (isp_notify_t)); 1897 if (notify->nt_lreserved) { 1898 ISP_MEMCPY(&ntp->data, notify->nt_lreserved, QENTRY_LEN); 1899 ntp->nt.nt_lreserved = &ntp->data; 1900 } 1901 ntp->seq_id = notify->nt_tagval; 1902 ntp->tag_id = notify->nt_tagval >> 32; 1903 1904 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); 1905 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "Take FREE INOT\n"); 1906 
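/*
 * Complete the INOT to CAM as a received message.  The consumer is
 * expected to answer with an XPT_NOTIFY_ACKNOWLEDGE carrying the same
 * tag_id/seq_id, which is matched against the private data saved above
 * and acknowledged back to the firmware from isp_action().
 */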
inot->ccb_h.status = CAM_MESSAGE_RECV; 1907 xpt_done((union ccb *)inot); 1908 return; 1909 bad: 1910 if (notify->nt_need_ack) { 1911 if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { 1912 if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) { 1913 isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK"); 1914 } 1915 } else { 1916 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved); 1917 } 1918 } 1919 } 1920 1921 static void 1922 isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id) 1923 { 1924 struct isp_fc *fc = ISP_FC_PC(isp, chan); 1925 atio_private_data_t *atp; 1926 inot_private_data_t *ntp, *tmp; 1927 uint32_t this_tag_id; 1928 1929 /* 1930 * First, clean any commands pending restart 1931 */ 1932 STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) { 1933 this_tag_id = ((at7_entry_t *)ntp->data)->at_rxid; 1934 if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) { 1935 isp_endcmd(isp, ntp->data, NIL_HANDLE, chan, 1936 ECMD_TERMINATE, 0); 1937 isp_put_ntpd(isp, chan, ntp); 1938 STAILQ_REMOVE(&tptr->restart_queue, ntp, 1939 inot_private_data, next); 1940 } 1941 } 1942 1943 /* 1944 * Now mark other ones dead as well. 1945 */ 1946 for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) { 1947 if (atp->lun != tptr->ts_lun) 1948 continue; 1949 if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id) 1950 atp->dead = 1; 1951 } 1952 } 1953 #endif 1954 1955 static void 1956 isp_poll(struct cam_sim *sim) 1957 { 1958 ispsoftc_t *isp = cam_sim_softc(sim); 1959 1960 ISP_RUN_ISR(isp); 1961 } 1962 1963 1964 static void 1965 isp_watchdog(void *arg) 1966 { 1967 struct ccb_scsiio *xs = arg; 1968 ispsoftc_t *isp; 1969 uint32_t ohandle = ISP_HANDLE_FREE, handle; 1970 1971 isp = XS_ISP(xs); 1972 1973 handle = isp_find_handle(isp, xs); 1974 1975 /* 1976 * Hand crank the interrupt code just to be sure the command isn't stuck somewhere. 1977 */ 1978 if (handle != ISP_HANDLE_FREE) { 1979 ISP_RUN_ISR(isp); 1980 ohandle = handle; 1981 handle = isp_find_handle(isp, xs); 1982 } 1983 if (handle != ISP_HANDLE_FREE) { 1984 /* 1985 * Try and make sure the command is really dead before 1986 * we release the handle (and DMA resources) for reuse. 1987 * 1988 * If we are successful in aborting the command then 1989 * we're done here because we'll get the command returned 1990 * back separately. 1991 */ 1992 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) { 1993 return; 1994 } 1995 1996 /* 1997 * Note that after calling the above, the command may in 1998 * fact have been completed. 1999 */ 2000 xs = isp_find_xs(isp, handle); 2001 2002 /* 2003 * If the command no longer exists, then we won't 2004 * be able to find the xs again with this handle. 2005 */ 2006 if (xs == NULL) { 2007 return; 2008 } 2009 2010 /* 2011 * After this point, the command is really dead. 
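 * We release its DMA resources and handle ourselves, mark it with
 * CAM_CMD_TIMEOUT and complete it, since no response for it will ever
 * come back from the firmware.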
2012 */ 2013 ISP_DMAFREE(isp, xs); 2014 isp_destroy_handle(isp, handle); 2015 isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle); 2016 XS_SETERR(xs, CAM_CMD_TIMEOUT); 2017 isp_done(xs); 2018 } else { 2019 if (ohandle != ISP_HANDLE_FREE) { 2020 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle); 2021 } else { 2022 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__); 2023 } 2024 } 2025 } 2026 2027 static void 2028 isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) 2029 { 2030 union ccb *ccb; 2031 struct isp_fc *fc = ISP_FC_PC(isp, chan); 2032 2033 /* 2034 * Allocate a CCB, create a wildcard path for this target and schedule a rescan. 2035 */ 2036 ccb = xpt_alloc_ccb_nowait(); 2037 if (ccb == NULL) { 2038 isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan); 2039 return; 2040 } 2041 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim), 2042 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2043 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); 2044 xpt_free_ccb(ccb); 2045 return; 2046 } 2047 xpt_rescan(ccb); 2048 } 2049 2050 static void 2051 isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) 2052 { 2053 struct cam_path *tp; 2054 struct isp_fc *fc = ISP_FC_PC(isp, chan); 2055 2056 if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 2057 xpt_async(AC_LOST_DEVICE, tp, NULL); 2058 xpt_free_path(tp); 2059 } 2060 } 2061 2062 /* 2063 * Gone Device Timer Function- when we have decided that a device has gone 2064 * away, we wait a specific period of time prior to telling the OS it has 2065 * gone away. 2066 * 2067 * This timer function fires once a second and then scans the port database 2068 * for devices that are marked dead but still have a virtual target assigned. 2069 * We decrement a counter for that port database entry, and when it hits zero, 2070 * we tell the OS the device has gone away. 
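 *
 * With the default gone device time of 30 seconds and the callout
 * rearmed every second while zombies remain, a port is therefore
 * reported lost to CAM roughly half a minute after it is marked gone,
 * unless it returns first.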
2071 */ 2072 static void 2073 isp_gdt(void *arg) 2074 { 2075 struct isp_fc *fc = arg; 2076 taskqueue_enqueue(taskqueue_thread, &fc->gtask); 2077 } 2078 2079 static void 2080 isp_gdt_task(void *arg, int pending) 2081 { 2082 struct isp_fc *fc = arg; 2083 ispsoftc_t *isp = fc->isp; 2084 int chan = fc - ISP_FC_PC(isp, 0); 2085 fcportdb_t *lp; 2086 struct ac_contract ac; 2087 struct ac_device_changed *adc; 2088 int dbidx, more_to_do = 0; 2089 2090 ISP_LOCK(isp); 2091 isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan); 2092 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2093 lp = &FCPARAM(isp, chan)->portdb[dbidx]; 2094 2095 if (lp->state != FC_PORTDB_STATE_ZOMBIE) { 2096 continue; 2097 } 2098 if (lp->gone_timer != 0) { 2099 lp->gone_timer -= 1; 2100 more_to_do++; 2101 continue; 2102 } 2103 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout"); 2104 if (lp->is_target) { 2105 lp->is_target = 0; 2106 isp_make_gone(isp, lp, chan, dbidx); 2107 } 2108 if (lp->is_initiator) { 2109 lp->is_initiator = 0; 2110 ac.contract_number = AC_CONTRACT_DEV_CHG; 2111 adc = (struct ac_device_changed *) ac.contract_data; 2112 adc->wwpn = lp->port_wwn; 2113 adc->port = lp->portid; 2114 adc->target = dbidx; 2115 adc->arrived = 0; 2116 xpt_async(AC_CONTRACT, fc->path, &ac); 2117 } 2118 lp->state = FC_PORTDB_STATE_NIL; 2119 } 2120 if (fc->ready) { 2121 if (more_to_do) { 2122 callout_reset(&fc->gdt, hz, isp_gdt, fc); 2123 } else { 2124 callout_deactivate(&fc->gdt); 2125 isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime); 2126 } 2127 } 2128 ISP_UNLOCK(isp); 2129 } 2130 2131 /* 2132 * When loop goes down we remember the time and freeze CAM command queue. 2133 * During some time period we are trying to reprobe the loop. But if we 2134 * fail, we tell the OS that devices have gone away and drop the freeze. 2135 * 2136 * We don't clear the devices out of our port database because, when loop 2137 * come back up, we have to do some actual cleanup with the chip at that 2138 * point (implicit PLOGO, e.g., to get the chip's port database state right). 2139 */ 2140 static void 2141 isp_loop_changed(ispsoftc_t *isp, int chan) 2142 { 2143 fcparam *fcp = FCPARAM(isp, chan); 2144 struct isp_fc *fc = ISP_FC_PC(isp, chan); 2145 2146 if (fc->loop_down_time) 2147 return; 2148 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop changed", chan); 2149 if (fcp->role & ISP_ROLE_INITIATOR) 2150 isp_freeze_loopdown(isp, chan); 2151 fc->loop_down_time = time_uptime; 2152 wakeup(fc); 2153 } 2154 2155 static void 2156 isp_loop_up(ispsoftc_t *isp, int chan) 2157 { 2158 struct isp_fc *fc = ISP_FC_PC(isp, chan); 2159 2160 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is up", chan); 2161 fc->loop_seen_once = 1; 2162 fc->loop_down_time = 0; 2163 isp_unfreeze_loopdown(isp, chan); 2164 } 2165 2166 static void 2167 isp_loop_dead(ispsoftc_t *isp, int chan) 2168 { 2169 fcparam *fcp = FCPARAM(isp, chan); 2170 struct isp_fc *fc = ISP_FC_PC(isp, chan); 2171 fcportdb_t *lp; 2172 struct ac_contract ac; 2173 struct ac_device_changed *adc; 2174 int dbidx, i; 2175 2176 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is dead", chan); 2177 2178 /* 2179 * Notify to the OS all targets who we now consider have departed. 
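 *
 * For each such port we first dispose of any initiator commands still
 * outstanding to it, aborting them or, failing that, completing them
 * with a bus reset error; we then post AC_LOST_DEVICE for its target
 * role and an AC_CONTRACT device-changed advisory for its initiator
 * role, just as the gone device timer does.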
2180 */ 2181 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { 2182 lp = &fcp->portdb[dbidx]; 2183 2184 if (lp->state == FC_PORTDB_STATE_NIL) 2185 continue; 2186 2187 for (i = 0; i < ISP_HANDLE_NUM(isp); i++) { 2188 struct ccb_scsiio *xs; 2189 2190 if (ISP_H2HT(isp->isp_xflist[i].handle) != ISP_HANDLE_INITIATOR) { 2191 continue; 2192 } 2193 if ((xs = isp->isp_xflist[i].cmd) == NULL) { 2194 continue; 2195 } 2196 if (dbidx != XS_TGT(xs)) { 2197 continue; 2198 } 2199 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout", 2200 isp->isp_xflist[i].handle, chan, XS_TGT(xs), 2201 (uintmax_t)XS_LUN(xs)); 2202 2203 /* 2204 * Just like in isp_watchdog, abort the outstanding 2205 * command or immediately free its resources if it is 2206 * not active 2207 */ 2208 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) { 2209 continue; 2210 } 2211 2212 ISP_DMAFREE(isp, xs); 2213 isp_destroy_handle(isp, isp->isp_xflist[i].handle); 2214 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed", 2215 isp->isp_xflist[i].handle, chan, XS_TGT(xs), 2216 (uintmax_t)XS_LUN(xs)); 2217 XS_SETERR(xs, HBA_BUSRESET); 2218 isp_done(xs); 2219 } 2220 2221 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout"); 2222 if (lp->is_target) { 2223 lp->is_target = 0; 2224 isp_make_gone(isp, lp, chan, dbidx); 2225 } 2226 if (lp->is_initiator) { 2227 lp->is_initiator = 0; 2228 ac.contract_number = AC_CONTRACT_DEV_CHG; 2229 adc = (struct ac_device_changed *) ac.contract_data; 2230 adc->wwpn = lp->port_wwn; 2231 adc->port = lp->portid; 2232 adc->target = dbidx; 2233 adc->arrived = 0; 2234 xpt_async(AC_CONTRACT, fc->path, &ac); 2235 } 2236 } 2237 2238 isp_unfreeze_loopdown(isp, chan); 2239 fc->loop_down_time = 0; 2240 } 2241 2242 static void 2243 isp_kthread(void *arg) 2244 { 2245 struct isp_fc *fc = arg; 2246 ispsoftc_t *isp = fc->isp; 2247 int chan = fc - ISP_FC_PC(isp, 0); 2248 int slp = 0, d; 2249 int lb, lim; 2250 2251 ISP_LOCK(isp); 2252 while (isp->isp_osinfo.is_exiting == 0) { 2253 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, 2254 "Chan %d Checking FC state", chan); 2255 lb = isp_fc_runstate(isp, chan, 250000); 2256 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, 2257 "Chan %d FC got to %s state", chan, 2258 isp_fc_loop_statename(lb)); 2259 2260 /* 2261 * Our action is different based upon whether we're supporting 2262 * Initiator mode or not. If we are, we might freeze the simq 2263 * when loop is down and set all sorts of different delays to 2264 * check again. 2265 * 2266 * If not, we simply just wait for loop to come up. 2267 */ 2268 if (lb == LOOP_READY || lb < 0) { 2269 slp = 0; 2270 } else { 2271 /* 2272 * If we've never seen loop up and we've waited longer 2273 * than quickboot time, or we've seen loop up but we've 2274 * waited longer than loop_down_limit, give up and go 2275 * to sleep until loop comes up. 
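 *
 * Until that limit is reached the recheck interval backs off with the
 * time the loop has been down: every second for the first ten seconds,
 * then every 5, 10, 20 and finally every 30 seconds.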
2276 */ 2277 if (fc->loop_seen_once == 0) 2278 lim = isp_quickboot_time; 2279 else 2280 lim = fc->loop_down_limit; 2281 d = time_uptime - fc->loop_down_time; 2282 if (d >= lim) 2283 slp = 0; 2284 else if (d < 10) 2285 slp = 1; 2286 else if (d < 30) 2287 slp = 5; 2288 else if (d < 60) 2289 slp = 10; 2290 else if (d < 120) 2291 slp = 20; 2292 else 2293 slp = 30; 2294 } 2295 2296 if (slp == 0) { 2297 if (lb == LOOP_READY) 2298 isp_loop_up(isp, chan); 2299 else 2300 isp_loop_dead(isp, chan); 2301 } 2302 2303 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, 2304 "Chan %d sleep for %d seconds", chan, slp); 2305 msleep(fc, &isp->isp_lock, PRIBIO, "ispf", slp * hz); 2306 } 2307 fc->num_threads -= 1; 2308 wakeup(&fc->num_threads); 2309 ISP_UNLOCK(isp); 2310 kthread_exit(); 2311 } 2312 2313 #ifdef ISP_TARGET_MODE 2314 static void 2315 isp_abort_atio(ispsoftc_t *isp, union ccb *ccb) 2316 { 2317 atio_private_data_t *atp; 2318 union ccb *accb = ccb->cab.abort_ccb; 2319 struct ccb_hdr *sccb; 2320 tstate_t *tptr; 2321 2322 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb)); 2323 if (tptr != NULL) { 2324 /* Search for the ATIO among queueued. */ 2325 SLIST_FOREACH(sccb, &tptr->atios, sim_links.sle) { 2326 if (sccb != &accb->ccb_h) 2327 continue; 2328 SLIST_REMOVE(&tptr->atios, sccb, ccb_hdr, sim_links.sle); 2329 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path, 2330 "Abort FREE ATIO\n"); 2331 accb->ccb_h.status = CAM_REQ_ABORTED; 2332 xpt_done(accb); 2333 ccb->ccb_h.status = CAM_REQ_CMP; 2334 return; 2335 } 2336 } 2337 2338 /* Search for the ATIO among running. */ 2339 atp = isp_find_atpd(isp, XS_CHANNEL(accb), accb->atio.tag_id); 2340 if (atp != NULL) { 2341 /* Send TERMINATE to firmware. */ 2342 if (!atp->dead) { 2343 uint8_t storage[QENTRY_LEN]; 2344 ct7_entry_t *cto = (ct7_entry_t *) storage; 2345 2346 ISP_MEMZERO(cto, sizeof (ct7_entry_t)); 2347 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; 2348 cto->ct_header.rqs_entry_count = 1; 2349 cto->ct_nphdl = atp->nphdl; 2350 cto->ct_rxid = atp->tag; 2351 cto->ct_iid_lo = atp->sid; 2352 cto->ct_iid_hi = atp->sid >> 16; 2353 cto->ct_oxid = atp->oxid; 2354 cto->ct_vpidx = XS_CHANNEL(accb); 2355 cto->ct_flags = CT7_NOACK|CT7_TERMINATE; 2356 isp_target_put_entry(isp, cto); 2357 } 2358 isp_put_atpd(isp, XS_CHANNEL(accb), atp); 2359 ccb->ccb_h.status = CAM_REQ_CMP; 2360 } else { 2361 ccb->ccb_h.status = CAM_UA_ABORT; 2362 } 2363 } 2364 2365 static void 2366 isp_abort_inot(ispsoftc_t *isp, union ccb *ccb) 2367 { 2368 inot_private_data_t *ntp; 2369 union ccb *accb = ccb->cab.abort_ccb; 2370 struct ccb_hdr *sccb; 2371 tstate_t *tptr; 2372 2373 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb)); 2374 if (tptr != NULL) { 2375 /* Search for the INOT among queueued. */ 2376 SLIST_FOREACH(sccb, &tptr->inots, sim_links.sle) { 2377 if (sccb != &accb->ccb_h) 2378 continue; 2379 SLIST_REMOVE(&tptr->inots, sccb, ccb_hdr, sim_links.sle); 2380 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path, 2381 "Abort FREE INOT\n"); 2382 accb->ccb_h.status = CAM_REQ_ABORTED; 2383 xpt_done(accb); 2384 ccb->ccb_h.status = CAM_REQ_CMP; 2385 return; 2386 } 2387 } 2388 2389 /* Search for the INOT among running. 
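 * A running INOT is matched by its (tag_id, seq_id) pair; if it still
 * owes the firmware an acknowledgement, that is issued before the
 * private data is returned to the free pool.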
*/ 2390 ntp = isp_find_ntpd(isp, XS_CHANNEL(accb), accb->cin1.tag_id, accb->cin1.seq_id); 2391 if (ntp != NULL) { 2392 if (ntp->nt.nt_need_ack) { 2393 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, 2394 ntp->nt.nt_lreserved); 2395 } 2396 isp_put_ntpd(isp, XS_CHANNEL(accb), ntp); 2397 ccb->ccb_h.status = CAM_REQ_CMP; 2398 } else { 2399 ccb->ccb_h.status = CAM_UA_ABORT; 2400 return; 2401 } 2402 } 2403 #endif 2404 2405 static void 2406 isp_action(struct cam_sim *sim, union ccb *ccb) 2407 { 2408 int bus, tgt, error; 2409 ispsoftc_t *isp; 2410 fcparam *fcp; 2411 struct ccb_trans_settings *cts; 2412 sbintime_t ts; 2413 2414 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); 2415 2416 isp = (ispsoftc_t *)cam_sim_softc(sim); 2417 ISP_ASSERT_LOCKED(isp); 2418 bus = cam_sim_bus(sim); 2419 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2420 ISP_PCMD(ccb) = NULL; 2421 2422 switch (ccb->ccb_h.func_code) { 2423 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2424 /* 2425 * Do a couple of preliminary checks... 2426 */ 2427 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2428 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2429 ccb->ccb_h.status = CAM_REQ_INVALID; 2430 isp_done((struct ccb_scsiio *) ccb); 2431 break; 2432 } 2433 } 2434 #ifdef DIAGNOSTIC 2435 if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) { 2436 xpt_print(ccb->ccb_h.path, "invalid target\n"); 2437 ccb->ccb_h.status = CAM_PATH_INVALID; 2438 } 2439 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2440 xpt_done(ccb); 2441 break; 2442 } 2443 #endif 2444 ccb->csio.scsi_status = SCSI_STATUS_OK; 2445 if (isp_get_pcmd(isp, ccb)) { 2446 isp_prt(isp, ISP_LOGWARN, "out of PCMDs"); 2447 cam_freeze_devq(ccb->ccb_h.path); 2448 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); 2449 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2450 xpt_done(ccb); 2451 break; 2452 } 2453 error = isp_start((XS_T *) ccb); 2454 isp_rq_check_above(isp); 2455 switch (error) { 2456 case 0: 2457 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2458 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) 2459 break; 2460 /* Give firmware extra 10s to handle timeout. */ 2461 ts = SBT_1MS * ccb->ccb_h.timeout + 10 * SBT_1S; 2462 callout_reset_sbt(&PISP_PCMD(ccb)->wdog, ts, 0, 2463 isp_watchdog, ccb, 0); 2464 break; 2465 case CMD_RQLATER: 2466 isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later", 2467 XS_TGT(ccb), (uintmax_t)XS_LUN(ccb)); 2468 cam_freeze_devq(ccb->ccb_h.path); 2469 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); 2470 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2471 isp_free_pcmd(isp, ccb); 2472 xpt_done(ccb); 2473 break; 2474 case CMD_EAGAIN: 2475 isp_free_pcmd(isp, ccb); 2476 cam_freeze_devq(ccb->ccb_h.path); 2477 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0); 2478 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2479 xpt_done(ccb); 2480 break; 2481 case CMD_COMPLETE: 2482 isp_done((struct ccb_scsiio *) ccb); 2483 break; 2484 default: 2485 isp_prt(isp, ISP_LOGERR, "What's this? 
0x%x at %d in file %s", error, __LINE__, __FILE__); 2486 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2487 isp_free_pcmd(isp, ccb); 2488 xpt_done(ccb); 2489 } 2490 break; 2491 2492 #ifdef ISP_TARGET_MODE 2493 case XPT_EN_LUN: /* Enable/Disable LUN as a target */ 2494 if (ccb->cel.enable) { 2495 isp_enable_lun(isp, ccb); 2496 } else { 2497 isp_disable_lun(isp, ccb); 2498 } 2499 break; 2500 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ 2501 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2502 { 2503 tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2504 if (tptr == NULL) { 2505 const char *str; 2506 2507 if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) 2508 str = "XPT_IMMEDIATE_NOTIFY"; 2509 else 2510 str = "XPT_ACCEPT_TARGET_IO"; 2511 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, 2512 "%s: no state pointer found for %s\n", 2513 __func__, str); 2514 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2515 xpt_done(ccb); 2516 break; 2517 } 2518 2519 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2520 ccb->atio.tag_id = 0; 2521 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle); 2522 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, 2523 "Put FREE ATIO\n"); 2524 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { 2525 ccb->cin1.seq_id = ccb->cin1.tag_id = 0; 2526 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle); 2527 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, 2528 "Put FREE INOT\n"); 2529 } 2530 ccb->ccb_h.status = CAM_REQ_INPROG; 2531 break; 2532 } 2533 case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */ 2534 { 2535 inot_private_data_t *ntp; 2536 2537 /* 2538 * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb 2539 * XXX: matches that for the immediate notify, we have to *search* for the notify structure 2540 */ 2541 /* 2542 * All the relevant path information is in the associated immediate notify 2543 */ 2544 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); 2545 ntp = isp_find_ntpd(isp, XS_CHANNEL(ccb), ccb->cna2.tag_id, ccb->cna2.seq_id); 2546 if (ntp == NULL) { 2547 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__, 2548 ccb->cna2.tag_id, ccb->cna2.seq_id); 2549 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2550 xpt_done(ccb); 2551 break; 2552 } 2553 if (isp_handle_platform_target_notify_ack(isp, &ntp->nt, 2554 (ccb->ccb_h.flags & CAM_SEND_STATUS) ? 
ccb->cna2.arg : 0)) { 2555 cam_freeze_devq(ccb->ccb_h.path); 2556 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0); 2557 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2558 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2559 break; 2560 } 2561 isp_put_ntpd(isp, XS_CHANNEL(ccb), ntp); 2562 ccb->ccb_h.status = CAM_REQ_CMP; 2563 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); 2564 xpt_done(ccb); 2565 break; 2566 } 2567 case XPT_CONT_TARGET_IO: 2568 isp_target_start_ctio(isp, ccb, FROM_CAM); 2569 isp_rq_check_above(isp); 2570 break; 2571 #endif 2572 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2573 tgt = ccb->ccb_h.target_id; 2574 tgt |= (bus << 16); 2575 2576 error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt); 2577 if (error) { 2578 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2579 } else { 2580 /* 2581 * If we have a FC device, reset the Command 2582 * Reference Number, because the target will expect 2583 * that we re-start the CRN at 1 after a reset. 2584 */ 2585 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); 2586 2587 ccb->ccb_h.status = CAM_REQ_CMP; 2588 } 2589 xpt_done(ccb); 2590 break; 2591 case XPT_ABORT: /* Abort the specified CCB */ 2592 { 2593 union ccb *accb = ccb->cab.abort_ccb; 2594 switch (accb->ccb_h.func_code) { 2595 #ifdef ISP_TARGET_MODE 2596 case XPT_ACCEPT_TARGET_IO: 2597 isp_abort_atio(isp, ccb); 2598 break; 2599 case XPT_IMMEDIATE_NOTIFY: 2600 isp_abort_inot(isp, ccb); 2601 break; 2602 #endif 2603 case XPT_SCSI_IO: 2604 error = isp_control(isp, ISPCTL_ABORT_CMD, accb); 2605 if (error) { 2606 ccb->ccb_h.status = CAM_UA_ABORT; 2607 } else { 2608 ccb->ccb_h.status = CAM_REQ_CMP; 2609 } 2610 break; 2611 default: 2612 ccb->ccb_h.status = CAM_REQ_INVALID; 2613 break; 2614 } 2615 /* 2616 * This is not a queued CCB, so the caller expects it to be 2617 * complete when control is returned. 
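 * That is why this case just breaks out of the switch rather than
 * calling xpt_done() on the XPT_ABORT CCB itself.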
2618 */ 2619 break; 2620 } 2621 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2622 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2623 cts = &ccb->cts; 2624 if (!IS_CURRENT_SETTINGS(cts)) { 2625 ccb->ccb_h.status = CAM_REQ_INVALID; 2626 xpt_done(ccb); 2627 break; 2628 } 2629 ccb->ccb_h.status = CAM_REQ_CMP; 2630 xpt_done(ccb); 2631 break; 2632 case XPT_GET_TRAN_SETTINGS: 2633 { 2634 struct ccb_trans_settings_scsi *scsi; 2635 struct ccb_trans_settings_fc *fc; 2636 2637 cts = &ccb->cts; 2638 scsi = &cts->proto_specific.scsi; 2639 fc = &cts->xport_specific.fc; 2640 tgt = cts->ccb_h.target_id; 2641 fcp = FCPARAM(isp, bus); 2642 2643 cts->protocol = PROTO_SCSI; 2644 cts->protocol_version = SCSI_REV_2; 2645 cts->transport = XPORT_FC; 2646 cts->transport_version = 0; 2647 2648 scsi->valid = CTS_SCSI_VALID_TQ; 2649 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 2650 fc->valid = CTS_FC_VALID_SPEED; 2651 fc->bitrate = fcp->isp_gbspeed * 100000; 2652 if (tgt < MAX_FC_TARG) { 2653 fcportdb_t *lp = &fcp->portdb[tgt]; 2654 fc->wwnn = lp->node_wwn; 2655 fc->wwpn = lp->port_wwn; 2656 fc->port = lp->portid; 2657 fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2658 } 2659 ccb->ccb_h.status = CAM_REQ_CMP; 2660 xpt_done(ccb); 2661 break; 2662 } 2663 case XPT_CALC_GEOMETRY: 2664 cam_calc_geometry(&ccb->ccg, 1); 2665 xpt_done(ccb); 2666 break; 2667 2668 case XPT_RESET_BUS: /* Reset the specified bus */ 2669 error = isp_control(isp, ISPCTL_RESET_BUS, bus); 2670 if (error) { 2671 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2672 xpt_done(ccb); 2673 break; 2674 } 2675 if (bootverbose) { 2676 xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus); 2677 } 2678 xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0); 2679 ccb->ccb_h.status = CAM_REQ_CMP; 2680 xpt_done(ccb); 2681 break; 2682 2683 case XPT_TERM_IO: /* Terminate the I/O process */ 2684 ccb->ccb_h.status = CAM_REQ_INVALID; 2685 xpt_done(ccb); 2686 break; 2687 2688 case XPT_SET_SIM_KNOB: /* Set SIM knobs */ 2689 { 2690 struct ccb_sim_knob *kp = &ccb->knob; 2691 fcparam *fcp = FCPARAM(isp, bus); 2692 2693 if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) { 2694 fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn; 2695 fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn; 2696 isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn); 2697 } 2698 ccb->ccb_h.status = CAM_REQ_CMP; 2699 if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) { 2700 int rchange = 0; 2701 int newrole = 0; 2702 2703 switch (kp->xport_specific.fc.role) { 2704 case KNOB_ROLE_NONE: 2705 if (fcp->role != ISP_ROLE_NONE) { 2706 rchange = 1; 2707 newrole = ISP_ROLE_NONE; 2708 } 2709 break; 2710 case KNOB_ROLE_TARGET: 2711 if (fcp->role != ISP_ROLE_TARGET) { 2712 rchange = 1; 2713 newrole = ISP_ROLE_TARGET; 2714 } 2715 break; 2716 case KNOB_ROLE_INITIATOR: 2717 if (fcp->role != ISP_ROLE_INITIATOR) { 2718 rchange = 1; 2719 newrole = ISP_ROLE_INITIATOR; 2720 } 2721 break; 2722 case KNOB_ROLE_BOTH: 2723 if (fcp->role != ISP_ROLE_BOTH) { 2724 rchange = 1; 2725 newrole = ISP_ROLE_BOTH; 2726 } 2727 break; 2728 } 2729 if (rchange) { 2730 ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role on from %d to %d\n", fcp->role, newrole); 2731 if (isp_control(isp, ISPCTL_CHANGE_ROLE, 2732 bus, newrole) != 0) { 2733 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2734 xpt_done(ccb); 2735 break; 2736 } 2737 } 2738 } 2739 xpt_done(ccb); 2740 break; 2741 } 2742 case 
XPT_GET_SIM_KNOB_OLD: /* Get SIM knobs -- compat value */ 2743 case XPT_GET_SIM_KNOB: /* Get SIM knobs */ 2744 { 2745 struct ccb_sim_knob *kp = &ccb->knob; 2746 fcparam *fcp = FCPARAM(isp, bus); 2747 2748 kp->xport_specific.fc.wwnn = fcp->isp_wwnn; 2749 kp->xport_specific.fc.wwpn = fcp->isp_wwpn; 2750 switch (fcp->role) { 2751 case ISP_ROLE_NONE: 2752 kp->xport_specific.fc.role = KNOB_ROLE_NONE; 2753 break; 2754 case ISP_ROLE_TARGET: 2755 kp->xport_specific.fc.role = KNOB_ROLE_TARGET; 2756 break; 2757 case ISP_ROLE_INITIATOR: 2758 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; 2759 break; 2760 case ISP_ROLE_BOTH: 2761 kp->xport_specific.fc.role = KNOB_ROLE_BOTH; 2762 break; 2763 } 2764 kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; 2765 ccb->ccb_h.status = CAM_REQ_CMP; 2766 xpt_done(ccb); 2767 break; 2768 } 2769 case XPT_PATH_INQ: /* Path routing inquiry */ 2770 { 2771 struct ccb_pathinq *cpi = &ccb->cpi; 2772 2773 cpi->version_num = 1; 2774 #ifdef ISP_TARGET_MODE 2775 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2776 #else 2777 cpi->target_sprt = 0; 2778 #endif 2779 cpi->hba_eng_cnt = 0; 2780 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2781 cpi->max_lun = 255; 2782 cpi->bus_id = cam_sim_bus(sim); 2783 cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE; 2784 2785 fcp = FCPARAM(isp, bus); 2786 2787 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; 2788 cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN; 2789 2790 /* 2791 * Because our loop ID can shift from time to time, 2792 * make our initiator ID out of range of our bus. 2793 */ 2794 cpi->initiator_id = cpi->max_target + 1; 2795 2796 /* 2797 * Set base transfer capabilities for Fibre Channel, for this HBA. 2798 */ 2799 if (IS_25XX(isp)) 2800 cpi->base_transfer_speed = 8000000; 2801 else 2802 cpi->base_transfer_speed = 4000000; 2803 cpi->hba_inquiry = PI_TAG_ABLE; 2804 cpi->transport = XPORT_FC; 2805 cpi->transport_version = 0; 2806 cpi->xport_specific.fc.wwnn = fcp->isp_wwnn; 2807 cpi->xport_specific.fc.wwpn = fcp->isp_wwpn; 2808 cpi->xport_specific.fc.port = fcp->isp_portid; 2809 cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000; 2810 cpi->protocol = PROTO_SCSI; 2811 cpi->protocol_version = SCSI_REV_2; 2812 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2813 strlcpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2814 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2815 cpi->unit_number = cam_sim_unit(sim); 2816 cpi->ccb_h.status = CAM_REQ_CMP; 2817 xpt_done(ccb); 2818 break; 2819 } 2820 default: 2821 ccb->ccb_h.status = CAM_REQ_INVALID; 2822 xpt_done(ccb); 2823 break; 2824 } 2825 } 2826 2827 void 2828 isp_done(XS_T *sccb) 2829 { 2830 ispsoftc_t *isp = XS_ISP(sccb); 2831 uint32_t status; 2832 2833 if (XS_NOERR(sccb)) 2834 XS_SETERR(sccb, CAM_REQ_CMP); 2835 2836 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) { 2837 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2838 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2839 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2840 } else { 2841 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2842 } 2843 } 2844 2845 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2846 status = sccb->ccb_h.status & CAM_STATUS_MASK; 2847 if (status != CAM_REQ_CMP && 2848 (sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2849 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2850 xpt_freeze_devq(sccb->ccb_h.path, 1); 2851 } 2852 2853 if (ISP_PCMD(sccb)) { 2854 if (callout_active(&PISP_PCMD(sccb)->wdog)) 2855 
callout_stop(&PISP_PCMD(sccb)->wdog); 2856 isp_free_pcmd(isp, (union ccb *) sccb); 2857 } 2858 isp_rq_check_below(isp); 2859 xpt_done((union ccb *) sccb); 2860 } 2861 2862 void 2863 isp_async(ispsoftc_t *isp, ispasync_t cmd, ...) 2864 { 2865 int bus; 2866 static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s"; 2867 char buf[64]; 2868 char *msg = NULL; 2869 target_id_t tgt = 0; 2870 fcportdb_t *lp; 2871 struct isp_fc *fc; 2872 struct ac_contract ac; 2873 struct ac_device_changed *adc; 2874 va_list ap; 2875 2876 switch (cmd) { 2877 case ISPASYNC_LOOP_RESET: 2878 { 2879 uint16_t lipp; 2880 fcparam *fcp; 2881 va_start(ap, cmd); 2882 bus = va_arg(ap, int); 2883 va_end(ap); 2884 2885 lipp = ISP_READ(isp, OUTMAILBOX1); 2886 fcp = FCPARAM(isp, bus); 2887 2888 isp_prt(isp, ISP_LOGINFO, "Chan %d LOOP Reset, LIP primitive %x", bus, lipp); 2889 /* 2890 * Per FCP-4, a Reset LIP should result in a CRN reset. Other 2891 * LIPs and loop up/down events should never reset the CRN. For 2892 * an as of yet unknown reason, 24xx series cards (and 2893 * potentially others) can interrupt with a LIP Reset status 2894 * when no LIP reset came down the wire. Additionally, the LIP 2895 * primitive accompanying this status would not be a valid LIP 2896 * Reset primitive, but some variation of an invalid AL_PA 2897 * LIP. As a result, we have to verify the AL_PD in the LIP 2898 * addresses our port before blindly resetting. 2899 */ 2900 if (FCP_IS_DEST_ALPD(fcp, (lipp & 0x00FF))) 2901 isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0); 2902 isp_loop_changed(isp, bus); 2903 break; 2904 } 2905 case ISPASYNC_LIP: 2906 if (msg == NULL) 2907 msg = "LIP Received"; 2908 /* FALLTHROUGH */ 2909 case ISPASYNC_LOOP_DOWN: 2910 if (msg == NULL) 2911 msg = "LOOP Down"; 2912 /* FALLTHROUGH */ 2913 case ISPASYNC_LOOP_UP: 2914 if (msg == NULL) 2915 msg = "LOOP Up"; 2916 va_start(ap, cmd); 2917 bus = va_arg(ap, int); 2918 va_end(ap); 2919 isp_loop_changed(isp, bus); 2920 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg); 2921 break; 2922 case ISPASYNC_DEV_ARRIVED: 2923 va_start(ap, cmd); 2924 bus = va_arg(ap, int); 2925 lp = va_arg(ap, fcportdb_t *); 2926 va_end(ap); 2927 fc = ISP_FC_PC(isp, bus); 2928 tgt = FC_PORTDB_TGT(isp, bus, lp); 2929 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); 2930 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived"); 2931 if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && 2932 (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) { 2933 lp->is_target = 1; 2934 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); 2935 isp_make_here(isp, lp, bus, tgt); 2936 } 2937 if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && 2938 (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) { 2939 lp->is_initiator = 1; 2940 ac.contract_number = AC_CONTRACT_DEV_CHG; 2941 adc = (struct ac_device_changed *) ac.contract_data; 2942 adc->wwpn = lp->port_wwn; 2943 adc->port = lp->portid; 2944 adc->target = tgt; 2945 adc->arrived = 1; 2946 xpt_async(AC_CONTRACT, fc->path, &ac); 2947 } 2948 break; 2949 case ISPASYNC_DEV_CHANGED: 2950 case ISPASYNC_DEV_STAYED: 2951 { 2952 int crn_reset_done; 2953 2954 crn_reset_done = 0; 2955 va_start(ap, cmd); 2956 bus = va_arg(ap, int); 2957 lp = va_arg(ap, fcportdb_t *); 2958 va_end(ap); 2959 fc = ISP_FC_PC(isp, bus); 2960 tgt = FC_PORTDB_TGT(isp, bus, lp); 2961 isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3); 2962 if (cmd == ISPASYNC_DEV_CHANGED) 2963 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, 
lp->handle, buf, "changed"); 2964 else 2965 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed"); 2966 2967 if (lp->is_target != 2968 ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && 2969 (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) { 2970 lp->is_target = !lp->is_target; 2971 if (lp->is_target) { 2972 if (cmd == ISPASYNC_DEV_CHANGED) { 2973 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); 2974 crn_reset_done = 1; 2975 } 2976 isp_make_here(isp, lp, bus, tgt); 2977 } else { 2978 isp_make_gone(isp, lp, bus, tgt); 2979 if (cmd == ISPASYNC_DEV_CHANGED) { 2980 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); 2981 crn_reset_done = 1; 2982 } 2983 } 2984 } 2985 if (lp->is_initiator != 2986 ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && 2987 (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) { 2988 lp->is_initiator = !lp->is_initiator; 2989 ac.contract_number = AC_CONTRACT_DEV_CHG; 2990 adc = (struct ac_device_changed *) ac.contract_data; 2991 adc->wwpn = lp->port_wwn; 2992 adc->port = lp->portid; 2993 adc->target = tgt; 2994 adc->arrived = lp->is_initiator; 2995 xpt_async(AC_CONTRACT, fc->path, &ac); 2996 } 2997 2998 if ((cmd == ISPASYNC_DEV_CHANGED) && 2999 (crn_reset_done == 0)) 3000 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); 3001 3002 break; 3003 } 3004 case ISPASYNC_DEV_GONE: 3005 va_start(ap, cmd); 3006 bus = va_arg(ap, int); 3007 lp = va_arg(ap, fcportdb_t *); 3008 va_end(ap); 3009 fc = ISP_FC_PC(isp, bus); 3010 tgt = FC_PORTDB_TGT(isp, bus, lp); 3011 /* 3012 * If this has a virtual target or initiator set the isp_gdt 3013 * timer running on it to delay its departure. 3014 */ 3015 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); 3016 if (lp->is_target || lp->is_initiator) { 3017 lp->state = FC_PORTDB_STATE_ZOMBIE; 3018 lp->gone_timer = fc->gone_device_time; 3019 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie"); 3020 if (fc->ready && !callout_active(&fc->gdt)) { 3021 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime); 3022 callout_reset(&fc->gdt, hz, isp_gdt, fc); 3023 } 3024 break; 3025 } 3026 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone"); 3027 break; 3028 case ISPASYNC_CHANGE_NOTIFY: 3029 { 3030 char *msg; 3031 int evt, nphdl, nlstate, portid, reason; 3032 3033 va_start(ap, cmd); 3034 bus = va_arg(ap, int); 3035 evt = va_arg(ap, int); 3036 if (evt == ISPASYNC_CHANGE_PDB) { 3037 nphdl = va_arg(ap, int); 3038 nlstate = va_arg(ap, int); 3039 reason = va_arg(ap, int); 3040 } else if (evt == ISPASYNC_CHANGE_SNS) { 3041 portid = va_arg(ap, int); 3042 } else { 3043 nphdl = NIL_HANDLE; 3044 nlstate = reason = 0; 3045 } 3046 va_end(ap); 3047 3048 if (evt == ISPASYNC_CHANGE_PDB) { 3049 int tgt_set = 0; 3050 msg = "Port Database Changed"; 3051 isp_prt(isp, ISP_LOGINFO, 3052 "Chan %d %s (nphdl 0x%x state 0x%x reason 0x%x)", 3053 bus, msg, nphdl, nlstate, reason); 3054 /* 3055 * Port database syncs are not sufficient for 3056 * determining that logins or logouts are done on the 3057 * loop, but this information is directly available from 3058 * the reason code from the incoming mbox. 
We must reset 3059 * the FCP CRN on these events, as required by FCP-4. 3060 */ 3061 switch (reason) { 3062 case PDB24XX_AE_IMPL_LOGO_1: 3063 case PDB24XX_AE_IMPL_LOGO_2: 3064 case PDB24XX_AE_IMPL_LOGO_3: 3065 case PDB24XX_AE_PLOGI_RCVD: 3066 case PDB24XX_AE_PRLI_RCVD: 3067 case PDB24XX_AE_PRLO_RCVD: 3068 case PDB24XX_AE_LOGO_RCVD: 3069 case PDB24XX_AE_PLOGI_DONE: 3070 case PDB24XX_AE_PRLI_DONE: 3071 /* 3072 * If the event is not global, twiddle tgt and 3073 * tgt_set to nominate only the target 3074 * associated with the nphdl. 3075 */ 3076 if (nphdl != PDB24XX_AE_GLOBAL) { 3077 /* Break if we don't yet have the pdb */ 3078 if (!isp_find_pdb_by_handle(isp, bus, nphdl, &lp)) 3079 break; 3080 tgt = FC_PORTDB_TGT(isp, bus, lp); 3081 tgt_set = 1; 3082 } 3083 isp_fcp_reset_crn(isp, bus, tgt, tgt_set); 3084 break; 3085 default: 3086 break; /* NOP */ 3087 } 3088 } else if (evt == ISPASYNC_CHANGE_SNS) { 3089 msg = "Name Server Database Changed"; 3090 isp_prt(isp, ISP_LOGINFO, "Chan %d %s (PortID 0x%06x)", 3091 bus, msg, portid); 3092 } else { 3093 msg = "Other Change Notify"; 3094 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg); 3095 } 3096 isp_loop_changed(isp, bus); 3097 break; 3098 } 3099 #ifdef ISP_TARGET_MODE 3100 case ISPASYNC_TARGET_NOTIFY: 3101 { 3102 isp_notify_t *notify; 3103 va_start(ap, cmd); 3104 notify = va_arg(ap, isp_notify_t *); 3105 va_end(ap); 3106 switch (notify->nt_ncode) { 3107 case NT_ABORT_TASK: 3108 case NT_ABORT_TASK_SET: 3109 case NT_CLEAR_ACA: 3110 case NT_CLEAR_TASK_SET: 3111 case NT_LUN_RESET: 3112 case NT_TARGET_RESET: 3113 case NT_QUERY_TASK_SET: 3114 case NT_QUERY_ASYNC_EVENT: 3115 /* 3116 * These are task management functions. 3117 */ 3118 isp_handle_platform_target_tmf(isp, notify); 3119 break; 3120 case NT_LIP_RESET: 3121 case NT_LINK_UP: 3122 case NT_LINK_DOWN: 3123 case NT_HBA_RESET: 3124 /* 3125 * No action need be taken here.
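 * Loop and link state transitions are picked up separately through the
 * ISPASYNC_LIP/ISPASYNC_LOOP_UP/ISPASYNC_LOOP_DOWN handling above.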
3126 */ 3127 break; 3128 case NT_SRR: 3129 isp_handle_platform_srr(isp, notify); 3130 break; 3131 default: 3132 isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode); 3133 isp_handle_platform_target_notify_ack(isp, notify, 0); 3134 break; 3135 } 3136 break; 3137 } 3138 case ISPASYNC_TARGET_NOTIFY_ACK: 3139 { 3140 void *inot; 3141 va_start(ap, cmd); 3142 inot = va_arg(ap, void *); 3143 va_end(ap); 3144 if (isp_notify_ack(isp, inot)) { 3145 isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT); 3146 if (tp) { 3147 tp->isp = isp; 3148 memcpy(tp->data, inot, sizeof (tp->data)); 3149 tp->not = tp->data; 3150 callout_init_mtx(&tp->timer, &isp->isp_lock, 0); 3151 callout_reset(&tp->timer, 5, 3152 isp_refire_notify_ack, tp); 3153 } else { 3154 isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire"); 3155 } 3156 } 3157 break; 3158 } 3159 case ISPASYNC_TARGET_ACTION: 3160 { 3161 isphdr_t *hp; 3162 3163 va_start(ap, cmd); 3164 hp = va_arg(ap, isphdr_t *); 3165 va_end(ap); 3166 switch (hp->rqs_entry_type) { 3167 case RQSTYPE_ATIO: 3168 isp_handle_platform_atio7(isp, (at7_entry_t *)hp); 3169 break; 3170 case RQSTYPE_CTIO7: 3171 isp_handle_platform_ctio(isp, (ct7_entry_t *)hp); 3172 break; 3173 default: 3174 isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x", 3175 __func__, hp->rqs_entry_type); 3176 break; 3177 } 3178 break; 3179 } 3180 #endif 3181 case ISPASYNC_FW_CRASH: 3182 { 3183 uint16_t mbox1; 3184 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3185 isp_prt(isp, ISP_LOGERR, "Internal Firmware Error @ RISC Address 0x%x", mbox1); 3186 #if 0 3187 isp_reinit(isp, 1); 3188 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3189 #endif 3190 break; 3191 } 3192 default: 3193 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3194 break; 3195 } 3196 } 3197 3198 uint64_t 3199 isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn) 3200 { 3201 uint64_t seed; 3202 struct isp_fc *fc = ISP_FC_PC(isp, chan); 3203 3204 /* First try to use explicitly configured WWNs. */ 3205 seed = iswwnn ? fc->def_wwnn : fc->def_wwpn; 3206 if (seed) 3207 return (seed); 3208 3209 /* Otherwise try to use WWNs from NVRAM. */ 3210 if (isactive) { 3211 seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram : 3212 FCPARAM(isp, chan)->isp_wwpn_nvram; 3213 if (seed) 3214 return (seed); 3215 } 3216 3217 /* If still no WWNs, try to steal them from the first channel. */ 3218 if (chan > 0) { 3219 seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn : 3220 ISP_FC_PC(isp, 0)->def_wwpn; 3221 if (seed == 0) { 3222 seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram : 3223 FCPARAM(isp, 0)->isp_wwpn_nvram; 3224 } 3225 } 3226 3227 /* If still nothing -- improvise. */ 3228 if (seed == 0) { 3229 seed = 0x400000007F000000ull + device_get_unit(isp->isp_dev); 3230 if (!iswwnn) 3231 seed ^= 0x0100000000000000ULL; 3232 } 3233 3234 /* For additional channels we have to improvise even more. */ 3235 if (!iswwnn && chan > 0) { 3236 /* 3237 * We'll stick our channel number plus one first into bits 3238 * 57..59 and thence into bits 52..55 which allows for 8 bits 3239 * of channel which is enough for our maximum of 255 channels. 3240 */ 3241 seed ^= 0x0100000000000000ULL; 3242 seed ^= ((uint64_t) (chan + 1) & 0xf) << 56; 3243 seed ^= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52; 3244 } 3245 return (seed); 3246 } 3247 3248 void 3249 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 
3250 { 3251 int loc; 3252 char lbuf[200]; 3253 va_list ap; 3254 3255 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3256 return; 3257 } 3258 snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev)); 3259 loc = strlen(lbuf); 3260 va_start(ap, fmt); 3261 vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap); 3262 va_end(ap); 3263 printf("%s\n", lbuf); 3264 } 3265 3266 void 3267 isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...) 3268 { 3269 va_list ap; 3270 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3271 return; 3272 } 3273 xpt_print_path(xs->ccb_h.path); 3274 va_start(ap, fmt); 3275 vprintf(fmt, ap); 3276 va_end(ap); 3277 printf("\n"); 3278 } 3279 3280 uint64_t 3281 isp_nanotime_sub(struct timespec *b, struct timespec *a) 3282 { 3283 uint64_t elapsed; 3284 struct timespec x; 3285 3286 timespecsub(b, a, &x); 3287 elapsed = GET_NANOSEC(&x); 3288 if (elapsed == 0) 3289 elapsed++; 3290 return (elapsed); 3291 } 3292 3293 int 3294 isp_fc_scratch_acquire(ispsoftc_t *isp, int chan) 3295 { 3296 struct isp_fc *fc = ISP_FC_PC(isp, chan); 3297 3298 if (fc->fcbsy) 3299 return (-1); 3300 fc->fcbsy = 1; 3301 return (0); 3302 } 3303 3304 void 3305 isp_platform_intr(void *arg) 3306 { 3307 ispsoftc_t *isp = arg; 3308 3309 ISP_LOCK(isp); 3310 ISP_RUN_ISR(isp); 3311 ISP_UNLOCK(isp); 3312 } 3313 3314 void 3315 isp_platform_intr_resp(void *arg) 3316 { 3317 ispsoftc_t *isp = arg; 3318 3319 ISP_LOCK(isp); 3320 isp_intr_respq(isp); 3321 ISP_UNLOCK(isp); 3322 3323 /* We have handshake enabled, so explicitly complete interrupt */ 3324 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 3325 } 3326 3327 void 3328 isp_platform_intr_atio(void *arg) 3329 { 3330 ispsoftc_t *isp = arg; 3331 3332 ISP_LOCK(isp); 3333 #ifdef ISP_TARGET_MODE 3334 isp_intr_atioq(isp); 3335 #endif 3336 ISP_UNLOCK(isp); 3337 3338 /* We have handshake enabled, so explicitly complete interrupt */ 3339 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 3340 } 3341 3342 typedef struct { 3343 ispsoftc_t *isp; 3344 struct ccb_scsiio *csio; 3345 void *qe; 3346 int error; 3347 } mush_t; 3348 3349 static void 3350 isp_dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 3351 { 3352 mush_t *mp = (mush_t *) arg; 3353 ispsoftc_t *isp= mp->isp; 3354 struct ccb_scsiio *csio = mp->csio; 3355 bus_dmasync_op_t op; 3356 3357 if (error) { 3358 mp->error = error; 3359 return; 3360 } 3361 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^ 3362 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) 3363 op = BUS_DMASYNC_PREREAD; 3364 else 3365 op = BUS_DMASYNC_PREWRITE; 3366 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op); 3367 3368 mp->error = ISP_SEND_CMD(isp, mp->qe, dm_segs, nseg); 3369 if (mp->error) 3370 isp_dmafree(isp, csio); 3371 } 3372 3373 int 3374 isp_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *qe) 3375 { 3376 mush_t mp; 3377 int error; 3378 3379 if (XS_XFRLEN(csio)) { 3380 mp.isp = isp; 3381 mp.csio = csio; 3382 mp.qe = qe; 3383 mp.error = 0; 3384 error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, 3385 (union ccb *)csio, isp_dma2, &mp, BUS_DMA_NOWAIT); 3386 if (error == 0) 3387 error = mp.error; 3388 } else { 3389 error = ISP_SEND_CMD(isp, qe, NULL, 0); 3390 } 3391 switch (error) { 3392 case 0: 3393 case CMD_COMPLETE: 3394 case CMD_EAGAIN: 3395 case CMD_RQLATER: 3396 break; 3397 case ENOMEM: 3398 error = CMD_EAGAIN; 3399 break; 3400 case EINVAL: 3401 case EFBIG: 3402 csio->ccb_h.status = CAM_REQ_INVALID; 3403 
error = CMD_COMPLETE; 3404 break; 3405 default: 3406 csio->ccb_h.status = CAM_UNREC_HBA_ERROR; 3407 error = CMD_COMPLETE; 3408 break; 3409 } 3410 return (error); 3411 } 3412 3413 void 3414 isp_dmafree(ispsoftc_t *isp, struct ccb_scsiio *csio) 3415 { 3416 bus_dmasync_op_t op; 3417 3418 if (XS_XFRLEN(csio) == 0) 3419 return; 3420 3421 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^ 3422 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) 3423 op = BUS_DMASYNC_POSTREAD; 3424 else 3425 op = BUS_DMASYNC_POSTWRITE; 3426 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op); 3427 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); 3428 } 3429 3430 /* 3431 * Reset the command reference number for all LUNs on a specific target 3432 * (needed when a target arrives again) or for all targets on a port 3433 * (needed for events like a LIP). 3434 */ 3435 void 3436 isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set) 3437 { 3438 struct isp_fc *fc = ISP_FC_PC(isp, chan); 3439 struct isp_nexus *nxp; 3440 int i; 3441 3442 if (tgt_set == 0) 3443 isp_prt(isp, ISP_LOGDEBUG0, 3444 "Chan %d resetting CRN on all targets", chan); 3445 else 3446 isp_prt(isp, ISP_LOGDEBUG0, 3447 "Chan %d resetting CRN on target %u", chan, tgt); 3448 3449 for (i = 0; i < NEXUS_HASH_WIDTH; i++) { 3450 for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) { 3451 if (tgt_set == 0 || tgt == nxp->tgt) 3452 nxp->crnseed = 0; 3453 } 3454 } 3455 } 3456 3457 int 3458 isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd) 3459 { 3460 lun_id_t lun; 3461 uint32_t chan, tgt; 3462 struct isp_fc *fc; 3463 struct isp_nexus *nxp; 3464 int idx; 3465 3466 chan = XS_CHANNEL(cmd); 3467 tgt = XS_TGT(cmd); 3468 lun = XS_LUN(cmd); 3469 fc = ISP_FC_PC(isp, chan); 3470 idx = NEXUS_HASH(tgt, lun); 3471 nxp = fc->nexus_hash[idx]; 3472 3473 while (nxp) { 3474 if (nxp->tgt == tgt && nxp->lun == lun) 3475 break; 3476 nxp = nxp->next; 3477 } 3478 if (nxp == NULL) { 3479 nxp = fc->nexus_free_list; 3480 if (nxp == NULL) { 3481 nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT); 3482 if (nxp == NULL) { 3483 return (-1); 3484 } 3485 } else { 3486 fc->nexus_free_list = nxp->next; 3487 } 3488 nxp->tgt = tgt; 3489 nxp->lun = lun; 3490 nxp->next = fc->nexus_hash[idx]; 3491 fc->nexus_hash[idx] = nxp; 3492 } 3493 if (nxp->crnseed == 0) 3494 nxp->crnseed = 1; 3495 *crnp = nxp->crnseed++; 3496 return (0); 3497 } 3498 3499 /* 3500 * We enter with the lock held 3501 */ 3502 void 3503 isp_timer(void *arg) 3504 { 3505 ispsoftc_t *isp = arg; 3506 #ifdef ISP_TARGET_MODE 3507 isp_tmcmd_restart(isp); 3508 #endif 3509 callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); 3510 } 3511 3512 #ifdef ISP_TARGET_MODE 3513 isp_ecmd_t * 3514 isp_get_ecmd(ispsoftc_t *isp) 3515 { 3516 isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free; 3517 if (ecmd) { 3518 isp->isp_osinfo.ecmd_free = ecmd->next; 3519 } 3520 return (ecmd); 3521 } 3522 3523 void 3524 isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd) 3525 { 3526 ecmd->next = isp->isp_osinfo.ecmd_free; 3527 isp->isp_osinfo.ecmd_free = ecmd; 3528 } 3529 #endif 3530
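/*
 * A rough sketch of how the command reference number machinery above
 * behaves for one initiator nexus (illustrative only; "crn" and "cmd"
 * are stand-ins rather than driver symbols):
 *
 *	uint8_t crn;
 *	isp_fcp_next_crn(isp, &crn, cmd);	first use: crn == 1
 *	isp_fcp_next_crn(isp, &crn, cmd);	later commands: crn == 2, 3, ...
 *	isp_fcp_reset_crn(isp, chan, tgt, 1);	e.g. after a target reset
 *	isp_fcp_next_crn(isp, &crn, cmd);	seed cleared, so crn == 1 again
 *
 * Nexus entries are looked up through fc->nexus_hash by target/LUN and
 * are taken from fc->nexus_free_list (or allocated) when a nexus is
 * seen for the first time.
 */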