1 /*- 2 * Copyright (c) 2008, 2009 Silicon Graphics International Corp. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * substantially similar to the "NO WARRANTY" disclaimer below 13 * ("Disclaimer") and any redistribution must be conditioned upon 14 * including a substantially similar Disclaimer requirement for further 15 * binary redistribution. 16 * 17 * NO WARRANTY 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGES. 29 * 30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ 31 */ 32 /* 33 * Peripheral driver interface between CAM and CTL (CAM Target Layer). 34 * 35 * Author: Ken Merry <ken@FreeBSD.org> 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 #include <sys/param.h> 42 #include <sys/queue.h> 43 #include <sys/systm.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/mutex.h> 47 #include <sys/condvar.h> 48 #include <sys/malloc.h> 49 #include <sys/bus.h> 50 #include <sys/endian.h> 51 #include <sys/sbuf.h> 52 #include <sys/sysctl.h> 53 #include <sys/types.h> 54 #include <sys/systm.h> 55 #include <machine/bus.h> 56 57 #include <cam/cam.h> 58 #include <cam/cam_ccb.h> 59 #include <cam/cam_periph.h> 60 #include <cam/cam_queue.h> 61 #include <cam/cam_xpt_periph.h> 62 #include <cam/cam_debug.h> 63 #include <cam/cam_sim.h> 64 #include <cam/cam_xpt.h> 65 66 #include <cam/scsi/scsi_all.h> 67 #include <cam/scsi/scsi_message.h> 68 69 #include <cam/ctl/ctl_io.h> 70 #include <cam/ctl/ctl.h> 71 #include <cam/ctl/ctl_frontend.h> 72 #include <cam/ctl/ctl_util.h> 73 #include <cam/ctl/ctl_error.h> 74 75 typedef enum { 76 CTLFE_CCB_DEFAULT = 0x00, 77 CTLFE_CCB_WAITING = 0x01 78 } ctlfe_ccb_types; 79 80 struct ctlfe_softc { 81 struct ctl_frontend fe; 82 path_id_t path_id; 83 struct cam_sim *sim; 84 char port_name[DEV_IDLEN]; 85 STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; 86 STAILQ_ENTRY(ctlfe_softc) links; 87 }; 88 89 STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; 90 struct mtx ctlfe_list_mtx; 91 static char ctlfe_mtx_desc[] = "ctlfelist"; 92 static int ctlfe_dma_enabled = 1; 93 #ifdef CTLFE_INIT_ENABLE 94 static int ctlfe_max_targets = 1; 95 static int ctlfe_num_targets = 0; 96 #endif 97 98 typedef enum { 99 CTLFE_LUN_NONE = 0x00, 100 CTLFE_LUN_WILDCARD = 0x01 101 } ctlfe_lun_flags; 102 103 struct ctlfe_lun_softc { 104 struct ctlfe_softc *parent_softc; 105 struct cam_periph *periph; 106 ctlfe_lun_flags flags; 107 struct callout dma_callout; 108 uint64_t 
ccbs_alloced; 109 uint64_t ccbs_freed; 110 uint64_t ctios_sent; 111 uint64_t ctios_returned; 112 uint64_t atios_sent; 113 uint64_t atios_returned; 114 uint64_t inots_sent; 115 uint64_t inots_returned; 116 /* bus_dma_tag_t dma_tag; */ 117 TAILQ_HEAD(, ccb_hdr) work_queue; 118 STAILQ_ENTRY(ctlfe_lun_softc) links; 119 }; 120 121 typedef enum { 122 CTLFE_CMD_NONE = 0x00, 123 CTLFE_CMD_PIECEWISE = 0x01 124 } ctlfe_cmd_flags; 125 126 /* 127 * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h. 128 * Currently that is 600 bytes. 129 */ 130 struct ctlfe_lun_cmd_info { 131 int cur_transfer_index; 132 ctlfe_cmd_flags flags; 133 /* 134 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 135 * bytes on amd64. So with 32 elements, this is 256 bytes on 136 * i386 and 512 bytes on amd64. 137 */ 138 bus_dma_segment_t cam_sglist[32]; 139 }; 140 141 /* 142 * When we register the adapter/bus, request that this many ctl_ios be 143 * allocated. This should be the maximum supported by the adapter, but we 144 * currently don't have a way to get that back from the path inquiry. 145 * XXX KDM add that to the path inquiry. 146 */ 147 #define CTLFE_REQ_CTL_IO 4096 148 /* 149 * Number of Accept Target I/O CCBs to allocate and queue down to the 150 * adapter per LUN. 151 * XXX KDM should this be controlled by CTL? 152 */ 153 #define CTLFE_ATIO_PER_LUN 1024 154 /* 155 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to 156 * allocate and queue down to the adapter per LUN. 157 * XXX KDM should this be controlled by CTL? 158 */ 159 #define CTLFE_IN_PER_LUN 1024 160 161 /* 162 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending 163 * status to the initiator. The SIM is expected to have its own timeouts, 164 * so we're not putting this timeout around the CCB execution time. The 165 * SIM should timeout and let us know if it has an issue. 166 */ 167 #define CTLFE_DMA_TIMEOUT 60 168 169 /* 170 * Turn this on to enable extra debugging prints. 171 */ 172 #if 0 173 #define CTLFE_DEBUG 174 #endif 175 176 /* 177 * Use randomly assigned WWNN/WWPN values. This is to work around an issue 178 * in the FreeBSD initiator that makes it unable to rescan the target if 179 * the target gets rebooted and the WWNN/WWPN stay the same. 
180 */ 181 #if 0 182 #define RANDOM_WWNN 183 #endif 184 185 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW, 186 &ctlfe_dma_enabled, 0, "DMA enabled"); 187 MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); 188 189 #define ccb_type ppriv_field0 190 /* This is only used in the ATIO */ 191 #define io_ptr ppriv_ptr1 192 193 /* This is only used in the CTIO */ 194 #define ccb_atio ppriv_ptr1 195 196 int ctlfeinitialize(void); 197 void ctlfeshutdown(void); 198 static periph_init_t ctlfeinit; 199 static void ctlfeasync(void *callback_arg, uint32_t code, 200 struct cam_path *path, void *arg); 201 static periph_ctor_t ctlferegister; 202 static periph_oninv_t ctlfeoninvalidate; 203 static periph_dtor_t ctlfecleanup; 204 static periph_start_t ctlfestart; 205 static void ctlfedone(struct cam_periph *periph, 206 union ccb *done_ccb); 207 208 static void ctlfe_onoffline(void *arg, int online); 209 static void ctlfe_online(void *arg); 210 static void ctlfe_offline(void *arg); 211 static int ctlfe_targ_enable(void *arg, struct ctl_id targ_id); 212 static int ctlfe_targ_disable(void *arg, struct ctl_id targ_id); 213 static int ctlfe_lun_enable(void *arg, struct ctl_id targ_id, 214 int lun_id); 215 static int ctlfe_lun_disable(void *arg, struct ctl_id targ_id, 216 int lun_id); 217 static void ctlfe_dump_sim(struct cam_sim *sim); 218 static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); 219 static void ctlfe_dma_timeout(void *arg); 220 static void ctlfe_datamove_done(union ctl_io *io); 221 static void ctlfe_dump(void); 222 223 static struct periph_driver ctlfe_driver = 224 { 225 ctlfeinit, "ctl", 226 TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0 227 }; 228 PERIPHDRIVER_DECLARE(ctl, ctlfe_driver); 229 230 extern struct ctl_softc *control_softc; 231 extern int ctl_disable; 232 233 int 234 ctlfeinitialize(void) 235 { 236 cam_status status; 237 238 /* Don't initialize if we're disabled */ 239 if (ctl_disable != 0) 240 return (0); 241 242 STAILQ_INIT(&ctlfe_softc_list); 243 244 mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); 245 246 xpt_lock_buses(); 247 periphdriver_register(&ctlfe_driver); 248 xpt_unlock_buses(); 249 250 status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | 251 AC_CONTRACT, ctlfeasync, NULL, NULL); 252 253 if (status != CAM_REQ_CMP) { 254 printf("ctl: Failed to attach async callback due to CAM " 255 "status 0x%x!\n", status); 256 } 257 258 return (0); 259 } 260 261 void 262 ctlfeshutdown(void) 263 { 264 return; 265 } 266 267 void 268 ctlfeinit(void) 269 { 270 cam_status status; 271 272 /* Don't initialize if we're disabled */ 273 if (ctl_disable != 0) 274 return; 275 276 STAILQ_INIT(&ctlfe_softc_list); 277 278 mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); 279 280 KASSERT(control_softc != NULL, ("CTL is not initialized!")); 281 282 status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | 283 AC_CONTRACT, ctlfeasync, NULL, NULL); 284 285 if (status != CAM_REQ_CMP) { 286 printf("ctl: Failed to attach async callback due to CAM " 287 "status 0x%x!\n", status); 288 } 289 } 290 291 static void 292 ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) 293 { 294 295 #ifdef CTLFEDEBUG 296 printf("%s: entered\n", __func__); 297 #endif 298 299 /* 300 * When a new path gets registered, and it is capable of target 301 * mode, go ahead and attach. Later on, we may need to be more 302 * selective, but for now this will be sufficient. 
303 */ 304 switch (code) { 305 case AC_PATH_REGISTERED: { 306 struct ctl_frontend *fe; 307 struct ctlfe_softc *bus_softc; 308 struct ccb_pathinq *cpi; 309 int retval; 310 311 cpi = (struct ccb_pathinq *)arg; 312 313 /* Don't attach if it doesn't support target mode */ 314 if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { 315 #ifdef CTLFEDEBUG 316 printf("%s: SIM %s%d doesn't support target mode\n", 317 __func__, cpi->dev_name, cpi->unit_number); 318 #endif 319 break; 320 } 321 322 #ifdef CTLFE_INIT_ENABLE 323 if (ctlfe_num_targets >= ctlfe_max_targets) { 324 union ccb *ccb; 325 struct cam_sim *sim; 326 327 ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, 328 M_NOWAIT | M_ZERO); 329 if (ccb == NULL) { 330 printf("%s: unable to malloc CCB!\n", __func__); 331 return; 332 } 333 xpt_setup_ccb(&ccb->ccb_h, cpi->ccb_h.path, 334 CAM_PRIORITY_NONE); 335 336 sim = xpt_path_sim(cpi->ccb_h.path); 337 338 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 339 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 340 ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR; 341 342 /* We should hold the SIM lock here */ 343 mtx_assert(sim->mtx, MA_OWNED); 344 345 xpt_action(ccb); 346 347 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != 348 CAM_REQ_CMP) { 349 printf("%s: SIM %s%d (path id %d) initiator " 350 "enable failed with status %#x\n", 351 __func__, cpi->dev_name, 352 cpi->unit_number, cpi->ccb_h.path_id, 353 ccb->ccb_h.status); 354 } else { 355 printf("%s: SIM %s%d (path id %d) initiator " 356 "enable succeeded\n", 357 __func__, cpi->dev_name, 358 cpi->unit_number, cpi->ccb_h.path_id); 359 } 360 361 free(ccb, M_TEMP); 362 363 break; 364 } else { 365 ctlfe_num_targets++; 366 } 367 368 printf("%s: ctlfe_num_targets = %d\n", __func__, 369 ctlfe_num_targets); 370 #endif /* CTLFE_INIT_ENABLE */ 371 372 /* 373 * We're in an interrupt context here, so we have to 374 * use M_NOWAIT. Of course this means trouble if we 375 * can't allocate memory. 376 */ 377 bus_softc = malloc(sizeof(*bus_softc), M_CTLFE, 378 M_NOWAIT | M_ZERO); 379 if (bus_softc == NULL) { 380 printf("%s: unable to malloc %zd bytes for softc\n", 381 __func__, sizeof(*bus_softc)); 382 return; 383 } 384 385 bus_softc->path_id = cpi->ccb_h.path_id; 386 bus_softc->sim = xpt_path_sim(cpi->ccb_h.path); 387 STAILQ_INIT(&bus_softc->lun_softc_list); 388 389 fe = &bus_softc->fe; 390 391 /* 392 * XXX KDM should we be more accurate here ? 393 */ 394 if (cpi->transport == XPORT_FC) 395 fe->port_type = CTL_PORT_FC; 396 else 397 fe->port_type = CTL_PORT_SCSI; 398 399 /* XXX KDM what should the real number be here? */ 400 fe->num_requested_ctl_io = 4096; 401 snprintf(bus_softc->port_name, sizeof(bus_softc->port_name), 402 "%s%d", cpi->dev_name, cpi->unit_number); 403 /* 404 * XXX KDM it would be nice to allocate storage in the 405 * frontend structure itself. 406 */ 407 fe->port_name = bus_softc->port_name; 408 fe->physical_port = cpi->unit_number; 409 fe->virtual_port = cpi->bus_id; 410 fe->port_online = ctlfe_online; 411 fe->port_offline = ctlfe_offline; 412 fe->onoff_arg = bus_softc; 413 fe->targ_enable = ctlfe_targ_enable; 414 fe->targ_disable = ctlfe_targ_disable; 415 fe->lun_enable = ctlfe_lun_enable; 416 fe->lun_disable = ctlfe_lun_disable; 417 fe->targ_lun_arg = bus_softc; 418 fe->fe_datamove = ctlfe_datamove_done; 419 fe->fe_done = ctlfe_datamove_done; 420 fe->fe_dump = ctlfe_dump; 421 /* 422 * XXX KDM the path inquiry doesn't give us the maximum 423 * number of targets supported. 
424 */ 425 fe->max_targets = cpi->max_target; 426 fe->max_target_id = cpi->max_target; 427 428 /* 429 * XXX KDM need to figure out whether we're the master or 430 * slave. 431 */ 432 #ifdef CTLFEDEBUG 433 printf("%s: calling ctl_frontend_register() for %s%d\n", 434 __func__, cpi->dev_name, cpi->unit_number); 435 #endif 436 retval = ctl_frontend_register(fe, /*master_SC*/ 1); 437 if (retval != 0) { 438 printf("%s: ctl_frontend_register() failed with " 439 "error %d!\n", __func__, retval); 440 free(bus_softc, M_CTLFE); 441 break; 442 } else { 443 mtx_lock(&ctlfe_list_mtx); 444 STAILQ_INSERT_TAIL(&ctlfe_softc_list, bus_softc, links); 445 mtx_unlock(&ctlfe_list_mtx); 446 } 447 448 break; 449 } 450 case AC_PATH_DEREGISTERED: { 451 struct ctlfe_softc *softc = NULL; 452 453 mtx_lock(&ctlfe_list_mtx); 454 STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { 455 if (softc->path_id == xpt_path_path_id(path)) { 456 STAILQ_REMOVE(&ctlfe_softc_list, softc, 457 ctlfe_softc, links); 458 break; 459 } 460 } 461 mtx_unlock(&ctlfe_list_mtx); 462 463 if (softc != NULL) { 464 /* 465 * XXX KDM are we certain at this point that there 466 * are no outstanding commands for this frontend? 467 */ 468 ctl_frontend_deregister(&softc->fe); 469 free(softc, M_CTLFE); 470 } 471 break; 472 } 473 case AC_CONTRACT: { 474 struct ac_contract *ac; 475 476 ac = (struct ac_contract *)arg; 477 478 switch (ac->contract_number) { 479 case AC_CONTRACT_DEV_CHG: { 480 struct ac_device_changed *dev_chg; 481 struct ctlfe_softc *softc; 482 int retval, found; 483 484 dev_chg = (struct ac_device_changed *)ac->contract_data; 485 486 printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", 487 __func__, dev_chg->wwpn, dev_chg->port, 488 xpt_path_path_id(path), dev_chg->target, 489 (dev_chg->arrived == 0) ? "left" : "arrived"); 490 491 found = 0; 492 493 mtx_lock(&ctlfe_list_mtx); 494 STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { 495 if (softc->path_id == xpt_path_path_id(path)) { 496 found = 1; 497 break; 498 } 499 } 500 mtx_unlock(&ctlfe_list_mtx); 501 502 if (found == 0) { 503 printf("%s: CTL port for CAM path %u not " 504 "found!\n", __func__, 505 xpt_path_path_id(path)); 506 break; 507 } 508 if (dev_chg->arrived != 0) { 509 retval = ctl_add_initiator(dev_chg->wwpn, 510 softc->fe.targ_port, dev_chg->target); 511 } else { 512 retval = ctl_remove_initiator( 513 softc->fe.targ_port, dev_chg->target); 514 } 515 516 if (retval != 0) { 517 printf("%s: could not %s port %d iid %u " 518 "WWPN %#jx!\n", __func__, 519 (dev_chg->arrived != 0) ? 
"add" : 520 "remove", softc->fe.targ_port, 521 dev_chg->target, 522 (uintmax_t)dev_chg->wwpn); 523 } 524 break; 525 } 526 default: 527 printf("%s: unsupported contract number %ju\n", 528 __func__, (uintmax_t)ac->contract_number); 529 break; 530 } 531 break; 532 } 533 default: 534 break; 535 } 536 } 537 538 static cam_status 539 ctlferegister(struct cam_periph *periph, void *arg) 540 { 541 struct ctlfe_softc *bus_softc; 542 struct ctlfe_lun_softc *softc; 543 struct cam_sim *sim; 544 union ccb en_lun_ccb; 545 cam_status status; 546 int i; 547 548 softc = (struct ctlfe_lun_softc *)arg; 549 bus_softc = softc->parent_softc; 550 sim = xpt_path_sim(periph->path); 551 552 TAILQ_INIT(&softc->work_queue); 553 softc->periph = periph; 554 555 callout_init_mtx(&softc->dma_callout, sim->mtx, /*flags*/ 0); 556 periph->softc = softc; 557 558 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 559 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 560 en_lun_ccb.cel.grp6_len = 0; 561 en_lun_ccb.cel.grp7_len = 0; 562 en_lun_ccb.cel.enable = 1; 563 xpt_action(&en_lun_ccb); 564 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 565 if (status != CAM_REQ_CMP) { 566 xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", 567 __func__, en_lun_ccb.ccb_h.status); 568 return (status); 569 } 570 571 status = CAM_REQ_CMP; 572 573 for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { 574 union ccb *new_ccb; 575 576 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 577 M_ZERO|M_NOWAIT); 578 if (new_ccb == NULL) { 579 status = CAM_RESRC_UNAVAIL; 580 break; 581 } 582 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 583 new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; 584 new_ccb->ccb_h.cbfcnp = ctlfedone; 585 xpt_action(new_ccb); 586 softc->atios_sent++; 587 status = new_ccb->ccb_h.status; 588 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 589 free(new_ccb, M_CTLFE); 590 break; 591 } 592 } 593 594 status = cam_periph_acquire(periph); 595 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 596 xpt_print(periph->path, "%s: could not acquire reference " 597 "count, status = %#x\n", __func__, status); 598 return (status); 599 } 600 601 if (i == 0) { 602 xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " 603 "status 0x%x\n", __func__, status); 604 return (CAM_REQ_CMP_ERR); 605 } 606 607 for (i = 0; i < CTLFE_IN_PER_LUN; i++) { 608 union ccb *new_ccb; 609 610 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 611 M_ZERO|M_NOWAIT); 612 if (new_ccb == NULL) { 613 status = CAM_RESRC_UNAVAIL; 614 break; 615 } 616 617 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 618 new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 619 new_ccb->ccb_h.cbfcnp = ctlfedone; 620 xpt_action(new_ccb); 621 softc->inots_sent++; 622 status = new_ccb->ccb_h.status; 623 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 624 /* 625 * Note that we don't free the CCB here. If the 626 * status is not CAM_REQ_INPROG, then we're 627 * probably talking to a SIM that says it is 628 * target-capable but doesn't support the 629 * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the 630 * older API. In that case, it'll call xpt_done() 631 * on the CCB, and we need to free it in our done 632 * routine as a result. 
633 */ 634 break; 635 } 636 } 637 if ((i == 0) 638 || (status != CAM_REQ_INPROG)) { 639 xpt_print(periph->path, "%s: could not allocate immediate " 640 "notify CCBs, status 0x%x\n", __func__, status); 641 return (CAM_REQ_CMP_ERR); 642 } 643 return (CAM_REQ_CMP); 644 } 645 646 static void 647 ctlfeoninvalidate(struct cam_periph *periph) 648 { 649 union ccb en_lun_ccb; 650 cam_status status; 651 struct ctlfe_lun_softc *softc; 652 653 softc = (struct ctlfe_lun_softc *)periph->softc; 654 655 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 656 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 657 en_lun_ccb.cel.grp6_len = 0; 658 en_lun_ccb.cel.grp7_len = 0; 659 en_lun_ccb.cel.enable = 0; 660 xpt_action(&en_lun_ccb); 661 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 662 if (status != CAM_REQ_CMP) { 663 xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", 664 __func__, en_lun_ccb.ccb_h.status); 665 /* 666 * XXX KDM what do we do now? 667 */ 668 } 669 xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju " 670 "INOTs outstanding, %d refs\n", softc->atios_sent - 671 softc->atios_returned, softc->inots_sent - 672 softc->inots_returned, periph->refcount); 673 } 674 675 static void 676 ctlfecleanup(struct cam_periph *periph) 677 { 678 struct ctlfe_lun_softc *softc; 679 struct ctlfe_softc *bus_softc; 680 681 xpt_print(periph->path, "%s: Called\n", __func__); 682 683 softc = (struct ctlfe_lun_softc *)periph->softc; 684 bus_softc = softc->parent_softc; 685 686 STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); 687 688 /* 689 * XXX KDM is there anything else that needs to be done here? 690 */ 691 692 callout_stop(&softc->dma_callout); 693 694 free(softc, M_CTLFE); 695 } 696 697 static void 698 ctlfestart(struct cam_periph *periph, union ccb *start_ccb) 699 { 700 struct ctlfe_lun_softc *softc; 701 struct ccb_hdr *ccb_h; 702 703 softc = (struct ctlfe_lun_softc *)periph->softc; 704 705 softc->ccbs_alloced++; 706 707 start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT; 708 709 ccb_h = TAILQ_FIRST(&softc->work_queue); 710 if (periph->immediate_priority <= periph->pinfo.priority) { 711 panic("shouldn't get to the CCB waiting case!"); 712 start_ccb->ccb_h.ccb_type = CTLFE_CCB_WAITING; 713 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, 714 periph_links.sle); 715 periph->immediate_priority = CAM_PRIORITY_NONE; 716 wakeup(&periph->ccb_list); 717 } else if (ccb_h == NULL) { 718 softc->ccbs_freed++; 719 xpt_release_ccb(start_ccb); 720 } else { 721 struct ccb_accept_tio *atio; 722 struct ccb_scsiio *csio; 723 uint8_t *data_ptr; 724 uint32_t dxfer_len; 725 ccb_flags flags; 726 union ctl_io *io; 727 uint8_t scsi_status; 728 729 /* Take the ATIO off the work queue */ 730 TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe); 731 atio = (struct ccb_accept_tio *)ccb_h; 732 io = (union ctl_io *)ccb_h->io_ptr; 733 csio = &start_ccb->csio; 734 735 flags = atio->ccb_h.flags & 736 (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); 737 738 if ((io == NULL) 739 || (io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 740 /* 741 * We're done, send status back. 742 */ 743 flags |= CAM_SEND_STATUS; 744 if (io == NULL) { 745 scsi_status = SCSI_STATUS_BUSY; 746 csio->sense_len = 0; 747 } else if ((io->io_hdr.status & CTL_STATUS_MASK) == 748 CTL_CMD_ABORTED) { 749 io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; 750 751 /* 752 * If this command was aborted, we don't 753 * need to send status back to the SIM. 
754 * Just free the CTIO and ctl_io, and 755 * recycle the ATIO back to the SIM. 756 */ 757 xpt_print(periph->path, "%s: aborted " 758 "command 0x%04x discarded\n", 759 __func__, io->scsiio.tag_num); 760 ctl_free_io(io); 761 /* 762 * For a wildcard attachment, commands can 763 * come in with a specific target/lun. Reset 764 * the target and LUN fields back to the 765 * wildcard values before we send them back 766 * down to the SIM. The SIM has a wildcard 767 * LUN enabled, not whatever target/lun 768 * these happened to be. 769 */ 770 if (softc->flags & CTLFE_LUN_WILDCARD) { 771 atio->ccb_h.target_id = 772 CAM_TARGET_WILDCARD; 773 atio->ccb_h.target_lun = 774 CAM_LUN_WILDCARD; 775 } 776 777 if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { 778 cam_release_devq(periph->path, 779 /*relsim_flags*/0, 780 /*reduction*/0, 781 /*timeout*/0, 782 /*getcount_only*/0); 783 atio->ccb_h.status &= ~CAM_DEV_QFRZN; 784 } 785 786 ccb_h = TAILQ_FIRST(&softc->work_queue); 787 788 if (atio->ccb_h.func_code != 789 XPT_ACCEPT_TARGET_IO) { 790 xpt_print(periph->path, "%s: func_code " 791 "is %#x\n", __func__, 792 atio->ccb_h.func_code); 793 } 794 start_ccb->ccb_h.func_code = XPT_ABORT; 795 start_ccb->cab.abort_ccb = (union ccb *)atio; 796 start_ccb->ccb_h.cbfcnp = ctlfedone; 797 798 /* Tell the SIM that we've aborted this ATIO */ 799 xpt_action(start_ccb); 800 softc->ccbs_freed++; 801 xpt_release_ccb(start_ccb); 802 803 /* 804 * Send the ATIO back down to the SIM. 805 */ 806 xpt_action((union ccb *)atio); 807 softc->atios_sent++; 808 809 /* 810 * If we still have work to do, ask for 811 * another CCB. Otherwise, deactivate our 812 * callout. 813 */ 814 if (ccb_h != NULL) 815 xpt_schedule(periph, /*priority*/ 1); 816 else 817 callout_stop(&softc->dma_callout); 818 819 return; 820 } else { 821 io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; 822 scsi_status = io->scsiio.scsi_status; 823 csio->sense_len = io->scsiio.sense_len; 824 } 825 data_ptr = NULL; 826 dxfer_len = 0; 827 if (io == NULL) { 828 printf("%s: tag %04x io is NULL\n", __func__, 829 atio->tag_id); 830 } else { 831 #ifdef CTLFEDEBUG 832 printf("%s: tag %04x status %x\n", __func__, 833 atio->tag_id, io->io_hdr.status); 834 #endif 835 } 836 csio->sglist_cnt = 0; 837 if (csio->sense_len != 0) { 838 csio->sense_data = io->scsiio.sense_data; 839 flags |= CAM_SEND_SENSE; 840 } else if (scsi_status == SCSI_STATUS_CHECK_COND) { 841 xpt_print(periph->path, "%s: check condition " 842 "with no sense\n", __func__); 843 } 844 } else { 845 struct ctlfe_lun_cmd_info *cmd_info; 846 847 /* 848 * Datamove call, we need to setup the S/G list. 849 */ 850 851 cmd_info = (struct ctlfe_lun_cmd_info *) 852 io->io_hdr.port_priv; 853 854 KASSERT(sizeof(*cmd_info) < CTL_PORT_PRIV_SIZE, 855 ("%s: sizeof(struct ctlfe_lun_cmd_info) %zd < " 856 "CTL_PORT_PRIV_SIZE %d", __func__, 857 sizeof(*cmd_info), CTL_PORT_PRIV_SIZE)); 858 io->io_hdr.flags &= ~CTL_FLAG_DMA_QUEUED; 859 860 /* 861 * Need to zero this, in case it has been used for 862 * a previous datamove for this particular I/O. 863 */ 864 bzero(cmd_info, sizeof(*cmd_info)); 865 scsi_status = 0; 866 867 /* 868 * Set the direction, relative to the initiator. 
869 */ 870 flags &= ~CAM_DIR_MASK; 871 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 872 CTL_FLAG_DATA_IN) 873 flags |= CAM_DIR_IN; 874 else 875 flags |= CAM_DIR_OUT; 876 877 csio->cdb_len = atio->cdb_len; 878 879 flags &= ~CAM_DATA_MASK; 880 if (io->scsiio.kern_sg_entries == 0) { 881 /* No S/G list */ 882 data_ptr = io->scsiio.kern_data_ptr; 883 dxfer_len = io->scsiio.kern_data_len; 884 csio->sglist_cnt = 0; 885 886 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 887 flags |= CAM_DATA_PADDR; 888 else 889 flags |= CAM_DATA_VADDR; 890 } else if (io->scsiio.kern_sg_entries <= 891 (sizeof(cmd_info->cam_sglist)/ 892 sizeof(cmd_info->cam_sglist[0]))) { 893 /* 894 * S/G list with physical or virtual pointers. 895 * Just populate the CAM S/G list with the 896 * pointers. 897 */ 898 int i; 899 struct ctl_sg_entry *ctl_sglist; 900 bus_dma_segment_t *cam_sglist; 901 902 ctl_sglist = (struct ctl_sg_entry *) 903 io->scsiio.kern_data_ptr; 904 cam_sglist = cmd_info->cam_sglist; 905 906 for (i = 0; i < io->scsiio.kern_sg_entries;i++){ 907 cam_sglist[i].ds_addr = 908 (bus_addr_t)ctl_sglist[i].addr; 909 cam_sglist[i].ds_len = 910 ctl_sglist[i].len; 911 } 912 csio->sglist_cnt = io->scsiio.kern_sg_entries; 913 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 914 flags |= CAM_DATA_SG_PADDR; 915 else 916 flags &= ~CAM_DATA_SG; 917 data_ptr = (uint8_t *)cam_sglist; 918 dxfer_len = io->scsiio.kern_data_len; 919 } else { 920 /* S/G list with virtual pointers */ 921 struct ctl_sg_entry *sglist; 922 int *ti; 923 924 /* 925 * If we have more S/G list pointers than 926 * will fit in the available storage in the 927 * cmd_info structure inside the ctl_io header, 928 * then we need to send down the pointers 929 * one element at a time. 930 */ 931 932 sglist = (struct ctl_sg_entry *) 933 io->scsiio.kern_data_ptr; 934 ti = &cmd_info->cur_transfer_index; 935 data_ptr = sglist[*ti].addr; 936 dxfer_len = sglist[*ti].len; 937 csio->sglist_cnt = 0; 938 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 939 (*ti)++; 940 } 941 942 io->scsiio.ext_data_filled += dxfer_len; 943 944 if (io->scsiio.ext_data_filled > 945 io->scsiio.kern_total_len) { 946 xpt_print(periph->path, "%s: tag 0x%04x " 947 "fill len %u > total %u\n", 948 __func__, io->scsiio.tag_num, 949 io->scsiio.ext_data_filled, 950 io->scsiio.kern_total_len); 951 } 952 } 953 954 #ifdef CTLFEDEBUG 955 printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, 956 (flags & CAM_SEND_STATUS) ? 
"done" : "datamove", 957 atio->tag_id, flags, data_ptr, dxfer_len); 958 #endif 959 960 /* 961 * Valid combinations: 962 * - CAM_SEND_STATUS, SCATTER_VALID = 0, dxfer_len = 0, 963 * sglist_cnt = 0 964 * - CAM_SEND_STATUS = 0, SCATTER_VALID = 0, dxfer_len != 0, 965 * sglist_cnt = 0 966 * - CAM_SEND_STATUS = 0, SCATTER_VALID, dxfer_len != 0, 967 * sglist_cnt != 0 968 */ 969 #ifdef CTLFEDEBUG 970 if (((flags & CAM_SEND_STATUS) 971 && (((flags & CAM_SCATTER_VALID) != 0) 972 || (dxfer_len != 0) 973 || (csio->sglist_cnt != 0))) 974 || (((flags & CAM_SEND_STATUS) == 0) 975 && (dxfer_len == 0)) 976 || ((flags & CAM_SCATTER_VALID) 977 && (csio->sglist_cnt == 0)) 978 || (((flags & CAM_SCATTER_VALID) == 0) 979 && (csio->sglist_cnt != 0))) { 980 printf("%s: tag %04x cdb %02x flags %#x dxfer_len " 981 "%d sg %u\n", __func__, atio->tag_id, 982 atio->cdb_io.cdb_bytes[0], flags, dxfer_len, 983 csio->sglist_cnt); 984 if (io != NULL) { 985 printf("%s: tag %04x io status %#x\n", __func__, 986 atio->tag_id, io->io_hdr.status); 987 } else { 988 printf("%s: tag %04x no associated io\n", 989 __func__, atio->tag_id); 990 } 991 } 992 #endif 993 cam_fill_ctio(csio, 994 /*retries*/ 2, 995 ctlfedone, 996 flags, 997 (flags & CAM_TAG_ACTION_VALID) ? 998 MSG_SIMPLE_Q_TAG : 0, 999 atio->tag_id, 1000 atio->init_id, 1001 scsi_status, 1002 /*data_ptr*/ data_ptr, 1003 /*dxfer_len*/ dxfer_len, 1004 /*timeout*/ 5 * 1000); 1005 start_ccb->ccb_h.ccb_atio = atio; 1006 if (((flags & CAM_SEND_STATUS) == 0) 1007 && (io != NULL)) 1008 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 1009 1010 softc->ctios_sent++; 1011 1012 xpt_action(start_ccb); 1013 1014 if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1015 cam_release_devq(periph->path, 1016 /*relsim_flags*/0, 1017 /*reduction*/0, 1018 /*timeout*/0, 1019 /*getcount_only*/0); 1020 atio->ccb_h.status &= ~CAM_DEV_QFRZN; 1021 } 1022 1023 ccb_h = TAILQ_FIRST(&softc->work_queue); 1024 } 1025 /* 1026 * If we still have work to do, ask for another CCB. Otherwise, 1027 * deactivate our callout. 1028 */ 1029 if (ccb_h != NULL) 1030 xpt_schedule(periph, /*priority*/ 1); 1031 else 1032 callout_stop(&softc->dma_callout); 1033 } 1034 1035 static void 1036 ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) 1037 { 1038 struct ctlfe_lun_softc *softc; 1039 1040 softc = (struct ctlfe_lun_softc *)periph->softc; 1041 1042 switch (ccb->ccb_h.func_code) { 1043 case XPT_ACCEPT_TARGET_IO: 1044 softc->atios_returned++; 1045 break; 1046 case XPT_IMMEDIATE_NOTIFY: 1047 case XPT_NOTIFY_ACKNOWLEDGE: 1048 softc->inots_returned++; 1049 break; 1050 default: 1051 break; 1052 } 1053 1054 free(ccb, M_CTLFE); 1055 1056 KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: " 1057 "atios_returned %ju > atios_sent %ju", __func__, 1058 softc->atios_returned, softc->atios_sent)); 1059 KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: " 1060 "inots_returned %ju > inots_sent %ju", __func__, 1061 softc->inots_returned, softc->inots_sent)); 1062 1063 /* 1064 * If we have received all of our CCBs, we can release our 1065 * reference on the peripheral driver. It will probably go away 1066 * now. 1067 */ 1068 if ((softc->atios_returned == softc->atios_sent) 1069 && (softc->inots_returned == softc->inots_sent)) { 1070 cam_periph_release_locked(periph); 1071 } 1072 } 1073 1074 static int 1075 ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) 1076 { 1077 uint64_t lba; 1078 uint32_t num_blocks, nbc; 1079 uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)? 
1080 atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes; 1081 1082 nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ 1083 1084 switch (cmdbyt[0]) { 1085 case READ_6: 1086 case WRITE_6: 1087 { 1088 struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; 1089 lba = scsi_3btoul(cdb->addr); 1090 lba &= 0x1fffff; 1091 num_blocks = cdb->length; 1092 if (num_blocks == 0) 1093 num_blocks = 256; 1094 lba += nbc; 1095 num_blocks -= nbc; 1096 scsi_ulto3b(lba, cdb->addr); 1097 cdb->length = num_blocks; 1098 break; 1099 } 1100 case READ_10: 1101 case WRITE_10: 1102 { 1103 struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; 1104 lba = scsi_4btoul(cdb->addr); 1105 num_blocks = scsi_2btoul(cdb->length); 1106 lba += nbc; 1107 num_blocks -= nbc; 1108 scsi_ulto4b(lba, cdb->addr); 1109 scsi_ulto2b(num_blocks, cdb->length); 1110 break; 1111 } 1112 case READ_12: 1113 case WRITE_12: 1114 { 1115 struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; 1116 lba = scsi_4btoul(cdb->addr); 1117 num_blocks = scsi_4btoul(cdb->length); 1118 lba += nbc; 1119 num_blocks -= nbc; 1120 scsi_ulto4b(lba, cdb->addr); 1121 scsi_ulto4b(num_blocks, cdb->length); 1122 break; 1123 } 1124 case READ_16: 1125 case WRITE_16: 1126 { 1127 struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; 1128 lba = scsi_8btou64(cdb->addr); 1129 num_blocks = scsi_4btoul(cdb->length); 1130 lba += nbc; 1131 num_blocks -= nbc; 1132 scsi_u64to8b(lba, cdb->addr); 1133 scsi_ulto4b(num_blocks, cdb->length); 1134 break; 1135 } 1136 default: 1137 return -1; 1138 } 1139 return (0); 1140 } 1141 1142 static void 1143 ctlfedone(struct cam_periph *periph, union ccb *done_ccb) 1144 { 1145 struct ctlfe_lun_softc *softc; 1146 struct ctlfe_softc *bus_softc; 1147 struct ccb_accept_tio *atio = NULL; 1148 union ctl_io *io = NULL; 1149 1150 #ifdef CTLFE_DEBUG 1151 printf("%s: entered, func_code = %#x, type = %#lx\n", __func__, 1152 done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type); 1153 #endif 1154 1155 softc = (struct ctlfe_lun_softc *)periph->softc; 1156 bus_softc = softc->parent_softc; 1157 1158 if (done_ccb->ccb_h.ccb_type == CTLFE_CCB_WAITING) { 1159 panic("shouldn't get to the CCB waiting case!"); 1160 wakeup(&done_ccb->ccb_h.cbfcnp); 1161 return; 1162 } 1163 1164 /* 1165 * If the peripheral is invalid, ATIOs and immediate notify CCBs 1166 * need to be freed. Most of the ATIOs and INOTs that come back 1167 * will be CCBs that are being returned from the SIM as a result of 1168 * our disabling the LUN. 1169 * 1170 * Other CCB types are handled in their respective cases below. 1171 */ 1172 if (periph->flags & CAM_PERIPH_INVALID) { 1173 switch (done_ccb->ccb_h.func_code) { 1174 case XPT_ACCEPT_TARGET_IO: 1175 case XPT_IMMEDIATE_NOTIFY: 1176 case XPT_NOTIFY_ACKNOWLEDGE: 1177 ctlfe_free_ccb(periph, done_ccb); 1178 return; 1179 default: 1180 break; 1181 } 1182 1183 } 1184 switch (done_ccb->ccb_h.func_code) { 1185 case XPT_ACCEPT_TARGET_IO: { 1186 1187 atio = &done_ccb->atio; 1188 1189 softc->atios_returned++; 1190 1191 resubmit: 1192 /* 1193 * Allocate a ctl_io, pass it to CTL, and wait for the 1194 * datamove or done. 1195 */ 1196 io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref); 1197 if (io == NULL) { 1198 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1199 atio->ccb_h.flags |= CAM_DIR_NONE; 1200 1201 printf("%s: ctl_alloc_io failed!\n", __func__); 1202 1203 /* 1204 * XXX KDM need to set SCSI_STATUS_BUSY, but there 1205 * is no field in the ATIO structure to do that, 1206 * and we aren't able to allocate a ctl_io here. 1207 * What to do? 
1208 */ 1209 atio->sense_len = 0; 1210 done_ccb->ccb_h.io_ptr = NULL; 1211 TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h, 1212 periph_links.tqe); 1213 xpt_schedule(periph, /*priority*/ 1); 1214 break; 1215 } 1216 ctl_zero_io(io); 1217 1218 /* Save pointers on both sides */ 1219 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb; 1220 done_ccb->ccb_h.io_ptr = io; 1221 1222 /* 1223 * Only SCSI I/O comes down this path, resets, etc. come 1224 * down the immediate notify path below. 1225 */ 1226 io->io_hdr.io_type = CTL_IO_SCSI; 1227 io->io_hdr.nexus.initid.id = atio->init_id; 1228 io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port; 1229 io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id; 1230 io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; 1231 io->scsiio.tag_num = atio->tag_id; 1232 switch (atio->tag_action) { 1233 case CAM_TAG_ACTION_NONE: 1234 io->scsiio.tag_type = CTL_TAG_UNTAGGED; 1235 break; 1236 case MSG_SIMPLE_TASK: 1237 io->scsiio.tag_type = CTL_TAG_SIMPLE; 1238 break; 1239 case MSG_HEAD_OF_QUEUE_TASK: 1240 io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; 1241 break; 1242 case MSG_ORDERED_TASK: 1243 io->scsiio.tag_type = CTL_TAG_ORDERED; 1244 break; 1245 case MSG_ACA_TASK: 1246 io->scsiio.tag_type = CTL_TAG_ACA; 1247 break; 1248 default: 1249 io->scsiio.tag_type = CTL_TAG_UNTAGGED; 1250 printf("%s: unhandled tag type %#x!!\n", __func__, 1251 atio->tag_action); 1252 break; 1253 } 1254 if (atio->cdb_len > sizeof(io->scsiio.cdb)) { 1255 printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", 1256 __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); 1257 } 1258 io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); 1259 bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb, 1260 io->scsiio.cdb_len); 1261 1262 #ifdef CTLFEDEBUG 1263 printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__, 1264 (uintmax_t)io->io_hdr.nexus.initid.id, 1265 io->io_hdr.nexus.targ_port, 1266 (uintmax_t)io->io_hdr.nexus.targ_target.id, 1267 io->io_hdr.nexus.targ_lun, 1268 io->scsiio.tag_num, io->scsiio.cdb[0]); 1269 #endif 1270 1271 ctl_queue(io); 1272 break; 1273 } 1274 case XPT_CONT_TARGET_IO: { 1275 int srr = 0; 1276 uint32_t srr_off = 0; 1277 1278 atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; 1279 io = (union ctl_io *)atio->ccb_h.io_ptr; 1280 1281 softc->ctios_returned++; 1282 #ifdef CTLFEDEBUG 1283 printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", 1284 __func__, atio->tag_id, done_ccb->ccb_h.flags); 1285 #endif 1286 /* 1287 * Handle SRR case were the data pointer is pushed back hack 1288 */ 1289 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV 1290 && done_ccb->csio.msg_ptr != NULL 1291 && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED 1292 && done_ccb->csio.msg_ptr[1] == 5 1293 && done_ccb->csio.msg_ptr[2] == 0) { 1294 srr = 1; 1295 srr_off = 1296 (done_ccb->csio.msg_ptr[3] << 24) 1297 | (done_ccb->csio.msg_ptr[4] << 16) 1298 | (done_ccb->csio.msg_ptr[5] << 8) 1299 | (done_ccb->csio.msg_ptr[6]); 1300 } 1301 1302 if (srr && (done_ccb->ccb_h.flags & CAM_SEND_STATUS)) { 1303 /* 1304 * If status was being sent, the back end data is now 1305 * history. Hack it up and resubmit a new command with 1306 * the CDB adjusted. If the SIM does the right thing, 1307 * all of the resid math should work. 1308 */ 1309 softc->ccbs_freed++; 1310 xpt_release_ccb(done_ccb); 1311 ctl_free_io(io); 1312 if (ctlfe_adjust_cdb(atio, srr_off) == 0) { 1313 done_ccb = (union ccb *)atio; 1314 goto resubmit; 1315 } 1316 /* 1317 * Fall through to doom.... 
1318 */ 1319 } else if (srr) { 1320 /* 1321 * If we have an srr and we're still sending data, we 1322 * should be able to adjust offsets and cycle again. 1323 */ 1324 io->scsiio.kern_rel_offset = 1325 io->scsiio.ext_data_filled = srr_off; 1326 io->scsiio.ext_data_len = io->scsiio.kern_total_len - 1327 io->scsiio.kern_rel_offset; 1328 softc->ccbs_freed++; 1329 io->scsiio.io_hdr.status = CTL_STATUS_NONE; 1330 xpt_release_ccb(done_ccb); 1331 TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, 1332 periph_links.tqe); 1333 xpt_schedule(periph, /*priority*/ 1); 1334 return; 1335 } 1336 1337 /* 1338 * If we were sending status back to the initiator, free up 1339 * resources. If we were doing a datamove, call the 1340 * datamove done routine. 1341 */ 1342 if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) { 1343 softc->ccbs_freed++; 1344 xpt_release_ccb(done_ccb); 1345 ctl_free_io(io); 1346 /* 1347 * For a wildcard attachment, commands can come in 1348 * with a specific target/lun. Reset the target 1349 * and LUN fields back to the wildcard values before 1350 * we send them back down to the SIM. The SIM has 1351 * a wildcard LUN enabled, not whatever target/lun 1352 * these happened to be. 1353 */ 1354 if (softc->flags & CTLFE_LUN_WILDCARD) { 1355 atio->ccb_h.target_id = CAM_TARGET_WILDCARD; 1356 atio->ccb_h.target_lun = CAM_LUN_WILDCARD; 1357 } 1358 if (periph->flags & CAM_PERIPH_INVALID) { 1359 ctlfe_free_ccb(periph, (union ccb *)atio); 1360 return; 1361 } else { 1362 xpt_action((union ccb *)atio); 1363 softc->atios_sent++; 1364 } 1365 } else { 1366 struct ctlfe_lun_cmd_info *cmd_info; 1367 struct ccb_scsiio *csio; 1368 1369 csio = &done_ccb->csio; 1370 cmd_info = (struct ctlfe_lun_cmd_info *) 1371 io->io_hdr.port_priv; 1372 1373 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; 1374 1375 io->scsiio.ext_data_len += csio->dxfer_len; 1376 if (io->scsiio.ext_data_len > 1377 io->scsiio.kern_total_len) { 1378 xpt_print(periph->path, "%s: tag 0x%04x " 1379 "done len %u > total %u sent %u\n", 1380 __func__, io->scsiio.tag_num, 1381 io->scsiio.ext_data_len, 1382 io->scsiio.kern_total_len, 1383 io->scsiio.ext_data_filled); 1384 } 1385 /* 1386 * Translate CAM status to CTL status. Success 1387 * does not change the overall, ctl_io status. In 1388 * that case we just set port_status to 0. If we 1389 * have a failure, though, set a data phase error 1390 * for the overall ctl_io. 1391 */ 1392 switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { 1393 case CAM_REQ_CMP: 1394 io->io_hdr.port_status = 0; 1395 break; 1396 default: 1397 /* 1398 * XXX KDM we probably need to figure out a 1399 * standard set of errors that the SIM 1400 * drivers should return in the event of a 1401 * data transfer failure. A data phase 1402 * error will at least point the user to a 1403 * data transfer error of some sort. 1404 * Hopefully the SIM printed out some 1405 * additional information to give the user 1406 * a clue what happened. 1407 */ 1408 io->io_hdr.port_status = 0xbad1; 1409 ctl_set_data_phase_error(&io->scsiio); 1410 /* 1411 * XXX KDM figure out residual. 1412 */ 1413 break; 1414 } 1415 /* 1416 * If we had to break this S/G list into multiple 1417 * pieces, figure out where we are in the list, and 1418 * continue sending pieces if necessary. 
1419 */ 1420 if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) 1421 && (io->io_hdr.port_status == 0) 1422 && (cmd_info->cur_transfer_index < 1423 io->scsiio.kern_sg_entries)) { 1424 struct ctl_sg_entry *sglist; 1425 ccb_flags flags; 1426 uint8_t scsi_status; 1427 uint8_t *data_ptr; 1428 uint32_t dxfer_len; 1429 int *ti; 1430 1431 sglist = (struct ctl_sg_entry *) 1432 io->scsiio.kern_data_ptr; 1433 ti = &cmd_info->cur_transfer_index; 1434 flags = atio->ccb_h.flags & 1435 (CAM_DIS_DISCONNECT| 1436 CAM_TAG_ACTION_VALID| 1437 CAM_DIR_MASK); 1438 1439 /* 1440 * Set the direction, relative to the initiator. 1441 */ 1442 flags &= ~CAM_DIR_MASK; 1443 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 1444 CTL_FLAG_DATA_IN) 1445 flags |= CAM_DIR_IN; 1446 else 1447 flags |= CAM_DIR_OUT; 1448 1449 data_ptr = sglist[*ti].addr; 1450 dxfer_len = sglist[*ti].len; 1451 (*ti)++; 1452 1453 scsi_status = 0; 1454 1455 if (((flags & CAM_SEND_STATUS) == 0) 1456 && (dxfer_len == 0)) { 1457 printf("%s: tag %04x no status or " 1458 "len cdb = %02x\n", __func__, 1459 atio->tag_id, 1460 atio->cdb_io.cdb_bytes[0]); 1461 printf("%s: tag %04x io status %#x\n", 1462 __func__, atio->tag_id, 1463 io->io_hdr.status); 1464 } 1465 1466 cam_fill_ctio(csio, 1467 /*retries*/ 2, 1468 ctlfedone, 1469 flags, 1470 (flags & CAM_TAG_ACTION_VALID) ? 1471 MSG_SIMPLE_Q_TAG : 0, 1472 atio->tag_id, 1473 atio->init_id, 1474 scsi_status, 1475 /*data_ptr*/ data_ptr, 1476 /*dxfer_len*/ dxfer_len, 1477 /*timeout*/ 5 * 1000); 1478 1479 csio->resid = 0; 1480 csio->ccb_h.ccb_atio = atio; 1481 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 1482 softc->ctios_sent++; 1483 xpt_action((union ccb *)csio); 1484 } else { 1485 /* 1486 * Release the CTIO. The ATIO will be sent back 1487 * down to the SIM once we send status. 1488 */ 1489 softc->ccbs_freed++; 1490 xpt_release_ccb(done_ccb); 1491 1492 /* Call the backend move done callback */ 1493 io->scsiio.be_move_done(io); 1494 } 1495 } 1496 break; 1497 } 1498 case XPT_IMMEDIATE_NOTIFY: { 1499 union ctl_io *io; 1500 struct ccb_immediate_notify *inot; 1501 cam_status status; 1502 int frozen; 1503 1504 inot = &done_ccb->cin1; 1505 1506 softc->inots_returned++; 1507 1508 frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 1509 1510 printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x " 1511 "seq %#x\n", __func__, inot->ccb_h.status, 1512 inot->tag_id, inot->seq_id); 1513 1514 io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref); 1515 if (io != NULL) { 1516 int send_ctl_io; 1517 1518 send_ctl_io = 1; 1519 1520 ctl_zero_io(io); 1521 io->io_hdr.io_type = CTL_IO_TASK; 1522 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb; 1523 inot->ccb_h.io_ptr = io; 1524 io->io_hdr.nexus.initid.id = inot->initiator_id; 1525 io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port; 1526 io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id; 1527 io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; 1528 /* XXX KDM should this be the tag_id? */ 1529 io->taskio.tag_num = inot->seq_id; 1530 1531 status = inot->ccb_h.status & CAM_STATUS_MASK; 1532 switch (status) { 1533 case CAM_SCSI_BUS_RESET: 1534 io->taskio.task_action = CTL_TASK_BUS_RESET; 1535 break; 1536 case CAM_BDR_SENT: 1537 io->taskio.task_action = CTL_TASK_TARGET_RESET; 1538 break; 1539 case CAM_MESSAGE_RECV: 1540 switch (inot->arg) { 1541 case MSG_ABORT_TASK_SET: 1542 /* 1543 * XXX KDM this isn't currently 1544 * supported by CTL. It ends up 1545 * being a no-op. 
1546 */ 1547 io->taskio.task_action = 1548 CTL_TASK_ABORT_TASK_SET; 1549 break; 1550 case MSG_TARGET_RESET: 1551 io->taskio.task_action = 1552 CTL_TASK_TARGET_RESET; 1553 break; 1554 case MSG_ABORT_TASK: 1555 io->taskio.task_action = 1556 CTL_TASK_ABORT_TASK; 1557 break; 1558 case MSG_LOGICAL_UNIT_RESET: 1559 io->taskio.task_action = 1560 CTL_TASK_LUN_RESET; 1561 break; 1562 case MSG_CLEAR_TASK_SET: 1563 /* 1564 * XXX KDM this isn't currently 1565 * supported by CTL. It ends up 1566 * being a no-op. 1567 */ 1568 io->taskio.task_action = 1569 CTL_TASK_CLEAR_TASK_SET; 1570 break; 1571 case MSG_CLEAR_ACA: 1572 io->taskio.task_action = 1573 CTL_TASK_CLEAR_ACA; 1574 break; 1575 case MSG_NOOP: 1576 send_ctl_io = 0; 1577 break; 1578 default: 1579 xpt_print(periph->path, "%s: " 1580 "unsupported message 0x%x\n", 1581 __func__, inot->arg); 1582 send_ctl_io = 0; 1583 break; 1584 } 1585 break; 1586 case CAM_REQ_ABORTED: 1587 /* 1588 * This request was sent back by the driver. 1589 * XXX KDM what do we do here? 1590 */ 1591 send_ctl_io = 0; 1592 break; 1593 case CAM_REQ_INVALID: 1594 case CAM_PROVIDE_FAIL: 1595 default: 1596 /* 1597 * We should only get here if we're talking 1598 * to a talking to a SIM that is target 1599 * capable but supports the old API. In 1600 * that case, we need to just free the CCB. 1601 * If we actually send a notify acknowledge, 1602 * it will send that back with an error as 1603 * well. 1604 */ 1605 1606 if ((status != CAM_REQ_INVALID) 1607 && (status != CAM_PROVIDE_FAIL)) 1608 xpt_print(periph->path, "%s: " 1609 "unsupported CAM status " 1610 "0x%x\n", __func__, status); 1611 1612 ctl_free_io(io); 1613 ctlfe_free_ccb(periph, done_ccb); 1614 1615 return; 1616 } 1617 if (send_ctl_io != 0) { 1618 ctl_queue(io); 1619 } else { 1620 ctl_free_io(io); 1621 done_ccb->ccb_h.status = CAM_REQ_INPROG; 1622 done_ccb->ccb_h.func_code = 1623 XPT_NOTIFY_ACKNOWLEDGE; 1624 xpt_action(done_ccb); 1625 } 1626 } else { 1627 xpt_print(periph->path, "%s: could not allocate " 1628 "ctl_io for immediate notify!\n", __func__); 1629 /* requeue this to the adapter */ 1630 done_ccb->ccb_h.status = CAM_REQ_INPROG; 1631 done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; 1632 xpt_action(done_ccb); 1633 } 1634 1635 if (frozen != 0) { 1636 cam_release_devq(periph->path, 1637 /*relsim_flags*/ 0, 1638 /*opening reduction*/ 0, 1639 /*timeout*/ 0, 1640 /*getcount_only*/ 0); 1641 } 1642 break; 1643 } 1644 case XPT_NOTIFY_ACKNOWLEDGE: 1645 /* 1646 * Queue this back down to the SIM as an immediate notify. 1647 */ 1648 done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 1649 xpt_action(done_ccb); 1650 softc->inots_sent++; 1651 break; 1652 case XPT_ABORT: 1653 /* 1654 * XPT_ABORT is an immediate CCB, we shouldn't get here. 
1655 */ 1656 panic("%s: XPT_ABORT CCB returned!", __func__); 1657 break; 1658 case XPT_SET_SIM_KNOB: 1659 case XPT_GET_SIM_KNOB: 1660 break; 1661 default: 1662 panic("%s: unexpected CCB type %#x", __func__, 1663 done_ccb->ccb_h.func_code); 1664 break; 1665 } 1666 } 1667 1668 static void 1669 ctlfe_onoffline(void *arg, int online) 1670 { 1671 struct ctlfe_softc *bus_softc; 1672 union ccb *ccb; 1673 cam_status status; 1674 struct cam_path *path; 1675 struct cam_sim *sim; 1676 int set_wwnn; 1677 1678 bus_softc = (struct ctlfe_softc *)arg; 1679 1680 set_wwnn = 0; 1681 1682 sim = bus_softc->sim; 1683 1684 mtx_assert(sim->mtx, MA_OWNED); 1685 1686 status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, 1687 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 1688 if (status != CAM_REQ_CMP) { 1689 printf("%s: unable to create path!\n", __func__); 1690 return; 1691 } 1692 ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO); 1693 if (ccb == NULL) { 1694 printf("%s: unable to malloc CCB!\n", __func__); 1695 xpt_free_path(path); 1696 return; 1697 } 1698 xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); 1699 1700 /* 1701 * Copan WWN format: 1702 * 1703 * Bits 63-60: 0x5 NAA, IEEE registered name 1704 * Bits 59-36: 0x000ED5 IEEE Company name assigned to Copan 1705 * Bits 35-12: Copan SSN (Sequential Serial Number) 1706 * Bits 11-8: Type of port: 1707 * 1 == N-Port 1708 * 2 == F-Port 1709 * 3 == NL-Port 1710 * Bits 7-0: 0 == Node Name, >0 == Port Number 1711 */ 1712 1713 if (online != 0) { 1714 1715 ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; 1716 1717 1718 xpt_action(ccb); 1719 1720 1721 if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ 1722 #ifdef RANDOM_WWNN 1723 uint64_t random_bits; 1724 #endif 1725 1726 printf("%s: %s current WWNN %#jx\n", __func__, 1727 bus_softc->port_name, 1728 ccb->knob.xport_specific.fc.wwnn); 1729 printf("%s: %s current WWPN %#jx\n", __func__, 1730 bus_softc->port_name, 1731 ccb->knob.xport_specific.fc.wwpn); 1732 1733 #ifdef RANDOM_WWNN 1734 arc4rand(&random_bits, sizeof(random_bits), 0); 1735 #endif 1736 1737 /* 1738 * XXX KDM this is a bit of a kludge for now. We 1739 * take the current WWNN/WWPN from the card, and 1740 * replace the company identifier and the NL-Port 1741 * indicator and the port number (for the WWPN). 1742 * This should be replaced later with ddb_GetWWNN, 1743 * or possibly a more centralized scheme. (It 1744 * would be nice to have the WWNN/WWPN for each 1745 * port stored in the ctl_frontend structure.) 1746 */ 1747 #ifdef RANDOM_WWNN 1748 ccb->knob.xport_specific.fc.wwnn = 1749 (random_bits & 1750 0x0000000fffffff00ULL) | 1751 /* Company ID */ 0x5000ED5000000000ULL | 1752 /* NL-Port */ 0x0300; 1753 ccb->knob.xport_specific.fc.wwpn = 1754 (random_bits & 1755 0x0000000fffffff00ULL) | 1756 /* Company ID */ 0x5000ED5000000000ULL | 1757 /* NL-Port */ 0x3000 | 1758 /* Port Num */ (bus_softc->fe.targ_port & 0xff); 1759 1760 /* 1761 * This is a bit of an API break/reversal, but if 1762 * we're doing the random WWNN that's a little 1763 * different anyway. So record what we're actually 1764 * using with the frontend code so it's reported 1765 * accurately. 1766 */ 1767 bus_softc->fe.wwnn = 1768 ccb->knob.xport_specific.fc.wwnn; 1769 bus_softc->fe.wwpn = 1770 ccb->knob.xport_specific.fc.wwpn; 1771 set_wwnn = 1; 1772 #else /* RANDOM_WWNN */ 1773 /* 1774 * If the user has specified a WWNN/WWPN, send them 1775 * down to the SIM. Otherwise, record what the SIM 1776 * has reported. 
1777 */ 1778 if ((bus_softc->fe.wwnn != 0) 1779 && (bus_softc->fe.wwpn != 0)) { 1780 ccb->knob.xport_specific.fc.wwnn = 1781 bus_softc->fe.wwnn; 1782 ccb->knob.xport_specific.fc.wwpn = 1783 bus_softc->fe.wwpn; 1784 set_wwnn = 1; 1785 } else { 1786 bus_softc->fe.wwnn = 1787 ccb->knob.xport_specific.fc.wwnn; 1788 bus_softc->fe.wwpn = 1789 ccb->knob.xport_specific.fc.wwpn; 1790 } 1791 #endif /* RANDOM_WWNN */ 1792 1793 1794 if (set_wwnn != 0) { 1795 printf("%s: %s new WWNN %#jx\n", __func__, 1796 bus_softc->port_name, 1797 ccb->knob.xport_specific.fc.wwnn); 1798 printf("%s: %s new WWPN %#jx\n", __func__, 1799 bus_softc->port_name, 1800 ccb->knob.xport_specific.fc.wwpn); 1801 } 1802 } else { 1803 printf("%s: %s has no valid WWNN/WWPN\n", __func__, 1804 bus_softc->port_name); 1805 } 1806 } 1807 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 1808 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 1809 if (set_wwnn != 0) 1810 ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS; 1811 1812 if (online != 0) 1813 ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET; 1814 else 1815 ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE; 1816 1817 xpt_action(ccb); 1818 1819 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1820 printf("%s: SIM %s (path id %d) target %s failed with " 1821 "status %#x\n", 1822 __func__, bus_softc->port_name, bus_softc->path_id, 1823 (online != 0) ? "enable" : "disable", 1824 ccb->ccb_h.status); 1825 } else { 1826 printf("%s: SIM %s (path id %d) target %s succeeded\n", 1827 __func__, bus_softc->port_name, bus_softc->path_id, 1828 (online != 0) ? "enable" : "disable"); 1829 } 1830 1831 xpt_free_path(path); 1832 1833 free(ccb, M_TEMP); 1834 1835 return; 1836 } 1837 1838 static void 1839 ctlfe_online(void *arg) 1840 { 1841 struct ctlfe_softc *bus_softc; 1842 struct cam_path *path; 1843 cam_status status; 1844 struct ctlfe_lun_softc *lun_softc; 1845 struct cam_sim *sim; 1846 1847 bus_softc = (struct ctlfe_softc *)arg; 1848 sim = bus_softc->sim; 1849 1850 CAM_SIM_LOCK(sim); 1851 1852 /* 1853 * Create the wildcard LUN before bringing the port online. 1854 */ 1855 status = xpt_create_path(&path, /*periph*/ NULL, 1856 bus_softc->path_id, CAM_TARGET_WILDCARD, 1857 CAM_LUN_WILDCARD); 1858 if (status != CAM_REQ_CMP) { 1859 printf("%s: unable to create path for wildcard periph\n", 1860 __func__); 1861 CAM_SIM_UNLOCK(sim); 1862 return; 1863 } 1864 1865 lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, 1866 M_NOWAIT | M_ZERO); 1867 if (lun_softc == NULL) { 1868 xpt_print(path, "%s: unable to allocate softc for " 1869 "wildcard periph\n", __func__); 1870 xpt_free_path(path); 1871 CAM_SIM_UNLOCK(sim); 1872 return; 1873 } 1874 1875 lun_softc->parent_softc = bus_softc; 1876 lun_softc->flags |= CTLFE_LUN_WILDCARD; 1877 1878 STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, lun_softc, links); 1879 1880 1881 status = cam_periph_alloc(ctlferegister, 1882 ctlfeoninvalidate, 1883 ctlfecleanup, 1884 ctlfestart, 1885 "ctl", 1886 CAM_PERIPH_BIO, 1887 path, 1888 ctlfeasync, 1889 0, 1890 lun_softc); 1891 1892 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1893 const struct cam_status_entry *entry; 1894 1895 entry = cam_fetch_status_entry(status); 1896 1897 printf("%s: CAM error %s (%#x) returned from " 1898 "cam_periph_alloc()\n", __func__, (entry != NULL) ? 
1899 entry->status_text : "Unknown", status); 1900 } 1901 1902 xpt_free_path(path); 1903 1904 ctlfe_onoffline(arg, /*online*/ 1); 1905 1906 CAM_SIM_UNLOCK(sim); 1907 } 1908 1909 static void 1910 ctlfe_offline(void *arg) 1911 { 1912 struct ctlfe_softc *bus_softc; 1913 struct cam_path *path; 1914 cam_status status; 1915 struct cam_periph *periph; 1916 struct cam_sim *sim; 1917 1918 bus_softc = (struct ctlfe_softc *)arg; 1919 sim = bus_softc->sim; 1920 1921 CAM_SIM_LOCK(sim); 1922 1923 ctlfe_onoffline(arg, /*online*/ 0); 1924 1925 /* 1926 * Disable the wildcard LUN for this port now that we have taken 1927 * the port offline. 1928 */ 1929 status = xpt_create_path(&path, /*periph*/ NULL, 1930 bus_softc->path_id, CAM_TARGET_WILDCARD, 1931 CAM_LUN_WILDCARD); 1932 if (status != CAM_REQ_CMP) { 1933 CAM_SIM_UNLOCK(sim); 1934 printf("%s: unable to create path for wildcard periph\n", 1935 __func__); 1936 return; 1937 } 1938 1939 1940 if ((periph = cam_periph_find(path, "ctl")) != NULL) 1941 cam_periph_invalidate(periph); 1942 1943 xpt_free_path(path); 1944 1945 CAM_SIM_UNLOCK(sim); 1946 } 1947 1948 static int 1949 ctlfe_targ_enable(void *arg, struct ctl_id targ_id) 1950 { 1951 return (0); 1952 } 1953 1954 static int 1955 ctlfe_targ_disable(void *arg, struct ctl_id targ_id) 1956 { 1957 return (0); 1958 } 1959 1960 /* 1961 * This will get called to enable a LUN on every bus that is attached to 1962 * CTL. So we only need to create a path/periph for this particular bus. 1963 */ 1964 static int 1965 ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) 1966 { 1967 struct ctlfe_softc *bus_softc; 1968 struct ctlfe_lun_softc *softc; 1969 struct cam_path *path; 1970 struct cam_periph *periph; 1971 struct cam_sim *sim; 1972 cam_status status; 1973 1974 1975 bus_softc = (struct ctlfe_softc *)arg; 1976 sim = bus_softc->sim; 1977 1978 status = xpt_create_path_unlocked(&path, /*periph*/ NULL, 1979 bus_softc->path_id, 1980 targ_id.id, lun_id); 1981 /* XXX KDM need some way to return status to CTL here? */ 1982 if (status != CAM_REQ_CMP) { 1983 printf("%s: could not create path, status %#x\n", __func__, 1984 status); 1985 return (1); 1986 } 1987 1988 softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); 1989 CAM_SIM_LOCK(sim); 1990 periph = cam_periph_find(path, "ctl"); 1991 if (periph != NULL) { 1992 /* We've already got a periph, no need to alloc a new one. */ 1993 xpt_free_path(path); 1994 free(softc, M_CTLFE); 1995 CAM_SIM_UNLOCK(sim); 1996 return (0); 1997 } 1998 1999 softc->parent_softc = bus_softc; 2000 STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); 2001 2002 status = cam_periph_alloc(ctlferegister, 2003 ctlfeoninvalidate, 2004 ctlfecleanup, 2005 ctlfestart, 2006 "ctl", 2007 CAM_PERIPH_BIO, 2008 path, 2009 ctlfeasync, 2010 0, 2011 softc); 2012 2013 xpt_free_path(path); 2014 2015 CAM_SIM_UNLOCK(sim); 2016 2017 return (0); 2018 } 2019 2020 /* 2021 * This will get called when the user removes a LUN to disable that LUN 2022 * on every bus that is attached to CTL. 

/*
 * This will get called when the user removes a LUN to disable that LUN
 * on every bus that is attached to CTL.
 */
static int
ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
	struct ctlfe_softc *softc;
	struct ctlfe_lun_softc *lun_softc;
	struct cam_sim *sim;

	softc = (struct ctlfe_softc *)arg;
	sim = softc->sim;

	CAM_SIM_LOCK(sim);
	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
		struct cam_path *path;

		path = lun_softc->periph->path;

		if ((xpt_path_target_id(path) == targ_id.id)
		 && (xpt_path_lun_id(path) == lun_id)) {
			break;
		}
	}
	if (lun_softc == NULL) {
		CAM_SIM_UNLOCK(sim);
		printf("%s: can't find target %d lun %d\n", __func__,
		       targ_id.id, lun_id);
		return (1);
	}

	cam_periph_invalidate(lun_softc->periph);

	CAM_SIM_UNLOCK(sim);

	return (0);
}

static void
ctlfe_dump_sim(struct cam_sim *sim)
{
	int i;

	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
	       sim->sim_name, sim->unit_number,
	       sim->max_tagged_dev_openings, sim->max_dev_openings);
	printf("%s%d: max_ccbs: %u, ccb_count: %u\n",
	       sim->sim_name, sim->unit_number,
	       sim->max_ccbs, sim->ccb_count);
	printf("%s%d: ccb_freeq is %sempty\n",
	       sim->sim_name, sim->unit_number,
	       (SLIST_FIRST(&sim->ccb_freeq) == NULL) ? "" : "NOT ");
	printf("%s%d: alloc_queue.entries %d, alloc_openings %d\n",
	       sim->sim_name, sim->unit_number,
	       sim->devq->alloc_queue.entries, sim->devq->alloc_openings);
	printf("%s%d: qfrozen_cnt:", sim->sim_name, sim->unit_number);
	for (i = 0; i < CAM_RL_VALUES; i++) {
		printf("%s%u", (i != 0) ? ":" : "",
		       sim->devq->alloc_queue.qfrozen_cnt[i]);
	}
	printf("\n");
}

/*
 * Assumes that the SIM lock is held.
 */
static void
ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
{
	struct ccb_hdr *hdr;
	struct cam_periph *periph;
	int num_items;

	periph = softc->periph;
	num_items = 0;

	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
		union ctl_io *io;

		io = hdr->io_ptr;

		num_items++;

		/*
		 * This can happen when we get an ATIO but can't allocate
		 * a ctl_io.  See the XPT_ACCEPT_TARGET_IO case in
		 * ctlfedone().
		 */
		if (io == NULL) {
			struct ccb_scsiio *csio;

			csio = (struct ccb_scsiio *)hdr;

			xpt_print(periph->path, "CCB %#x ctl_io allocation "
				  "failed\n", csio->tag_id);
			continue;
		}

		/*
		 * Only regular SCSI I/O is put on the work queue, so we
		 * can print sense here.  There may be no sense if it's on
		 * the queue for a DMA, but this serves to print out the
		 * CCB as well.
		 *
		 * XXX KDM switch this over to scsi_sense_print() when
		 * CTL is merged in with CAM.
		 */
		ctl_io_error_print(io, NULL);

		/*
		 * If a status has already been set, we're on the queue
		 * waiting for a CTIO to send that status back to the
		 * initiator, and there is no data transfer to report.
		 */
		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
			continue;

		/*
		 * Otherwise, we're on the queue waiting to do a data
		 * transfer.
		 */
		xpt_print(periph->path, "Total %u, Current %u, Resid %u\n",
			  io->scsiio.kern_total_len,
			  io->scsiio.kern_data_len,
			  io->scsiio.kern_data_resid);
	}

	xpt_print(periph->path, "%d requests total waiting for CCBs\n",
		  num_items);
	xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
		  "freed)\n", (uintmax_t)(softc->ccbs_alloced -
		  softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
		  (uintmax_t)softc->ccbs_freed);
	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
		  "returned)\n", (uintmax_t)(softc->ctios_sent -
		  softc->ctios_returned), (uintmax_t)softc->ctios_sent,
		  (uintmax_t)softc->ctios_returned);
}

/*
 * This function is called when we fail to get a CCB for a DMA or status
 * return to the initiator within the specified time period.
 *
 * The callout code should ensure that we hold the SIM mutex here.
 */
static void
ctlfe_dma_timeout(void *arg)
{
	struct ctlfe_lun_softc *softc;
	struct cam_periph *periph;
	struct cam_sim *sim;
	int num_queued;

	softc = (struct ctlfe_lun_softc *)arg;
	periph = softc->periph;
	sim = xpt_path_sim(periph->path);
	num_queued = 0;

	/*
	 * Nothing to do...
	 */
	if (TAILQ_FIRST(&softc->work_queue) == NULL) {
		xpt_print(periph->path, "TIMEOUT triggered after %d "
			  "seconds, but nothing on work queue??\n",
			  CTLFE_DMA_TIMEOUT);
		return;
	}

	xpt_print(periph->path, "TIMEOUT (%d seconds) waiting for DMA to "
		  "start\n", CTLFE_DMA_TIMEOUT);

	ctlfe_dump_queue(softc);

	ctlfe_dump_sim(sim);

	xpt_print(periph->path, "calling xpt_schedule() to attempt to "
		  "unstick our queue\n");

	xpt_schedule(periph, /*priority*/ 1);

	xpt_print(periph->path, "xpt_schedule() call complete\n");
}
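
/*
 * Note on ctlfe_datamove_done() below (illustrative, not from the original
 * comments): it handles both the "move data" and the "send status" cases,
 * distinguishing them by whether CTL has set a status on the I/O, and it
 * is presumably registered as both the fe_datamove and fe_done methods of
 * this frontend.  It also (re)arms the DMA watchdog serviced by
 * ctlfe_dma_timeout() above, converting CTLFE_DMA_TIMEOUT from seconds to
 * ticks:
 *
 *	callout_reset(&softc->dma_callout, CTLFE_DMA_TIMEOUT * hz,
 *		      ctlfe_dma_timeout, softc);
 */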

/*
 * Datamove/done routine called by CTL.  Put ourselves on the queue to
 * receive a CCB from CAM so we can queue the continue I/O request down
 * to the adapter.
 */
static void
ctlfe_datamove_done(union ctl_io *io)
{
	union ccb *ccb;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct ctlfe_lun_softc *softc;

	ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	sim = xpt_path_sim(ccb->ccb_h.path);

	CAM_SIM_LOCK(sim);

	periph = xpt_path_periph(ccb->ccb_h.path);

	softc = (struct ctlfe_lun_softc *)periph->softc;

	if (io->io_hdr.io_type == CTL_IO_TASK) {
		/*
		 * Task management commands don't require any further
		 * communication back to the adapter.  Requeue the CCB
		 * to the adapter, and free the CTL I/O.
		 */
		xpt_print(ccb->ccb_h.path, "%s: returning task I/O "
			  "tag %#x seq %#x\n", __func__,
			  ccb->cin1.tag_id, ccb->cin1.seq_id);
		/*
		 * Send the notify acknowledge down to the SIM, to let it
		 * know we processed the task management command.
		 */
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
		xpt_action(ccb);
		ctl_free_io(io);
	} else {
		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
			io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
		else
			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;

		TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
				  periph_links.tqe);

		/*
		 * Reset the timeout for our latest active DMA.
		 */
		callout_reset(&softc->dma_callout,
			      CTLFE_DMA_TIMEOUT * hz,
			      ctlfe_dma_timeout, softc);
		/*
		 * Ask the CAM transport layer to send us a CCB to do the
		 * DMA or send status, unless ctlfe_dma_enabled is set to 0.
		 */
		if (ctlfe_dma_enabled != 0)
			xpt_schedule(periph, /*priority*/ 1);
	}

	CAM_SIM_UNLOCK(sim);
}

static void
ctlfe_dump(void)
{
	struct ctlfe_softc *bus_softc;

	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
		struct ctlfe_lun_softc *lun_softc;

		ctlfe_dump_sim(bus_softc->sim);

		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) {
			ctlfe_dump_queue(lun_softc);
		}
	}
}