1 /*- 2 * Copyright (c) 2008, 2009 Silicon Graphics International Corp. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * substantially similar to the "NO WARRANTY" disclaimer below 13 * ("Disclaimer") and any redistribution must be conditioned upon 14 * including a substantially similar Disclaimer requirement for further 15 * binary redistribution. 16 * 17 * NO WARRANTY 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGES. 29 * 30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ 31 */ 32 /* 33 * Peripheral driver interface between CAM and CTL (CAM Target Layer). 34 * 35 * Author: Ken Merry <ken@FreeBSD.org> 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 #include <sys/param.h> 42 #include <sys/queue.h> 43 #include <sys/systm.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/mutex.h> 47 #include <sys/condvar.h> 48 #include <sys/malloc.h> 49 #include <sys/bus.h> 50 #include <sys/endian.h> 51 #include <sys/sbuf.h> 52 #include <sys/sysctl.h> 53 #include <sys/types.h> 54 #include <sys/systm.h> 55 #include <machine/bus.h> 56 57 #include <cam/cam.h> 58 #include <cam/cam_ccb.h> 59 #include <cam/cam_periph.h> 60 #include <cam/cam_queue.h> 61 #include <cam/cam_xpt_periph.h> 62 #include <cam/cam_debug.h> 63 #include <cam/cam_sim.h> 64 #include <cam/cam_xpt.h> 65 66 #include <cam/scsi/scsi_all.h> 67 #include <cam/scsi/scsi_message.h> 68 69 #include <cam/ctl/ctl_io.h> 70 #include <cam/ctl/ctl.h> 71 #include <cam/ctl/ctl_frontend.h> 72 #include <cam/ctl/ctl_util.h> 73 #include <cam/ctl/ctl_error.h> 74 75 struct ctlfe_softc { 76 struct ctl_port port; 77 path_id_t path_id; 78 target_id_t target_id; 79 u_int maxio; 80 struct cam_sim *sim; 81 char port_name[DEV_IDLEN]; 82 struct mtx lun_softc_mtx; 83 STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; 84 STAILQ_ENTRY(ctlfe_softc) links; 85 }; 86 87 STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; 88 struct mtx ctlfe_list_mtx; 89 static char ctlfe_mtx_desc[] = "ctlfelist"; 90 #ifdef CTLFE_INIT_ENABLE 91 static int ctlfe_max_targets = 1; 92 static int ctlfe_num_targets = 0; 93 #endif 94 95 typedef enum { 96 CTLFE_LUN_NONE = 0x00, 97 CTLFE_LUN_WILDCARD = 0x01 98 } ctlfe_lun_flags; 99 100 struct ctlfe_lun_softc { 101 struct ctlfe_softc *parent_softc; 102 struct cam_periph *periph; 103 ctlfe_lun_flags flags; 104 uint64_t ccbs_alloced; 105 uint64_t ccbs_freed; 106 uint64_t ctios_sent; 107 uint64_t ctios_returned; 108 uint64_t 
atios_alloced; 109 uint64_t atios_freed; 110 uint64_t inots_alloced; 111 uint64_t inots_freed; 112 /* bus_dma_tag_t dma_tag; */ 113 TAILQ_HEAD(, ccb_hdr) work_queue; 114 STAILQ_ENTRY(ctlfe_lun_softc) links; 115 }; 116 117 typedef enum { 118 CTLFE_CMD_NONE = 0x00, 119 CTLFE_CMD_PIECEWISE = 0x01 120 } ctlfe_cmd_flags; 121 122 /* 123 * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h. 124 * Currently that is 600 bytes. 125 */ 126 struct ctlfe_lun_cmd_info { 127 int cur_transfer_index; 128 size_t cur_transfer_off; 129 ctlfe_cmd_flags flags; 130 /* 131 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 132 * bytes on amd64. So with 32 elements, this is 256 bytes on 133 * i386 and 512 bytes on amd64. 134 */ 135 #define CTLFE_MAX_SEGS 32 136 bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS]; 137 }; 138 CTASSERT(sizeof(struct ctlfe_lun_cmd_info) <= CTL_PORT_PRIV_SIZE); 139 140 /* 141 * When we register the adapter/bus, request that this many ctl_ios be 142 * allocated. This should be the maximum supported by the adapter, but we 143 * currently don't have a way to get that back from the path inquiry. 144 * XXX KDM add that to the path inquiry. 145 */ 146 #define CTLFE_REQ_CTL_IO 4096 147 /* 148 * Number of Accept Target I/O CCBs to allocate and queue down to the 149 * adapter per LUN. 150 * XXX KDM should this be controlled by CTL? 151 */ 152 #define CTLFE_ATIO_PER_LUN 1024 153 /* 154 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to 155 * allocate and queue down to the adapter per LUN. 156 * XXX KDM should this be controlled by CTL? 157 */ 158 #define CTLFE_IN_PER_LUN 1024 159 160 /* 161 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending 162 * status to the initiator. The SIM is expected to have its own timeouts, 163 * so we're not putting this timeout around the CCB execution time. The 164 * SIM should timeout and let us know if it has an issue. 165 */ 166 #define CTLFE_DMA_TIMEOUT 60 167 168 /* 169 * Turn this on to enable extra debugging prints. 170 */ 171 #if 0 172 #define CTLFE_DEBUG 173 #endif 174 175 /* 176 * Use randomly assigned WWNN/WWPN values. This is to work around an issue 177 * in the FreeBSD initiator that makes it unable to rescan the target if 178 * the target gets rebooted and the WWNN/WWPN stay the same. 
179 */ 180 #if 0 181 #define RANDOM_WWNN 182 #endif 183 184 MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); 185 186 #define io_ptr ppriv_ptr0 187 188 /* This is only used in the CTIO */ 189 #define ccb_atio ppriv_ptr1 190 191 int ctlfeinitialize(void); 192 void ctlfeshutdown(void); 193 static periph_init_t ctlfeperiphinit; 194 static void ctlfeasync(void *callback_arg, uint32_t code, 195 struct cam_path *path, void *arg); 196 static periph_ctor_t ctlferegister; 197 static periph_oninv_t ctlfeoninvalidate; 198 static periph_dtor_t ctlfecleanup; 199 static periph_start_t ctlfestart; 200 static void ctlfedone(struct cam_periph *periph, 201 union ccb *done_ccb); 202 203 static void ctlfe_onoffline(void *arg, int online); 204 static void ctlfe_online(void *arg); 205 static void ctlfe_offline(void *arg); 206 static int ctlfe_lun_enable(void *arg, int lun_id); 207 static int ctlfe_lun_disable(void *arg, int lun_id); 208 static void ctlfe_dump_sim(struct cam_sim *sim); 209 static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); 210 static void ctlfe_datamove(union ctl_io *io); 211 static void ctlfe_done(union ctl_io *io); 212 static void ctlfe_dump(void); 213 214 static struct periph_driver ctlfe_driver = 215 { 216 ctlfeperiphinit, "ctl", 217 TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0, 218 CAM_PERIPH_DRV_EARLY 219 }; 220 221 static struct ctl_frontend ctlfe_frontend = 222 { 223 .name = "camtgt", 224 .init = ctlfeinitialize, 225 .fe_dump = ctlfe_dump, 226 .shutdown = ctlfeshutdown, 227 }; 228 CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend); 229 230 void 231 ctlfeshutdown(void) 232 { 233 return; 234 } 235 236 int 237 ctlfeinitialize(void) 238 { 239 240 STAILQ_INIT(&ctlfe_softc_list); 241 mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); 242 periphdriver_register(&ctlfe_driver); 243 return (0); 244 } 245 246 void 247 ctlfeperiphinit(void) 248 { 249 cam_status status; 250 251 status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | 252 AC_CONTRACT, ctlfeasync, NULL, NULL); 253 if (status != CAM_REQ_CMP) { 254 printf("ctl: Failed to attach async callback due to CAM " 255 "status 0x%x!\n", status); 256 } 257 } 258 259 static void 260 ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) 261 { 262 struct ctlfe_softc *softc; 263 264 #ifdef CTLFEDEBUG 265 printf("%s: entered\n", __func__); 266 #endif 267 268 mtx_lock(&ctlfe_list_mtx); 269 STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { 270 if (softc->path_id == xpt_path_path_id(path)) 271 break; 272 } 273 mtx_unlock(&ctlfe_list_mtx); 274 275 /* 276 * When a new path gets registered, and it is capable of target 277 * mode, go ahead and attach. Later on, we may need to be more 278 * selective, but for now this will be sufficient. 
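 * "Capable of target mode" is judged from the path inquiry data the
 * SIM handed us with this async event: the PIT_PROCESSOR bit in
 * cpi->target_sprt is checked below, and SIMs that do not advertise
 * target support are simply skipped.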
279 */ 280 switch (code) { 281 case AC_PATH_REGISTERED: { 282 struct ctl_port *port; 283 struct ccb_pathinq *cpi; 284 int retval; 285 286 cpi = (struct ccb_pathinq *)arg; 287 288 /* Don't attach if it doesn't support target mode */ 289 if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { 290 #ifdef CTLFEDEBUG 291 printf("%s: SIM %s%d doesn't support target mode\n", 292 __func__, cpi->dev_name, cpi->unit_number); 293 #endif 294 break; 295 } 296 297 if (softc != NULL) { 298 #ifdef CTLFEDEBUG 299 printf("%s: CTL port for CAM path %u already exists\n", 300 __func__, xpt_path_path_id(path)); 301 #endif 302 break; 303 } 304 305 #ifdef CTLFE_INIT_ENABLE 306 if (ctlfe_num_targets >= ctlfe_max_targets) { 307 union ccb *ccb; 308 309 ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, 310 M_NOWAIT | M_ZERO); 311 if (ccb == NULL) { 312 printf("%s: unable to malloc CCB!\n", __func__); 313 return; 314 } 315 xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); 316 317 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 318 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 319 ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR; 320 321 xpt_action(ccb); 322 323 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != 324 CAM_REQ_CMP) { 325 printf("%s: SIM %s%d (path id %d) initiator " 326 "enable failed with status %#x\n", 327 __func__, cpi->dev_name, 328 cpi->unit_number, cpi->ccb_h.path_id, 329 ccb->ccb_h.status); 330 } else { 331 printf("%s: SIM %s%d (path id %d) initiator " 332 "enable succeeded\n", 333 __func__, cpi->dev_name, 334 cpi->unit_number, cpi->ccb_h.path_id); 335 } 336 337 free(ccb, M_TEMP); 338 339 break; 340 } else { 341 ctlfe_num_targets++; 342 } 343 344 printf("%s: ctlfe_num_targets = %d\n", __func__, 345 ctlfe_num_targets); 346 #endif /* CTLFE_INIT_ENABLE */ 347 348 /* 349 * We're in an interrupt context here, so we have to 350 * use M_NOWAIT. Of course this means trouble if we 351 * can't allocate memory. 352 */ 353 softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO); 354 if (softc == NULL) { 355 printf("%s: unable to malloc %zd bytes for softc\n", 356 __func__, sizeof(*softc)); 357 return; 358 } 359 360 softc->path_id = cpi->ccb_h.path_id; 361 softc->target_id = cpi->initiator_id; 362 softc->sim = xpt_path_sim(path); 363 if (cpi->maxio != 0) 364 softc->maxio = cpi->maxio; 365 else 366 softc->maxio = DFLTPHYS; 367 mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF); 368 STAILQ_INIT(&softc->lun_softc_list); 369 370 port = &softc->port; 371 port->frontend = &ctlfe_frontend; 372 373 /* 374 * XXX KDM should we be more accurate here ? 375 */ 376 if (cpi->transport == XPORT_FC) 377 port->port_type = CTL_PORT_FC; 378 else if (cpi->transport == XPORT_SAS) 379 port->port_type = CTL_PORT_SAS; 380 else 381 port->port_type = CTL_PORT_SCSI; 382 383 /* XXX KDM what should the real number be here? */ 384 port->num_requested_ctl_io = 4096; 385 snprintf(softc->port_name, sizeof(softc->port_name), 386 "%s%d", cpi->dev_name, cpi->unit_number); 387 /* 388 * XXX KDM it would be nice to allocate storage in the 389 * frontend structure itself. 
390 */ 391 port->port_name = softc->port_name; 392 port->physical_port = cpi->bus_id; 393 port->virtual_port = 0; 394 port->port_online = ctlfe_online; 395 port->port_offline = ctlfe_offline; 396 port->onoff_arg = softc; 397 port->lun_enable = ctlfe_lun_enable; 398 port->lun_disable = ctlfe_lun_disable; 399 port->targ_lun_arg = softc; 400 port->fe_datamove = ctlfe_datamove; 401 port->fe_done = ctlfe_done; 402 /* 403 * XXX KDM the path inquiry doesn't give us the maximum 404 * number of targets supported. 405 */ 406 port->max_targets = cpi->max_target; 407 port->max_target_id = cpi->max_target; 408 409 /* 410 * XXX KDM need to figure out whether we're the master or 411 * slave. 412 */ 413 #ifdef CTLFEDEBUG 414 printf("%s: calling ctl_port_register() for %s%d\n", 415 __func__, cpi->dev_name, cpi->unit_number); 416 #endif 417 retval = ctl_port_register(port); 418 if (retval != 0) { 419 printf("%s: ctl_port_register() failed with " 420 "error %d!\n", __func__, retval); 421 mtx_destroy(&softc->lun_softc_mtx); 422 free(softc, M_CTLFE); 423 break; 424 } else { 425 mtx_lock(&ctlfe_list_mtx); 426 STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links); 427 mtx_unlock(&ctlfe_list_mtx); 428 } 429 430 break; 431 } 432 case AC_PATH_DEREGISTERED: { 433 434 if (softc != NULL) { 435 /* 436 * XXX KDM are we certain at this point that there 437 * are no outstanding commands for this frontend? 438 */ 439 mtx_lock(&ctlfe_list_mtx); 440 STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc, 441 links); 442 mtx_unlock(&ctlfe_list_mtx); 443 ctl_port_deregister(&softc->port); 444 mtx_destroy(&softc->lun_softc_mtx); 445 free(softc, M_CTLFE); 446 } 447 break; 448 } 449 case AC_CONTRACT: { 450 struct ac_contract *ac; 451 452 ac = (struct ac_contract *)arg; 453 454 switch (ac->contract_number) { 455 case AC_CONTRACT_DEV_CHG: { 456 struct ac_device_changed *dev_chg; 457 int retval; 458 459 dev_chg = (struct ac_device_changed *)ac->contract_data; 460 461 printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", 462 __func__, dev_chg->wwpn, dev_chg->port, 463 xpt_path_path_id(path), dev_chg->target, 464 (dev_chg->arrived == 0) ? "left" : "arrived"); 465 466 if (softc == NULL) { 467 printf("%s: CTL port for CAM path %u not " 468 "found!\n", __func__, 469 xpt_path_path_id(path)); 470 break; 471 } 472 if (dev_chg->arrived != 0) { 473 retval = ctl_add_initiator(&softc->port, 474 dev_chg->target, dev_chg->wwpn, NULL); 475 } else { 476 retval = ctl_remove_initiator(&softc->port, 477 dev_chg->target); 478 } 479 480 if (retval < 0) { 481 printf("%s: could not %s port %d iid %u " 482 "WWPN %#jx!\n", __func__, 483 (dev_chg->arrived != 0) ? 
"add" : 484 "remove", softc->port.targ_port, 485 dev_chg->target, 486 (uintmax_t)dev_chg->wwpn); 487 } 488 break; 489 } 490 default: 491 printf("%s: unsupported contract number %ju\n", 492 __func__, (uintmax_t)ac->contract_number); 493 break; 494 } 495 break; 496 } 497 default: 498 break; 499 } 500 } 501 502 static cam_status 503 ctlferegister(struct cam_periph *periph, void *arg) 504 { 505 struct ctlfe_softc *bus_softc; 506 struct ctlfe_lun_softc *softc; 507 union ccb en_lun_ccb; 508 cam_status status; 509 int i; 510 511 softc = (struct ctlfe_lun_softc *)arg; 512 bus_softc = softc->parent_softc; 513 514 TAILQ_INIT(&softc->work_queue); 515 softc->periph = periph; 516 periph->softc = softc; 517 518 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 519 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 520 en_lun_ccb.cel.grp6_len = 0; 521 en_lun_ccb.cel.grp7_len = 0; 522 en_lun_ccb.cel.enable = 1; 523 xpt_action(&en_lun_ccb); 524 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 525 if (status != CAM_REQ_CMP) { 526 xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", 527 __func__, en_lun_ccb.ccb_h.status); 528 return (status); 529 } 530 531 status = CAM_REQ_CMP; 532 533 for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { 534 union ccb *new_ccb; 535 union ctl_io *new_io; 536 537 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 538 M_ZERO|M_NOWAIT); 539 if (new_ccb == NULL) { 540 status = CAM_RESRC_UNAVAIL; 541 break; 542 } 543 new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); 544 if (new_io == NULL) { 545 free(new_ccb, M_CTLFE); 546 status = CAM_RESRC_UNAVAIL; 547 break; 548 } 549 softc->atios_alloced++; 550 new_ccb->ccb_h.io_ptr = new_io; 551 552 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 553 new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; 554 new_ccb->ccb_h.cbfcnp = ctlfedone; 555 new_ccb->ccb_h.flags |= CAM_UNLOCKED; 556 xpt_action(new_ccb); 557 status = new_ccb->ccb_h.status; 558 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 559 ctl_free_io(new_io); 560 free(new_ccb, M_CTLFE); 561 break; 562 } 563 } 564 565 status = cam_periph_acquire(periph); 566 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 567 xpt_print(periph->path, "%s: could not acquire reference " 568 "count, status = %#x\n", __func__, status); 569 return (status); 570 } 571 572 if (i == 0) { 573 xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " 574 "status 0x%x\n", __func__, status); 575 return (CAM_REQ_CMP_ERR); 576 } 577 578 for (i = 0; i < CTLFE_IN_PER_LUN; i++) { 579 union ccb *new_ccb; 580 union ctl_io *new_io; 581 582 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 583 M_ZERO|M_NOWAIT); 584 if (new_ccb == NULL) { 585 status = CAM_RESRC_UNAVAIL; 586 break; 587 } 588 new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); 589 if (new_io == NULL) { 590 free(new_ccb, M_CTLFE); 591 status = CAM_RESRC_UNAVAIL; 592 break; 593 } 594 softc->inots_alloced++; 595 new_ccb->ccb_h.io_ptr = new_io; 596 597 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 598 new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 599 new_ccb->ccb_h.cbfcnp = ctlfedone; 600 new_ccb->ccb_h.flags |= CAM_UNLOCKED; 601 xpt_action(new_ccb); 602 status = new_ccb->ccb_h.status; 603 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 604 /* 605 * Note that we don't free the CCB here. If the 606 * status is not CAM_REQ_INPROG, then we're 607 * probably talking to a SIM that says it is 608 * target-capable but doesn't support the 609 * XPT_IMMEDIATE_NOTIFY CCB. i.e. 
it supports the 610 * older API. In that case, it'll call xpt_done() 611 * on the CCB, and we need to free it in our done 612 * routine as a result. 613 */ 614 break; 615 } 616 } 617 if ((i == 0) 618 || (status != CAM_REQ_INPROG)) { 619 xpt_print(periph->path, "%s: could not allocate immediate " 620 "notify CCBs, status 0x%x\n", __func__, status); 621 return (CAM_REQ_CMP_ERR); 622 } 623 mtx_lock(&bus_softc->lun_softc_mtx); 624 STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); 625 mtx_unlock(&bus_softc->lun_softc_mtx); 626 return (CAM_REQ_CMP); 627 } 628 629 static void 630 ctlfeoninvalidate(struct cam_periph *periph) 631 { 632 union ccb en_lun_ccb; 633 cam_status status; 634 struct ctlfe_softc *bus_softc; 635 struct ctlfe_lun_softc *softc; 636 637 softc = (struct ctlfe_lun_softc *)periph->softc; 638 639 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 640 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 641 en_lun_ccb.cel.grp6_len = 0; 642 en_lun_ccb.cel.grp7_len = 0; 643 en_lun_ccb.cel.enable = 0; 644 xpt_action(&en_lun_ccb); 645 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 646 if (status != CAM_REQ_CMP) { 647 xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", 648 __func__, en_lun_ccb.ccb_h.status); 649 /* 650 * XXX KDM what do we do now? 651 */ 652 } 653 654 bus_softc = softc->parent_softc; 655 mtx_lock(&bus_softc->lun_softc_mtx); 656 STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); 657 mtx_unlock(&bus_softc->lun_softc_mtx); 658 } 659 660 static void 661 ctlfecleanup(struct cam_periph *periph) 662 { 663 struct ctlfe_lun_softc *softc; 664 665 softc = (struct ctlfe_lun_softc *)periph->softc; 666 667 KASSERT(softc->ccbs_freed == softc->ccbs_alloced, ("%s: " 668 "ccbs_freed %ju != ccbs_alloced %ju", __func__, 669 softc->ccbs_freed, softc->ccbs_alloced)); 670 KASSERT(softc->ctios_returned == softc->ctios_sent, ("%s: " 671 "ctios_returned %ju != ctios_sent %ju", __func__, 672 softc->ctios_returned, softc->ctios_sent)); 673 KASSERT(softc->atios_freed == softc->atios_alloced, ("%s: " 674 "atios_freed %ju != atios_alloced %ju", __func__, 675 softc->atios_freed, softc->atios_alloced)); 676 KASSERT(softc->inots_freed == softc->inots_alloced, ("%s: " 677 "inots_freed %ju != inots_alloced %ju", __func__, 678 softc->inots_freed, softc->inots_alloced)); 679 680 free(softc, M_CTLFE); 681 } 682 683 static void 684 ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io, 685 ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len, 686 u_int16_t *sglist_cnt) 687 { 688 struct ctlfe_softc *bus_softc; 689 struct ctlfe_lun_cmd_info *cmd_info; 690 struct ctl_sg_entry *ctl_sglist; 691 bus_dma_segment_t *cam_sglist; 692 size_t off; 693 int i, idx; 694 695 cmd_info = (struct ctlfe_lun_cmd_info *)io->io_hdr.port_priv; 696 bus_softc = softc->parent_softc; 697 698 /* 699 * Set the direction, relative to the initiator. 700 */ 701 *flags &= ~CAM_DIR_MASK; 702 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 703 *flags |= CAM_DIR_IN; 704 else 705 *flags |= CAM_DIR_OUT; 706 707 *flags &= ~CAM_DATA_MASK; 708 idx = cmd_info->cur_transfer_index; 709 off = cmd_info->cur_transfer_off; 710 cmd_info->flags &= ~CTLFE_CMD_PIECEWISE; 711 if (io->scsiio.kern_sg_entries == 0) { 712 /* No S/G list. 
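 * A single contiguous buffer: at most maxio bytes are mapped into this
 * CTIO.  If the request is larger, only maxio bytes are sent now,
 * CTLFE_CMD_PIECEWISE is set, and cur_transfer_off marks where the
 * next piece starts, so a large request (e.g. 1 MB against a 128 KB
 * maxio) is carved into several CTIOs.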
*/ 713 *data_ptr = io->scsiio.kern_data_ptr + off; 714 if (io->scsiio.kern_data_len - off <= bus_softc->maxio) { 715 *dxfer_len = io->scsiio.kern_data_len - off; 716 } else { 717 *dxfer_len = bus_softc->maxio; 718 cmd_info->cur_transfer_index = -1; 719 cmd_info->cur_transfer_off = bus_softc->maxio; 720 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 721 } 722 *sglist_cnt = 0; 723 724 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 725 *flags |= CAM_DATA_PADDR; 726 else 727 *flags |= CAM_DATA_VADDR; 728 } else { 729 /* S/G list with physical or virtual pointers. */ 730 ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 731 cam_sglist = cmd_info->cam_sglist; 732 *dxfer_len = 0; 733 for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) { 734 cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off; 735 if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) { 736 cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off; 737 *dxfer_len += cam_sglist[i].ds_len; 738 } else { 739 cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len; 740 cmd_info->cur_transfer_index = idx + i; 741 cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off; 742 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 743 *dxfer_len += cam_sglist[i].ds_len; 744 if (ctl_sglist[i].len != 0) 745 i++; 746 break; 747 } 748 if (i == (CTLFE_MAX_SEGS - 1) && 749 idx + i < (io->scsiio.kern_sg_entries - 1)) { 750 cmd_info->cur_transfer_index = idx + i + 1; 751 cmd_info->cur_transfer_off = 0; 752 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 753 i++; 754 break; 755 } 756 off = 0; 757 } 758 *sglist_cnt = i; 759 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 760 *flags |= CAM_DATA_SG_PADDR; 761 else 762 *flags |= CAM_DATA_SG; 763 *data_ptr = (uint8_t *)cam_sglist; 764 } 765 } 766 767 static void 768 ctlfestart(struct cam_periph *periph, union ccb *start_ccb) 769 { 770 struct ctlfe_lun_softc *softc; 771 struct ctlfe_lun_cmd_info *cmd_info; 772 struct ccb_hdr *ccb_h; 773 struct ccb_accept_tio *atio; 774 struct ccb_scsiio *csio; 775 uint8_t *data_ptr; 776 uint32_t dxfer_len; 777 ccb_flags flags; 778 union ctl_io *io; 779 uint8_t scsi_status; 780 781 softc = (struct ctlfe_lun_softc *)periph->softc; 782 softc->ccbs_alloced++; 783 784 ccb_h = TAILQ_FIRST(&softc->work_queue); 785 if (ccb_h == NULL) { 786 softc->ccbs_freed++; 787 xpt_release_ccb(start_ccb); 788 return; 789 } 790 791 /* Take the ATIO off the work queue */ 792 TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe); 793 atio = (struct ccb_accept_tio *)ccb_h; 794 io = (union ctl_io *)ccb_h->io_ptr; 795 csio = &start_ccb->csio; 796 797 flags = atio->ccb_h.flags & 798 (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); 799 cmd_info = (struct ctlfe_lun_cmd_info *)io->io_hdr.port_priv; 800 cmd_info->cur_transfer_index = 0; 801 cmd_info->cur_transfer_off = 0; 802 cmd_info->flags = 0; 803 804 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { 805 /* 806 * Datamove call, we need to setup the S/G list. 807 */ 808 scsi_status = 0; 809 csio->cdb_len = atio->cdb_len; 810 ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, 811 &csio->sglist_cnt); 812 io->scsiio.ext_data_filled += dxfer_len; 813 if (io->scsiio.ext_data_filled > io->scsiio.kern_total_len) { 814 xpt_print(periph->path, "%s: tag 0x%04x " 815 "fill len %u > total %u\n", 816 __func__, io->scsiio.tag_num, 817 io->scsiio.ext_data_filled, 818 io->scsiio.kern_total_len); 819 } 820 } else { 821 /* 822 * We're done, send status back. 
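 * (This is the status half of the exchange rather than a datamove:
 * nothing is mapped for data, dxfer_len stays 0, and the SCSI status
 * and any sense data from CTL are loaded into the CTIO further down.
 * The exception is an aborted command, handled immediately below,
 * which only needs its ATIO recycled back to the SIM.)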
823 */ 824 if ((io->io_hdr.flags & CTL_FLAG_ABORT) && 825 (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) { 826 io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; 827 828 /* 829 * If this command was aborted, we don't 830 * need to send status back to the SIM. 831 * Just free the CTIO and ctl_io, and 832 * recycle the ATIO back to the SIM. 833 */ 834 xpt_print(periph->path, "%s: aborted " 835 "command 0x%04x discarded\n", 836 __func__, io->scsiio.tag_num); 837 /* 838 * For a wildcard attachment, commands can 839 * come in with a specific target/lun. Reset 840 * the target and LUN fields back to the 841 * wildcard values before we send them back 842 * down to the SIM. The SIM has a wildcard 843 * LUN enabled, not whatever target/lun 844 * these happened to be. 845 */ 846 if (softc->flags & CTLFE_LUN_WILDCARD) { 847 atio->ccb_h.target_id = CAM_TARGET_WILDCARD; 848 atio->ccb_h.target_lun = CAM_LUN_WILDCARD; 849 } 850 851 if (atio->ccb_h.func_code != XPT_ACCEPT_TARGET_IO) { 852 xpt_print(periph->path, "%s: func_code " 853 "is %#x\n", __func__, 854 atio->ccb_h.func_code); 855 } 856 start_ccb->ccb_h.func_code = XPT_ABORT; 857 start_ccb->cab.abort_ccb = (union ccb *)atio; 858 859 /* Tell the SIM that we've aborted this ATIO */ 860 xpt_action(start_ccb); 861 softc->ccbs_freed++; 862 xpt_release_ccb(start_ccb); 863 864 /* 865 * Send the ATIO back down to the SIM. 866 */ 867 xpt_action((union ccb *)atio); 868 869 /* 870 * If we still have work to do, ask for 871 * another CCB. Otherwise, deactivate our 872 * callout. 873 */ 874 if (!TAILQ_EMPTY(&softc->work_queue)) 875 xpt_schedule(periph, /*priority*/ 1); 876 return; 877 } 878 data_ptr = NULL; 879 dxfer_len = 0; 880 csio->sglist_cnt = 0; 881 scsi_status = 0; 882 } 883 if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) && 884 (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 && 885 ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 || 886 io->io_hdr.status == CTL_SUCCESS)) { 887 flags |= CAM_SEND_STATUS; 888 scsi_status = io->scsiio.scsi_status; 889 csio->sense_len = io->scsiio.sense_len; 890 #ifdef CTLFEDEBUG 891 printf("%s: tag %04x status %x\n", __func__, 892 atio->tag_id, io->io_hdr.status); 893 #endif 894 if (csio->sense_len != 0) { 895 csio->sense_data = io->scsiio.sense_data; 896 flags |= CAM_SEND_SENSE; 897 } else if (scsi_status == SCSI_STATUS_CHECK_COND) { 898 xpt_print(periph->path, "%s: check condition " 899 "with no sense\n", __func__); 900 } 901 } 902 903 #ifdef CTLFEDEBUG 904 printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, 905 (flags & CAM_SEND_STATUS) ? 
"done" : "datamove", 906 atio->tag_id, flags, data_ptr, dxfer_len); 907 #endif 908 909 /* 910 * Valid combinations: 911 * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, 912 * sglist_cnt = 0 913 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, 914 * sglist_cnt = 0 915 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, 916 * sglist_cnt != 0 917 */ 918 #ifdef CTLFEDEBUG 919 if (((flags & CAM_SEND_STATUS) 920 && (((flags & CAM_DATA_SG) != 0) 921 || (dxfer_len != 0) 922 || (csio->sglist_cnt != 0))) 923 || (((flags & CAM_SEND_STATUS) == 0) 924 && (dxfer_len == 0)) 925 || ((flags & CAM_DATA_SG) 926 && (csio->sglist_cnt == 0)) 927 || (((flags & CAM_DATA_SG) == 0) 928 && (csio->sglist_cnt != 0))) { 929 printf("%s: tag %04x cdb %02x flags %#x dxfer_len " 930 "%d sg %u\n", __func__, atio->tag_id, 931 atio->cdb_io.cdb_bytes[0], flags, dxfer_len, 932 csio->sglist_cnt); 933 printf("%s: tag %04x io status %#x\n", __func__, 934 atio->tag_id, io->io_hdr.status); 935 } 936 #endif 937 cam_fill_ctio(csio, 938 /*retries*/ 2, 939 ctlfedone, 940 flags, 941 (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, 942 atio->tag_id, 943 atio->init_id, 944 scsi_status, 945 /*data_ptr*/ data_ptr, 946 /*dxfer_len*/ dxfer_len, 947 /*timeout*/ 5 * 1000); 948 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 949 start_ccb->ccb_h.ccb_atio = atio; 950 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 951 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 952 io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED); 953 954 softc->ctios_sent++; 955 956 cam_periph_unlock(periph); 957 xpt_action(start_ccb); 958 cam_periph_lock(periph); 959 960 /* 961 * If we still have work to do, ask for another CCB. 962 */ 963 if (!TAILQ_EMPTY(&softc->work_queue)) 964 xpt_schedule(periph, /*priority*/ 1); 965 } 966 967 static void 968 ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) 969 { 970 struct ctlfe_lun_softc *softc; 971 972 softc = (struct ctlfe_lun_softc *)periph->softc; 973 974 switch (ccb->ccb_h.func_code) { 975 case XPT_ACCEPT_TARGET_IO: 976 softc->atios_freed++; 977 break; 978 case XPT_IMMEDIATE_NOTIFY: 979 case XPT_NOTIFY_ACKNOWLEDGE: 980 softc->inots_freed++; 981 break; 982 default: 983 break; 984 } 985 986 ctl_free_io(ccb->ccb_h.io_ptr); 987 free(ccb, M_CTLFE); 988 989 KASSERT(softc->atios_freed <= softc->atios_alloced, ("%s: " 990 "atios_freed %ju > atios_alloced %ju", __func__, 991 softc->atios_freed, softc->atios_alloced)); 992 KASSERT(softc->inots_freed <= softc->inots_alloced, ("%s: " 993 "inots_freed %ju > inots_alloced %ju", __func__, 994 softc->inots_freed, softc->inots_alloced)); 995 996 /* 997 * If we have received all of our CCBs, we can release our 998 * reference on the peripheral driver. It will probably go away 999 * now. 1000 */ 1001 if ((softc->atios_freed == softc->atios_alloced) 1002 && (softc->inots_freed == softc->inots_alloced)) { 1003 cam_periph_release_locked(periph); 1004 } 1005 } 1006 1007 static int 1008 ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) 1009 { 1010 uint64_t lba; 1011 uint32_t num_blocks, nbc; 1012 uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)? 
1013 atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes; 1014 1015 nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ 1016 1017 switch (cmdbyt[0]) { 1018 case READ_6: 1019 case WRITE_6: 1020 { 1021 struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; 1022 lba = scsi_3btoul(cdb->addr); 1023 lba &= 0x1fffff; 1024 num_blocks = cdb->length; 1025 if (num_blocks == 0) 1026 num_blocks = 256; 1027 lba += nbc; 1028 num_blocks -= nbc; 1029 scsi_ulto3b(lba, cdb->addr); 1030 cdb->length = num_blocks; 1031 break; 1032 } 1033 case READ_10: 1034 case WRITE_10: 1035 { 1036 struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; 1037 lba = scsi_4btoul(cdb->addr); 1038 num_blocks = scsi_2btoul(cdb->length); 1039 lba += nbc; 1040 num_blocks -= nbc; 1041 scsi_ulto4b(lba, cdb->addr); 1042 scsi_ulto2b(num_blocks, cdb->length); 1043 break; 1044 } 1045 case READ_12: 1046 case WRITE_12: 1047 { 1048 struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; 1049 lba = scsi_4btoul(cdb->addr); 1050 num_blocks = scsi_4btoul(cdb->length); 1051 lba += nbc; 1052 num_blocks -= nbc; 1053 scsi_ulto4b(lba, cdb->addr); 1054 scsi_ulto4b(num_blocks, cdb->length); 1055 break; 1056 } 1057 case READ_16: 1058 case WRITE_16: 1059 case WRITE_ATOMIC_16: 1060 { 1061 struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; 1062 lba = scsi_8btou64(cdb->addr); 1063 num_blocks = scsi_4btoul(cdb->length); 1064 lba += nbc; 1065 num_blocks -= nbc; 1066 scsi_u64to8b(lba, cdb->addr); 1067 scsi_ulto4b(num_blocks, cdb->length); 1068 break; 1069 } 1070 default: 1071 return -1; 1072 } 1073 return (0); 1074 } 1075 1076 static void 1077 ctlfedone(struct cam_periph *periph, union ccb *done_ccb) 1078 { 1079 struct ctlfe_lun_softc *softc; 1080 struct ctlfe_softc *bus_softc; 1081 struct ccb_accept_tio *atio = NULL; 1082 union ctl_io *io = NULL; 1083 struct mtx *mtx; 1084 1085 KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0, 1086 ("CCB in ctlfedone() without CAM_UNLOCKED flag")); 1087 #ifdef CTLFE_DEBUG 1088 printf("%s: entered, func_code = %#x\n", __func__, 1089 done_ccb->ccb_h.func_code); 1090 #endif 1091 1092 /* 1093 * At this point CTL has no known use case for device queue freezes. 1094 * In case some SIM think different -- drop its freeze right here. 1095 */ 1096 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1097 cam_release_devq(periph->path, 1098 /*relsim_flags*/0, 1099 /*reduction*/0, 1100 /*timeout*/0, 1101 /*getcount_only*/0); 1102 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1103 } 1104 1105 softc = (struct ctlfe_lun_softc *)periph->softc; 1106 bus_softc = softc->parent_softc; 1107 mtx = cam_periph_mtx(periph); 1108 mtx_lock(mtx); 1109 1110 /* 1111 * If the peripheral is invalid, ATIOs and immediate notify CCBs 1112 * need to be freed. Most of the ATIOs and INOTs that come back 1113 * will be CCBs that are being returned from the SIM as a result of 1114 * our disabling the LUN. 1115 * 1116 * Other CCB types are handled in their respective cases below. 1117 */ 1118 if (periph->flags & CAM_PERIPH_INVALID) { 1119 switch (done_ccb->ccb_h.func_code) { 1120 case XPT_ACCEPT_TARGET_IO: 1121 case XPT_IMMEDIATE_NOTIFY: 1122 case XPT_NOTIFY_ACKNOWLEDGE: 1123 ctlfe_free_ccb(periph, done_ccb); 1124 goto out; 1125 default: 1126 break; 1127 } 1128 1129 } 1130 switch (done_ccb->ccb_h.func_code) { 1131 case XPT_ACCEPT_TARGET_IO: { 1132 1133 atio = &done_ccb->atio; 1134 1135 resubmit: 1136 /* 1137 * Allocate a ctl_io, pass it to CTL, and wait for the 1138 * datamove or done. 
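 * The ctl_io was allocated alongside this ATIO in ctlferegister(), so
 * nothing is allocated here; the ATIO's nexus (initiator, port,
 * target, LUN), tag and CDB are copied into the ctl_io and it is
 * handed to ctl_queue().  CTL calls back later through fe_datamove
 * and/or fe_done.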
         */
        mtx_unlock(mtx);
        io = done_ccb->ccb_h.io_ptr;
        ctl_zero_io(io);

        /* Save pointers on both sides */
        io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
        done_ccb->ccb_h.io_ptr = io;

        /*
         * Only SCSI I/O comes down this path; resets, etc. come
         * down the immediate notify path below.
         */
        io->io_hdr.io_type = CTL_IO_SCSI;
        io->io_hdr.nexus.initid.id = atio->init_id;
        io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
        io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id;
        io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
        io->scsiio.tag_num = atio->tag_id;
        switch (atio->tag_action) {
        case CAM_TAG_ACTION_NONE:
            io->scsiio.tag_type = CTL_TAG_UNTAGGED;
            break;
        case MSG_SIMPLE_TASK:
            io->scsiio.tag_type = CTL_TAG_SIMPLE;
            break;
        case MSG_HEAD_OF_QUEUE_TASK:
            io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
            break;
        case MSG_ORDERED_TASK:
            io->scsiio.tag_type = CTL_TAG_ORDERED;
            break;
        case MSG_ACA_TASK:
            io->scsiio.tag_type = CTL_TAG_ACA;
            break;
        default:
            io->scsiio.tag_type = CTL_TAG_UNTAGGED;
            printf("%s: unhandled tag type %#x!!\n", __func__,
                   atio->tag_action);
            break;
        }
        if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
            printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
                   __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
        }
        io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
        bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
              io->scsiio.cdb_len);

#ifdef CTLFEDEBUG
        printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__,
               (uintmax_t)io->io_hdr.nexus.initid.id,
               io->io_hdr.nexus.targ_port,
               (uintmax_t)io->io_hdr.nexus.targ_target.id,
               io->io_hdr.nexus.targ_lun,
               io->scsiio.tag_num, io->scsiio.cdb[0]);
#endif

        ctl_queue(io);
        return;
    }
    case XPT_CONT_TARGET_IO: {
        int srr = 0;
        uint32_t srr_off = 0;

        atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
        io = (union ctl_io *)atio->ccb_h.io_ptr;

        softc->ctios_returned++;
#ifdef CTLFEDEBUG
        printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
               __func__, atio->tag_id, done_ccb->ccb_h.flags);
#endif
        /*
         * Handle the SRR case, where the initiator pushes the data
         * pointer back.
         */
        if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
         && done_ccb->csio.msg_ptr != NULL
         && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
         && done_ccb->csio.msg_ptr[1] == 5
         && done_ccb->csio.msg_ptr[2] == 0) {
            srr = 1;
            srr_off =
                (done_ccb->csio.msg_ptr[3] << 24)
                | (done_ccb->csio.msg_ptr[4] << 16)
                | (done_ccb->csio.msg_ptr[5] << 8)
                | (done_ccb->csio.msg_ptr[6]);
        }

        if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
            /*
             * If status was being sent, the back end data is now
             * history.  Hack it up and resubmit a new command with
             * the CDB adjusted.  If the SIM does the right thing,
             * all of the resid math should work.
             */
            softc->ccbs_freed++;
            xpt_release_ccb(done_ccb);
            if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
                done_ccb = (union ccb *)atio;
                goto resubmit;
            }
            /*
             * Fall through to doom....
             */
        } else if (srr) {
            /*
             * If we have an SRR and we're still sending data, we
             * should be able to adjust offsets and cycle again.
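             * For example, if the initiator asks to restart a
             * 0x40000-byte read at offset 0x10000, kern_rel_offset and
             * ext_data_filled are rewound to 0x10000, the ctl_io status
             * is cleared, and the ATIO goes back on the work queue so
             * ctlfestart() rebuilds CTIOs for the remaining 0x30000
             * bytes.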
1248 */ 1249 io->scsiio.kern_rel_offset = 1250 io->scsiio.ext_data_filled = srr_off; 1251 io->scsiio.ext_data_len = io->scsiio.kern_total_len - 1252 io->scsiio.kern_rel_offset; 1253 softc->ccbs_freed++; 1254 io->scsiio.io_hdr.status = CTL_STATUS_NONE; 1255 xpt_release_ccb(done_ccb); 1256 TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, 1257 periph_links.tqe); 1258 xpt_schedule(periph, /*priority*/ 1); 1259 break; 1260 } 1261 1262 if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) && 1263 (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) 1264 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; 1265 1266 /* 1267 * If we were sending status back to the initiator, free up 1268 * resources. If we were doing a datamove, call the 1269 * datamove done routine. 1270 */ 1271 if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { 1272 softc->ccbs_freed++; 1273 xpt_release_ccb(done_ccb); 1274 /* 1275 * For a wildcard attachment, commands can come in 1276 * with a specific target/lun. Reset the target 1277 * and LUN fields back to the wildcard values before 1278 * we send them back down to the SIM. The SIM has 1279 * a wildcard LUN enabled, not whatever target/lun 1280 * these happened to be. 1281 */ 1282 if (softc->flags & CTLFE_LUN_WILDCARD) { 1283 atio->ccb_h.target_id = CAM_TARGET_WILDCARD; 1284 atio->ccb_h.target_lun = CAM_LUN_WILDCARD; 1285 } 1286 if (periph->flags & CAM_PERIPH_INVALID) { 1287 ctlfe_free_ccb(periph, (union ccb *)atio); 1288 } else { 1289 mtx_unlock(mtx); 1290 xpt_action((union ccb *)atio); 1291 return; 1292 } 1293 } else { 1294 struct ctlfe_lun_cmd_info *cmd_info; 1295 struct ccb_scsiio *csio; 1296 1297 csio = &done_ccb->csio; 1298 cmd_info = (struct ctlfe_lun_cmd_info *) 1299 io->io_hdr.port_priv; 1300 1301 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; 1302 1303 io->scsiio.ext_data_len += csio->dxfer_len; 1304 if (io->scsiio.ext_data_len > 1305 io->scsiio.kern_total_len) { 1306 xpt_print(periph->path, "%s: tag 0x%04x " 1307 "done len %u > total %u sent %u\n", 1308 __func__, io->scsiio.tag_num, 1309 io->scsiio.ext_data_len, 1310 io->scsiio.kern_total_len, 1311 io->scsiio.ext_data_filled); 1312 } 1313 /* 1314 * Translate CAM status to CTL status. Success 1315 * does not change the overall, ctl_io status. In 1316 * that case we just set port_status to 0. If we 1317 * have a failure, though, set a data phase error 1318 * for the overall ctl_io. 1319 */ 1320 switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { 1321 case CAM_REQ_CMP: 1322 io->io_hdr.port_status = 0; 1323 break; 1324 default: 1325 /* 1326 * XXX KDM we probably need to figure out a 1327 * standard set of errors that the SIM 1328 * drivers should return in the event of a 1329 * data transfer failure. A data phase 1330 * error will at least point the user to a 1331 * data transfer error of some sort. 1332 * Hopefully the SIM printed out some 1333 * additional information to give the user 1334 * a clue what happened. 1335 */ 1336 io->io_hdr.port_status = 0xbad1; 1337 ctl_set_data_phase_error(&io->scsiio); 1338 /* 1339 * XXX KDM figure out residual. 1340 */ 1341 break; 1342 } 1343 /* 1344 * If we had to break this S/G list into multiple 1345 * pieces, figure out where we are in the list, and 1346 * continue sending pieces if necessary. 
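 * cur_transfer_index and cur_transfer_off, saved by ctlfedata() when
 * it had to stop early, point at the S/G entry and byte offset where
 * the previous CTIO left off; the ctlfedata() call below resumes from
 * there and builds the next CTIO, until CTLFE_CMD_PIECEWISE is no
 * longer set.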
1347 */ 1348 if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) 1349 && (io->io_hdr.port_status == 0)) { 1350 ccb_flags flags; 1351 uint8_t scsi_status; 1352 uint8_t *data_ptr; 1353 uint32_t dxfer_len; 1354 1355 flags = atio->ccb_h.flags & 1356 (CAM_DIS_DISCONNECT| 1357 CAM_TAG_ACTION_VALID); 1358 1359 ctlfedata(softc, io, &flags, &data_ptr, 1360 &dxfer_len, &csio->sglist_cnt); 1361 1362 scsi_status = 0; 1363 1364 if (((flags & CAM_SEND_STATUS) == 0) 1365 && (dxfer_len == 0)) { 1366 printf("%s: tag %04x no status or " 1367 "len cdb = %02x\n", __func__, 1368 atio->tag_id, 1369 atio->cdb_io.cdb_bytes[0]); 1370 printf("%s: tag %04x io status %#x\n", 1371 __func__, atio->tag_id, 1372 io->io_hdr.status); 1373 } 1374 1375 cam_fill_ctio(csio, 1376 /*retries*/ 2, 1377 ctlfedone, 1378 flags, 1379 (flags & CAM_TAG_ACTION_VALID) ? 1380 MSG_SIMPLE_Q_TAG : 0, 1381 atio->tag_id, 1382 atio->init_id, 1383 scsi_status, 1384 /*data_ptr*/ data_ptr, 1385 /*dxfer_len*/ dxfer_len, 1386 /*timeout*/ 5 * 1000); 1387 1388 csio->ccb_h.flags |= CAM_UNLOCKED; 1389 csio->resid = 0; 1390 csio->ccb_h.ccb_atio = atio; 1391 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 1392 softc->ctios_sent++; 1393 mtx_unlock(mtx); 1394 xpt_action((union ccb *)csio); 1395 } else { 1396 /* 1397 * Release the CTIO. The ATIO will be sent back 1398 * down to the SIM once we send status. 1399 */ 1400 softc->ccbs_freed++; 1401 xpt_release_ccb(done_ccb); 1402 mtx_unlock(mtx); 1403 1404 /* Call the backend move done callback */ 1405 io->scsiio.be_move_done(io); 1406 } 1407 return; 1408 } 1409 break; 1410 } 1411 case XPT_IMMEDIATE_NOTIFY: { 1412 union ctl_io *io; 1413 struct ccb_immediate_notify *inot; 1414 cam_status status; 1415 int send_ctl_io; 1416 1417 inot = &done_ccb->cin1; 1418 printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x " 1419 "seq %#x\n", __func__, inot->ccb_h.status, 1420 inot->tag_id, inot->seq_id); 1421 1422 io = done_ccb->ccb_h.io_ptr; 1423 ctl_zero_io(io); 1424 1425 send_ctl_io = 1; 1426 1427 io->io_hdr.io_type = CTL_IO_TASK; 1428 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb; 1429 inot->ccb_h.io_ptr = io; 1430 io->io_hdr.nexus.initid.id = inot->initiator_id; 1431 io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; 1432 io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id; 1433 io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; 1434 /* XXX KDM should this be the tag_id? 
         */
        io->taskio.tag_num = inot->seq_id;

        status = inot->ccb_h.status & CAM_STATUS_MASK;
        switch (status) {
        case CAM_SCSI_BUS_RESET:
            io->taskio.task_action = CTL_TASK_BUS_RESET;
            break;
        case CAM_BDR_SENT:
            io->taskio.task_action = CTL_TASK_TARGET_RESET;
            break;
        case CAM_MESSAGE_RECV:
            switch (inot->arg) {
            case MSG_ABORT_TASK_SET:
                io->taskio.task_action =
                    CTL_TASK_ABORT_TASK_SET;
                break;
            case MSG_TARGET_RESET:
                io->taskio.task_action =
                    CTL_TASK_TARGET_RESET;
                break;
            case MSG_ABORT_TASK:
                io->taskio.task_action =
                    CTL_TASK_ABORT_TASK;
                break;
            case MSG_LOGICAL_UNIT_RESET:
                io->taskio.task_action =
                    CTL_TASK_LUN_RESET;
                break;
            case MSG_CLEAR_TASK_SET:
                io->taskio.task_action =
                    CTL_TASK_CLEAR_TASK_SET;
                break;
            case MSG_CLEAR_ACA:
                io->taskio.task_action =
                    CTL_TASK_CLEAR_ACA;
                break;
            case MSG_NOOP:
                send_ctl_io = 0;
                break;
            default:
                xpt_print(periph->path,
                    "%s: unsupported message 0x%x\n",
                    __func__, inot->arg);
                send_ctl_io = 0;
                break;
            }
            break;
        case CAM_REQ_ABORTED:
            /*
             * This request was sent back by the driver.
             * XXX KDM what do we do here?
             */
            send_ctl_io = 0;
            break;
        case CAM_REQ_INVALID:
        case CAM_PROVIDE_FAIL:
        default:
            /*
             * We should only get here if we're talking to a SIM
             * that is target capable but only supports the old
             * API.  In that case, we need to just free the CCB.
             * If we actually send a notify acknowledge, it will
             * send that back with an error as well.
             */

            if ((status != CAM_REQ_INVALID)
             && (status != CAM_PROVIDE_FAIL))
                xpt_print(periph->path,
                    "%s: unsupported CAM status 0x%x\n",
                    __func__, status);

            ctlfe_free_ccb(periph, done_ccb);

            goto out;
        }
        if (send_ctl_io != 0) {
            ctl_queue(io);
        } else {
            done_ccb->ccb_h.status = CAM_REQ_INPROG;
            done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
            xpt_action(done_ccb);
        }
        break;
    }
    case XPT_NOTIFY_ACKNOWLEDGE:
        /*
         * Queue this back down to the SIM as an immediate notify.
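         * A completed notify acknowledge means the SIM is finished
         * with the original immediate notify, so the same CCB is
         * recycled: its func_code is flipped back to
         * XPT_IMMEDIATE_NOTIFY and it is resubmitted so the SIM can
         * report the next event with it.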
1524 */ 1525 done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 1526 xpt_action(done_ccb); 1527 break; 1528 case XPT_SET_SIM_KNOB: 1529 case XPT_GET_SIM_KNOB: 1530 break; 1531 default: 1532 panic("%s: unexpected CCB type %#x", __func__, 1533 done_ccb->ccb_h.func_code); 1534 break; 1535 } 1536 1537 out: 1538 mtx_unlock(mtx); 1539 } 1540 1541 static void 1542 ctlfe_onoffline(void *arg, int online) 1543 { 1544 struct ctlfe_softc *bus_softc; 1545 union ccb *ccb; 1546 cam_status status; 1547 struct cam_path *path; 1548 int set_wwnn; 1549 1550 bus_softc = (struct ctlfe_softc *)arg; 1551 1552 set_wwnn = 0; 1553 1554 status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, 1555 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 1556 if (status != CAM_REQ_CMP) { 1557 printf("%s: unable to create path!\n", __func__); 1558 return; 1559 } 1560 ccb = xpt_alloc_ccb(); 1561 xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); 1562 ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; 1563 xpt_action(ccb); 1564 1565 /* 1566 * Copan WWN format: 1567 * 1568 * Bits 63-60: 0x5 NAA, IEEE registered name 1569 * Bits 59-36: 0x000ED5 IEEE Company name assigned to Copan 1570 * Bits 35-12: Copan SSN (Sequential Serial Number) 1571 * Bits 11-8: Type of port: 1572 * 1 == N-Port 1573 * 2 == F-Port 1574 * 3 == NL-Port 1575 * Bits 7-0: 0 == Node Name, >0 == Port Number 1576 */ 1577 if (online != 0) { 1578 if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ 1579 #ifdef RANDOM_WWNN 1580 uint64_t random_bits; 1581 #endif 1582 1583 printf("%s: %s current WWNN %#jx\n", __func__, 1584 bus_softc->port_name, 1585 ccb->knob.xport_specific.fc.wwnn); 1586 printf("%s: %s current WWPN %#jx\n", __func__, 1587 bus_softc->port_name, 1588 ccb->knob.xport_specific.fc.wwpn); 1589 1590 #ifdef RANDOM_WWNN 1591 arc4rand(&random_bits, sizeof(random_bits), 0); 1592 #endif 1593 1594 /* 1595 * XXX KDM this is a bit of a kludge for now. We 1596 * take the current WWNN/WWPN from the card, and 1597 * replace the company identifier and the NL-Port 1598 * indicator and the port number (for the WWPN). 1599 * This should be replaced later with ddb_GetWWNN, 1600 * or possibly a more centralized scheme. (It 1601 * would be nice to have the WWNN/WWPN for each 1602 * port stored in the ctl_port structure.) 1603 */ 1604 #ifdef RANDOM_WWNN 1605 ccb->knob.xport_specific.fc.wwnn = 1606 (random_bits & 1607 0x0000000fffffff00ULL) | 1608 /* Company ID */ 0x5000ED5000000000ULL | 1609 /* NL-Port */ 0x0300; 1610 ccb->knob.xport_specific.fc.wwpn = 1611 (random_bits & 1612 0x0000000fffffff00ULL) | 1613 /* Company ID */ 0x5000ED5000000000ULL | 1614 /* NL-Port */ 0x3000 | 1615 /* Port Num */ (bus_softc->port.targ_port & 0xff); 1616 1617 /* 1618 * This is a bit of an API break/reversal, but if 1619 * we're doing the random WWNN that's a little 1620 * different anyway. So record what we're actually 1621 * using with the frontend code so it's reported 1622 * accurately. 1623 */ 1624 ctl_port_set_wwns(&bus_softc->port, 1625 true, ccb->knob.xport_specific.fc.wwnn, 1626 true, ccb->knob.xport_specific.fc.wwpn); 1627 set_wwnn = 1; 1628 #else /* RANDOM_WWNN */ 1629 /* 1630 * If the user has specified a WWNN/WWPN, send them 1631 * down to the SIM. Otherwise, record what the SIM 1632 * has reported. 
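 * port.wwnn/wwpn are non-zero only when they have been configured
 * through CTL; if they differ from what the SIM reported, the knob is
 * rewritten with the configured values, otherwise ctl_port_set_wwns()
 * just records the SIM's own values so they are reported accurately.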
1633 */ 1634 if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn 1635 != ccb->knob.xport_specific.fc.wwnn) { 1636 ccb->knob.xport_specific.fc.wwnn = 1637 bus_softc->port.wwnn; 1638 set_wwnn = 1; 1639 } else { 1640 ctl_port_set_wwns(&bus_softc->port, 1641 true, ccb->knob.xport_specific.fc.wwnn, 1642 false, 0); 1643 } 1644 if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn 1645 != ccb->knob.xport_specific.fc.wwpn) { 1646 ccb->knob.xport_specific.fc.wwpn = 1647 bus_softc->port.wwpn; 1648 set_wwnn = 1; 1649 } else { 1650 ctl_port_set_wwns(&bus_softc->port, 1651 false, 0, 1652 true, ccb->knob.xport_specific.fc.wwpn); 1653 } 1654 #endif /* RANDOM_WWNN */ 1655 1656 1657 if (set_wwnn != 0) { 1658 printf("%s: %s new WWNN %#jx\n", __func__, 1659 bus_softc->port_name, 1660 ccb->knob.xport_specific.fc.wwnn); 1661 printf("%s: %s new WWPN %#jx\n", __func__, 1662 bus_softc->port_name, 1663 ccb->knob.xport_specific.fc.wwpn); 1664 } 1665 } else { 1666 printf("%s: %s has no valid WWNN/WWPN\n", __func__, 1667 bus_softc->port_name); 1668 } 1669 } 1670 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 1671 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 1672 if (set_wwnn != 0) 1673 ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS; 1674 1675 if (online != 0) 1676 ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET; 1677 else 1678 ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET; 1679 1680 xpt_action(ccb); 1681 1682 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1683 printf("%s: SIM %s (path id %d) target %s failed with " 1684 "status %#x\n", 1685 __func__, bus_softc->port_name, bus_softc->path_id, 1686 (online != 0) ? "enable" : "disable", 1687 ccb->ccb_h.status); 1688 } else { 1689 printf("%s: SIM %s (path id %d) target %s succeeded\n", 1690 __func__, bus_softc->port_name, bus_softc->path_id, 1691 (online != 0) ? "enable" : "disable"); 1692 } 1693 1694 xpt_free_path(path); 1695 xpt_free_ccb(ccb); 1696 } 1697 1698 static void 1699 ctlfe_online(void *arg) 1700 { 1701 struct ctlfe_softc *bus_softc; 1702 struct cam_path *path; 1703 cam_status status; 1704 struct ctlfe_lun_softc *lun_softc; 1705 struct cam_periph *periph; 1706 1707 bus_softc = (struct ctlfe_softc *)arg; 1708 1709 /* 1710 * Create the wildcard LUN before bringing the port online. 1711 */ 1712 status = xpt_create_path(&path, /*periph*/ NULL, 1713 bus_softc->path_id, CAM_TARGET_WILDCARD, 1714 CAM_LUN_WILDCARD); 1715 if (status != CAM_REQ_CMP) { 1716 printf("%s: unable to create path for wildcard periph\n", 1717 __func__); 1718 return; 1719 } 1720 1721 lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO); 1722 1723 xpt_path_lock(path); 1724 periph = cam_periph_find(path, "ctl"); 1725 if (periph != NULL) { 1726 /* We've already got a periph, no need to alloc a new one. */ 1727 xpt_path_unlock(path); 1728 xpt_free_path(path); 1729 free(lun_softc, M_CTLFE); 1730 return; 1731 } 1732 lun_softc->parent_softc = bus_softc; 1733 lun_softc->flags |= CTLFE_LUN_WILDCARD; 1734 1735 status = cam_periph_alloc(ctlferegister, 1736 ctlfeoninvalidate, 1737 ctlfecleanup, 1738 ctlfestart, 1739 "ctl", 1740 CAM_PERIPH_BIO, 1741 path, 1742 ctlfeasync, 1743 0, 1744 lun_softc); 1745 1746 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1747 const struct cam_status_entry *entry; 1748 1749 entry = cam_fetch_status_entry(status); 1750 printf("%s: CAM error %s (%#x) returned from " 1751 "cam_periph_alloc()\n", __func__, (entry != NULL) ? 
1752 entry->status_text : "Unknown", status); 1753 free(lun_softc, M_CTLFE); 1754 } 1755 1756 xpt_path_unlock(path); 1757 ctlfe_onoffline(arg, /*online*/ 1); 1758 xpt_free_path(path); 1759 } 1760 1761 static void 1762 ctlfe_offline(void *arg) 1763 { 1764 struct ctlfe_softc *bus_softc; 1765 struct cam_path *path; 1766 cam_status status; 1767 struct cam_periph *periph; 1768 1769 bus_softc = (struct ctlfe_softc *)arg; 1770 1771 ctlfe_onoffline(arg, /*online*/ 0); 1772 1773 /* 1774 * Disable the wildcard LUN for this port now that we have taken 1775 * the port offline. 1776 */ 1777 status = xpt_create_path(&path, /*periph*/ NULL, 1778 bus_softc->path_id, CAM_TARGET_WILDCARD, 1779 CAM_LUN_WILDCARD); 1780 if (status != CAM_REQ_CMP) { 1781 printf("%s: unable to create path for wildcard periph\n", 1782 __func__); 1783 return; 1784 } 1785 xpt_path_lock(path); 1786 if ((periph = cam_periph_find(path, "ctl")) != NULL) 1787 cam_periph_invalidate(periph); 1788 xpt_path_unlock(path); 1789 xpt_free_path(path); 1790 } 1791 1792 /* 1793 * This will get called to enable a LUN on every bus that is attached to 1794 * CTL. So we only need to create a path/periph for this particular bus. 1795 */ 1796 static int 1797 ctlfe_lun_enable(void *arg, int lun_id) 1798 { 1799 struct ctlfe_softc *bus_softc; 1800 struct ctlfe_lun_softc *softc; 1801 struct cam_path *path; 1802 struct cam_periph *periph; 1803 cam_status status; 1804 1805 bus_softc = (struct ctlfe_softc *)arg; 1806 1807 status = xpt_create_path(&path, /*periph*/ NULL, 1808 bus_softc->path_id, bus_softc->target_id, lun_id); 1809 /* XXX KDM need some way to return status to CTL here? */ 1810 if (status != CAM_REQ_CMP) { 1811 printf("%s: could not create path, status %#x\n", __func__, 1812 status); 1813 return (1); 1814 } 1815 1816 softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); 1817 xpt_path_lock(path); 1818 periph = cam_periph_find(path, "ctl"); 1819 if (periph != NULL) { 1820 /* We've already got a periph, no need to alloc a new one. */ 1821 xpt_path_unlock(path); 1822 xpt_free_path(path); 1823 free(softc, M_CTLFE); 1824 return (0); 1825 } 1826 softc->parent_softc = bus_softc; 1827 1828 status = cam_periph_alloc(ctlferegister, 1829 ctlfeoninvalidate, 1830 ctlfecleanup, 1831 ctlfestart, 1832 "ctl", 1833 CAM_PERIPH_BIO, 1834 path, 1835 ctlfeasync, 1836 0, 1837 softc); 1838 1839 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1840 const struct cam_status_entry *entry; 1841 1842 entry = cam_fetch_status_entry(status); 1843 printf("%s: CAM error %s (%#x) returned from " 1844 "cam_periph_alloc()\n", __func__, (entry != NULL) ? 1845 entry->status_text : "Unknown", status); 1846 free(softc, M_CTLFE); 1847 } 1848 1849 xpt_path_unlock(path); 1850 xpt_free_path(path); 1851 return (0); 1852 } 1853 1854 /* 1855 * This will get called when the user removes a LUN to disable that LUN 1856 * on every bus that is attached to CTL. 
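 * The matching per-LUN peripheral is looked up on this port's
 * lun_softc_list and invalidated; ctlfeoninvalidate() then disables
 * the LUN at the SIM, and the outstanding ATIOs and INOTs drain back
 * through ctlfedone(), which frees them once the periph is invalid.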
 */
static int
ctlfe_lun_disable(void *arg, int lun_id)
{
    struct ctlfe_softc *softc;
    struct ctlfe_lun_softc *lun_softc;

    softc = (struct ctlfe_softc *)arg;

    mtx_lock(&softc->lun_softc_mtx);
    STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
        struct cam_path *path;

        path = lun_softc->periph->path;

        if ((xpt_path_target_id(path) == 0)
         && (xpt_path_lun_id(path) == lun_id)) {
            break;
        }
    }
    if (lun_softc == NULL) {
        mtx_unlock(&softc->lun_softc_mtx);
        printf("%s: can't find lun %d\n", __func__, lun_id);
        return (1);
    }
    cam_periph_acquire(lun_softc->periph);
    mtx_unlock(&softc->lun_softc_mtx);

    cam_periph_lock(lun_softc->periph);
    cam_periph_invalidate(lun_softc->periph);
    cam_periph_unlock(lun_softc->periph);
    cam_periph_release(lun_softc->periph);
    return (0);
}

static void
ctlfe_dump_sim(struct cam_sim *sim)
{

    printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
           sim->sim_name, sim->unit_number,
           sim->max_tagged_dev_openings, sim->max_dev_openings);
}

/*
 * Assumes that the SIM lock is held.
 */
static void
ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
{
    struct ccb_hdr *hdr;
    struct cam_periph *periph;
    int num_items;

    periph = softc->periph;
    num_items = 0;

    TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
        union ctl_io *io = hdr->io_ptr;

        num_items++;

        /*
         * Only regular SCSI I/O is put on the work queue, so we
         * can print sense here.  There may be no sense if it is
         * on the queue for a DMA, but this serves to print out
         * the CCB as well.
         *
         * XXX KDM switch this over to scsi_sense_print() when
         * CTL is merged in with CAM.
         */
        ctl_io_error_print(io, NULL);

        /*
         * Print DMA status if we are DMA_QUEUED.
         */
        if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
            xpt_print(periph->path,
                "Total %u, Current %u, Resid %u\n",
                io->scsiio.kern_total_len,
                io->scsiio.kern_data_len,
                io->scsiio.kern_data_resid);
        }
    }

    xpt_print(periph->path, "%d requests total waiting for CCBs\n",
        num_items);
    xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
        "freed)\n", (uintmax_t)(softc->ccbs_alloced -
        softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
        (uintmax_t)softc->ccbs_freed);
    xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
        "returned)\n", (uintmax_t)(softc->ctios_sent -
        softc->ctios_returned), (uintmax_t)softc->ctios_sent,
        (uintmax_t)softc->ctios_returned);
}

/*
 * Datamove/done routine called by CTL.  Put ourselves on the queue to
 * receive a CCB from CAM so we can queue the continue I/O request down
 * to the adapter.
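 * The ctl_io itself is only flagged here (CTL_FLAG_DMA_QUEUED, plus
 * CTL_FLAG_STATUS_QUEUED when status is already known); the ATIO it
 * arrived with is appended to the per-LUN work queue and
 * xpt_schedule() arranges for ctlfestart() to be handed a fresh CTIO
 * to fill out.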
1958 */ 1959 static void 1960 ctlfe_datamove(union ctl_io *io) 1961 { 1962 union ccb *ccb; 1963 struct cam_periph *periph; 1964 struct ctlfe_lun_softc *softc; 1965 1966 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 1967 ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type)); 1968 1969 ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 1970 periph = xpt_path_periph(ccb->ccb_h.path); 1971 cam_periph_lock(periph); 1972 softc = (struct ctlfe_lun_softc *)periph->softc; 1973 io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; 1974 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) 1975 io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; 1976 TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, 1977 periph_links.tqe); 1978 xpt_schedule(periph, /*priority*/ 1); 1979 cam_periph_unlock(periph); 1980 } 1981 1982 static void 1983 ctlfe_done(union ctl_io *io) 1984 { 1985 union ccb *ccb; 1986 struct cam_periph *periph; 1987 struct ctlfe_lun_softc *softc; 1988 1989 ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 1990 periph = xpt_path_periph(ccb->ccb_h.path); 1991 cam_periph_lock(periph); 1992 softc = (struct ctlfe_lun_softc *)periph->softc; 1993 1994 if (io->io_hdr.io_type == CTL_IO_TASK) { 1995 /* 1996 * Task management commands don't require any further 1997 * communication back to the adapter. Requeue the CCB 1998 * to the adapter, and free the CTL I/O. 1999 */ 2000 xpt_print(ccb->ccb_h.path, "%s: returning task I/O " 2001 "tag %#x seq %#x\n", __func__, 2002 ccb->cin1.tag_id, ccb->cin1.seq_id); 2003 /* 2004 * Send the notify acknowledge down to the SIM, to let it 2005 * know we processed the task management command. 2006 */ 2007 ccb->ccb_h.status = CAM_REQ_INPROG; 2008 ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; 2009 xpt_action(ccb); 2010 } else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) { 2011 if (softc->flags & CTLFE_LUN_WILDCARD) { 2012 ccb->ccb_h.target_id = CAM_TARGET_WILDCARD; 2013 ccb->ccb_h.target_lun = CAM_LUN_WILDCARD; 2014 } 2015 if (periph->flags & CAM_PERIPH_INVALID) { 2016 ctlfe_free_ccb(periph, ccb); 2017 } else { 2018 cam_periph_unlock(periph); 2019 xpt_action(ccb); 2020 return; 2021 } 2022 } else { 2023 io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; 2024 TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, 2025 periph_links.tqe); 2026 xpt_schedule(periph, /*priority*/ 1); 2027 } 2028 2029 cam_periph_unlock(periph); 2030 } 2031 2032 static void 2033 ctlfe_dump(void) 2034 { 2035 struct ctlfe_softc *bus_softc; 2036 struct ctlfe_lun_softc *lun_softc; 2037 2038 STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { 2039 ctlfe_dump_sim(bus_softc->sim); 2040 STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) 2041 ctlfe_dump_queue(lun_softc); 2042 } 2043 } 2044