/*-
 * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
 */
/*
 * Peripheral driver interface between CAM and CTL (CAM Target Layer).
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <machine/bus.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
	CTLFE_CCB_DEFAULT	= 0x00
} ctlfe_ccb_types;

struct ctlfe_softc {
	struct ctl_port port;
	path_id_t path_id;
	u_int maxio;
	struct cam_sim *sim;
	char port_name[DEV_IDLEN];
	struct mtx lun_softc_mtx;
	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
	STAILQ_ENTRY(ctlfe_softc) links;
};

STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
struct mtx ctlfe_list_mtx;
static char ctlfe_mtx_desc[] = "ctlfelist";
static int ctlfe_dma_enabled = 1;
#ifdef CTLFE_INIT_ENABLE
static int ctlfe_max_targets = 1;
static int ctlfe_num_targets = 0;
#endif

typedef enum {
	CTLFE_LUN_NONE		= 0x00,
	CTLFE_LUN_WILDCARD	= 0x01
} ctlfe_lun_flags;

struct ctlfe_lun_softc {
	struct ctlfe_softc *parent_softc;
	struct cam_periph *periph;
	ctlfe_lun_flags flags;
	struct callout dma_callout;
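	/* CCB allocation/free and ATIO/INOT/CTIO traffic counters. */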
	uint64_t ccbs_alloced;
	uint64_t ccbs_freed;
	uint64_t ctios_sent;
	uint64_t ctios_returned;
	uint64_t atios_sent;
	uint64_t atios_returned;
	uint64_t inots_sent;
	uint64_t inots_returned;
	/* bus_dma_tag_t dma_tag; */
	TAILQ_HEAD(, ccb_hdr) work_queue;
	STAILQ_ENTRY(ctlfe_lun_softc) links;
};

typedef enum {
	CTLFE_CMD_NONE		= 0x00,
	CTLFE_CMD_PIECEWISE	= 0x01
} ctlfe_cmd_flags;

/*
 * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h.
 * Currently that is 600 bytes.
 */
struct ctlfe_lun_cmd_info {
	int cur_transfer_index;
	size_t cur_transfer_off;
	ctlfe_cmd_flags flags;
	/*
	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
	 * i386 and 512 bytes on amd64.
	 */
#define CTLFE_MAX_SEGS	32
	bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS];
};

/*
 * When we register the adapter/bus, request that this many ctl_ios be
 * allocated.  This should be the maximum supported by the adapter, but we
 * currently don't have a way to get that back from the path inquiry.
 * XXX KDM add that to the path inquiry.
 */
#define	CTLFE_REQ_CTL_IO	4096
/*
 * Number of Accept Target I/O CCBs to allocate and queue down to the
 * adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_ATIO_PER_LUN	1024
/*
 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
 * allocate and queue down to the adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_IN_PER_LUN	1024

/*
 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending
 * status to the initiator.  The SIM is expected to have its own timeouts,
 * so we're not putting this timeout around the CCB execution time.  The
 * SIM should timeout and let us know if it has an issue.
 */
#define	CTLFE_DMA_TIMEOUT	60

/*
 * Turn this on to enable extra debugging prints.
 */
#if 0
#define	CTLFE_DEBUG
#endif

/*
 * Use randomly assigned WWNN/WWPN values.  This is to work around an issue
 * in the FreeBSD initiator that makes it unable to rescan the target if
 * the target gets rebooted and the WWNN/WWPN stay the same.
 */
#if 0
#define	RANDOM_WWNN
#endif

SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW,
    &ctlfe_dma_enabled, 0, "DMA enabled");
MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");

#define	ccb_type	ppriv_field0
/* This is only used in the ATIO */
#define	io_ptr		ppriv_ptr1

/* This is only used in the CTIO */
#define	ccb_atio	ppriv_ptr1

int	ctlfeinitialize(void);
void	ctlfeshutdown(void);
static periph_init_t ctlfeperiphinit;
static void ctlfeasync(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
static periph_ctor_t ctlferegister;
static periph_oninv_t ctlfeoninvalidate;
static periph_dtor_t ctlfecleanup;
static periph_start_t ctlfestart;
static void ctlfedone(struct cam_periph *periph,
    union ccb *done_ccb);

static void	ctlfe_onoffline(void *arg, int online);
static void	ctlfe_online(void *arg);
static void	ctlfe_offline(void *arg);
static int	ctlfe_lun_enable(void *arg, struct ctl_id targ_id,
    int lun_id);
static int	ctlfe_lun_disable(void *arg, struct ctl_id targ_id,
    int lun_id);
static void	ctlfe_dump_sim(struct cam_sim *sim);
static void	ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
static void	ctlfe_dma_timeout(void *arg);
static void	ctlfe_datamove_done(union ctl_io *io);
static void	ctlfe_dump(void);

static struct periph_driver ctlfe_driver =
{
	ctlfeperiphinit, "ctl",
	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0,
	CAM_PERIPH_DRV_EARLY
};

static struct ctl_frontend ctlfe_frontend =
{
	.name = "camtgt",
	.init = ctlfeinitialize,
	.fe_dump = ctlfe_dump,
	.shutdown = ctlfeshutdown,
};
CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);

extern struct ctl_softc *control_softc;

void
ctlfeshutdown(void)
{
	return;
}

int
ctlfeinitialize(void)
{

	STAILQ_INIT(&ctlfe_softc_list);
	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
	periphdriver_register(&ctlfe_driver);
	return (0);
}

void
ctlfeperiphinit(void)
{
	cam_status status;

	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
	    AC_CONTRACT, ctlfeasync, NULL, NULL);
	if (status != CAM_REQ_CMP) {
		printf("ctl: Failed to attach async callback due to CAM "
		    "status 0x%x!\n", status);
	}
}

static void
ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ctlfe_softc *softc;

#ifdef CTLFEDEBUG
	printf("%s: entered\n", __func__);
#endif

	mtx_lock(&ctlfe_list_mtx);
	STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
		if (softc->path_id == xpt_path_path_id(path))
			break;
	}
	mtx_unlock(&ctlfe_list_mtx);

	/*
	 * When a new path gets registered, and it is capable of target
	 * mode, go ahead and attach.  Later on, we may need to be more
	 * selective, but for now this will be sufficient.
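	 * For each target-capable SIM that registers, the switch below
	 * allocates a per-bus softc and registers a CTL port for it.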
	 */
	switch (code) {
	case AC_PATH_REGISTERED: {
		struct ctl_port *port;
		struct ccb_pathinq *cpi;
		int retval;

		cpi = (struct ccb_pathinq *)arg;

		/* Don't attach if it doesn't support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
#ifdef CTLFEDEBUG
			printf("%s: SIM %s%d doesn't support target mode\n",
			    __func__, cpi->dev_name, cpi->unit_number);
#endif
			break;
		}

		if (softc != NULL) {
#ifdef CTLFEDEBUG
			printf("%s: CTL port for CAM path %u already exists\n",
			    __func__, xpt_path_path_id(path));
#endif
			break;
		}

#ifdef CTLFE_INIT_ENABLE
		if (ctlfe_num_targets >= ctlfe_max_targets) {
			union ccb *ccb;

			ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (ccb == NULL) {
				printf("%s: unable to malloc CCB!\n", __func__);
				return;
			}
			xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);

			ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
			ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
			ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;

			xpt_action(ccb);

			if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
			     CAM_REQ_CMP) {
				printf("%s: SIM %s%d (path id %d) initiator "
				    "enable failed with status %#x\n",
				    __func__, cpi->dev_name,
				    cpi->unit_number, cpi->ccb_h.path_id,
				    ccb->ccb_h.status);
			} else {
				printf("%s: SIM %s%d (path id %d) initiator "
				    "enable succeeded\n",
				    __func__, cpi->dev_name,
				    cpi->unit_number, cpi->ccb_h.path_id);
			}

			free(ccb, M_TEMP);

			break;
		} else {
			ctlfe_num_targets++;
		}

		printf("%s: ctlfe_num_targets = %d\n", __func__,
		    ctlfe_num_targets);
#endif /* CTLFE_INIT_ENABLE */

		/*
		 * We're in an interrupt context here, so we have to
		 * use M_NOWAIT.  Of course this means trouble if we
		 * can't allocate memory.
		 */
		softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO);
		if (softc == NULL) {
			printf("%s: unable to malloc %zd bytes for softc\n",
			    __func__, sizeof(*softc));
			return;
		}

		softc->path_id = cpi->ccb_h.path_id;
		softc->sim = xpt_path_sim(path);
		if (cpi->maxio != 0)
			softc->maxio = cpi->maxio;
		else
			softc->maxio = DFLTPHYS;
		mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF);
		STAILQ_INIT(&softc->lun_softc_list);

		port = &softc->port;
		port->frontend = &ctlfe_frontend;

		/*
		 * XXX KDM should we be more accurate here ?
		 */
		if (cpi->transport == XPORT_FC)
			port->port_type = CTL_PORT_FC;
		else if (cpi->transport == XPORT_SAS)
			port->port_type = CTL_PORT_SAS;
		else
			port->port_type = CTL_PORT_SCSI;

		/* XXX KDM what should the real number be here? */
		port->num_requested_ctl_io = 4096;
		snprintf(softc->port_name, sizeof(softc->port_name),
		    "%s%d", cpi->dev_name, cpi->unit_number);
		/*
		 * XXX KDM it would be nice to allocate storage in the
		 * frontend structure itself.
		 */
		port->port_name = softc->port_name;
		port->physical_port = cpi->bus_id;
		port->virtual_port = 0;
		port->port_online = ctlfe_online;
		port->port_offline = ctlfe_offline;
		port->onoff_arg = softc;
		port->lun_enable = ctlfe_lun_enable;
		port->lun_disable = ctlfe_lun_disable;
		port->targ_lun_arg = softc;
		port->fe_datamove = ctlfe_datamove_done;
		port->fe_done = ctlfe_datamove_done;
		/*
		 * XXX KDM the path inquiry doesn't give us the maximum
		 * number of targets supported.
		 */
		port->max_targets = cpi->max_target;
		port->max_target_id = cpi->max_target;

		/*
		 * XXX KDM need to figure out whether we're the master or
		 * slave.
		 */
#ifdef CTLFEDEBUG
		printf("%s: calling ctl_port_register() for %s%d\n",
		    __func__, cpi->dev_name, cpi->unit_number);
#endif
		retval = ctl_port_register(port, /*master_SC*/ 1);
		if (retval != 0) {
			printf("%s: ctl_port_register() failed with "
			    "error %d!\n", __func__, retval);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
			break;
		} else {
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links);
			mtx_unlock(&ctlfe_list_mtx);
		}

		break;
	}
	case AC_PATH_DEREGISTERED: {

		if (softc != NULL) {
			/*
			 * XXX KDM are we certain at this point that there
			 * are no outstanding commands for this frontend?
			 */
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc,
			    links);
			mtx_unlock(&ctlfe_list_mtx);
			ctl_port_deregister(&softc->port);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
		}
		break;
	}
	case AC_CONTRACT: {
		struct ac_contract *ac;

		ac = (struct ac_contract *)arg;

		switch (ac->contract_number) {
		case AC_CONTRACT_DEV_CHG: {
			struct ac_device_changed *dev_chg;
			int retval;

			dev_chg = (struct ac_device_changed *)ac->contract_data;

			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
			    __func__, dev_chg->wwpn, dev_chg->port,
			    xpt_path_path_id(path), dev_chg->target,
			    (dev_chg->arrived == 0) ? "left" : "arrived");

			if (softc == NULL) {
				printf("%s: CTL port for CAM path %u not "
				    "found!\n", __func__,
				    xpt_path_path_id(path));
				break;
			}
			if (dev_chg->arrived != 0) {
				retval = ctl_add_initiator(&softc->port,
				    dev_chg->target, dev_chg->wwpn, NULL);
			} else {
				retval = ctl_remove_initiator(&softc->port,
				    dev_chg->target);
			}

			if (retval < 0) {
				printf("%s: could not %s port %d iid %u "
				    "WWPN %#jx!\n", __func__,
				    (dev_chg->arrived != 0) ?
"add" : 495 "remove", softc->port.targ_port, 496 dev_chg->target, 497 (uintmax_t)dev_chg->wwpn); 498 } 499 break; 500 } 501 default: 502 printf("%s: unsupported contract number %ju\n", 503 __func__, (uintmax_t)ac->contract_number); 504 break; 505 } 506 break; 507 } 508 default: 509 break; 510 } 511 } 512 513 static cam_status 514 ctlferegister(struct cam_periph *periph, void *arg) 515 { 516 struct ctlfe_softc *bus_softc; 517 struct ctlfe_lun_softc *softc; 518 union ccb en_lun_ccb; 519 cam_status status; 520 int i; 521 522 softc = (struct ctlfe_lun_softc *)arg; 523 bus_softc = softc->parent_softc; 524 525 TAILQ_INIT(&softc->work_queue); 526 softc->periph = periph; 527 528 callout_init_mtx(&softc->dma_callout, xpt_path_mtx(periph->path), 529 /*flags*/ 0); 530 periph->softc = softc; 531 532 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 533 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 534 en_lun_ccb.cel.grp6_len = 0; 535 en_lun_ccb.cel.grp7_len = 0; 536 en_lun_ccb.cel.enable = 1; 537 xpt_action(&en_lun_ccb); 538 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 539 if (status != CAM_REQ_CMP) { 540 xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", 541 __func__, en_lun_ccb.ccb_h.status); 542 return (status); 543 } 544 545 status = CAM_REQ_CMP; 546 547 for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { 548 union ccb *new_ccb; 549 550 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 551 M_ZERO|M_NOWAIT); 552 if (new_ccb == NULL) { 553 status = CAM_RESRC_UNAVAIL; 554 break; 555 } 556 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 557 new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; 558 new_ccb->ccb_h.cbfcnp = ctlfedone; 559 new_ccb->ccb_h.flags |= CAM_UNLOCKED; 560 xpt_action(new_ccb); 561 softc->atios_sent++; 562 status = new_ccb->ccb_h.status; 563 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 564 free(new_ccb, M_CTLFE); 565 break; 566 } 567 } 568 569 status = cam_periph_acquire(periph); 570 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 571 xpt_print(periph->path, "%s: could not acquire reference " 572 "count, status = %#x\n", __func__, status); 573 return (status); 574 } 575 576 if (i == 0) { 577 xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " 578 "status 0x%x\n", __func__, status); 579 return (CAM_REQ_CMP_ERR); 580 } 581 582 for (i = 0; i < CTLFE_IN_PER_LUN; i++) { 583 union ccb *new_ccb; 584 585 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 586 M_ZERO|M_NOWAIT); 587 if (new_ccb == NULL) { 588 status = CAM_RESRC_UNAVAIL; 589 break; 590 } 591 592 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 593 new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 594 new_ccb->ccb_h.cbfcnp = ctlfedone; 595 new_ccb->ccb_h.flags |= CAM_UNLOCKED; 596 xpt_action(new_ccb); 597 softc->inots_sent++; 598 status = new_ccb->ccb_h.status; 599 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 600 /* 601 * Note that we don't free the CCB here. If the 602 * status is not CAM_REQ_INPROG, then we're 603 * probably talking to a SIM that says it is 604 * target-capable but doesn't support the 605 * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the 606 * older API. In that case, it'll call xpt_done() 607 * on the CCB, and we need to free it in our done 608 * routine as a result. 
609 */ 610 break; 611 } 612 } 613 if ((i == 0) 614 || (status != CAM_REQ_INPROG)) { 615 xpt_print(periph->path, "%s: could not allocate immediate " 616 "notify CCBs, status 0x%x\n", __func__, status); 617 return (CAM_REQ_CMP_ERR); 618 } 619 return (CAM_REQ_CMP); 620 } 621 622 static void 623 ctlfeoninvalidate(struct cam_periph *periph) 624 { 625 union ccb en_lun_ccb; 626 cam_status status; 627 struct ctlfe_softc *bus_softc; 628 struct ctlfe_lun_softc *softc; 629 630 softc = (struct ctlfe_lun_softc *)periph->softc; 631 632 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 633 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 634 en_lun_ccb.cel.grp6_len = 0; 635 en_lun_ccb.cel.grp7_len = 0; 636 en_lun_ccb.cel.enable = 0; 637 xpt_action(&en_lun_ccb); 638 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 639 if (status != CAM_REQ_CMP) { 640 xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", 641 __func__, en_lun_ccb.ccb_h.status); 642 /* 643 * XXX KDM what do we do now? 644 */ 645 } 646 xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju " 647 "INOTs outstanding, %d refs\n", softc->atios_sent - 648 softc->atios_returned, softc->inots_sent - 649 softc->inots_returned, periph->refcount); 650 651 bus_softc = softc->parent_softc; 652 mtx_lock(&bus_softc->lun_softc_mtx); 653 STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); 654 mtx_unlock(&bus_softc->lun_softc_mtx); 655 } 656 657 static void 658 ctlfecleanup(struct cam_periph *periph) 659 { 660 struct ctlfe_lun_softc *softc; 661 662 xpt_print(periph->path, "%s: Called\n", __func__); 663 664 softc = (struct ctlfe_lun_softc *)periph->softc; 665 666 /* 667 * XXX KDM is there anything else that needs to be done here? 668 */ 669 670 callout_stop(&softc->dma_callout); 671 672 free(softc, M_CTLFE); 673 } 674 675 static void 676 ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io, 677 ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len, 678 u_int16_t *sglist_cnt) 679 { 680 struct ctlfe_softc *bus_softc; 681 struct ctlfe_lun_cmd_info *cmd_info; 682 struct ctl_sg_entry *ctl_sglist; 683 bus_dma_segment_t *cam_sglist; 684 size_t off; 685 int i, idx; 686 687 cmd_info = (struct ctlfe_lun_cmd_info *)io->io_hdr.port_priv; 688 bus_softc = softc->parent_softc; 689 690 /* 691 * Set the direction, relative to the initiator. 692 */ 693 *flags &= ~CAM_DIR_MASK; 694 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 695 *flags |= CAM_DIR_IN; 696 else 697 *flags |= CAM_DIR_OUT; 698 699 *flags &= ~CAM_DATA_MASK; 700 idx = cmd_info->cur_transfer_index; 701 off = cmd_info->cur_transfer_off; 702 cmd_info->flags &= ~CTLFE_CMD_PIECEWISE; 703 if (io->scsiio.kern_sg_entries == 0) { 704 /* No S/G list. */ 705 *data_ptr = io->scsiio.kern_data_ptr + off; 706 if (io->scsiio.kern_data_len - off <= bus_softc->maxio) { 707 *dxfer_len = io->scsiio.kern_data_len - off; 708 } else { 709 *dxfer_len = bus_softc->maxio; 710 cmd_info->cur_transfer_index = -1; 711 cmd_info->cur_transfer_off = bus_softc->maxio; 712 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 713 } 714 *sglist_cnt = 0; 715 716 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 717 *flags |= CAM_DATA_PADDR; 718 else 719 *flags |= CAM_DATA_VADDR; 720 } else { 721 /* S/G list with physical or virtual pointers. 
		 */
		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		cam_sglist = cmd_info->cam_sglist;
		*dxfer_len = 0;
		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
			cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off;
			if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) {
				cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off;
				*dxfer_len += cam_sglist[i].ds_len;
			} else {
				cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len;
				cmd_info->cur_transfer_index = idx + i;
				cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				*dxfer_len += cam_sglist[i].ds_len;
				if (ctl_sglist[i].len != 0)
					i++;
				break;
			}
			if (i == (CTLFE_MAX_SEGS - 1) &&
			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
				cmd_info->cur_transfer_index = idx + i + 1;
				cmd_info->cur_transfer_off = 0;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				i++;
				break;
			}
			off = 0;
		}
		*sglist_cnt = i;
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_SG_PADDR;
		else
			*flags |= CAM_DATA_SG;
		*data_ptr = (uint8_t *)cam_sglist;
	}
}

static void
ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ccb_hdr *ccb_h;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	softc->ccbs_alloced++;

	start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT;

	ccb_h = TAILQ_FIRST(&softc->work_queue);
	if (ccb_h == NULL) {
		softc->ccbs_freed++;
		xpt_release_ccb(start_ccb);
	} else {
		struct ccb_accept_tio *atio;
		struct ccb_scsiio *csio;
		uint8_t *data_ptr;
		uint32_t dxfer_len;
		ccb_flags flags;
		union ctl_io *io;
		uint8_t scsi_status;

		/* Take the ATIO off the work queue */
		TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
		atio = (struct ccb_accept_tio *)ccb_h;
		io = (union ctl_io *)ccb_h->io_ptr;
		csio = &start_ccb->csio;

		flags = atio->ccb_h.flags &
		    (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		if ((io == NULL)
		 || (io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
			/*
			 * We're done, send status back.
			 */
			flags |= CAM_SEND_STATUS;
			if (io == NULL) {
				scsi_status = SCSI_STATUS_BUSY;
				csio->sense_len = 0;
			} else if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
			    (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;

				/*
				 * If this command was aborted, we don't
				 * need to send status back to the SIM.
				 * Just free the CTIO and ctl_io, and
				 * recycle the ATIO back to the SIM.
				 */
				xpt_print(periph->path, "%s: aborted "
				    "command 0x%04x discarded\n",
				    __func__, io->scsiio.tag_num);
				ctl_free_io(io);
				/*
				 * For a wildcard attachment, commands can
				 * come in with a specific target/lun.  Reset
				 * the target and LUN fields back to the
				 * wildcard values before we send them back
				 * down to the SIM.  The SIM has a wildcard
				 * LUN enabled, not whatever target/lun
				 * these happened to be.
824 */ 825 if (softc->flags & CTLFE_LUN_WILDCARD) { 826 atio->ccb_h.target_id = 827 CAM_TARGET_WILDCARD; 828 atio->ccb_h.target_lun = 829 CAM_LUN_WILDCARD; 830 } 831 832 if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { 833 cam_release_devq(periph->path, 834 /*relsim_flags*/0, 835 /*reduction*/0, 836 /*timeout*/0, 837 /*getcount_only*/0); 838 atio->ccb_h.status &= ~CAM_DEV_QFRZN; 839 } 840 841 ccb_h = TAILQ_FIRST(&softc->work_queue); 842 843 if (atio->ccb_h.func_code != 844 XPT_ACCEPT_TARGET_IO) { 845 xpt_print(periph->path, "%s: func_code " 846 "is %#x\n", __func__, 847 atio->ccb_h.func_code); 848 } 849 start_ccb->ccb_h.func_code = XPT_ABORT; 850 start_ccb->cab.abort_ccb = (union ccb *)atio; 851 852 /* Tell the SIM that we've aborted this ATIO */ 853 xpt_action(start_ccb); 854 softc->ccbs_freed++; 855 xpt_release_ccb(start_ccb); 856 857 /* 858 * Send the ATIO back down to the SIM. 859 */ 860 xpt_action((union ccb *)atio); 861 softc->atios_sent++; 862 863 /* 864 * If we still have work to do, ask for 865 * another CCB. Otherwise, deactivate our 866 * callout. 867 */ 868 if (ccb_h != NULL) 869 xpt_schedule(periph, /*priority*/ 1); 870 else 871 callout_stop(&softc->dma_callout); 872 873 return; 874 } else { 875 io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; 876 scsi_status = io->scsiio.scsi_status; 877 csio->sense_len = io->scsiio.sense_len; 878 } 879 data_ptr = NULL; 880 dxfer_len = 0; 881 if (io == NULL) { 882 printf("%s: tag %04x io is NULL\n", __func__, 883 atio->tag_id); 884 } else { 885 #ifdef CTLFEDEBUG 886 printf("%s: tag %04x status %x\n", __func__, 887 atio->tag_id, io->io_hdr.status); 888 #endif 889 } 890 csio->sglist_cnt = 0; 891 if (csio->sense_len != 0) { 892 csio->sense_data = io->scsiio.sense_data; 893 flags |= CAM_SEND_SENSE; 894 } else if (scsi_status == SCSI_STATUS_CHECK_COND) { 895 xpt_print(periph->path, "%s: check condition " 896 "with no sense\n", __func__); 897 } 898 } else { 899 struct ctlfe_lun_cmd_info *cmd_info; 900 901 /* 902 * Datamove call, we need to setup the S/G list. 903 */ 904 905 cmd_info = (struct ctlfe_lun_cmd_info *) 906 io->io_hdr.port_priv; 907 908 KASSERT(sizeof(*cmd_info) < CTL_PORT_PRIV_SIZE, 909 ("%s: sizeof(struct ctlfe_lun_cmd_info) %zd < " 910 "CTL_PORT_PRIV_SIZE %d", __func__, 911 sizeof(*cmd_info), CTL_PORT_PRIV_SIZE)); 912 io->io_hdr.flags &= ~CTL_FLAG_DMA_QUEUED; 913 914 /* 915 * Need to zero this, in case it has been used for 916 * a previous datamove for this particular I/O. 917 */ 918 bzero(cmd_info, sizeof(*cmd_info)); 919 scsi_status = 0; 920 921 csio->cdb_len = atio->cdb_len; 922 923 ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, 924 &csio->sglist_cnt); 925 926 io->scsiio.ext_data_filled += dxfer_len; 927 928 if (io->scsiio.ext_data_filled > 929 io->scsiio.kern_total_len) { 930 xpt_print(periph->path, "%s: tag 0x%04x " 931 "fill len %u > total %u\n", 932 __func__, io->scsiio.tag_num, 933 io->scsiio.ext_data_filled, 934 io->scsiio.kern_total_len); 935 } 936 } 937 938 #ifdef CTLFEDEBUG 939 printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, 940 (flags & CAM_SEND_STATUS) ? 
"done" : "datamove", 941 atio->tag_id, flags, data_ptr, dxfer_len); 942 #endif 943 944 /* 945 * Valid combinations: 946 * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, 947 * sglist_cnt = 0 948 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, 949 * sglist_cnt = 0 950 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, 951 * sglist_cnt != 0 952 */ 953 #ifdef CTLFEDEBUG 954 if (((flags & CAM_SEND_STATUS) 955 && (((flags & CAM_DATA_SG) != 0) 956 || (dxfer_len != 0) 957 || (csio->sglist_cnt != 0))) 958 || (((flags & CAM_SEND_STATUS) == 0) 959 && (dxfer_len == 0)) 960 || ((flags & CAM_DATA_SG) 961 && (csio->sglist_cnt == 0)) 962 || (((flags & CAM_DATA_SG) == 0) 963 && (csio->sglist_cnt != 0))) { 964 printf("%s: tag %04x cdb %02x flags %#x dxfer_len " 965 "%d sg %u\n", __func__, atio->tag_id, 966 atio->cdb_io.cdb_bytes[0], flags, dxfer_len, 967 csio->sglist_cnt); 968 if (io != NULL) { 969 printf("%s: tag %04x io status %#x\n", __func__, 970 atio->tag_id, io->io_hdr.status); 971 } else { 972 printf("%s: tag %04x no associated io\n", 973 __func__, atio->tag_id); 974 } 975 } 976 #endif 977 cam_fill_ctio(csio, 978 /*retries*/ 2, 979 ctlfedone, 980 flags, 981 (flags & CAM_TAG_ACTION_VALID) ? 982 MSG_SIMPLE_Q_TAG : 0, 983 atio->tag_id, 984 atio->init_id, 985 scsi_status, 986 /*data_ptr*/ data_ptr, 987 /*dxfer_len*/ dxfer_len, 988 /*timeout*/ 5 * 1000); 989 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 990 start_ccb->ccb_h.ccb_atio = atio; 991 if (((flags & CAM_SEND_STATUS) == 0) 992 && (io != NULL)) 993 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 994 995 softc->ctios_sent++; 996 997 cam_periph_unlock(periph); 998 xpt_action(start_ccb); 999 cam_periph_lock(periph); 1000 1001 if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1002 cam_release_devq(periph->path, 1003 /*relsim_flags*/0, 1004 /*reduction*/0, 1005 /*timeout*/0, 1006 /*getcount_only*/0); 1007 atio->ccb_h.status &= ~CAM_DEV_QFRZN; 1008 } 1009 1010 ccb_h = TAILQ_FIRST(&softc->work_queue); 1011 } 1012 /* 1013 * If we still have work to do, ask for another CCB. Otherwise, 1014 * deactivate our callout. 1015 */ 1016 if (ccb_h != NULL) 1017 xpt_schedule(periph, /*priority*/ 1); 1018 else 1019 callout_stop(&softc->dma_callout); 1020 } 1021 1022 static void 1023 ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) 1024 { 1025 struct ctlfe_lun_softc *softc; 1026 1027 softc = (struct ctlfe_lun_softc *)periph->softc; 1028 1029 switch (ccb->ccb_h.func_code) { 1030 case XPT_ACCEPT_TARGET_IO: 1031 softc->atios_returned++; 1032 break; 1033 case XPT_IMMEDIATE_NOTIFY: 1034 case XPT_NOTIFY_ACKNOWLEDGE: 1035 softc->inots_returned++; 1036 break; 1037 default: 1038 break; 1039 } 1040 1041 free(ccb, M_CTLFE); 1042 1043 KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: " 1044 "atios_returned %ju > atios_sent %ju", __func__, 1045 softc->atios_returned, softc->atios_sent)); 1046 KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: " 1047 "inots_returned %ju > inots_sent %ju", __func__, 1048 softc->inots_returned, softc->inots_sent)); 1049 1050 /* 1051 * If we have received all of our CCBs, we can release our 1052 * reference on the peripheral driver. It will probably go away 1053 * now. 
	 */
	if ((softc->atios_returned == softc->atios_sent)
	 && (softc->inots_returned == softc->inots_sent)) {
		cam_periph_release_locked(periph);
	}
}

static int
ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
{
	uint64_t lba;
	uint32_t num_blocks, nbc;
	uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER) ?
	    atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes;

	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */

	switch (cmdbyt[0]) {
	case READ_6:
	case WRITE_6:
	{
		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
		lba = scsi_3btoul(cdb->addr);
		lba &= 0x1fffff;
		num_blocks = cdb->length;
		if (num_blocks == 0)
			num_blocks = 256;
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto3b(lba, cdb->addr);
		cdb->length = num_blocks;
		break;
	}
	case READ_10:
	case WRITE_10:
	{
		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto2b(num_blocks, cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12:
	{
		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16:
	case WRITE_ATOMIC_16:
	{
		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_u64to8b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	default:
		return -1;
	}
	return (0);
}

static void
ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_softc *bus_softc;
	struct ccb_accept_tio *atio = NULL;
	union ctl_io *io = NULL;
	struct mtx *mtx;

	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
#ifdef CTLFE_DEBUG
	printf("%s: entered, func_code = %#x, type = %#lx\n", __func__,
	    done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type);
#endif

	softc = (struct ctlfe_lun_softc *)periph->softc;
	bus_softc = softc->parent_softc;
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	/*
	 * If the peripheral is invalid, ATIOs and immediate notify CCBs
	 * need to be freed.  Most of the ATIOs and INOTs that come back
	 * will be CCBs that are being returned from the SIM as a result of
	 * our disabling the LUN.
	 *
	 * Other CCB types are handled in their respective cases below.
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		switch (done_ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMEDIATE_NOTIFY:
		case XPT_NOTIFY_ACKNOWLEDGE:
			ctlfe_free_ccb(periph, done_ccb);
			goto out;
		default:
			break;
		}

	}
	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO: {

		atio = &done_ccb->atio;

		softc->atios_returned++;

 resubmit:
		/*
		 * Allocate a ctl_io, pass it to CTL, and wait for the
		 * datamove or done.
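		 * If the allocation fails, the ATIO is queued on the work
		 * queue and ctlfestart() returns SCSI_STATUS_BUSY to the
		 * initiator.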
1182 */ 1183 io = ctl_alloc_io(bus_softc->port.ctl_pool_ref); 1184 if (io == NULL) { 1185 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1186 atio->ccb_h.flags |= CAM_DIR_NONE; 1187 1188 printf("%s: ctl_alloc_io failed!\n", __func__); 1189 1190 /* 1191 * XXX KDM need to set SCSI_STATUS_BUSY, but there 1192 * is no field in the ATIO structure to do that, 1193 * and we aren't able to allocate a ctl_io here. 1194 * What to do? 1195 */ 1196 atio->sense_len = 0; 1197 done_ccb->ccb_h.io_ptr = NULL; 1198 TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h, 1199 periph_links.tqe); 1200 xpt_schedule(periph, /*priority*/ 1); 1201 break; 1202 } 1203 mtx_unlock(mtx); 1204 ctl_zero_io(io); 1205 1206 /* Save pointers on both sides */ 1207 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb; 1208 done_ccb->ccb_h.io_ptr = io; 1209 1210 /* 1211 * Only SCSI I/O comes down this path, resets, etc. come 1212 * down the immediate notify path below. 1213 */ 1214 io->io_hdr.io_type = CTL_IO_SCSI; 1215 io->io_hdr.nexus.initid.id = atio->init_id; 1216 io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; 1217 io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id; 1218 io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; 1219 io->scsiio.tag_num = atio->tag_id; 1220 switch (atio->tag_action) { 1221 case CAM_TAG_ACTION_NONE: 1222 io->scsiio.tag_type = CTL_TAG_UNTAGGED; 1223 break; 1224 case MSG_SIMPLE_TASK: 1225 io->scsiio.tag_type = CTL_TAG_SIMPLE; 1226 break; 1227 case MSG_HEAD_OF_QUEUE_TASK: 1228 io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; 1229 break; 1230 case MSG_ORDERED_TASK: 1231 io->scsiio.tag_type = CTL_TAG_ORDERED; 1232 break; 1233 case MSG_ACA_TASK: 1234 io->scsiio.tag_type = CTL_TAG_ACA; 1235 break; 1236 default: 1237 io->scsiio.tag_type = CTL_TAG_UNTAGGED; 1238 printf("%s: unhandled tag type %#x!!\n", __func__, 1239 atio->tag_action); 1240 break; 1241 } 1242 if (atio->cdb_len > sizeof(io->scsiio.cdb)) { 1243 printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", 1244 __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); 1245 } 1246 io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); 1247 bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb, 1248 io->scsiio.cdb_len); 1249 1250 #ifdef CTLFEDEBUG 1251 printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__, 1252 (uintmax_t)io->io_hdr.nexus.initid.id, 1253 io->io_hdr.nexus.targ_port, 1254 (uintmax_t)io->io_hdr.nexus.targ_target.id, 1255 io->io_hdr.nexus.targ_lun, 1256 io->scsiio.tag_num, io->scsiio.cdb[0]); 1257 #endif 1258 1259 ctl_queue(io); 1260 return; 1261 } 1262 case XPT_CONT_TARGET_IO: { 1263 int srr = 0; 1264 uint32_t srr_off = 0; 1265 1266 atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; 1267 io = (union ctl_io *)atio->ccb_h.io_ptr; 1268 1269 softc->ctios_returned++; 1270 #ifdef CTLFEDEBUG 1271 printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", 1272 __func__, atio->tag_id, done_ccb->ccb_h.flags); 1273 #endif 1274 /* 1275 * Handle SRR case were the data pointer is pushed back hack 1276 */ 1277 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV 1278 && done_ccb->csio.msg_ptr != NULL 1279 && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED 1280 && done_ccb->csio.msg_ptr[1] == 5 1281 && done_ccb->csio.msg_ptr[2] == 0) { 1282 srr = 1; 1283 srr_off = 1284 (done_ccb->csio.msg_ptr[3] << 24) 1285 | (done_ccb->csio.msg_ptr[4] << 16) 1286 | (done_ccb->csio.msg_ptr[5] << 8) 1287 | (done_ccb->csio.msg_ptr[6]); 1288 } 1289 1290 if (srr && (done_ccb->ccb_h.flags & CAM_SEND_STATUS)) { 1291 /* 1292 * If status was being sent, the back end data 
			 * is now history.  Hack it up and resubmit a new
			 * command with the CDB adjusted.  If the SIM does
			 * the right thing, all of the resid math should work.
			 */
			softc->ccbs_freed++;
			xpt_release_ccb(done_ccb);
			ctl_free_io(io);
			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
				done_ccb = (union ccb *)atio;
				goto resubmit;
			}
			/*
			 * Fall through to doom....
			 */
		} else if (srr) {
			/*
			 * If we have an srr and we're still sending data, we
			 * should be able to adjust offsets and cycle again.
			 */
			io->scsiio.kern_rel_offset =
			    io->scsiio.ext_data_filled = srr_off;
			io->scsiio.ext_data_len = io->scsiio.kern_total_len -
			    io->scsiio.kern_rel_offset;
			softc->ccbs_freed++;
			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
			xpt_release_ccb(done_ccb);
			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
			    periph_links.tqe);
			xpt_schedule(periph, /*priority*/ 1);
			break;
		}

		/*
		 * If we were sending status back to the initiator, free up
		 * resources.  If we were doing a datamove, call the
		 * datamove done routine.
		 */
		if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) {
			softc->ccbs_freed++;
			xpt_release_ccb(done_ccb);
			ctl_free_io(io);
			/*
			 * For a wildcard attachment, commands can come in
			 * with a specific target/lun.  Reset the target
			 * and LUN fields back to the wildcard values before
			 * we send them back down to the SIM.  The SIM has
			 * a wildcard LUN enabled, not whatever target/lun
			 * these happened to be.
			 */
			if (softc->flags & CTLFE_LUN_WILDCARD) {
				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
			}
			if (periph->flags & CAM_PERIPH_INVALID) {
				ctlfe_free_ccb(periph, (union ccb *)atio);
			} else {
				softc->atios_sent++;
				mtx_unlock(mtx);
				xpt_action((union ccb *)atio);
				return;
			}
		} else {
			struct ctlfe_lun_cmd_info *cmd_info;
			struct ccb_scsiio *csio;

			csio = &done_ccb->csio;
			cmd_info = (struct ctlfe_lun_cmd_info *)
			    io->io_hdr.port_priv;

			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;

			io->scsiio.ext_data_len += csio->dxfer_len;
			if (io->scsiio.ext_data_len >
			    io->scsiio.kern_total_len) {
				xpt_print(periph->path, "%s: tag 0x%04x "
				    "done len %u > total %u sent %u\n",
				    __func__, io->scsiio.tag_num,
				    io->scsiio.ext_data_len,
				    io->scsiio.kern_total_len,
				    io->scsiio.ext_data_filled);
			}
			/*
			 * Translate CAM status to CTL status.  Success
			 * does not change the overall ctl_io status.  In
			 * that case we just set port_status to 0.  If we
			 * have a failure, though, set a data phase error
			 * for the overall ctl_io.
			 */
			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
			case CAM_REQ_CMP:
				io->io_hdr.port_status = 0;
				break;
			default:
				/*
				 * XXX KDM we probably need to figure out a
				 * standard set of errors that the SIM
				 * drivers should return in the event of a
				 * data transfer failure.  A data phase
				 * error will at least point the user to a
				 * data transfer error of some sort.
				 * Hopefully the SIM printed out some
				 * additional information to give the user
				 * a clue what happened.
				 */
				io->io_hdr.port_status = 0xbad1;
				ctl_set_data_phase_error(&io->scsiio);
				/*
				 * XXX KDM figure out residual.
1401 */ 1402 break; 1403 } 1404 /* 1405 * If we had to break this S/G list into multiple 1406 * pieces, figure out where we are in the list, and 1407 * continue sending pieces if necessary. 1408 */ 1409 if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) 1410 && (io->io_hdr.port_status == 0)) { 1411 ccb_flags flags; 1412 uint8_t scsi_status; 1413 uint8_t *data_ptr; 1414 uint32_t dxfer_len; 1415 1416 flags = atio->ccb_h.flags & 1417 (CAM_DIS_DISCONNECT| 1418 CAM_TAG_ACTION_VALID); 1419 1420 ctlfedata(softc, io, &flags, &data_ptr, 1421 &dxfer_len, &csio->sglist_cnt); 1422 1423 scsi_status = 0; 1424 1425 if (((flags & CAM_SEND_STATUS) == 0) 1426 && (dxfer_len == 0)) { 1427 printf("%s: tag %04x no status or " 1428 "len cdb = %02x\n", __func__, 1429 atio->tag_id, 1430 atio->cdb_io.cdb_bytes[0]); 1431 printf("%s: tag %04x io status %#x\n", 1432 __func__, atio->tag_id, 1433 io->io_hdr.status); 1434 } 1435 1436 cam_fill_ctio(csio, 1437 /*retries*/ 2, 1438 ctlfedone, 1439 flags, 1440 (flags & CAM_TAG_ACTION_VALID) ? 1441 MSG_SIMPLE_Q_TAG : 0, 1442 atio->tag_id, 1443 atio->init_id, 1444 scsi_status, 1445 /*data_ptr*/ data_ptr, 1446 /*dxfer_len*/ dxfer_len, 1447 /*timeout*/ 5 * 1000); 1448 1449 csio->ccb_h.flags |= CAM_UNLOCKED; 1450 csio->resid = 0; 1451 csio->ccb_h.ccb_atio = atio; 1452 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 1453 softc->ctios_sent++; 1454 mtx_unlock(mtx); 1455 xpt_action((union ccb *)csio); 1456 } else { 1457 /* 1458 * Release the CTIO. The ATIO will be sent back 1459 * down to the SIM once we send status. 1460 */ 1461 softc->ccbs_freed++; 1462 xpt_release_ccb(done_ccb); 1463 mtx_unlock(mtx); 1464 1465 /* Call the backend move done callback */ 1466 io->scsiio.be_move_done(io); 1467 } 1468 return; 1469 } 1470 break; 1471 } 1472 case XPT_IMMEDIATE_NOTIFY: { 1473 union ctl_io *io; 1474 struct ccb_immediate_notify *inot; 1475 cam_status status; 1476 int frozen; 1477 1478 inot = &done_ccb->cin1; 1479 1480 softc->inots_returned++; 1481 1482 frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 1483 1484 printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x " 1485 "seq %#x\n", __func__, inot->ccb_h.status, 1486 inot->tag_id, inot->seq_id); 1487 1488 io = ctl_alloc_io(bus_softc->port.ctl_pool_ref); 1489 if (io != NULL) { 1490 int send_ctl_io; 1491 1492 send_ctl_io = 1; 1493 1494 ctl_zero_io(io); 1495 io->io_hdr.io_type = CTL_IO_TASK; 1496 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb; 1497 inot->ccb_h.io_ptr = io; 1498 io->io_hdr.nexus.initid.id = inot->initiator_id; 1499 io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; 1500 io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id; 1501 io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; 1502 /* XXX KDM should this be the tag_id? */ 1503 io->taskio.tag_num = inot->seq_id; 1504 1505 status = inot->ccb_h.status & CAM_STATUS_MASK; 1506 switch (status) { 1507 case CAM_SCSI_BUS_RESET: 1508 io->taskio.task_action = CTL_TASK_BUS_RESET; 1509 break; 1510 case CAM_BDR_SENT: 1511 io->taskio.task_action = CTL_TASK_TARGET_RESET; 1512 break; 1513 case CAM_MESSAGE_RECV: 1514 switch (inot->arg) { 1515 case MSG_ABORT_TASK_SET: 1516 /* 1517 * XXX KDM this isn't currently 1518 * supported by CTL. It ends up 1519 * being a no-op. 
					 */
					io->taskio.task_action =
					    CTL_TASK_ABORT_TASK_SET;
					break;
				case MSG_TARGET_RESET:
					io->taskio.task_action =
					    CTL_TASK_TARGET_RESET;
					break;
				case MSG_ABORT_TASK:
					io->taskio.task_action =
					    CTL_TASK_ABORT_TASK;
					break;
				case MSG_LOGICAL_UNIT_RESET:
					io->taskio.task_action =
					    CTL_TASK_LUN_RESET;
					break;
				case MSG_CLEAR_TASK_SET:
					/*
					 * XXX KDM this isn't currently
					 * supported by CTL.  It ends up
					 * being a no-op.
					 */
					io->taskio.task_action =
					    CTL_TASK_CLEAR_TASK_SET;
					break;
				case MSG_CLEAR_ACA:
					io->taskio.task_action =
					    CTL_TASK_CLEAR_ACA;
					break;
				case MSG_NOOP:
					send_ctl_io = 0;
					break;
				default:
					xpt_print(periph->path, "%s: "
					    "unsupported message 0x%x\n",
					    __func__, inot->arg);
					send_ctl_io = 0;
					break;
				}
				break;
			case CAM_REQ_ABORTED:
				/*
				 * This request was sent back by the driver.
				 * XXX KDM what do we do here?
				 */
				send_ctl_io = 0;
				break;
			case CAM_REQ_INVALID:
			case CAM_PROVIDE_FAIL:
			default:
				/*
				 * We should only get here if we're talking
				 * to a SIM that is target capable but
				 * supports the old API.  In that case, we
				 * need to just free the CCB.  If we actually
				 * send a notify acknowledge, it will send
				 * that back with an error as well.
				 */

				if ((status != CAM_REQ_INVALID)
				 && (status != CAM_PROVIDE_FAIL))
					xpt_print(periph->path, "%s: "
					    "unsupported CAM status "
					    "0x%x\n", __func__, status);

				ctl_free_io(io);
				ctlfe_free_ccb(periph, done_ccb);

				goto out;
			}
			if (send_ctl_io != 0) {
				ctl_queue(io);
			} else {
				ctl_free_io(io);
				done_ccb->ccb_h.status = CAM_REQ_INPROG;
				done_ccb->ccb_h.func_code =
				    XPT_NOTIFY_ACKNOWLEDGE;
				xpt_action(done_ccb);
			}
		} else {
			xpt_print(periph->path, "%s: could not allocate "
			    "ctl_io for immediate notify!\n", __func__);
			/* requeue this to the adapter */
			done_ccb->ccb_h.status = CAM_REQ_INPROG;
			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
			xpt_action(done_ccb);
		}

		if (frozen != 0) {
			cam_release_devq(periph->path,
			    /*relsim_flags*/ 0,
			    /*opening reduction*/ 0,
			    /*timeout*/ 0,
			    /*getcount_only*/ 0);
		}
		break;
	}
	case XPT_NOTIFY_ACKNOWLEDGE:
		/*
		 * Queue this back down to the SIM as an immediate notify.
		 */
		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		xpt_action(done_ccb);
		softc->inots_sent++;
		break;
	case XPT_SET_SIM_KNOB:
	case XPT_GET_SIM_KNOB:
		break;
	default:
		panic("%s: unexpected CCB type %#x", __func__,
		    done_ccb->ccb_h.func_code);
		break;
	}

out:
	mtx_unlock(mtx);
}

static void
ctlfe_onoffline(void *arg, int online)
{
	struct ctlfe_softc *bus_softc;
	union ccb *ccb;
	cam_status status;
	struct cam_path *path;
	int set_wwnn;

	bus_softc = (struct ctlfe_softc *)arg;

	set_wwnn = 0;

	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path!\n", __func__);
		return;
	}
	ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO);
	if (ccb == NULL) {
		printf("%s: unable to malloc CCB!\n", __func__);
		xpt_free_path(path);
		return;
	}
	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);

	/*
	 * Copan WWN format:
	 *
	 * Bits 63-60:	0x5		NAA, IEEE registered name
	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
	 * Bits 35-12:	Copan SSN (Sequential Serial Number)
	 * Bits 11-8:	Type of port:
	 *			1 == N-Port
	 *			2 == F-Port
	 *			3 == NL-Port
	 * Bits 7-0:	0 == Node Name, >0 == Port Number
	 */

	if (online != 0) {

		ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;


		xpt_action(ccb);


		if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0) {
#ifdef RANDOM_WWNN
			uint64_t random_bits;
#endif

			printf("%s: %s current WWNN %#jx\n", __func__,
			    bus_softc->port_name,
			    ccb->knob.xport_specific.fc.wwnn);
			printf("%s: %s current WWPN %#jx\n", __func__,
			    bus_softc->port_name,
			    ccb->knob.xport_specific.fc.wwpn);

#ifdef RANDOM_WWNN
			arc4rand(&random_bits, sizeof(random_bits), 0);
#endif

			/*
			 * XXX KDM this is a bit of a kludge for now.  We
			 * take the current WWNN/WWPN from the card, and
			 * replace the company identifier and the NL-Port
			 * indicator and the port number (for the WWPN).
			 * This should be replaced later with ddb_GetWWNN,
			 * or possibly a more centralized scheme.  (It
			 * would be nice to have the WWNN/WWPN for each
			 * port stored in the ctl_port structure.)
			 */
#ifdef RANDOM_WWNN
			ccb->knob.xport_specific.fc.wwnn =
			    (random_bits &
			    0x0000000fffffff00ULL) |
			    /* Company ID */ 0x5000ED5000000000ULL |
			    /* NL-Port */ 0x0300;
			ccb->knob.xport_specific.fc.wwpn =
			    (random_bits &
			    0x0000000fffffff00ULL) |
			    /* Company ID */ 0x5000ED5000000000ULL |
			    /* NL-Port */ 0x3000 |
			    /* Port Num */ (bus_softc->port.targ_port & 0xff);

			/*
			 * This is a bit of an API break/reversal, but if
			 * we're doing the random WWNN that's a little
			 * different anyway.  So record what we're actually
			 * using with the frontend code so it's reported
			 * accurately.
			 */
			ctl_port_set_wwns(&bus_softc->port,
			    true, ccb->knob.xport_specific.fc.wwnn,
			    true, ccb->knob.xport_specific.fc.wwpn);
			set_wwnn = 1;
#else /* RANDOM_WWNN */
			/*
			 * If the user has specified a WWNN/WWPN, send them
			 * down to the SIM.  Otherwise, record what the SIM
			 * has reported.
1742 */ 1743 if ((bus_softc->port.wwnn != 0) 1744 && (bus_softc->port.wwpn != 0)) { 1745 ccb->knob.xport_specific.fc.wwnn = 1746 bus_softc->port.wwnn; 1747 ccb->knob.xport_specific.fc.wwpn = 1748 bus_softc->port.wwpn; 1749 set_wwnn = 1; 1750 } else { 1751 ctl_port_set_wwns(&bus_softc->port, 1752 true, ccb->knob.xport_specific.fc.wwnn, 1753 true, ccb->knob.xport_specific.fc.wwpn); 1754 } 1755 #endif /* RANDOM_WWNN */ 1756 1757 1758 if (set_wwnn != 0) { 1759 printf("%s: %s new WWNN %#jx\n", __func__, 1760 bus_softc->port_name, 1761 ccb->knob.xport_specific.fc.wwnn); 1762 printf("%s: %s new WWPN %#jx\n", __func__, 1763 bus_softc->port_name, 1764 ccb->knob.xport_specific.fc.wwpn); 1765 } 1766 } else { 1767 printf("%s: %s has no valid WWNN/WWPN\n", __func__, 1768 bus_softc->port_name); 1769 } 1770 } 1771 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 1772 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 1773 if (set_wwnn != 0) 1774 ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS; 1775 1776 if (online != 0) 1777 ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET; 1778 else 1779 ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE; 1780 1781 xpt_action(ccb); 1782 1783 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1784 printf("%s: SIM %s (path id %d) target %s failed with " 1785 "status %#x\n", 1786 __func__, bus_softc->port_name, bus_softc->path_id, 1787 (online != 0) ? "enable" : "disable", 1788 ccb->ccb_h.status); 1789 } else { 1790 printf("%s: SIM %s (path id %d) target %s succeeded\n", 1791 __func__, bus_softc->port_name, bus_softc->path_id, 1792 (online != 0) ? "enable" : "disable"); 1793 } 1794 1795 xpt_free_path(path); 1796 1797 free(ccb, M_TEMP); 1798 1799 return; 1800 } 1801 1802 static void 1803 ctlfe_online(void *arg) 1804 { 1805 struct ctlfe_softc *bus_softc; 1806 struct cam_path *path; 1807 cam_status status; 1808 struct ctlfe_lun_softc *lun_softc; 1809 1810 bus_softc = (struct ctlfe_softc *)arg; 1811 1812 /* 1813 * Create the wildcard LUN before bringing the port online. 1814 */ 1815 status = xpt_create_path(&path, /*periph*/ NULL, 1816 bus_softc->path_id, CAM_TARGET_WILDCARD, 1817 CAM_LUN_WILDCARD); 1818 if (status != CAM_REQ_CMP) { 1819 printf("%s: unable to create path for wildcard periph\n", 1820 __func__); 1821 return; 1822 } 1823 1824 lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, 1825 M_NOWAIT | M_ZERO); 1826 if (lun_softc == NULL) { 1827 xpt_print(path, "%s: unable to allocate softc for " 1828 "wildcard periph\n", __func__); 1829 xpt_free_path(path); 1830 return; 1831 } 1832 1833 xpt_path_lock(path); 1834 lun_softc->parent_softc = bus_softc; 1835 lun_softc->flags |= CTLFE_LUN_WILDCARD; 1836 1837 mtx_lock(&bus_softc->lun_softc_mtx); 1838 STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, lun_softc, links); 1839 mtx_unlock(&bus_softc->lun_softc_mtx); 1840 1841 status = cam_periph_alloc(ctlferegister, 1842 ctlfeoninvalidate, 1843 ctlfecleanup, 1844 ctlfestart, 1845 "ctl", 1846 CAM_PERIPH_BIO, 1847 path, 1848 ctlfeasync, 1849 0, 1850 lun_softc); 1851 1852 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1853 const struct cam_status_entry *entry; 1854 1855 entry = cam_fetch_status_entry(status); 1856 1857 printf("%s: CAM error %s (%#x) returned from " 1858 "cam_periph_alloc()\n", __func__, (entry != NULL) ? 
		    entry->status_text : "Unknown", status);
	}

	ctlfe_onoffline(arg, /*online*/ 1);

	xpt_path_unlock(path);
	xpt_free_path(path);
}

static void
ctlfe_offline(void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct cam_path *path;
	cam_status status;
	struct cam_periph *periph;

	bus_softc = (struct ctlfe_softc *)arg;

	/*
	 * Disable the wildcard LUN for this port now that we have taken
	 * the port offline.
	 */
	status = xpt_create_path(&path, /*periph*/ NULL,
	    bus_softc->path_id, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path for wildcard periph\n",
		    __func__);
		return;
	}

	xpt_path_lock(path);

	ctlfe_onoffline(arg, /*online*/ 0);

	if ((periph = cam_periph_find(path, "ctl")) != NULL)
		cam_periph_invalidate(periph);

	xpt_path_unlock(path);
	xpt_free_path(path);
}

/*
 * This will get called to enable a LUN on every bus that is attached to
 * CTL.  So we only need to create a path/periph for this particular bus.
 */
static int
ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;
	struct cam_path *path;
	struct cam_periph *periph;
	cam_status status;

	bus_softc = (struct ctlfe_softc *)arg;

	status = xpt_create_path(&path, /*periph*/ NULL,
	    bus_softc->path_id,
	    targ_id.id, lun_id);
	/* XXX KDM need some way to return status to CTL here? */
	if (status != CAM_REQ_CMP) {
		printf("%s: could not create path, status %#x\n", __func__,
		    status);
		return (1);
	}

	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
	xpt_path_lock(path);
	periph = cam_periph_find(path, "ctl");
	if (periph != NULL) {
		/* We've already got a periph, no need to alloc a new one. */
		xpt_path_unlock(path);
		xpt_free_path(path);
		free(softc, M_CTLFE);
		return (0);
	}

	softc->parent_softc = bus_softc;
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);

	status = cam_periph_alloc(ctlferegister,
	    ctlfeoninvalidate,
	    ctlfecleanup,
	    ctlfestart,
	    "ctl",
	    CAM_PERIPH_BIO,
	    path,
	    ctlfeasync,
	    0,
	    softc);

	xpt_path_unlock(path);
	xpt_free_path(path);
	return (0);
}

/*
 * This will get called when the user removes a LUN to disable that LUN
 * on every bus that is attached to CTL.
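 * The periph for the matching target/LUN is looked up and invalidated;
 * its destructor then frees the per-LUN softc.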
 */
static int
ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
	struct ctlfe_softc *softc;
	struct ctlfe_lun_softc *lun_softc;

	softc = (struct ctlfe_softc *)arg;

	mtx_lock(&softc->lun_softc_mtx);
	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
		struct cam_path *path;

		path = lun_softc->periph->path;

		if ((xpt_path_target_id(path) == targ_id.id)
		 && (xpt_path_lun_id(path) == lun_id)) {
			break;
		}
	}
	if (lun_softc == NULL) {
		mtx_unlock(&softc->lun_softc_mtx);
		printf("%s: can't find target %d lun %d\n", __func__,
		    targ_id.id, lun_id);
		return (1);
	}
	cam_periph_acquire(lun_softc->periph);
	mtx_unlock(&softc->lun_softc_mtx);

	cam_periph_lock(lun_softc->periph);
	cam_periph_invalidate(lun_softc->periph);
	cam_periph_unlock(lun_softc->periph);
	cam_periph_release(lun_softc->periph);
	return (0);
}

static void
ctlfe_dump_sim(struct cam_sim *sim)
{

	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
	    sim->sim_name, sim->unit_number,
	    sim->max_tagged_dev_openings, sim->max_dev_openings);
	printf("\n");
}

/*
 * Assumes that the SIM lock is held.
 */
static void
ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
{
	struct ccb_hdr *hdr;
	struct cam_periph *periph;
	int num_items;

	periph = softc->periph;
	num_items = 0;

	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
		union ctl_io *io;

		io = hdr->io_ptr;

		num_items++;

		/*
		 * This can happen when we get an ATIO but can't allocate
		 * a ctl_io.  See the XPT_ACCEPT_TARGET_IO case in ctlfedone().
		 */
		if (io == NULL) {
			struct ccb_scsiio *csio;

			csio = (struct ccb_scsiio *)hdr;

			xpt_print(periph->path, "CCB %#x ctl_io allocation "
			    "failed\n", csio->tag_id);
			continue;
		}

		/*
		 * Only regular SCSI I/O is put on the work
		 * queue, so we can print sense here.  There may be no
		 * sense if it's on the queue for a DMA, but this serves to
		 * print out the CCB as well.
		 *
		 * XXX KDM switch this over to scsi_sense_print() when
		 * CTL is merged in with CAM.
		 */
		ctl_io_error_print(io, NULL);

		/*
		 * We're sending status back to the
		 * initiator, so we're on the queue waiting
		 * for a CTIO to do that.
		 */
		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
			continue;

		/*
		 * Otherwise, we're on the queue waiting to
		 * do a data transfer.
		 */
		xpt_print(periph->path, "Total %u, Current %u, Resid %u\n",
		    io->scsiio.kern_total_len, io->scsiio.kern_data_len,
		    io->scsiio.kern_data_resid);
	}

	xpt_print(periph->path, "%d requests total waiting for CCBs\n",
	    num_items);
	xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
	    "freed)\n", (uintmax_t)(softc->ccbs_alloced -
	    softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
	    (uintmax_t)softc->ccbs_freed);
	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
	    "returned)\n", (uintmax_t)(softc->ctios_sent -
	    softc->ctios_returned), softc->ctios_sent,
	    softc->ctios_returned);
}

/*
 * This function is called when we fail to get a CCB for a DMA or status return
 * to the initiator within the specified time period.
2085 * 2086 * The callout code should insure that we hold the sim mutex here. 2087 */ 2088 static void 2089 ctlfe_dma_timeout(void *arg) 2090 { 2091 struct ctlfe_lun_softc *softc; 2092 struct cam_periph *periph; 2093 struct cam_sim *sim; 2094 int num_queued; 2095 2096 softc = (struct ctlfe_lun_softc *)arg; 2097 periph = softc->periph; 2098 sim = xpt_path_sim(periph->path); 2099 num_queued = 0; 2100 2101 /* 2102 * Nothing to do... 2103 */ 2104 if (TAILQ_FIRST(&softc->work_queue) == NULL) { 2105 xpt_print(periph->path, "TIMEOUT triggered after %d " 2106 "seconds, but nothing on work queue??\n", 2107 CTLFE_DMA_TIMEOUT); 2108 return; 2109 } 2110 2111 xpt_print(periph->path, "TIMEOUT (%d seconds) waiting for DMA to " 2112 "start\n", CTLFE_DMA_TIMEOUT); 2113 2114 ctlfe_dump_queue(softc); 2115 2116 ctlfe_dump_sim(sim); 2117 2118 xpt_print(periph->path, "calling xpt_schedule() to attempt to " 2119 "unstick our queue\n"); 2120 2121 xpt_schedule(periph, /*priority*/ 1); 2122 2123 xpt_print(periph->path, "xpt_schedule() call complete\n"); 2124 } 2125 2126 /* 2127 * Datamove/done routine called by CTL. Put ourselves on the queue to 2128 * receive a CCB from CAM so we can queue the continue I/O request down 2129 * to the adapter. 2130 */ 2131 static void 2132 ctlfe_datamove_done(union ctl_io *io) 2133 { 2134 union ccb *ccb; 2135 struct cam_periph *periph; 2136 struct ctlfe_lun_softc *softc; 2137 2138 ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 2139 2140 periph = xpt_path_periph(ccb->ccb_h.path); 2141 cam_periph_lock(periph); 2142 2143 softc = (struct ctlfe_lun_softc *)periph->softc; 2144 2145 if (io->io_hdr.io_type == CTL_IO_TASK) { 2146 /* 2147 * Task management commands don't require any further 2148 * communication back to the adapter. Requeue the CCB 2149 * to the adapter, and free the CTL I/O. 2150 */ 2151 xpt_print(ccb->ccb_h.path, "%s: returning task I/O " 2152 "tag %#x seq %#x\n", __func__, 2153 ccb->cin1.tag_id, ccb->cin1.seq_id); 2154 /* 2155 * Send the notify acknowledge down to the SIM, to let it 2156 * know we processed the task management command. 2157 */ 2158 ccb->ccb_h.status = CAM_REQ_INPROG; 2159 ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; 2160 xpt_action(ccb); 2161 ctl_free_io(io); 2162 } else { 2163 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) 2164 io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; 2165 else 2166 io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; 2167 2168 TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, 2169 periph_links.tqe); 2170 2171 /* 2172 * Reset the timeout for our latest active DMA. 2173 */ 2174 callout_reset(&softc->dma_callout, 2175 CTLFE_DMA_TIMEOUT * hz, 2176 ctlfe_dma_timeout, softc); 2177 /* 2178 * Ask for the CAM transport layer to send us a CCB to do 2179 * the DMA or send status, unless ctlfe_dma_enabled is set 2180 * to 0. 2181 */ 2182 if (ctlfe_dma_enabled != 0) 2183 xpt_schedule(periph, /*priority*/ 1); 2184 } 2185 2186 cam_periph_unlock(periph); 2187 } 2188 2189 static void 2190 ctlfe_dump(void) 2191 { 2192 struct ctlfe_softc *bus_softc; 2193 2194 STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { 2195 struct ctlfe_lun_softc *lun_softc; 2196 2197 ctlfe_dump_sim(bus_softc->sim); 2198 2199 STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) { 2200 ctlfe_dump_queue(lun_softc); 2201 } 2202 } 2203 } 2204