/*-
 * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
 */
/*
 * Peripheral driver interface between CAM and CTL (CAM Target Layer).
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <machine/bus.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
	CTLFE_CCB_DEFAULT	= 0x00
} ctlfe_ccb_types;

struct ctlfe_softc {
	struct ctl_port port;
	path_id_t path_id;
	u_int maxio;
	struct cam_sim *sim;
	char port_name[DEV_IDLEN];
	struct mtx lun_softc_mtx;
	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
	STAILQ_ENTRY(ctlfe_softc) links;
};

STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
struct mtx ctlfe_list_mtx;
static char ctlfe_mtx_desc[] = "ctlfelist";
static int ctlfe_dma_enabled = 1;
#ifdef CTLFE_INIT_ENABLE
static int ctlfe_max_targets = 1;
static int ctlfe_num_targets = 0;
#endif

typedef enum {
	CTLFE_LUN_NONE		= 0x00,
	CTLFE_LUN_WILDCARD	= 0x01
} ctlfe_lun_flags;

struct ctlfe_lun_softc {
	struct ctlfe_softc *parent_softc;
	struct cam_periph *periph;
	ctlfe_lun_flags flags;
	struct callout dma_callout;
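	/*
	 * Counters for CCBs allocated and freed, and for ATIO, INOT and
	 * CTIO CCBs sent to and returned from the SIM; these are checked
	 * in ctlfe_free_ccb() and reported by ctlfe_dump_queue().
	 */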
	uint64_t ccbs_alloced;
	uint64_t ccbs_freed;
	uint64_t ctios_sent;
	uint64_t ctios_returned;
	uint64_t atios_sent;
	uint64_t atios_returned;
	uint64_t inots_sent;
	uint64_t inots_returned;
	/* bus_dma_tag_t dma_tag; */
	TAILQ_HEAD(, ccb_hdr) work_queue;
	STAILQ_ENTRY(ctlfe_lun_softc) links;
};

typedef enum {
	CTLFE_CMD_NONE		= 0x00,
	CTLFE_CMD_PIECEWISE	= 0x01
} ctlfe_cmd_flags;

/*
 * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h.
 * Currently that is 600 bytes.
 */
struct ctlfe_lun_cmd_info {
	int cur_transfer_index;
	size_t cur_transfer_off;
	ctlfe_cmd_flags flags;
	/*
	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
	 * i386 and 512 bytes on amd64.
	 */
#define CTLFE_MAX_SEGS	32
	bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS];
};

/*
 * When we register the adapter/bus, request that this many ctl_ios be
 * allocated.  This should be the maximum supported by the adapter, but we
 * currently don't have a way to get that back from the path inquiry.
 * XXX KDM add that to the path inquiry.
 */
#define	CTLFE_REQ_CTL_IO	4096
/*
 * Number of Accept Target I/O CCBs to allocate and queue down to the
 * adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_ATIO_PER_LUN	1024
/*
 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
 * allocate and queue down to the adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_IN_PER_LUN	1024

/*
 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending
 * status to the initiator.  The SIM is expected to have its own timeouts,
 * so we're not putting this timeout around the CCB execution time.  The
 * SIM should timeout and let us know if it has an issue.
 */
#define	CTLFE_DMA_TIMEOUT	60

/*
 * Turn this on to enable extra debugging prints.
 */
#if 0
#define	CTLFE_DEBUG
#endif

/*
 * Use randomly assigned WWNN/WWPN values.  This is to work around an issue
 * in the FreeBSD initiator that makes it unable to rescan the target if
 * the target gets rebooted and the WWNN/WWPN stay the same.
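 * When enabled, new random values are generated each time the port is
 * brought online; see the RANDOM_WWNN code in ctlfe_onoffline() below.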
 */
#if 0
#define	RANDOM_WWNN
#endif

SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW,
    &ctlfe_dma_enabled, 0, "DMA enabled");
MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");

#define	ccb_type	ppriv_field0
/* This is only used in the ATIO */
#define	io_ptr		ppriv_ptr1

/* This is only used in the CTIO */
#define	ccb_atio	ppriv_ptr1

int			ctlfeinitialize(void);
void			ctlfeshutdown(void);
static periph_init_t	ctlfeperiphinit;
static void		ctlfeasync(void *callback_arg, uint32_t code,
			    struct cam_path *path, void *arg);
static periph_ctor_t	ctlferegister;
static periph_oninv_t	ctlfeoninvalidate;
static periph_dtor_t	ctlfecleanup;
static periph_start_t	ctlfestart;
static void		ctlfedone(struct cam_periph *periph,
			    union ccb *done_ccb);

static void		ctlfe_onoffline(void *arg, int online);
static void		ctlfe_online(void *arg);
static void		ctlfe_offline(void *arg);
static int		ctlfe_lun_enable(void *arg, struct ctl_id targ_id,
			    int lun_id);
static int		ctlfe_lun_disable(void *arg, struct ctl_id targ_id,
			    int lun_id);
static void		ctlfe_dump_sim(struct cam_sim *sim);
static void		ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
static void		ctlfe_dma_timeout(void *arg);
static void		ctlfe_datamove_done(union ctl_io *io);
static void		ctlfe_dump(void);

static struct periph_driver ctlfe_driver =
{
	ctlfeperiphinit, "ctl",
	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0,
	CAM_PERIPH_DRV_EARLY
};

static struct ctl_frontend ctlfe_frontend =
{
	.name = "camtgt",
	.init = ctlfeinitialize,
	.fe_dump = ctlfe_dump,
	.shutdown = ctlfeshutdown,
};
CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);

extern struct ctl_softc *control_softc;

void
ctlfeshutdown(void)
{
	return;
}

int
ctlfeinitialize(void)
{

	STAILQ_INIT(&ctlfe_softc_list);
	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
	periphdriver_register(&ctlfe_driver);
	return (0);
}

void
ctlfeperiphinit(void)
{
	cam_status status;

	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
	    AC_CONTRACT, ctlfeasync, NULL, NULL);
	if (status != CAM_REQ_CMP) {
		printf("ctl: Failed to attach async callback due to CAM "
		    "status 0x%x!\n", status);
	}
}

static void
ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ctlfe_softc *softc;

#ifdef CTLFEDEBUG
	printf("%s: entered\n", __func__);
#endif

	mtx_lock(&ctlfe_list_mtx);
	STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
		if (softc->path_id == xpt_path_path_id(path))
			break;
	}
	mtx_unlock(&ctlfe_list_mtx);

	/*
	 * When a new path gets registered, and it is capable of target
	 * mode, go ahead and attach.  Later on, we may need to be more
	 * selective, but for now this will be sufficient.
	 */
	switch (code) {
	case AC_PATH_REGISTERED: {
		struct ctl_port *port;
		struct ccb_pathinq *cpi;
		int retval;

		cpi = (struct ccb_pathinq *)arg;

		/* Don't attach if it doesn't support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
#ifdef CTLFEDEBUG
			printf("%s: SIM %s%d doesn't support target mode\n",
			    __func__, cpi->dev_name, cpi->unit_number);
#endif
			break;
		}

		if (softc != NULL) {
#ifdef CTLFEDEBUG
			printf("%s: CTL port for CAM path %u already exists\n",
			    __func__, xpt_path_path_id(path));
#endif
			break;
		}

#ifdef CTLFE_INIT_ENABLE
		if (ctlfe_num_targets >= ctlfe_max_targets) {
			union ccb *ccb;

			ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (ccb == NULL) {
				printf("%s: unable to malloc CCB!\n", __func__);
				return;
			}
			xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);

			ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
			ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
			ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;

			xpt_action(ccb);

			if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
			     CAM_REQ_CMP) {
				printf("%s: SIM %s%d (path id %d) initiator "
				    "enable failed with status %#x\n",
				    __func__, cpi->dev_name,
				    cpi->unit_number, cpi->ccb_h.path_id,
				    ccb->ccb_h.status);
			} else {
				printf("%s: SIM %s%d (path id %d) initiator "
				    "enable succeeded\n",
				    __func__, cpi->dev_name,
				    cpi->unit_number, cpi->ccb_h.path_id);
			}

			free(ccb, M_TEMP);

			break;
		} else {
			ctlfe_num_targets++;
		}

		printf("%s: ctlfe_num_targets = %d\n", __func__,
		    ctlfe_num_targets);
#endif /* CTLFE_INIT_ENABLE */

		/*
		 * We're in an interrupt context here, so we have to
		 * use M_NOWAIT.  Of course this means trouble if we
		 * can't allocate memory.
		 */
		softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO);
		if (softc == NULL) {
			printf("%s: unable to malloc %zd bytes for softc\n",
			    __func__, sizeof(*softc));
			return;
		}

		softc->path_id = cpi->ccb_h.path_id;
		softc->sim = xpt_path_sim(path);
		if (cpi->maxio != 0)
			softc->maxio = cpi->maxio;
		else
			softc->maxio = DFLTPHYS;
		mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF);
		STAILQ_INIT(&softc->lun_softc_list);

		port = &softc->port;
		port->frontend = &ctlfe_frontend;

		/*
		 * XXX KDM should we be more accurate here ?
		 */
		if (cpi->transport == XPORT_FC)
			port->port_type = CTL_PORT_FC;
		else if (cpi->transport == XPORT_SAS)
			port->port_type = CTL_PORT_SAS;
		else
			port->port_type = CTL_PORT_SCSI;

		/* XXX KDM what should the real number be here? */
		port->num_requested_ctl_io = 4096;
		snprintf(softc->port_name, sizeof(softc->port_name),
		    "%s%d", cpi->dev_name, cpi->unit_number);
		/*
		 * XXX KDM it would be nice to allocate storage in the
		 * frontend structure itself.
		 */
		port->port_name = softc->port_name;
		port->physical_port = cpi->bus_id;
		port->virtual_port = 0;
		port->port_online = ctlfe_online;
		port->port_offline = ctlfe_offline;
		port->onoff_arg = softc;
		port->lun_enable = ctlfe_lun_enable;
		port->lun_disable = ctlfe_lun_disable;
		port->targ_lun_arg = softc;
		port->fe_datamove = ctlfe_datamove_done;
		port->fe_done = ctlfe_datamove_done;
		/*
		 * XXX KDM the path inquiry doesn't give us the maximum
		 * number of targets supported.
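		 * Use the largest valid target ID from the path inquiry
		 * (cpi->max_target) as an approximation for now.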
		 */
		port->max_targets = cpi->max_target;
		port->max_target_id = cpi->max_target;

		/*
		 * XXX KDM need to figure out whether we're the master or
		 * slave.
		 */
#ifdef CTLFEDEBUG
		printf("%s: calling ctl_port_register() for %s%d\n",
		    __func__, cpi->dev_name, cpi->unit_number);
#endif
		retval = ctl_port_register(port, /*master_SC*/ 1);
		if (retval != 0) {
			printf("%s: ctl_port_register() failed with "
			    "error %d!\n", __func__, retval);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
			break;
		} else {
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links);
			mtx_unlock(&ctlfe_list_mtx);
		}

		break;
	}
	case AC_PATH_DEREGISTERED: {

		if (softc != NULL) {
			/*
			 * XXX KDM are we certain at this point that there
			 * are no outstanding commands for this frontend?
			 */
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc,
			    links);
			mtx_unlock(&ctlfe_list_mtx);
			ctl_port_deregister(&softc->port);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
		}
		break;
	}
	case AC_CONTRACT: {
		struct ac_contract *ac;

		ac = (struct ac_contract *)arg;

		switch (ac->contract_number) {
		case AC_CONTRACT_DEV_CHG: {
			struct ac_device_changed *dev_chg;
			int retval;

			dev_chg = (struct ac_device_changed *)ac->contract_data;

			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
			    __func__, dev_chg->wwpn, dev_chg->port,
			    xpt_path_path_id(path), dev_chg->target,
			    (dev_chg->arrived == 0) ?  "left" : "arrived");

			if (softc == NULL) {
				printf("%s: CTL port for CAM path %u not "
				    "found!\n", __func__,
				    xpt_path_path_id(path));
				break;
			}
			if (dev_chg->arrived != 0) {
				retval = ctl_add_initiator(&softc->port,
				    dev_chg->target, dev_chg->wwpn, NULL);
			} else {
				retval = ctl_remove_initiator(&softc->port,
				    dev_chg->target);
			}

			if (retval < 0) {
				printf("%s: could not %s port %d iid %u "
				    "WWPN %#jx!\n", __func__,
				    (dev_chg->arrived != 0) ? "add" : "remove",
				    softc->port.targ_port,
				    dev_chg->target,
				    (uintmax_t)dev_chg->wwpn);
			}
			break;
		}
		default:
			printf("%s: unsupported contract number %ju\n",
			    __func__, (uintmax_t)ac->contract_number);
			break;
		}
		break;
	}
	default:
		break;
	}
}

static cam_status
ctlferegister(struct cam_periph *periph, void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;
	union ccb en_lun_ccb;
	cam_status status;
	int i;

	softc = (struct ctlfe_lun_softc *)arg;
	bus_softc = softc->parent_softc;

	TAILQ_INIT(&softc->work_queue);
	softc->periph = periph;

	callout_init_mtx(&softc->dma_callout, xpt_path_mtx(periph->path),
	    /*flags*/ 0);
	periph->softc = softc;

	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
	en_lun_ccb.cel.grp6_len = 0;
	en_lun_ccb.cel.grp7_len = 0;
	en_lun_ccb.cel.enable = 1;
	xpt_action(&en_lun_ccb);
	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n",
		    __func__, en_lun_ccb.ccb_h.status);
		return (status);
	}

	status = CAM_REQ_CMP;

	for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
		union ccb *new_ccb;

		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
		    M_ZERO|M_NOWAIT);
		if (new_ccb == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
		new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		new_ccb->ccb_h.cbfcnp = ctlfedone;
		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
		xpt_action(new_ccb);
		softc->atios_sent++;
		status = new_ccb->ccb_h.status;
		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			free(new_ccb, M_CTLFE);
			break;
		}
	}

	status = cam_periph_acquire(periph);
	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: could not acquire reference "
		    "count, status = %#x\n", __func__, status);
		return (status);
	}

	if (i == 0) {
		xpt_print(periph->path, "%s: could not allocate ATIO CCBs, "
		    "status 0x%x\n", __func__, status);
		return (CAM_REQ_CMP_ERR);
	}

	for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
		union ccb *new_ccb;

		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
		    M_ZERO|M_NOWAIT);
		if (new_ccb == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
		new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		new_ccb->ccb_h.cbfcnp = ctlfedone;
		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
		xpt_action(new_ccb);
		softc->inots_sent++;
		status = new_ccb->ccb_h.status;
		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			/*
			 * Note that we don't free the CCB here.  If the
			 * status is not CAM_REQ_INPROG, then we're
			 * probably talking to a SIM that says it is
			 * target-capable but doesn't support the
			 * XPT_IMMEDIATE_NOTIFY CCB.  i.e. it supports the
			 * older API.  In that case, it'll call xpt_done()
			 * on the CCB, and we need to free it in our done
			 * routine as a result.
			 */
			break;
		}
	}
	if ((i == 0)
	 || (status != CAM_REQ_INPROG)) {
		xpt_print(periph->path, "%s: could not allocate immediate "
		    "notify CCBs, status 0x%x\n", __func__, status);
		return (CAM_REQ_CMP_ERR);
	}
	return (CAM_REQ_CMP);
}

static void
ctlfeoninvalidate(struct cam_periph *periph)
{
	union ccb en_lun_ccb;
	cam_status status;
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
	en_lun_ccb.cel.grp6_len = 0;
	en_lun_ccb.cel.grp7_len = 0;
	en_lun_ccb.cel.enable = 0;
	xpt_action(&en_lun_ccb);
	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
		    __func__, en_lun_ccb.ccb_h.status);
		/*
		 * XXX KDM what do we do now?
		 */
	}
	xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju "
	    "INOTs outstanding, %d refs\n", softc->atios_sent -
	    softc->atios_returned, softc->inots_sent -
	    softc->inots_returned, periph->refcount);

	bus_softc = softc->parent_softc;
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);
}

static void
ctlfecleanup(struct cam_periph *periph)
{
	struct ctlfe_lun_softc *softc;

	xpt_print(periph->path, "%s: Called\n", __func__);

	softc = (struct ctlfe_lun_softc *)periph->softc;

	/*
	 * XXX KDM is there anything else that needs to be done here?
	 */

	callout_stop(&softc->dma_callout);

	free(softc, M_CTLFE);
}

static void
ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
    ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len,
    u_int16_t *sglist_cnt)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_cmd_info *cmd_info;
	struct ctl_sg_entry *ctl_sglist;
	bus_dma_segment_t *cam_sglist;
	size_t off;
	int i, idx;

	cmd_info = (struct ctlfe_lun_cmd_info *)io->io_hdr.port_priv;
	bus_softc = softc->parent_softc;

	/*
	 * Set the direction, relative to the initiator.
	 */
	*flags &= ~CAM_DIR_MASK;
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		*flags |= CAM_DIR_IN;
	else
		*flags |= CAM_DIR_OUT;

	*flags &= ~CAM_DATA_MASK;
	idx = cmd_info->cur_transfer_index;
	off = cmd_info->cur_transfer_off;
	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
	if (io->scsiio.kern_sg_entries == 0) {
		/* No S/G list. */
		*data_ptr = io->scsiio.kern_data_ptr + off;
		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
			*dxfer_len = io->scsiio.kern_data_len - off;
		} else {
			*dxfer_len = bus_softc->maxio;
			cmd_info->cur_transfer_index = -1;
			cmd_info->cur_transfer_off = bus_softc->maxio;
			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
		}
		*sglist_cnt = 0;

		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_PADDR;
		else
			*flags |= CAM_DATA_VADDR;
	} else {
		/* S/G list with physical or virtual pointers. */
		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		cam_sglist = cmd_info->cam_sglist;
		*dxfer_len = 0;
		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
			cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off;
			if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) {
				cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off;
				*dxfer_len += cam_sglist[i].ds_len;
			} else {
				cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len;
				cmd_info->cur_transfer_index = idx + i;
				cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				*dxfer_len += cam_sglist[i].ds_len;
				if (ctl_sglist[i].len != 0)
					i++;
				break;
			}
			if (i == (CTLFE_MAX_SEGS - 1) &&
			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
				cmd_info->cur_transfer_index = idx + i + 1;
				cmd_info->cur_transfer_off = 0;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				i++;
				break;
			}
			off = 0;
		}
		*sglist_cnt = i;
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_SG_PADDR;
		else
			*flags |= CAM_DATA_SG;
		*data_ptr = (uint8_t *)cam_sglist;
	}
}

static void
ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ccb_hdr *ccb_h;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	softc->ccbs_alloced++;

	start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT;

	ccb_h = TAILQ_FIRST(&softc->work_queue);
	if (ccb_h == NULL) {
		softc->ccbs_freed++;
		xpt_release_ccb(start_ccb);
	} else {
		struct ccb_accept_tio *atio;
		struct ccb_scsiio *csio;
		uint8_t *data_ptr;
		uint32_t dxfer_len;
		ccb_flags flags;
		union ctl_io *io;
		uint8_t scsi_status;

		/* Take the ATIO off the work queue */
		TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
		atio = (struct ccb_accept_tio *)ccb_h;
		io = (union ctl_io *)ccb_h->io_ptr;
		csio = &start_ccb->csio;

		flags = atio->ccb_h.flags &
		    (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		if ((io == NULL)
		 || (io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
			/*
			 * We're done, send status back.
			 */
			flags |= CAM_SEND_STATUS;
			if (io == NULL) {
				scsi_status = SCSI_STATUS_BUSY;
				csio->sense_len = 0;
			} else if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
			    (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;

				/*
				 * If this command was aborted, we don't
				 * need to send status back to the SIM.
				 * Just free the CTIO and ctl_io, and
				 * recycle the ATIO back to the SIM.
				 */
				xpt_print(periph->path, "%s: aborted "
				    "command 0x%04x discarded\n",
				    __func__, io->scsiio.tag_num);
				ctl_free_io(io);
				/*
				 * For a wildcard attachment, commands can
				 * come in with a specific target/lun.  Reset
				 * the target and LUN fields back to the
				 * wildcard values before we send them back
				 * down to the SIM.  The SIM has a wildcard
				 * LUN enabled, not whatever target/lun
				 * these happened to be.
				 */
				if (softc->flags & CTLFE_LUN_WILDCARD) {
					atio->ccb_h.target_id =
					    CAM_TARGET_WILDCARD;
					atio->ccb_h.target_lun =
					    CAM_LUN_WILDCARD;
				}

				if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
					cam_release_devq(periph->path,
					    /*relsim_flags*/0,
					    /*reduction*/0,
					    /*timeout*/0,
					    /*getcount_only*/0);
					atio->ccb_h.status &= ~CAM_DEV_QFRZN;
				}

				ccb_h = TAILQ_FIRST(&softc->work_queue);

				if (atio->ccb_h.func_code !=
				    XPT_ACCEPT_TARGET_IO) {
					xpt_print(periph->path, "%s: func_code "
					    "is %#x\n", __func__,
					    atio->ccb_h.func_code);
				}
				start_ccb->ccb_h.func_code = XPT_ABORT;
				start_ccb->cab.abort_ccb = (union ccb *)atio;

				/* Tell the SIM that we've aborted this ATIO */
				xpt_action(start_ccb);
				softc->ccbs_freed++;
				xpt_release_ccb(start_ccb);

				/*
				 * Send the ATIO back down to the SIM.
				 */
				xpt_action((union ccb *)atio);
				softc->atios_sent++;

				/*
				 * If we still have work to do, ask for
				 * another CCB.  Otherwise, deactivate our
				 * callout.
				 */
				if (ccb_h != NULL)
					xpt_schedule(periph, /*priority*/ 1);
				else
					callout_stop(&softc->dma_callout);

				return;
			} else {
				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
				scsi_status = io->scsiio.scsi_status;
				csio->sense_len = io->scsiio.sense_len;
			}
			data_ptr = NULL;
			dxfer_len = 0;
			if (io == NULL) {
				printf("%s: tag %04x io is NULL\n", __func__,
				    atio->tag_id);
			} else {
#ifdef CTLFEDEBUG
				printf("%s: tag %04x status %x\n", __func__,
				    atio->tag_id, io->io_hdr.status);
#endif
			}
			csio->sglist_cnt = 0;
			if (csio->sense_len != 0) {
				csio->sense_data = io->scsiio.sense_data;
				flags |= CAM_SEND_SENSE;
			} else if (scsi_status == SCSI_STATUS_CHECK_COND) {
				xpt_print(periph->path, "%s: check condition "
				    "with no sense\n", __func__);
			}
		} else {
			struct ctlfe_lun_cmd_info *cmd_info;

			/*
			 * Datamove call, we need to setup the S/G list.
			 */

			cmd_info = (struct ctlfe_lun_cmd_info *)
			    io->io_hdr.port_priv;

			KASSERT(sizeof(*cmd_info) < CTL_PORT_PRIV_SIZE,
			    ("%s: sizeof(struct ctlfe_lun_cmd_info) %zd < "
			    "CTL_PORT_PRIV_SIZE %d", __func__,
			    sizeof(*cmd_info), CTL_PORT_PRIV_SIZE));
			io->io_hdr.flags &= ~CTL_FLAG_DMA_QUEUED;

			/*
			 * Need to zero this, in case it has been used for
			 * a previous datamove for this particular I/O.
			 */
			bzero(cmd_info, sizeof(*cmd_info));
			scsi_status = 0;

			csio->cdb_len = atio->cdb_len;

			ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
			    &csio->sglist_cnt);

			io->scsiio.ext_data_filled += dxfer_len;

			if (io->scsiio.ext_data_filled >
			    io->scsiio.kern_total_len) {
				xpt_print(periph->path, "%s: tag 0x%04x "
				    "fill len %u > total %u\n",
				    __func__, io->scsiio.tag_num,
				    io->scsiio.ext_data_filled,
				    io->scsiio.kern_total_len);
			}
		}

#ifdef CTLFEDEBUG
		printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
		    (flags & CAM_SEND_STATUS) ? "done" : "datamove",
		    atio->tag_id, flags, data_ptr, dxfer_len);
#endif

		/*
		 * Valid combinations:
		 *  - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0,
		 *    sglist_cnt = 0
		 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0,
		 *    sglist_cnt = 0
		 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0,
		 *    sglist_cnt != 0
		 */
#ifdef CTLFEDEBUG
		if (((flags & CAM_SEND_STATUS)
		  && (((flags & CAM_DATA_SG) != 0)
		   || (dxfer_len != 0)
		   || (csio->sglist_cnt != 0)))
		 || (((flags & CAM_SEND_STATUS) == 0)
		  && (dxfer_len == 0))
		 || ((flags & CAM_DATA_SG)
		  && (csio->sglist_cnt == 0))
		 || (((flags & CAM_DATA_SG) == 0)
		  && (csio->sglist_cnt != 0))) {
			printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
			    "%d sg %u\n", __func__, atio->tag_id,
			    atio->cdb_io.cdb_bytes[0], flags, dxfer_len,
			    csio->sglist_cnt);
			if (io != NULL) {
				printf("%s: tag %04x io status %#x\n", __func__,
				    atio->tag_id, io->io_hdr.status);
			} else {
				printf("%s: tag %04x no associated io\n",
				    __func__, atio->tag_id);
			}
		}
#endif
		cam_fill_ctio(csio,
		    /*retries*/ 2,
		    ctlfedone,
		    flags,
		    (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0,
		    atio->tag_id,
		    atio->init_id,
		    scsi_status,
		    /*data_ptr*/ data_ptr,
		    /*dxfer_len*/ dxfer_len,
		    /*timeout*/ 5 * 1000);
		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
		start_ccb->ccb_h.ccb_atio = atio;
		if (((flags & CAM_SEND_STATUS) == 0)
		 && (io != NULL))
			io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;

		softc->ctios_sent++;

		cam_periph_unlock(periph);
		xpt_action(start_ccb);
		cam_periph_lock(periph);

		if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			cam_release_devq(periph->path,
			    /*relsim_flags*/0,
			    /*reduction*/0,
			    /*timeout*/0,
			    /*getcount_only*/0);
			atio->ccb_h.status &= ~CAM_DEV_QFRZN;
		}

		ccb_h = TAILQ_FIRST(&softc->work_queue);
	}
	/*
	 * If we still have work to do, ask for another CCB.  Otherwise,
	 * deactivate our callout.
	 */
	if (ccb_h != NULL)
		xpt_schedule(periph, /*priority*/ 1);
	else
		callout_stop(&softc->dma_callout);
}

static void
ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
{
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	switch (ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
		softc->atios_returned++;
		break;
	case XPT_IMMEDIATE_NOTIFY:
	case XPT_NOTIFY_ACKNOWLEDGE:
		softc->inots_returned++;
		break;
	default:
		break;
	}

	free(ccb, M_CTLFE);

	KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: "
	    "atios_returned %ju > atios_sent %ju", __func__,
	    softc->atios_returned, softc->atios_sent));
	KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: "
	    "inots_returned %ju > inots_sent %ju", __func__,
	    softc->inots_returned, softc->inots_sent));

	/*
	 * If we have received all of our CCBs, we can release our
	 * reference on the peripheral driver.  It will probably go away
	 * now.
	 */
	if ((softc->atios_returned == softc->atios_sent)
	 && (softc->inots_returned == softc->inots_sent)) {
		cam_periph_release_locked(periph);
	}
}

static int
ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
{
	uint64_t lba;
	uint32_t num_blocks, nbc;
	uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER) ?
	    atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes;

	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */

	switch (cmdbyt[0]) {
	case READ_6:
	case WRITE_6:
	{
		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
		lba = scsi_3btoul(cdb->addr);
		lba &= 0x1fffff;
		num_blocks = cdb->length;
		if (num_blocks == 0)
			num_blocks = 256;
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto3b(lba, cdb->addr);
		cdb->length = num_blocks;
		break;
	}
	case READ_10:
	case WRITE_10:
	{
		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto2b(num_blocks, cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12:
	{
		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16:
	case WRITE_ATOMIC_16:
	{
		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_u64to8b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	default:
		return -1;
	}
	return (0);
}

static void
ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_softc *bus_softc;
	struct ccb_accept_tio *atio = NULL;
	union ctl_io *io = NULL;
	struct mtx *mtx;

	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
#ifdef CTLFE_DEBUG
	printf("%s: entered, func_code = %#x, type = %#lx\n", __func__,
	    done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type);
#endif

	softc = (struct ctlfe_lun_softc *)periph->softc;
	bus_softc = softc->parent_softc;
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	/*
	 * If the peripheral is invalid, ATIOs and immediate notify CCBs
	 * need to be freed.  Most of the ATIOs and INOTs that come back
	 * will be CCBs that are being returned from the SIM as a result of
	 * our disabling the LUN.
	 *
	 * Other CCB types are handled in their respective cases below.
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		switch (done_ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMEDIATE_NOTIFY:
		case XPT_NOTIFY_ACKNOWLEDGE:
			ctlfe_free_ccb(periph, done_ccb);
			goto out;
		default:
			break;
		}
	}
	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO: {

		atio = &done_ccb->atio;

		softc->atios_returned++;

	resubmit:
		/*
		 * Allocate a ctl_io, pass it to CTL, and wait for the
		 * datamove or done.
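		 * The ctl_io is allocated from the pool this port
		 * registered with CTL (bus_softc->port.ctl_pool_ref).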
		 */
		io = ctl_alloc_io(bus_softc->port.ctl_pool_ref);
		if (io == NULL) {
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;

			printf("%s: ctl_alloc_io failed!\n", __func__);

			/*
			 * XXX KDM need to set SCSI_STATUS_BUSY, but there
			 * is no field in the ATIO structure to do that,
			 * and we aren't able to allocate a ctl_io here.
			 * What to do?
			 */
			atio->sense_len = 0;
			done_ccb->ccb_h.io_ptr = NULL;
			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
			    periph_links.tqe);
			xpt_schedule(periph, /*priority*/ 1);
			break;
		}
		mtx_unlock(mtx);
		ctl_zero_io(io);

		/* Save pointers on both sides */
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
		done_ccb->ccb_h.io_ptr = io;

		/*
		 * Only SCSI I/O comes down this path, resets, etc. come
		 * down the immediate notify path below.
		 */
		io->io_hdr.io_type = CTL_IO_SCSI;
		io->io_hdr.nexus.initid.id = atio->init_id;
		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
		io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id;
		io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
		io->scsiio.tag_num = atio->tag_id;
		switch (atio->tag_action) {
		case CAM_TAG_ACTION_NONE:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			break;
		case MSG_SIMPLE_TASK:
			io->scsiio.tag_type = CTL_TAG_SIMPLE;
			break;
		case MSG_HEAD_OF_QUEUE_TASK:
			io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
			break;
		case MSG_ORDERED_TASK:
			io->scsiio.tag_type = CTL_TAG_ORDERED;
			break;
		case MSG_ACA_TASK:
			io->scsiio.tag_type = CTL_TAG_ACA;
			break;
		default:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			printf("%s: unhandled tag type %#x!!\n", __func__,
			    atio->tag_action);
			break;
		}
		if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
			    __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
		}
		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
		bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
		    io->scsiio.cdb_len);

#ifdef CTLFEDEBUG
		printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__,
		    (uintmax_t)io->io_hdr.nexus.initid.id,
		    io->io_hdr.nexus.targ_port,
		    (uintmax_t)io->io_hdr.nexus.targ_target.id,
		    io->io_hdr.nexus.targ_lun,
		    io->scsiio.tag_num, io->scsiio.cdb[0]);
#endif

		ctl_queue(io);
		return;
	}
	case XPT_CONT_TARGET_IO: {
		int srr = 0;
		uint32_t srr_off = 0;

		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
		io = (union ctl_io *)atio->ccb_h.io_ptr;

		softc->ctios_returned++;
#ifdef CTLFEDEBUG
		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
		    __func__, atio->tag_id, done_ccb->ccb_h.flags);
#endif
		/*
		 * Handle the SRR case, where the data pointer is pushed back.
		 */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
		    && done_ccb->csio.msg_ptr != NULL
		    && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
		    && done_ccb->csio.msg_ptr[1] == 5
		    && done_ccb->csio.msg_ptr[2] == 0) {
			srr = 1;
			srr_off =
			    (done_ccb->csio.msg_ptr[3] << 24)
			    | (done_ccb->csio.msg_ptr[4] << 16)
			    | (done_ccb->csio.msg_ptr[5] << 8)
			    | (done_ccb->csio.msg_ptr[6]);
		}

		if (srr && (done_ccb->ccb_h.flags & CAM_SEND_STATUS)) {
			/*
			 * If status was being sent, the back end data is now
			 * history.  Hack it up and resubmit a new command with
			 * the CDB adjusted.  If the SIM does the right thing,
			 * all of the resid math should work.
			 */
			softc->ccbs_freed++;
			xpt_release_ccb(done_ccb);
			ctl_free_io(io);
			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
				done_ccb = (union ccb *)atio;
				goto resubmit;
			}
			/*
			 * Fall through to doom....
			 */
		} else if (srr) {
			/*
			 * If we have an srr and we're still sending data, we
			 * should be able to adjust offsets and cycle again.
			 */
			io->scsiio.kern_rel_offset =
			    io->scsiio.ext_data_filled = srr_off;
			io->scsiio.ext_data_len = io->scsiio.kern_total_len -
			    io->scsiio.kern_rel_offset;
			softc->ccbs_freed++;
			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
			xpt_release_ccb(done_ccb);
			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
			    periph_links.tqe);
			xpt_schedule(periph, /*priority*/ 1);
			break;
		}

		/*
		 * If we were sending status back to the initiator, free up
		 * resources.  If we were doing a datamove, call the
		 * datamove done routine.
		 */
		if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) {
			softc->ccbs_freed++;
			xpt_release_ccb(done_ccb);
			ctl_free_io(io);
			/*
			 * For a wildcard attachment, commands can come in
			 * with a specific target/lun.  Reset the target
			 * and LUN fields back to the wildcard values before
			 * we send them back down to the SIM.  The SIM has
			 * a wildcard LUN enabled, not whatever target/lun
			 * these happened to be.
			 */
			if (softc->flags & CTLFE_LUN_WILDCARD) {
				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
			}
			if (periph->flags & CAM_PERIPH_INVALID) {
				ctlfe_free_ccb(periph, (union ccb *)atio);
			} else {
				softc->atios_sent++;
				mtx_unlock(mtx);
				xpt_action((union ccb *)atio);
				return;
			}
		} else {
			struct ctlfe_lun_cmd_info *cmd_info;
			struct ccb_scsiio *csio;

			csio = &done_ccb->csio;
			cmd_info = (struct ctlfe_lun_cmd_info *)
			    io->io_hdr.port_priv;

			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;

			io->scsiio.ext_data_len += csio->dxfer_len;
			if (io->scsiio.ext_data_len >
			    io->scsiio.kern_total_len) {
				xpt_print(periph->path, "%s: tag 0x%04x "
				    "done len %u > total %u sent %u\n",
				    __func__, io->scsiio.tag_num,
				    io->scsiio.ext_data_len,
				    io->scsiio.kern_total_len,
				    io->scsiio.ext_data_filled);
			}
			/*
			 * Translate CAM status to CTL status.  Success
			 * does not change the overall ctl_io status.  In
			 * that case we just set port_status to 0.  If we
			 * have a failure, though, set a data phase error
			 * for the overall ctl_io.
			 */
			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
			case CAM_REQ_CMP:
				io->io_hdr.port_status = 0;
				break;
			default:
				/*
				 * XXX KDM we probably need to figure out a
				 * standard set of errors that the SIM
				 * drivers should return in the event of a
				 * data transfer failure.  A data phase
				 * error will at least point the user to a
				 * data transfer error of some sort.
				 * Hopefully the SIM printed out some
				 * additional information to give the user
				 * a clue what happened.
				 */
				io->io_hdr.port_status = 0xbad1;
				ctl_set_data_phase_error(&io->scsiio);
				/*
				 * XXX KDM figure out residual.
				 */
				break;
			}
			/*
			 * If we had to break this S/G list into multiple
			 * pieces, figure out where we are in the list, and
			 * continue sending pieces if necessary.
			 */
			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
			 && (io->io_hdr.port_status == 0)) {
				ccb_flags flags;
				uint8_t scsi_status;
				uint8_t *data_ptr;
				uint32_t dxfer_len;

				flags = atio->ccb_h.flags &
				    (CAM_DIS_DISCONNECT|
				     CAM_TAG_ACTION_VALID);

				ctlfedata(softc, io, &flags, &data_ptr,
				    &dxfer_len, &csio->sglist_cnt);

				scsi_status = 0;

				if (((flags & CAM_SEND_STATUS) == 0)
				 && (dxfer_len == 0)) {
					printf("%s: tag %04x no status or "
					    "len cdb = %02x\n", __func__,
					    atio->tag_id,
					    atio->cdb_io.cdb_bytes[0]);
					printf("%s: tag %04x io status %#x\n",
					    __func__, atio->tag_id,
					    io->io_hdr.status);
				}

				cam_fill_ctio(csio,
				    /*retries*/ 2,
				    ctlfedone,
				    flags,
				    (flags & CAM_TAG_ACTION_VALID) ?
				    MSG_SIMPLE_Q_TAG : 0,
				    atio->tag_id,
				    atio->init_id,
				    scsi_status,
				    /*data_ptr*/ data_ptr,
				    /*dxfer_len*/ dxfer_len,
				    /*timeout*/ 5 * 1000);

				csio->ccb_h.flags |= CAM_UNLOCKED;
				csio->resid = 0;
				csio->ccb_h.ccb_atio = atio;
				io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
				softc->ctios_sent++;
				mtx_unlock(mtx);
				xpt_action((union ccb *)csio);
			} else {
				/*
				 * Release the CTIO.  The ATIO will be sent back
				 * down to the SIM once we send status.
				 */
				softc->ccbs_freed++;
				xpt_release_ccb(done_ccb);
				mtx_unlock(mtx);

				/* Call the backend move done callback */
				io->scsiio.be_move_done(io);
			}
			return;
		}
		break;
	}
	case XPT_IMMEDIATE_NOTIFY: {
		union ctl_io *io;
		struct ccb_immediate_notify *inot;
		cam_status status;
		int frozen;

		inot = &done_ccb->cin1;

		softc->inots_returned++;

		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;

		printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x "
		    "seq %#x\n", __func__, inot->ccb_h.status,
		    inot->tag_id, inot->seq_id);

		io = ctl_alloc_io(bus_softc->port.ctl_pool_ref);
		if (io != NULL) {
			int send_ctl_io;

			send_ctl_io = 1;

			ctl_zero_io(io);
			io->io_hdr.io_type = CTL_IO_TASK;
			io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
			inot->ccb_h.io_ptr = io;
			io->io_hdr.nexus.initid.id = inot->initiator_id;
			io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
			io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id;
			io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
			/* XXX KDM should this be the tag_id? */
			io->taskio.tag_num = inot->seq_id;

			status = inot->ccb_h.status & CAM_STATUS_MASK;
			switch (status) {
			case CAM_SCSI_BUS_RESET:
				io->taskio.task_action = CTL_TASK_BUS_RESET;
				break;
			case CAM_BDR_SENT:
				io->taskio.task_action = CTL_TASK_TARGET_RESET;
				break;
			case CAM_MESSAGE_RECV:
				switch (inot->arg) {
				case MSG_ABORT_TASK_SET:
					io->taskio.task_action =
					    CTL_TASK_ABORT_TASK_SET;
					break;
				case MSG_TARGET_RESET:
					io->taskio.task_action =
					    CTL_TASK_TARGET_RESET;
					break;
				case MSG_ABORT_TASK:
					io->taskio.task_action =
					    CTL_TASK_ABORT_TASK;
					break;
				case MSG_LOGICAL_UNIT_RESET:
					io->taskio.task_action =
					    CTL_TASK_LUN_RESET;
					break;
				case MSG_CLEAR_TASK_SET:
					io->taskio.task_action =
					    CTL_TASK_CLEAR_TASK_SET;
					break;
				case MSG_CLEAR_ACA:
					io->taskio.task_action =
					    CTL_TASK_CLEAR_ACA;
					break;
				case MSG_NOOP:
					send_ctl_io = 0;
					break;
				default:
					xpt_print(periph->path, "%s: "
					    "unsupported message 0x%x\n",
					    __func__, inot->arg);
					send_ctl_io = 0;
					break;
				}
				break;
			case CAM_REQ_ABORTED:
				/*
				 * This request was sent back by the driver.
				 * XXX KDM what do we do here?
				 */
				send_ctl_io = 0;
				break;
			case CAM_REQ_INVALID:
			case CAM_PROVIDE_FAIL:
			default:
				/*
				 * We should only get here if we're talking
				 * to a SIM that is target capable but
				 * supports the old API.  In that case, we
				 * need to just free the CCB.  If we actually
				 * send a notify acknowledge, it will send
				 * that back with an error as well.
				 */

				if ((status != CAM_REQ_INVALID)
				 && (status != CAM_PROVIDE_FAIL))
					xpt_print(periph->path, "%s: "
					    "unsupported CAM status "
					    "0x%x\n", __func__, status);

				ctl_free_io(io);
				ctlfe_free_ccb(periph, done_ccb);

				goto out;
			}
			if (send_ctl_io != 0) {
				ctl_queue(io);
			} else {
				ctl_free_io(io);
				done_ccb->ccb_h.status = CAM_REQ_INPROG;
				done_ccb->ccb_h.func_code =
				    XPT_NOTIFY_ACKNOWLEDGE;
				xpt_action(done_ccb);
			}
		} else {
			xpt_print(periph->path, "%s: could not allocate "
			    "ctl_io for immediate notify!\n", __func__);
			/* requeue this to the adapter */
			done_ccb->ccb_h.status = CAM_REQ_INPROG;
			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
			xpt_action(done_ccb);
		}

		if (frozen != 0) {
			cam_release_devq(periph->path,
			    /*relsim_flags*/ 0,
			    /*opening reduction*/ 0,
			    /*timeout*/ 0,
			    /*getcount_only*/ 0);
		}
		break;
	}
	case XPT_NOTIFY_ACKNOWLEDGE:
		/*
		 * Queue this back down to the SIM as an immediate notify.
		 */
		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		xpt_action(done_ccb);
		softc->inots_sent++;
		break;
	case XPT_SET_SIM_KNOB:
	case XPT_GET_SIM_KNOB:
		break;
	default:
		panic("%s: unexpected CCB type %#x", __func__,
		    done_ccb->ccb_h.func_code);
		break;
	}

out:
	mtx_unlock(mtx);
}

static void
ctlfe_onoffline(void *arg, int online)
{
	struct ctlfe_softc *bus_softc;
	union ccb *ccb;
	cam_status status;
	struct cam_path *path;
	int set_wwnn;

	bus_softc = (struct ctlfe_softc *)arg;

	set_wwnn = 0;

	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path!\n", __func__);
		return;
	}
	ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO);
	if (ccb == NULL) {
		printf("%s: unable to malloc CCB!\n", __func__);
		xpt_free_path(path);
		return;
	}
	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);

	/*
	 * Copan WWN format:
	 *
	 * Bits 63-60:	0x5		NAA, IEEE registered name
	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
	 * Bits 35-12:	Copan SSN (Sequential Serial Number)
	 * Bits 11-8:	Type of port:
	 *			1 == N-Port
	 *			2 == F-Port
	 *			3 == NL-Port
	 * Bits 7-0:	0 == Node Name, >0 == Port Number
	 */

	if (online != 0) {

		ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;

		xpt_action(ccb);

		if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0) {
#ifdef RANDOM_WWNN
			uint64_t random_bits;
#endif

			printf("%s: %s current WWNN %#jx\n", __func__,
			    bus_softc->port_name,
			    ccb->knob.xport_specific.fc.wwnn);
			printf("%s: %s current WWPN %#jx\n", __func__,
			    bus_softc->port_name,
			    ccb->knob.xport_specific.fc.wwpn);

#ifdef RANDOM_WWNN
			arc4rand(&random_bits, sizeof(random_bits), 0);
#endif

			/*
			 * XXX KDM this is a bit of a kludge for now.  We
			 * take the current WWNN/WWPN from the card, and
			 * replace the company identifier and the NL-Port
			 * indicator and the port number (for the WWPN).
			 * This should be replaced later with ddb_GetWWNN,
			 * or possibly a more centralized scheme.  (It
			 * would be nice to have the WWNN/WWPN for each
			 * port stored in the ctl_port structure.)
			 */
#ifdef RANDOM_WWNN
			ccb->knob.xport_specific.fc.wwnn =
			    (random_bits &
			    0x0000000fffffff00ULL) |
			    /* Company ID */ 0x5000ED5000000000ULL |
			    /* NL-Port */ 0x0300;
			ccb->knob.xport_specific.fc.wwpn =
			    (random_bits &
			    0x0000000fffffff00ULL) |
			    /* Company ID */ 0x5000ED5000000000ULL |
			    /* NL-Port */ 0x3000 |
			    /* Port Num */ (bus_softc->port.targ_port & 0xff);

			/*
			 * This is a bit of an API break/reversal, but if
			 * we're doing the random WWNN that's a little
			 * different anyway.  So record what we're actually
			 * using with the frontend code so it's reported
			 * accurately.
			 */
			ctl_port_set_wwns(&bus_softc->port,
			    true, ccb->knob.xport_specific.fc.wwnn,
			    true, ccb->knob.xport_specific.fc.wwpn);
			set_wwnn = 1;
#else /* RANDOM_WWNN */
			/*
			 * If the user has specified a WWNN/WWPN, send them
			 * down to the SIM.  Otherwise, record what the SIM
			 * has reported.
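			 * The reported values are stored in the ctl_port
			 * via ctl_port_set_wwns() so that CTL shows the
			 * addresses actually in use.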
			 */
			if ((bus_softc->port.wwnn != 0)
			 && (bus_softc->port.wwpn != 0)) {
				ccb->knob.xport_specific.fc.wwnn =
				    bus_softc->port.wwnn;
				ccb->knob.xport_specific.fc.wwpn =
				    bus_softc->port.wwpn;
				set_wwnn = 1;
			} else {
				ctl_port_set_wwns(&bus_softc->port,
				    true, ccb->knob.xport_specific.fc.wwnn,
				    true, ccb->knob.xport_specific.fc.wwpn);
			}
#endif /* RANDOM_WWNN */

			if (set_wwnn != 0) {
				printf("%s: %s new WWNN %#jx\n", __func__,
				    bus_softc->port_name,
				    ccb->knob.xport_specific.fc.wwnn);
				printf("%s: %s new WWPN %#jx\n", __func__,
				    bus_softc->port_name,
				    ccb->knob.xport_specific.fc.wwpn);
			}
		} else {
			printf("%s: %s has no valid WWNN/WWPN\n", __func__,
			    bus_softc->port_name);
		}
	}
	ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
	ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
	if (set_wwnn != 0)
		ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;

	if (online != 0)
		ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET;
	else
		ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE;

	xpt_action(ccb);

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		printf("%s: SIM %s (path id %d) target %s failed with "
		    "status %#x\n",
		    __func__, bus_softc->port_name, bus_softc->path_id,
		    (online != 0) ? "enable" : "disable",
		    ccb->ccb_h.status);
	} else {
		printf("%s: SIM %s (path id %d) target %s succeeded\n",
		    __func__, bus_softc->port_name, bus_softc->path_id,
		    (online != 0) ? "enable" : "disable");
	}

	xpt_free_path(path);

	free(ccb, M_TEMP);

	return;
}

static void
ctlfe_online(void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct cam_path *path;
	cam_status status;
	struct ctlfe_lun_softc *lun_softc;
	struct cam_periph *periph;

	bus_softc = (struct ctlfe_softc *)arg;

	/*
	 * Create the wildcard LUN before bringing the port online.
	 */
	status = xpt_create_path(&path, /*periph*/ NULL,
	    bus_softc->path_id, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path for wildcard periph\n",
		    __func__);
		return;
	}

	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE,
	    M_NOWAIT | M_ZERO);
	if (lun_softc == NULL) {
		xpt_print(path, "%s: unable to allocate softc for "
		    "wildcard periph\n", __func__);
		xpt_free_path(path);
		return;
	}

	xpt_path_lock(path);
	periph = cam_periph_find(path, "ctl");
	if (periph != NULL) {
		/* We've already got a periph, no need to alloc a new one. */
		xpt_path_unlock(path);
		xpt_free_path(path);
		free(lun_softc, M_CTLFE);
		return;
	}
	lun_softc->parent_softc = bus_softc;
	lun_softc->flags |= CTLFE_LUN_WILDCARD;

	status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate,
	    ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path,
	    ctlfeasync, 0, lun_softc);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		const struct cam_status_entry *entry;

		entry = cam_fetch_status_entry(status);
		printf("%s: CAM error %s (%#x) returned from "
		    "cam_periph_alloc()\n", __func__, (entry != NULL) ?
		    entry->status_text : "Unknown", status);
		free(lun_softc, M_CTLFE);
	} else {
		mtx_lock(&bus_softc->lun_softc_mtx);
		STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, lun_softc, links);
		mtx_unlock(&bus_softc->lun_softc_mtx);
		ctlfe_onoffline(arg, /*online*/ 1);
	}

	xpt_path_unlock(path);
	xpt_free_path(path);
}

static void
ctlfe_offline(void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct cam_path *path;
	cam_status status;
	struct cam_periph *periph;

	bus_softc = (struct ctlfe_softc *)arg;

	/*
	 * Disable the wildcard LUN for this port now that we have taken
	 * the port offline.
	 */
	status = xpt_create_path(&path, /*periph*/ NULL,
	    bus_softc->path_id, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path for wildcard periph\n",
		    __func__);
		return;
	}

	xpt_path_lock(path);

	ctlfe_onoffline(arg, /*online*/ 0);

	if ((periph = cam_periph_find(path, "ctl")) != NULL)
		cam_periph_invalidate(periph);

	xpt_path_unlock(path);
	xpt_free_path(path);
}

/*
 * This will get called to enable a LUN on every bus that is attached to
 * CTL.  So we only need to create a path/periph for this particular bus.
 */
static int
ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;
	struct cam_path *path;
	struct cam_periph *periph;
	cam_status status;

	bus_softc = (struct ctlfe_softc *)arg;

	status = xpt_create_path(&path, /*periph*/ NULL,
	    bus_softc->path_id, targ_id.id, lun_id);
	/* XXX KDM need some way to return status to CTL here? */
	if (status != CAM_REQ_CMP) {
		printf("%s: could not create path, status %#x\n", __func__,
		    status);
		return (1);
	}

	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
	xpt_path_lock(path);
	periph = cam_periph_find(path, "ctl");
	if (periph != NULL) {
		/* We've already got a periph, no need to alloc a new one. */
		xpt_path_unlock(path);
		xpt_free_path(path);
		free(softc, M_CTLFE);
		return (0);
	}
	softc->parent_softc = bus_softc;

	status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate,
	    ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path,
	    ctlfeasync, 0, softc);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		const struct cam_status_entry *entry;

		entry = cam_fetch_status_entry(status);
		printf("%s: CAM error %s (%#x) returned from "
		    "cam_periph_alloc()\n", __func__, (entry != NULL) ?
		    entry->status_text : "Unknown", status);
		free(softc, M_CTLFE);
	} else {
		mtx_lock(&bus_softc->lun_softc_mtx);
		STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
		mtx_unlock(&bus_softc->lun_softc_mtx);
	}

	xpt_path_unlock(path);
	xpt_free_path(path);
	return (0);
}

/*
 * This will get called when the user removes a LUN to disable that LUN
 * on every bus that is attached to CTL.
 */
static int
ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
	struct ctlfe_softc *softc;
	struct ctlfe_lun_softc *lun_softc;

	softc = (struct ctlfe_softc *)arg;

	mtx_lock(&softc->lun_softc_mtx);
	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
		struct cam_path *path;

		path = lun_softc->periph->path;

		if ((xpt_path_target_id(path) == targ_id.id)
		 && (xpt_path_lun_id(path) == lun_id)) {
			break;
		}
	}
	if (lun_softc == NULL) {
		mtx_unlock(&softc->lun_softc_mtx);
		printf("%s: can't find target %d lun %d\n", __func__,
		    targ_id.id, lun_id);
		return (1);
	}
	cam_periph_acquire(lun_softc->periph);
	mtx_unlock(&softc->lun_softc_mtx);

	cam_periph_lock(lun_softc->periph);
	cam_periph_invalidate(lun_softc->periph);
	cam_periph_unlock(lun_softc->periph);
	cam_periph_release(lun_softc->periph);
	return (0);
}

static void
ctlfe_dump_sim(struct cam_sim *sim)
{

	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
	    sim->sim_name, sim->unit_number,
	    sim->max_tagged_dev_openings, sim->max_dev_openings);
	printf("\n");
}

/*
 * Assumes that the SIM lock is held.
 */
static void
ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
{
	struct ccb_hdr *hdr;
	struct cam_periph *periph;
	int num_items;

	periph = softc->periph;
	num_items = 0;

	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
		union ctl_io *io;

		io = hdr->io_ptr;

		num_items++;

		/*
		 * This can happen when we get an ATIO but can't allocate
		 * a ctl_io.  See the XPT_ACCEPT_TARGET_IO case in ctlfedone().
		 */
		if (io == NULL) {
			struct ccb_scsiio *csio;

			csio = (struct ccb_scsiio *)hdr;

			xpt_print(periph->path, "CCB %#x ctl_io allocation "
			    "failed\n", csio->tag_id);
			continue;
		}

		/*
		 * Only regular SCSI I/O is put on the work
		 * queue, so we can print sense here.  There may be no
		 * sense if it's on the queue for a DMA, but this serves to
		 * print out the CCB as well.
		 *
		 * XXX KDM switch this over to scsi_sense_print() when
		 * CTL is merged in with CAM.
		 */
		ctl_io_error_print(io, NULL);

		/*
		 * We're sending status back to the
		 * initiator, so we're on the queue waiting
		 * for a CTIO to do that.
		 */
		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
			continue;

		/*
		 * Otherwise, we're on the queue waiting to
		 * do a data transfer.
		 */
		xpt_print(periph->path, "Total %u, Current %u, Resid %u\n",
		    io->scsiio.kern_total_len, io->scsiio.kern_data_len,
		    io->scsiio.kern_data_resid);
	}

	xpt_print(periph->path, "%d requests total waiting for CCBs\n",
	    num_items);
	xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
	    "freed)\n", (uintmax_t)(softc->ccbs_alloced -
	    softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
	    (uintmax_t)softc->ccbs_freed);
	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
	    "returned)\n", (uintmax_t)(softc->ctios_sent -
	    softc->ctios_returned), softc->ctios_sent,
	    softc->ctios_returned);
}

/*
 * This function is called when we fail to get a CCB for a DMA or status return
 * to the initiator within the specified time period.
 *
 * The callout code should ensure that we hold the sim mutex here.
 */
static void
ctlfe_dma_timeout(void *arg)
{
	struct ctlfe_lun_softc *softc;
	struct cam_periph *periph;
	struct cam_sim *sim;
	int num_queued;

	softc = (struct ctlfe_lun_softc *)arg;
	periph = softc->periph;
	sim = xpt_path_sim(periph->path);
	num_queued = 0;

	/*
	 * Nothing to do...
	 */
	if (TAILQ_FIRST(&softc->work_queue) == NULL) {
		xpt_print(periph->path, "TIMEOUT triggered after %d "
		    "seconds, but nothing on work queue??\n",
		    CTLFE_DMA_TIMEOUT);
		return;
	}

	xpt_print(periph->path, "TIMEOUT (%d seconds) waiting for DMA to "
	    "start\n", CTLFE_DMA_TIMEOUT);

	ctlfe_dump_queue(softc);

	ctlfe_dump_sim(sim);

	xpt_print(periph->path, "calling xpt_schedule() to attempt to "
	    "unstick our queue\n");

	xpt_schedule(periph, /*priority*/ 1);

	xpt_print(periph->path, "xpt_schedule() call complete\n");
}

/*
 * Datamove/done routine called by CTL.  Put ourselves on the queue to
 * receive a CCB from CAM so we can queue the continue I/O request down
 * to the adapter.
 */
static void
ctlfe_datamove_done(union ctl_io *io)
{
	union ccb *ccb;
	struct cam_periph *periph;
	struct ctlfe_lun_softc *softc;

	ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	periph = xpt_path_periph(ccb->ccb_h.path);
	cam_periph_lock(periph);

	softc = (struct ctlfe_lun_softc *)periph->softc;

	if (io->io_hdr.io_type == CTL_IO_TASK) {
		/*
		 * Task management commands don't require any further
		 * communication back to the adapter.  Requeue the CCB
		 * to the adapter, and free the CTL I/O.
		 */
		xpt_print(ccb->ccb_h.path, "%s: returning task I/O "
		    "tag %#x seq %#x\n", __func__,
		    ccb->cin1.tag_id, ccb->cin1.seq_id);
		/*
		 * Send the notify acknowledge down to the SIM, to let it
		 * know we processed the task management command.
		 */
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
		xpt_action(ccb);
		ctl_free_io(io);
	} else {
		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
			io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
		else
			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;

		TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
		    periph_links.tqe);

		/*
		 * Reset the timeout for our latest active DMA.
		 */
		callout_reset(&softc->dma_callout,
		    CTLFE_DMA_TIMEOUT * hz,
		    ctlfe_dma_timeout, softc);
		/*
		 * Ask the CAM transport layer to send us a CCB to do
		 * the DMA or send status, unless ctlfe_dma_enabled is set
		 * to 0.
		 */
		if (ctlfe_dma_enabled != 0)
			xpt_schedule(periph, /*priority*/ 1);
	}

	cam_periph_unlock(periph);
}

static void
ctlfe_dump(void)
{
	struct ctlfe_softc *bus_softc;

	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
		struct ctlfe_lun_softc *lun_softc;

		ctlfe_dump_sim(bus_softc->sim);

		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) {
			ctlfe_dump_queue(lun_softc);
		}
	}
}