1 /*- 2 * Copyright (c) 2008, 2009 Silicon Graphics International Corp. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * substantially similar to the "NO WARRANTY" disclaimer below 13 * ("Disclaimer") and any redistribution must be conditioned upon 14 * including a substantially similar Disclaimer requirement for further 15 * binary redistribution. 16 * 17 * NO WARRANTY 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGES. 29 * 30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ 31 */ 32 /* 33 * Peripheral driver interface between CAM and CTL (CAM Target Layer). 34 * 35 * Author: Ken Merry <ken@FreeBSD.org> 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 #include <sys/param.h> 42 #include <sys/queue.h> 43 #include <sys/systm.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/mutex.h> 47 #include <sys/condvar.h> 48 #include <sys/malloc.h> 49 #include <sys/bus.h> 50 #include <sys/endian.h> 51 #include <sys/sbuf.h> 52 #include <sys/sysctl.h> 53 #include <sys/types.h> 54 #include <sys/systm.h> 55 #include <machine/bus.h> 56 57 #include <cam/cam.h> 58 #include <cam/cam_ccb.h> 59 #include <cam/cam_periph.h> 60 #include <cam/cam_queue.h> 61 #include <cam/cam_xpt_periph.h> 62 #include <cam/cam_debug.h> 63 #include <cam/cam_sim.h> 64 #include <cam/cam_xpt.h> 65 66 #include <cam/scsi/scsi_all.h> 67 #include <cam/scsi/scsi_message.h> 68 69 #include <cam/ctl/ctl_io.h> 70 #include <cam/ctl/ctl.h> 71 #include <cam/ctl/ctl_frontend.h> 72 #include <cam/ctl/ctl_util.h> 73 #include <cam/ctl/ctl_error.h> 74 75 struct ctlfe_softc { 76 struct ctl_port port; 77 path_id_t path_id; 78 u_int maxio; 79 struct cam_sim *sim; 80 char port_name[DEV_IDLEN]; 81 struct mtx lun_softc_mtx; 82 STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; 83 STAILQ_ENTRY(ctlfe_softc) links; 84 }; 85 86 STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; 87 struct mtx ctlfe_list_mtx; 88 static char ctlfe_mtx_desc[] = "ctlfelist"; 89 #ifdef CTLFE_INIT_ENABLE 90 static int ctlfe_max_targets = 1; 91 static int ctlfe_num_targets = 0; 92 #endif 93 94 typedef enum { 95 CTLFE_LUN_NONE = 0x00, 96 CTLFE_LUN_WILDCARD = 0x01 97 } ctlfe_lun_flags; 98 99 struct ctlfe_lun_softc { 100 struct ctlfe_softc *parent_softc; 101 struct cam_periph *periph; 102 ctlfe_lun_flags flags; 103 uint64_t ccbs_alloced; 104 uint64_t ccbs_freed; 105 uint64_t ctios_sent; 106 uint64_t ctios_returned; 107 uint64_t atios_sent; 108 uint64_t atios_returned; 109 uint64_t inots_sent; 110 uint64_t inots_returned; 111 /* bus_dma_tag_t dma_tag; */ 112 TAILQ_HEAD(, ccb_hdr) work_queue; 113 STAILQ_ENTRY(ctlfe_lun_softc) links; 114 }; 115 116 typedef enum { 117 CTLFE_CMD_NONE = 0x00, 118 CTLFE_CMD_PIECEWISE = 0x01 119 } ctlfe_cmd_flags; 120 121 /* 122 * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h. 123 * Currently that is 600 bytes. 124 */ 125 struct ctlfe_lun_cmd_info { 126 int cur_transfer_index; 127 size_t cur_transfer_off; 128 ctlfe_cmd_flags flags; 129 /* 130 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 131 * bytes on amd64. So with 32 elements, this is 256 bytes on 132 * i386 and 512 bytes on amd64. 133 */ 134 #define CTLFE_MAX_SEGS 32 135 bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS]; 136 }; 137 CTASSERT(sizeof(struct ctlfe_lun_cmd_info) <= CTL_PORT_PRIV_SIZE); 138 139 /* 140 * When we register the adapter/bus, request that this many ctl_ios be 141 * allocated. This should be the maximum supported by the adapter, but we 142 * currently don't have a way to get that back from the path inquiry. 143 * XXX KDM add that to the path inquiry. 144 */ 145 #define CTLFE_REQ_CTL_IO 4096 146 /* 147 * Number of Accept Target I/O CCBs to allocate and queue down to the 148 * adapter per LUN. 149 * XXX KDM should this be controlled by CTL? 150 */ 151 #define CTLFE_ATIO_PER_LUN 1024 152 /* 153 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to 154 * allocate and queue down to the adapter per LUN. 155 * XXX KDM should this be controlled by CTL? 156 */ 157 #define CTLFE_IN_PER_LUN 1024 158 159 /* 160 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending 161 * status to the initiator. The SIM is expected to have its own timeouts, 162 * so we're not putting this timeout around the CCB execution time. The 163 * SIM should timeout and let us know if it has an issue. 164 */ 165 #define CTLFE_DMA_TIMEOUT 60 166 167 /* 168 * Turn this on to enable extra debugging prints. 169 */ 170 #if 0 171 #define CTLFE_DEBUG 172 #endif 173 174 /* 175 * Use randomly assigned WWNN/WWPN values. This is to work around an issue 176 * in the FreeBSD initiator that makes it unable to rescan the target if 177 * the target gets rebooted and the WWNN/WWPN stay the same. 178 */ 179 #if 0 180 #define RANDOM_WWNN 181 #endif 182 183 MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); 184 185 #define io_ptr ppriv_ptr0 186 187 /* This is only used in the CTIO */ 188 #define ccb_atio ppriv_ptr1 189 190 int ctlfeinitialize(void); 191 void ctlfeshutdown(void); 192 static periph_init_t ctlfeperiphinit; 193 static void ctlfeasync(void *callback_arg, uint32_t code, 194 struct cam_path *path, void *arg); 195 static periph_ctor_t ctlferegister; 196 static periph_oninv_t ctlfeoninvalidate; 197 static periph_dtor_t ctlfecleanup; 198 static periph_start_t ctlfestart; 199 static void ctlfedone(struct cam_periph *periph, 200 union ccb *done_ccb); 201 202 static void ctlfe_onoffline(void *arg, int online); 203 static void ctlfe_online(void *arg); 204 static void ctlfe_offline(void *arg); 205 static int ctlfe_lun_enable(void *arg, struct ctl_id targ_id, 206 int lun_id); 207 static int ctlfe_lun_disable(void *arg, struct ctl_id targ_id, 208 int lun_id); 209 static void ctlfe_dump_sim(struct cam_sim *sim); 210 static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); 211 static void ctlfe_datamove(union ctl_io *io); 212 static void ctlfe_done(union ctl_io *io); 213 static void ctlfe_dump(void); 214 215 static struct periph_driver ctlfe_driver = 216 { 217 ctlfeperiphinit, "ctl", 218 TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0, 219 CAM_PERIPH_DRV_EARLY 220 }; 221 222 static struct ctl_frontend ctlfe_frontend = 223 { 224 .name = "camtgt", 225 .init = ctlfeinitialize, 226 .fe_dump = ctlfe_dump, 227 .shutdown = ctlfeshutdown, 228 }; 229 CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend); 230 231 extern struct ctl_softc *control_softc; 232 233 void 234 ctlfeshutdown(void) 235 { 236 return; 237 } 238 239 int 240 ctlfeinitialize(void) 241 { 242 243 STAILQ_INIT(&ctlfe_softc_list); 244 mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); 245 periphdriver_register(&ctlfe_driver); 246 return (0); 247 } 248 249 void 250 ctlfeperiphinit(void) 251 { 252 cam_status status; 253 254 status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | 255 AC_CONTRACT, ctlfeasync, NULL, NULL); 256 if (status != CAM_REQ_CMP) { 257 printf("ctl: Failed to attach async callback due to CAM " 258 "status 0x%x!\n", status); 259 } 260 } 261 262 static void 263 ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) 264 { 265 struct ctlfe_softc *softc; 266 267 #ifdef CTLFEDEBUG 268 printf("%s: entered\n", __func__); 269 #endif 270 271 mtx_lock(&ctlfe_list_mtx); 272 STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { 273 if (softc->path_id == xpt_path_path_id(path)) 274 break; 275 } 276 mtx_unlock(&ctlfe_list_mtx); 277 278 /* 279 * When a new path gets registered, and it is capable of target 280 * mode, go ahead and attach. Later on, we may need to be more 281 * selective, but for now this will be sufficient. 282 */ 283 switch (code) { 284 case AC_PATH_REGISTERED: { 285 struct ctl_port *port; 286 struct ccb_pathinq *cpi; 287 int retval; 288 289 cpi = (struct ccb_pathinq *)arg; 290 291 /* Don't attach if it doesn't support target mode */ 292 if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { 293 #ifdef CTLFEDEBUG 294 printf("%s: SIM %s%d doesn't support target mode\n", 295 __func__, cpi->dev_name, cpi->unit_number); 296 #endif 297 break; 298 } 299 300 if (softc != NULL) { 301 #ifdef CTLFEDEBUG 302 printf("%s: CTL port for CAM path %u already exists\n", 303 __func__, xpt_path_path_id(path)); 304 #endif 305 break; 306 } 307 308 #ifdef CTLFE_INIT_ENABLE 309 if (ctlfe_num_targets >= ctlfe_max_targets) { 310 union ccb *ccb; 311 312 ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, 313 M_NOWAIT | M_ZERO); 314 if (ccb == NULL) { 315 printf("%s: unable to malloc CCB!\n", __func__); 316 return; 317 } 318 xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); 319 320 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 321 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 322 ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR; 323 324 xpt_action(ccb); 325 326 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != 327 CAM_REQ_CMP) { 328 printf("%s: SIM %s%d (path id %d) initiator " 329 "enable failed with status %#x\n", 330 __func__, cpi->dev_name, 331 cpi->unit_number, cpi->ccb_h.path_id, 332 ccb->ccb_h.status); 333 } else { 334 printf("%s: SIM %s%d (path id %d) initiator " 335 "enable succeeded\n", 336 __func__, cpi->dev_name, 337 cpi->unit_number, cpi->ccb_h.path_id); 338 } 339 340 free(ccb, M_TEMP); 341 342 break; 343 } else { 344 ctlfe_num_targets++; 345 } 346 347 printf("%s: ctlfe_num_targets = %d\n", __func__, 348 ctlfe_num_targets); 349 #endif /* CTLFE_INIT_ENABLE */ 350 351 /* 352 * We're in an interrupt context here, so we have to 353 * use M_NOWAIT. Of course this means trouble if we 354 * can't allocate memory. 355 */ 356 softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO); 357 if (softc == NULL) { 358 printf("%s: unable to malloc %zd bytes for softc\n", 359 __func__, sizeof(*softc)); 360 return; 361 } 362 363 softc->path_id = cpi->ccb_h.path_id; 364 softc->sim = xpt_path_sim(path); 365 if (cpi->maxio != 0) 366 softc->maxio = cpi->maxio; 367 else 368 softc->maxio = DFLTPHYS; 369 mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF); 370 STAILQ_INIT(&softc->lun_softc_list); 371 372 port = &softc->port; 373 port->frontend = &ctlfe_frontend; 374 375 /* 376 * XXX KDM should we be more accurate here ? 377 */ 378 if (cpi->transport == XPORT_FC) 379 port->port_type = CTL_PORT_FC; 380 else if (cpi->transport == XPORT_SAS) 381 port->port_type = CTL_PORT_SAS; 382 else 383 port->port_type = CTL_PORT_SCSI; 384 385 /* XXX KDM what should the real number be here? */ 386 port->num_requested_ctl_io = 4096; 387 snprintf(softc->port_name, sizeof(softc->port_name), 388 "%s%d", cpi->dev_name, cpi->unit_number); 389 /* 390 * XXX KDM it would be nice to allocate storage in the 391 * frontend structure itself. 392 */ 393 port->port_name = softc->port_name; 394 port->physical_port = cpi->bus_id; 395 port->virtual_port = 0; 396 port->port_online = ctlfe_online; 397 port->port_offline = ctlfe_offline; 398 port->onoff_arg = softc; 399 port->lun_enable = ctlfe_lun_enable; 400 port->lun_disable = ctlfe_lun_disable; 401 port->targ_lun_arg = softc; 402 port->fe_datamove = ctlfe_datamove; 403 port->fe_done = ctlfe_done; 404 /* 405 * XXX KDM the path inquiry doesn't give us the maximum 406 * number of targets supported. 407 */ 408 port->max_targets = cpi->max_target; 409 port->max_target_id = cpi->max_target; 410 411 /* 412 * XXX KDM need to figure out whether we're the master or 413 * slave. 414 */ 415 #ifdef CTLFEDEBUG 416 printf("%s: calling ctl_port_register() for %s%d\n", 417 __func__, cpi->dev_name, cpi->unit_number); 418 #endif 419 retval = ctl_port_register(port); 420 if (retval != 0) { 421 printf("%s: ctl_port_register() failed with " 422 "error %d!\n", __func__, retval); 423 mtx_destroy(&softc->lun_softc_mtx); 424 free(softc, M_CTLFE); 425 break; 426 } else { 427 mtx_lock(&ctlfe_list_mtx); 428 STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links); 429 mtx_unlock(&ctlfe_list_mtx); 430 } 431 432 break; 433 } 434 case AC_PATH_DEREGISTERED: { 435 436 if (softc != NULL) { 437 /* 438 * XXX KDM are we certain at this point that there 439 * are no outstanding commands for this frontend? 440 */ 441 mtx_lock(&ctlfe_list_mtx); 442 STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc, 443 links); 444 mtx_unlock(&ctlfe_list_mtx); 445 ctl_port_deregister(&softc->port); 446 mtx_destroy(&softc->lun_softc_mtx); 447 free(softc, M_CTLFE); 448 } 449 break; 450 } 451 case AC_CONTRACT: { 452 struct ac_contract *ac; 453 454 ac = (struct ac_contract *)arg; 455 456 switch (ac->contract_number) { 457 case AC_CONTRACT_DEV_CHG: { 458 struct ac_device_changed *dev_chg; 459 int retval; 460 461 dev_chg = (struct ac_device_changed *)ac->contract_data; 462 463 printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", 464 __func__, dev_chg->wwpn, dev_chg->port, 465 xpt_path_path_id(path), dev_chg->target, 466 (dev_chg->arrived == 0) ? "left" : "arrived"); 467 468 if (softc == NULL) { 469 printf("%s: CTL port for CAM path %u not " 470 "found!\n", __func__, 471 xpt_path_path_id(path)); 472 break; 473 } 474 if (dev_chg->arrived != 0) { 475 retval = ctl_add_initiator(&softc->port, 476 dev_chg->target, dev_chg->wwpn, NULL); 477 } else { 478 retval = ctl_remove_initiator(&softc->port, 479 dev_chg->target); 480 } 481 482 if (retval < 0) { 483 printf("%s: could not %s port %d iid %u " 484 "WWPN %#jx!\n", __func__, 485 (dev_chg->arrived != 0) ? "add" : 486 "remove", softc->port.targ_port, 487 dev_chg->target, 488 (uintmax_t)dev_chg->wwpn); 489 } 490 break; 491 } 492 default: 493 printf("%s: unsupported contract number %ju\n", 494 __func__, (uintmax_t)ac->contract_number); 495 break; 496 } 497 break; 498 } 499 default: 500 break; 501 } 502 } 503 504 static cam_status 505 ctlferegister(struct cam_periph *periph, void *arg) 506 { 507 struct ctlfe_softc *bus_softc; 508 struct ctlfe_lun_softc *softc; 509 union ccb en_lun_ccb; 510 cam_status status; 511 int i; 512 513 softc = (struct ctlfe_lun_softc *)arg; 514 bus_softc = softc->parent_softc; 515 516 TAILQ_INIT(&softc->work_queue); 517 softc->periph = periph; 518 periph->softc = softc; 519 520 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 521 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 522 en_lun_ccb.cel.grp6_len = 0; 523 en_lun_ccb.cel.grp7_len = 0; 524 en_lun_ccb.cel.enable = 1; 525 xpt_action(&en_lun_ccb); 526 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 527 if (status != CAM_REQ_CMP) { 528 xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", 529 __func__, en_lun_ccb.ccb_h.status); 530 return (status); 531 } 532 533 status = CAM_REQ_CMP; 534 535 for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { 536 union ccb *new_ccb; 537 union ctl_io *new_io; 538 539 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 540 M_ZERO|M_NOWAIT); 541 if (new_ccb == NULL) { 542 status = CAM_RESRC_UNAVAIL; 543 break; 544 } 545 new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); 546 if (new_io == NULL) { 547 free(new_ccb, M_CTLFE); 548 status = CAM_RESRC_UNAVAIL; 549 break; 550 } 551 new_ccb->ccb_h.io_ptr = new_io; 552 553 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 554 new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; 555 new_ccb->ccb_h.cbfcnp = ctlfedone; 556 new_ccb->ccb_h.flags |= CAM_UNLOCKED; 557 xpt_action(new_ccb); 558 softc->atios_sent++; 559 status = new_ccb->ccb_h.status; 560 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 561 ctl_free_io(new_io); 562 free(new_ccb, M_CTLFE); 563 break; 564 } 565 } 566 567 status = cam_periph_acquire(periph); 568 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 569 xpt_print(periph->path, "%s: could not acquire reference " 570 "count, status = %#x\n", __func__, status); 571 return (status); 572 } 573 574 if (i == 0) { 575 xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " 576 "status 0x%x\n", __func__, status); 577 return (CAM_REQ_CMP_ERR); 578 } 579 580 for (i = 0; i < CTLFE_IN_PER_LUN; i++) { 581 union ccb *new_ccb; 582 union ctl_io *new_io; 583 584 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 585 M_ZERO|M_NOWAIT); 586 if (new_ccb == NULL) { 587 status = CAM_RESRC_UNAVAIL; 588 break; 589 } 590 new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); 591 if (new_io == NULL) { 592 free(new_ccb, M_CTLFE); 593 status = CAM_RESRC_UNAVAIL; 594 break; 595 } 596 new_ccb->ccb_h.io_ptr = new_io; 597 598 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 599 new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 600 new_ccb->ccb_h.cbfcnp = ctlfedone; 601 new_ccb->ccb_h.flags |= CAM_UNLOCKED; 602 xpt_action(new_ccb); 603 softc->inots_sent++; 604 status = new_ccb->ccb_h.status; 605 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 606 /* 607 * Note that we don't free the CCB here. If the 608 * status is not CAM_REQ_INPROG, then we're 609 * probably talking to a SIM that says it is 610 * target-capable but doesn't support the 611 * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the 612 * older API. In that case, it'll call xpt_done() 613 * on the CCB, and we need to free it in our done 614 * routine as a result. 615 */ 616 break; 617 } 618 } 619 if ((i == 0) 620 || (status != CAM_REQ_INPROG)) { 621 xpt_print(periph->path, "%s: could not allocate immediate " 622 "notify CCBs, status 0x%x\n", __func__, status); 623 return (CAM_REQ_CMP_ERR); 624 } 625 return (CAM_REQ_CMP); 626 } 627 628 static void 629 ctlfeoninvalidate(struct cam_periph *periph) 630 { 631 union ccb en_lun_ccb; 632 cam_status status; 633 struct ctlfe_softc *bus_softc; 634 struct ctlfe_lun_softc *softc; 635 636 softc = (struct ctlfe_lun_softc *)periph->softc; 637 638 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 639 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 640 en_lun_ccb.cel.grp6_len = 0; 641 en_lun_ccb.cel.grp7_len = 0; 642 en_lun_ccb.cel.enable = 0; 643 xpt_action(&en_lun_ccb); 644 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 645 if (status != CAM_REQ_CMP) { 646 xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", 647 __func__, en_lun_ccb.ccb_h.status); 648 /* 649 * XXX KDM what do we do now? 650 */ 651 } 652 xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju " 653 "INOTs outstanding, %d refs\n", softc->atios_sent - 654 softc->atios_returned, softc->inots_sent - 655 softc->inots_returned, periph->refcount); 656 657 bus_softc = softc->parent_softc; 658 mtx_lock(&bus_softc->lun_softc_mtx); 659 STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); 660 mtx_unlock(&bus_softc->lun_softc_mtx); 661 } 662 663 static void 664 ctlfecleanup(struct cam_periph *periph) 665 { 666 struct ctlfe_lun_softc *softc; 667 668 xpt_print(periph->path, "%s: Called\n", __func__); 669 670 softc = (struct ctlfe_lun_softc *)periph->softc; 671 672 /* 673 * XXX KDM is there anything else that needs to be done here? 674 */ 675 676 free(softc, M_CTLFE); 677 } 678 679 static void 680 ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io, 681 ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len, 682 u_int16_t *sglist_cnt) 683 { 684 struct ctlfe_softc *bus_softc; 685 struct ctlfe_lun_cmd_info *cmd_info; 686 struct ctl_sg_entry *ctl_sglist; 687 bus_dma_segment_t *cam_sglist; 688 size_t off; 689 int i, idx; 690 691 cmd_info = (struct ctlfe_lun_cmd_info *)io->io_hdr.port_priv; 692 bus_softc = softc->parent_softc; 693 694 /* 695 * Set the direction, relative to the initiator. 696 */ 697 *flags &= ~CAM_DIR_MASK; 698 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 699 *flags |= CAM_DIR_IN; 700 else 701 *flags |= CAM_DIR_OUT; 702 703 *flags &= ~CAM_DATA_MASK; 704 idx = cmd_info->cur_transfer_index; 705 off = cmd_info->cur_transfer_off; 706 cmd_info->flags &= ~CTLFE_CMD_PIECEWISE; 707 if (io->scsiio.kern_sg_entries == 0) { 708 /* No S/G list. */ 709 *data_ptr = io->scsiio.kern_data_ptr + off; 710 if (io->scsiio.kern_data_len - off <= bus_softc->maxio) { 711 *dxfer_len = io->scsiio.kern_data_len - off; 712 } else { 713 *dxfer_len = bus_softc->maxio; 714 cmd_info->cur_transfer_index = -1; 715 cmd_info->cur_transfer_off = bus_softc->maxio; 716 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 717 } 718 *sglist_cnt = 0; 719 720 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 721 *flags |= CAM_DATA_PADDR; 722 else 723 *flags |= CAM_DATA_VADDR; 724 } else { 725 /* S/G list with physical or virtual pointers. */ 726 ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 727 cam_sglist = cmd_info->cam_sglist; 728 *dxfer_len = 0; 729 for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) { 730 cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off; 731 if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) { 732 cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off; 733 *dxfer_len += cam_sglist[i].ds_len; 734 } else { 735 cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len; 736 cmd_info->cur_transfer_index = idx + i; 737 cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off; 738 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 739 *dxfer_len += cam_sglist[i].ds_len; 740 if (ctl_sglist[i].len != 0) 741 i++; 742 break; 743 } 744 if (i == (CTLFE_MAX_SEGS - 1) && 745 idx + i < (io->scsiio.kern_sg_entries - 1)) { 746 cmd_info->cur_transfer_index = idx + i + 1; 747 cmd_info->cur_transfer_off = 0; 748 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 749 i++; 750 break; 751 } 752 off = 0; 753 } 754 *sglist_cnt = i; 755 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 756 *flags |= CAM_DATA_SG_PADDR; 757 else 758 *flags |= CAM_DATA_SG; 759 *data_ptr = (uint8_t *)cam_sglist; 760 } 761 } 762 763 static void 764 ctlfestart(struct cam_periph *periph, union ccb *start_ccb) 765 { 766 struct ctlfe_lun_softc *softc; 767 struct ctlfe_lun_cmd_info *cmd_info; 768 struct ccb_hdr *ccb_h; 769 struct ccb_accept_tio *atio; 770 struct ccb_scsiio *csio; 771 uint8_t *data_ptr; 772 uint32_t dxfer_len; 773 ccb_flags flags; 774 union ctl_io *io; 775 uint8_t scsi_status; 776 777 softc = (struct ctlfe_lun_softc *)periph->softc; 778 softc->ccbs_alloced++; 779 780 ccb_h = TAILQ_FIRST(&softc->work_queue); 781 if (ccb_h == NULL) { 782 softc->ccbs_freed++; 783 xpt_release_ccb(start_ccb); 784 return; 785 } 786 787 /* Take the ATIO off the work queue */ 788 TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe); 789 atio = (struct ccb_accept_tio *)ccb_h; 790 io = (union ctl_io *)ccb_h->io_ptr; 791 csio = &start_ccb->csio; 792 793 flags = atio->ccb_h.flags & 794 (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); 795 cmd_info = (struct ctlfe_lun_cmd_info *)io->io_hdr.port_priv; 796 cmd_info->cur_transfer_index = 0; 797 cmd_info->cur_transfer_off = 0; 798 cmd_info->flags = 0; 799 800 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { 801 /* 802 * Datamove call, we need to setup the S/G list. 803 */ 804 scsi_status = 0; 805 csio->cdb_len = atio->cdb_len; 806 ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, 807 &csio->sglist_cnt); 808 io->scsiio.ext_data_filled += dxfer_len; 809 if (io->scsiio.ext_data_filled > io->scsiio.kern_total_len) { 810 xpt_print(periph->path, "%s: tag 0x%04x " 811 "fill len %u > total %u\n", 812 __func__, io->scsiio.tag_num, 813 io->scsiio.ext_data_filled, 814 io->scsiio.kern_total_len); 815 } 816 } else { 817 /* 818 * We're done, send status back. 819 */ 820 if ((io->io_hdr.flags & CTL_FLAG_ABORT) && 821 (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) { 822 io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; 823 824 /* 825 * If this command was aborted, we don't 826 * need to send status back to the SIM. 827 * Just free the CTIO and ctl_io, and 828 * recycle the ATIO back to the SIM. 829 */ 830 xpt_print(periph->path, "%s: aborted " 831 "command 0x%04x discarded\n", 832 __func__, io->scsiio.tag_num); 833 /* 834 * For a wildcard attachment, commands can 835 * come in with a specific target/lun. Reset 836 * the target and LUN fields back to the 837 * wildcard values before we send them back 838 * down to the SIM. The SIM has a wildcard 839 * LUN enabled, not whatever target/lun 840 * these happened to be. 841 */ 842 if (softc->flags & CTLFE_LUN_WILDCARD) { 843 atio->ccb_h.target_id = CAM_TARGET_WILDCARD; 844 atio->ccb_h.target_lun = CAM_LUN_WILDCARD; 845 } 846 847 if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { 848 cam_release_devq(periph->path, 849 /*relsim_flags*/0, 850 /*reduction*/0, 851 /*timeout*/0, 852 /*getcount_only*/0); 853 atio->ccb_h.status &= ~CAM_DEV_QFRZN; 854 } 855 856 if (atio->ccb_h.func_code != XPT_ACCEPT_TARGET_IO) { 857 xpt_print(periph->path, "%s: func_code " 858 "is %#x\n", __func__, 859 atio->ccb_h.func_code); 860 } 861 start_ccb->ccb_h.func_code = XPT_ABORT; 862 start_ccb->cab.abort_ccb = (union ccb *)atio; 863 864 /* Tell the SIM that we've aborted this ATIO */ 865 xpt_action(start_ccb); 866 softc->ccbs_freed++; 867 xpt_release_ccb(start_ccb); 868 869 /* 870 * Send the ATIO back down to the SIM. 871 */ 872 xpt_action((union ccb *)atio); 873 softc->atios_sent++; 874 875 /* 876 * If we still have work to do, ask for 877 * another CCB. Otherwise, deactivate our 878 * callout. 879 */ 880 if (!TAILQ_EMPTY(&softc->work_queue)) 881 xpt_schedule(periph, /*priority*/ 1); 882 return; 883 } 884 data_ptr = NULL; 885 dxfer_len = 0; 886 csio->sglist_cnt = 0; 887 scsi_status = 0; 888 } 889 if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) && 890 (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 && 891 ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 || 892 io->io_hdr.status == CTL_SUCCESS)) { 893 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; 894 flags |= CAM_SEND_STATUS; 895 scsi_status = io->scsiio.scsi_status; 896 csio->sense_len = io->scsiio.sense_len; 897 #ifdef CTLFEDEBUG 898 printf("%s: tag %04x status %x\n", __func__, 899 atio->tag_id, io->io_hdr.status); 900 #endif 901 if (csio->sense_len != 0) { 902 csio->sense_data = io->scsiio.sense_data; 903 flags |= CAM_SEND_SENSE; 904 } else if (scsi_status == SCSI_STATUS_CHECK_COND) { 905 xpt_print(periph->path, "%s: check condition " 906 "with no sense\n", __func__); 907 } 908 } 909 910 #ifdef CTLFEDEBUG 911 printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, 912 (flags & CAM_SEND_STATUS) ? "done" : "datamove", 913 atio->tag_id, flags, data_ptr, dxfer_len); 914 #endif 915 916 /* 917 * Valid combinations: 918 * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, 919 * sglist_cnt = 0 920 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, 921 * sglist_cnt = 0 922 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, 923 * sglist_cnt != 0 924 */ 925 #ifdef CTLFEDEBUG 926 if (((flags & CAM_SEND_STATUS) 927 && (((flags & CAM_DATA_SG) != 0) 928 || (dxfer_len != 0) 929 || (csio->sglist_cnt != 0))) 930 || (((flags & CAM_SEND_STATUS) == 0) 931 && (dxfer_len == 0)) 932 || ((flags & CAM_DATA_SG) 933 && (csio->sglist_cnt == 0)) 934 || (((flags & CAM_DATA_SG) == 0) 935 && (csio->sglist_cnt != 0))) { 936 printf("%s: tag %04x cdb %02x flags %#x dxfer_len " 937 "%d sg %u\n", __func__, atio->tag_id, 938 atio->cdb_io.cdb_bytes[0], flags, dxfer_len, 939 csio->sglist_cnt); 940 printf("%s: tag %04x io status %#x\n", __func__, 941 atio->tag_id, io->io_hdr.status); 942 } 943 #endif 944 cam_fill_ctio(csio, 945 /*retries*/ 2, 946 ctlfedone, 947 flags, 948 (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, 949 atio->tag_id, 950 atio->init_id, 951 scsi_status, 952 /*data_ptr*/ data_ptr, 953 /*dxfer_len*/ dxfer_len, 954 /*timeout*/ 5 * 1000); 955 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 956 start_ccb->ccb_h.ccb_atio = atio; 957 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 958 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 959 io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED); 960 961 softc->ctios_sent++; 962 963 cam_periph_unlock(periph); 964 xpt_action(start_ccb); 965 cam_periph_lock(periph); 966 967 if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { 968 cam_release_devq(periph->path, 969 /*relsim_flags*/0, 970 /*reduction*/0, 971 /*timeout*/0, 972 /*getcount_only*/0); 973 atio->ccb_h.status &= ~CAM_DEV_QFRZN; 974 } 975 976 /* 977 * If we still have work to do, ask for another CCB. 978 */ 979 if (!TAILQ_EMPTY(&softc->work_queue)) 980 xpt_schedule(periph, /*priority*/ 1); 981 } 982 983 static void 984 ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) 985 { 986 struct ctlfe_lun_softc *softc; 987 988 softc = (struct ctlfe_lun_softc *)periph->softc; 989 990 switch (ccb->ccb_h.func_code) { 991 case XPT_ACCEPT_TARGET_IO: 992 softc->atios_returned++; 993 break; 994 case XPT_IMMEDIATE_NOTIFY: 995 case XPT_NOTIFY_ACKNOWLEDGE: 996 softc->inots_returned++; 997 break; 998 default: 999 break; 1000 } 1001 1002 ctl_free_io(ccb->ccb_h.io_ptr); 1003 free(ccb, M_CTLFE); 1004 1005 KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: " 1006 "atios_returned %ju > atios_sent %ju", __func__, 1007 softc->atios_returned, softc->atios_sent)); 1008 KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: " 1009 "inots_returned %ju > inots_sent %ju", __func__, 1010 softc->inots_returned, softc->inots_sent)); 1011 1012 /* 1013 * If we have received all of our CCBs, we can release our 1014 * reference on the peripheral driver. It will probably go away 1015 * now. 1016 */ 1017 if ((softc->atios_returned == softc->atios_sent) 1018 && (softc->inots_returned == softc->inots_sent)) { 1019 cam_periph_release_locked(periph); 1020 } 1021 } 1022 1023 static int 1024 ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) 1025 { 1026 uint64_t lba; 1027 uint32_t num_blocks, nbc; 1028 uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)? 1029 atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes; 1030 1031 nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ 1032 1033 switch (cmdbyt[0]) { 1034 case READ_6: 1035 case WRITE_6: 1036 { 1037 struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; 1038 lba = scsi_3btoul(cdb->addr); 1039 lba &= 0x1fffff; 1040 num_blocks = cdb->length; 1041 if (num_blocks == 0) 1042 num_blocks = 256; 1043 lba += nbc; 1044 num_blocks -= nbc; 1045 scsi_ulto3b(lba, cdb->addr); 1046 cdb->length = num_blocks; 1047 break; 1048 } 1049 case READ_10: 1050 case WRITE_10: 1051 { 1052 struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; 1053 lba = scsi_4btoul(cdb->addr); 1054 num_blocks = scsi_2btoul(cdb->length); 1055 lba += nbc; 1056 num_blocks -= nbc; 1057 scsi_ulto4b(lba, cdb->addr); 1058 scsi_ulto2b(num_blocks, cdb->length); 1059 break; 1060 } 1061 case READ_12: 1062 case WRITE_12: 1063 { 1064 struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; 1065 lba = scsi_4btoul(cdb->addr); 1066 num_blocks = scsi_4btoul(cdb->length); 1067 lba += nbc; 1068 num_blocks -= nbc; 1069 scsi_ulto4b(lba, cdb->addr); 1070 scsi_ulto4b(num_blocks, cdb->length); 1071 break; 1072 } 1073 case READ_16: 1074 case WRITE_16: 1075 case WRITE_ATOMIC_16: 1076 { 1077 struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; 1078 lba = scsi_8btou64(cdb->addr); 1079 num_blocks = scsi_4btoul(cdb->length); 1080 lba += nbc; 1081 num_blocks -= nbc; 1082 scsi_u64to8b(lba, cdb->addr); 1083 scsi_ulto4b(num_blocks, cdb->length); 1084 break; 1085 } 1086 default: 1087 return -1; 1088 } 1089 return (0); 1090 } 1091 1092 static void 1093 ctlfedone(struct cam_periph *periph, union ccb *done_ccb) 1094 { 1095 struct ctlfe_lun_softc *softc; 1096 struct ctlfe_softc *bus_softc; 1097 struct ccb_accept_tio *atio = NULL; 1098 union ctl_io *io = NULL; 1099 struct mtx *mtx; 1100 1101 KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0, 1102 ("CCB in ctlfedone() without CAM_UNLOCKED flag")); 1103 #ifdef CTLFE_DEBUG 1104 printf("%s: entered, func_code = %#x\n", __func__, 1105 done_ccb->ccb_h.func_code); 1106 #endif 1107 1108 softc = (struct ctlfe_lun_softc *)periph->softc; 1109 bus_softc = softc->parent_softc; 1110 mtx = cam_periph_mtx(periph); 1111 mtx_lock(mtx); 1112 1113 /* 1114 * If the peripheral is invalid, ATIOs and immediate notify CCBs 1115 * need to be freed. Most of the ATIOs and INOTs that come back 1116 * will be CCBs that are being returned from the SIM as a result of 1117 * our disabling the LUN. 1118 * 1119 * Other CCB types are handled in their respective cases below. 1120 */ 1121 if (periph->flags & CAM_PERIPH_INVALID) { 1122 switch (done_ccb->ccb_h.func_code) { 1123 case XPT_ACCEPT_TARGET_IO: 1124 case XPT_IMMEDIATE_NOTIFY: 1125 case XPT_NOTIFY_ACKNOWLEDGE: 1126 ctlfe_free_ccb(periph, done_ccb); 1127 goto out; 1128 default: 1129 break; 1130 } 1131 1132 } 1133 switch (done_ccb->ccb_h.func_code) { 1134 case XPT_ACCEPT_TARGET_IO: { 1135 1136 atio = &done_ccb->atio; 1137 1138 softc->atios_returned++; 1139 1140 resubmit: 1141 /* 1142 * Allocate a ctl_io, pass it to CTL, and wait for the 1143 * datamove or done. 1144 */ 1145 mtx_unlock(mtx); 1146 io = done_ccb->ccb_h.io_ptr; 1147 ctl_zero_io(io); 1148 1149 /* Save pointers on both sides */ 1150 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb; 1151 done_ccb->ccb_h.io_ptr = io; 1152 1153 /* 1154 * Only SCSI I/O comes down this path, resets, etc. come 1155 * down the immediate notify path below. 1156 */ 1157 io->io_hdr.io_type = CTL_IO_SCSI; 1158 io->io_hdr.nexus.initid.id = atio->init_id; 1159 io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; 1160 io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id; 1161 io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; 1162 io->scsiio.tag_num = atio->tag_id; 1163 switch (atio->tag_action) { 1164 case CAM_TAG_ACTION_NONE: 1165 io->scsiio.tag_type = CTL_TAG_UNTAGGED; 1166 break; 1167 case MSG_SIMPLE_TASK: 1168 io->scsiio.tag_type = CTL_TAG_SIMPLE; 1169 break; 1170 case MSG_HEAD_OF_QUEUE_TASK: 1171 io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; 1172 break; 1173 case MSG_ORDERED_TASK: 1174 io->scsiio.tag_type = CTL_TAG_ORDERED; 1175 break; 1176 case MSG_ACA_TASK: 1177 io->scsiio.tag_type = CTL_TAG_ACA; 1178 break; 1179 default: 1180 io->scsiio.tag_type = CTL_TAG_UNTAGGED; 1181 printf("%s: unhandled tag type %#x!!\n", __func__, 1182 atio->tag_action); 1183 break; 1184 } 1185 if (atio->cdb_len > sizeof(io->scsiio.cdb)) { 1186 printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", 1187 __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); 1188 } 1189 io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); 1190 bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb, 1191 io->scsiio.cdb_len); 1192 1193 #ifdef CTLFEDEBUG 1194 printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__, 1195 (uintmax_t)io->io_hdr.nexus.initid.id, 1196 io->io_hdr.nexus.targ_port, 1197 (uintmax_t)io->io_hdr.nexus.targ_target.id, 1198 io->io_hdr.nexus.targ_lun, 1199 io->scsiio.tag_num, io->scsiio.cdb[0]); 1200 #endif 1201 1202 ctl_queue(io); 1203 return; 1204 } 1205 case XPT_CONT_TARGET_IO: { 1206 int srr = 0; 1207 uint32_t srr_off = 0; 1208 1209 atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; 1210 io = (union ctl_io *)atio->ccb_h.io_ptr; 1211 1212 softc->ctios_returned++; 1213 #ifdef CTLFEDEBUG 1214 printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", 1215 __func__, atio->tag_id, done_ccb->ccb_h.flags); 1216 #endif 1217 /* 1218 * Handle SRR case were the data pointer is pushed back hack 1219 */ 1220 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV 1221 && done_ccb->csio.msg_ptr != NULL 1222 && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED 1223 && done_ccb->csio.msg_ptr[1] == 5 1224 && done_ccb->csio.msg_ptr[2] == 0) { 1225 srr = 1; 1226 srr_off = 1227 (done_ccb->csio.msg_ptr[3] << 24) 1228 | (done_ccb->csio.msg_ptr[4] << 16) 1229 | (done_ccb->csio.msg_ptr[5] << 8) 1230 | (done_ccb->csio.msg_ptr[6]); 1231 } 1232 1233 if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { 1234 /* 1235 * If status was being sent, the back end data is now 1236 * history. Hack it up and resubmit a new command with 1237 * the CDB adjusted. If the SIM does the right thing, 1238 * all of the resid math should work. 1239 */ 1240 softc->ccbs_freed++; 1241 xpt_release_ccb(done_ccb); 1242 if (ctlfe_adjust_cdb(atio, srr_off) == 0) { 1243 done_ccb = (union ccb *)atio; 1244 goto resubmit; 1245 } 1246 /* 1247 * Fall through to doom.... 1248 */ 1249 } else if (srr) { 1250 /* 1251 * If we have an srr and we're still sending data, we 1252 * should be able to adjust offsets and cycle again. 1253 */ 1254 io->scsiio.kern_rel_offset = 1255 io->scsiio.ext_data_filled = srr_off; 1256 io->scsiio.ext_data_len = io->scsiio.kern_total_len - 1257 io->scsiio.kern_rel_offset; 1258 softc->ccbs_freed++; 1259 io->scsiio.io_hdr.status = CTL_STATUS_NONE; 1260 xpt_release_ccb(done_ccb); 1261 TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, 1262 periph_links.tqe); 1263 xpt_schedule(periph, /*priority*/ 1); 1264 break; 1265 } 1266 1267 /* 1268 * If we were sending status back to the initiator, free up 1269 * resources. If we were doing a datamove, call the 1270 * datamove done routine. 1271 */ 1272 if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { 1273 softc->ccbs_freed++; 1274 xpt_release_ccb(done_ccb); 1275 /* 1276 * For a wildcard attachment, commands can come in 1277 * with a specific target/lun. Reset the target 1278 * and LUN fields back to the wildcard values before 1279 * we send them back down to the SIM. The SIM has 1280 * a wildcard LUN enabled, not whatever target/lun 1281 * these happened to be. 1282 */ 1283 if (softc->flags & CTLFE_LUN_WILDCARD) { 1284 atio->ccb_h.target_id = CAM_TARGET_WILDCARD; 1285 atio->ccb_h.target_lun = CAM_LUN_WILDCARD; 1286 } 1287 if (periph->flags & CAM_PERIPH_INVALID) { 1288 ctlfe_free_ccb(periph, (union ccb *)atio); 1289 } else { 1290 softc->atios_sent++; 1291 mtx_unlock(mtx); 1292 xpt_action((union ccb *)atio); 1293 return; 1294 } 1295 } else { 1296 struct ctlfe_lun_cmd_info *cmd_info; 1297 struct ccb_scsiio *csio; 1298 1299 csio = &done_ccb->csio; 1300 cmd_info = (struct ctlfe_lun_cmd_info *) 1301 io->io_hdr.port_priv; 1302 1303 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; 1304 1305 io->scsiio.ext_data_len += csio->dxfer_len; 1306 if (io->scsiio.ext_data_len > 1307 io->scsiio.kern_total_len) { 1308 xpt_print(periph->path, "%s: tag 0x%04x " 1309 "done len %u > total %u sent %u\n", 1310 __func__, io->scsiio.tag_num, 1311 io->scsiio.ext_data_len, 1312 io->scsiio.kern_total_len, 1313 io->scsiio.ext_data_filled); 1314 } 1315 /* 1316 * Translate CAM status to CTL status. Success 1317 * does not change the overall, ctl_io status. In 1318 * that case we just set port_status to 0. If we 1319 * have a failure, though, set a data phase error 1320 * for the overall ctl_io. 1321 */ 1322 switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { 1323 case CAM_REQ_CMP: 1324 io->io_hdr.port_status = 0; 1325 break; 1326 default: 1327 /* 1328 * XXX KDM we probably need to figure out a 1329 * standard set of errors that the SIM 1330 * drivers should return in the event of a 1331 * data transfer failure. A data phase 1332 * error will at least point the user to a 1333 * data transfer error of some sort. 1334 * Hopefully the SIM printed out some 1335 * additional information to give the user 1336 * a clue what happened. 1337 */ 1338 io->io_hdr.port_status = 0xbad1; 1339 ctl_set_data_phase_error(&io->scsiio); 1340 /* 1341 * XXX KDM figure out residual. 1342 */ 1343 break; 1344 } 1345 /* 1346 * If we had to break this S/G list into multiple 1347 * pieces, figure out where we are in the list, and 1348 * continue sending pieces if necessary. 1349 */ 1350 if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) 1351 && (io->io_hdr.port_status == 0)) { 1352 ccb_flags flags; 1353 uint8_t scsi_status; 1354 uint8_t *data_ptr; 1355 uint32_t dxfer_len; 1356 1357 flags = atio->ccb_h.flags & 1358 (CAM_DIS_DISCONNECT| 1359 CAM_TAG_ACTION_VALID); 1360 1361 ctlfedata(softc, io, &flags, &data_ptr, 1362 &dxfer_len, &csio->sglist_cnt); 1363 1364 scsi_status = 0; 1365 1366 if (((flags & CAM_SEND_STATUS) == 0) 1367 && (dxfer_len == 0)) { 1368 printf("%s: tag %04x no status or " 1369 "len cdb = %02x\n", __func__, 1370 atio->tag_id, 1371 atio->cdb_io.cdb_bytes[0]); 1372 printf("%s: tag %04x io status %#x\n", 1373 __func__, atio->tag_id, 1374 io->io_hdr.status); 1375 } 1376 1377 cam_fill_ctio(csio, 1378 /*retries*/ 2, 1379 ctlfedone, 1380 flags, 1381 (flags & CAM_TAG_ACTION_VALID) ? 1382 MSG_SIMPLE_Q_TAG : 0, 1383 atio->tag_id, 1384 atio->init_id, 1385 scsi_status, 1386 /*data_ptr*/ data_ptr, 1387 /*dxfer_len*/ dxfer_len, 1388 /*timeout*/ 5 * 1000); 1389 1390 csio->ccb_h.flags |= CAM_UNLOCKED; 1391 csio->resid = 0; 1392 csio->ccb_h.ccb_atio = atio; 1393 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 1394 softc->ctios_sent++; 1395 mtx_unlock(mtx); 1396 xpt_action((union ccb *)csio); 1397 } else { 1398 /* 1399 * Release the CTIO. The ATIO will be sent back 1400 * down to the SIM once we send status. 1401 */ 1402 softc->ccbs_freed++; 1403 xpt_release_ccb(done_ccb); 1404 mtx_unlock(mtx); 1405 1406 /* Call the backend move done callback */ 1407 io->scsiio.be_move_done(io); 1408 } 1409 return; 1410 } 1411 break; 1412 } 1413 case XPT_IMMEDIATE_NOTIFY: { 1414 union ctl_io *io; 1415 struct ccb_immediate_notify *inot; 1416 cam_status status; 1417 int frozen, send_ctl_io; 1418 1419 inot = &done_ccb->cin1; 1420 1421 softc->inots_returned++; 1422 1423 frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 1424 1425 printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x " 1426 "seq %#x\n", __func__, inot->ccb_h.status, 1427 inot->tag_id, inot->seq_id); 1428 1429 io = done_ccb->ccb_h.io_ptr; 1430 ctl_zero_io(io); 1431 1432 send_ctl_io = 1; 1433 1434 io->io_hdr.io_type = CTL_IO_TASK; 1435 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb; 1436 inot->ccb_h.io_ptr = io; 1437 io->io_hdr.nexus.initid.id = inot->initiator_id; 1438 io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; 1439 io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id; 1440 io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; 1441 /* XXX KDM should this be the tag_id? */ 1442 io->taskio.tag_num = inot->seq_id; 1443 1444 status = inot->ccb_h.status & CAM_STATUS_MASK; 1445 switch (status) { 1446 case CAM_SCSI_BUS_RESET: 1447 io->taskio.task_action = CTL_TASK_BUS_RESET; 1448 break; 1449 case CAM_BDR_SENT: 1450 io->taskio.task_action = CTL_TASK_TARGET_RESET; 1451 break; 1452 case CAM_MESSAGE_RECV: 1453 switch (inot->arg) { 1454 case MSG_ABORT_TASK_SET: 1455 io->taskio.task_action = 1456 CTL_TASK_ABORT_TASK_SET; 1457 break; 1458 case MSG_TARGET_RESET: 1459 io->taskio.task_action = 1460 CTL_TASK_TARGET_RESET; 1461 break; 1462 case MSG_ABORT_TASK: 1463 io->taskio.task_action = 1464 CTL_TASK_ABORT_TASK; 1465 break; 1466 case MSG_LOGICAL_UNIT_RESET: 1467 io->taskio.task_action = 1468 CTL_TASK_LUN_RESET; 1469 break; 1470 case MSG_CLEAR_TASK_SET: 1471 io->taskio.task_action = 1472 CTL_TASK_CLEAR_TASK_SET; 1473 break; 1474 case MSG_CLEAR_ACA: 1475 io->taskio.task_action = 1476 CTL_TASK_CLEAR_ACA; 1477 break; 1478 case MSG_NOOP: 1479 send_ctl_io = 0; 1480 break; 1481 default: 1482 xpt_print(periph->path, 1483 "%s: unsupported message 0x%x\n", 1484 __func__, inot->arg); 1485 send_ctl_io = 0; 1486 break; 1487 } 1488 break; 1489 case CAM_REQ_ABORTED: 1490 /* 1491 * This request was sent back by the driver. 1492 * XXX KDM what do we do here? 1493 */ 1494 send_ctl_io = 0; 1495 break; 1496 case CAM_REQ_INVALID: 1497 case CAM_PROVIDE_FAIL: 1498 default: 1499 /* 1500 * We should only get here if we're talking 1501 * to a talking to a SIM that is target 1502 * capable but supports the old API. In 1503 * that case, we need to just free the CCB. 1504 * If we actually send a notify acknowledge, 1505 * it will send that back with an error as 1506 * well. 1507 */ 1508 1509 if ((status != CAM_REQ_INVALID) 1510 && (status != CAM_PROVIDE_FAIL)) 1511 xpt_print(periph->path, 1512 "%s: unsupported CAM status 0x%x\n", 1513 __func__, status); 1514 1515 ctlfe_free_ccb(periph, done_ccb); 1516 1517 goto out; 1518 } 1519 if (send_ctl_io != 0) { 1520 ctl_queue(io); 1521 } else { 1522 done_ccb->ccb_h.status = CAM_REQ_INPROG; 1523 done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; 1524 xpt_action(done_ccb); 1525 } 1526 1527 if (frozen != 0) { 1528 cam_release_devq(periph->path, 1529 /*relsim_flags*/ 0, 1530 /*opening reduction*/ 0, 1531 /*timeout*/ 0, 1532 /*getcount_only*/ 0); 1533 } 1534 break; 1535 } 1536 case XPT_NOTIFY_ACKNOWLEDGE: 1537 /* 1538 * Queue this back down to the SIM as an immediate notify. 1539 */ 1540 done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 1541 xpt_action(done_ccb); 1542 softc->inots_sent++; 1543 break; 1544 case XPT_SET_SIM_KNOB: 1545 case XPT_GET_SIM_KNOB: 1546 break; 1547 default: 1548 panic("%s: unexpected CCB type %#x", __func__, 1549 done_ccb->ccb_h.func_code); 1550 break; 1551 } 1552 1553 out: 1554 mtx_unlock(mtx); 1555 } 1556 1557 static void 1558 ctlfe_onoffline(void *arg, int online) 1559 { 1560 struct ctlfe_softc *bus_softc; 1561 union ccb *ccb; 1562 cam_status status; 1563 struct cam_path *path; 1564 int set_wwnn; 1565 1566 bus_softc = (struct ctlfe_softc *)arg; 1567 1568 set_wwnn = 0; 1569 1570 status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, 1571 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 1572 if (status != CAM_REQ_CMP) { 1573 printf("%s: unable to create path!\n", __func__); 1574 return; 1575 } 1576 ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO); 1577 if (ccb == NULL) { 1578 printf("%s: unable to malloc CCB!\n", __func__); 1579 xpt_free_path(path); 1580 return; 1581 } 1582 xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); 1583 1584 /* 1585 * Copan WWN format: 1586 * 1587 * Bits 63-60: 0x5 NAA, IEEE registered name 1588 * Bits 59-36: 0x000ED5 IEEE Company name assigned to Copan 1589 * Bits 35-12: Copan SSN (Sequential Serial Number) 1590 * Bits 11-8: Type of port: 1591 * 1 == N-Port 1592 * 2 == F-Port 1593 * 3 == NL-Port 1594 * Bits 7-0: 0 == Node Name, >0 == Port Number 1595 */ 1596 1597 if (online != 0) { 1598 1599 ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; 1600 1601 1602 xpt_action(ccb); 1603 1604 1605 if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ 1606 #ifdef RANDOM_WWNN 1607 uint64_t random_bits; 1608 #endif 1609 1610 printf("%s: %s current WWNN %#jx\n", __func__, 1611 bus_softc->port_name, 1612 ccb->knob.xport_specific.fc.wwnn); 1613 printf("%s: %s current WWPN %#jx\n", __func__, 1614 bus_softc->port_name, 1615 ccb->knob.xport_specific.fc.wwpn); 1616 1617 #ifdef RANDOM_WWNN 1618 arc4rand(&random_bits, sizeof(random_bits), 0); 1619 #endif 1620 1621 /* 1622 * XXX KDM this is a bit of a kludge for now. We 1623 * take the current WWNN/WWPN from the card, and 1624 * replace the company identifier and the NL-Port 1625 * indicator and the port number (for the WWPN). 1626 * This should be replaced later with ddb_GetWWNN, 1627 * or possibly a more centralized scheme. (It 1628 * would be nice to have the WWNN/WWPN for each 1629 * port stored in the ctl_port structure.) 1630 */ 1631 #ifdef RANDOM_WWNN 1632 ccb->knob.xport_specific.fc.wwnn = 1633 (random_bits & 1634 0x0000000fffffff00ULL) | 1635 /* Company ID */ 0x5000ED5000000000ULL | 1636 /* NL-Port */ 0x0300; 1637 ccb->knob.xport_specific.fc.wwpn = 1638 (random_bits & 1639 0x0000000fffffff00ULL) | 1640 /* Company ID */ 0x5000ED5000000000ULL | 1641 /* NL-Port */ 0x3000 | 1642 /* Port Num */ (bus_softc->port.targ_port & 0xff); 1643 1644 /* 1645 * This is a bit of an API break/reversal, but if 1646 * we're doing the random WWNN that's a little 1647 * different anyway. So record what we're actually 1648 * using with the frontend code so it's reported 1649 * accurately. 1650 */ 1651 ctl_port_set_wwns(&bus_softc->port, 1652 true, ccb->knob.xport_specific.fc.wwnn, 1653 true, ccb->knob.xport_specific.fc.wwpn); 1654 set_wwnn = 1; 1655 #else /* RANDOM_WWNN */ 1656 /* 1657 * If the user has specified a WWNN/WWPN, send them 1658 * down to the SIM. Otherwise, record what the SIM 1659 * has reported. 1660 */ 1661 if ((bus_softc->port.wwnn != 0) 1662 && (bus_softc->port.wwpn != 0)) { 1663 ccb->knob.xport_specific.fc.wwnn = 1664 bus_softc->port.wwnn; 1665 ccb->knob.xport_specific.fc.wwpn = 1666 bus_softc->port.wwpn; 1667 set_wwnn = 1; 1668 } else { 1669 ctl_port_set_wwns(&bus_softc->port, 1670 true, ccb->knob.xport_specific.fc.wwnn, 1671 true, ccb->knob.xport_specific.fc.wwpn); 1672 } 1673 #endif /* RANDOM_WWNN */ 1674 1675 1676 if (set_wwnn != 0) { 1677 printf("%s: %s new WWNN %#jx\n", __func__, 1678 bus_softc->port_name, 1679 ccb->knob.xport_specific.fc.wwnn); 1680 printf("%s: %s new WWPN %#jx\n", __func__, 1681 bus_softc->port_name, 1682 ccb->knob.xport_specific.fc.wwpn); 1683 } 1684 } else { 1685 printf("%s: %s has no valid WWNN/WWPN\n", __func__, 1686 bus_softc->port_name); 1687 } 1688 } 1689 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 1690 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 1691 if (set_wwnn != 0) 1692 ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS; 1693 1694 if (online != 0) 1695 ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET; 1696 else 1697 ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE; 1698 1699 xpt_action(ccb); 1700 1701 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1702 printf("%s: SIM %s (path id %d) target %s failed with " 1703 "status %#x\n", 1704 __func__, bus_softc->port_name, bus_softc->path_id, 1705 (online != 0) ? "enable" : "disable", 1706 ccb->ccb_h.status); 1707 } else { 1708 printf("%s: SIM %s (path id %d) target %s succeeded\n", 1709 __func__, bus_softc->port_name, bus_softc->path_id, 1710 (online != 0) ? "enable" : "disable"); 1711 } 1712 1713 xpt_free_path(path); 1714 1715 free(ccb, M_TEMP); 1716 1717 return; 1718 } 1719 1720 static void 1721 ctlfe_online(void *arg) 1722 { 1723 struct ctlfe_softc *bus_softc; 1724 struct cam_path *path; 1725 cam_status status; 1726 struct ctlfe_lun_softc *lun_softc; 1727 struct cam_periph *periph; 1728 1729 bus_softc = (struct ctlfe_softc *)arg; 1730 1731 /* 1732 * Create the wildcard LUN before bringing the port online. 1733 */ 1734 status = xpt_create_path(&path, /*periph*/ NULL, 1735 bus_softc->path_id, CAM_TARGET_WILDCARD, 1736 CAM_LUN_WILDCARD); 1737 if (status != CAM_REQ_CMP) { 1738 printf("%s: unable to create path for wildcard periph\n", 1739 __func__); 1740 return; 1741 } 1742 1743 lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, 1744 M_NOWAIT | M_ZERO); 1745 if (lun_softc == NULL) { 1746 xpt_print(path, "%s: unable to allocate softc for " 1747 "wildcard periph\n", __func__); 1748 xpt_free_path(path); 1749 return; 1750 } 1751 1752 xpt_path_lock(path); 1753 periph = cam_periph_find(path, "ctl"); 1754 if (periph != NULL) { 1755 /* We've already got a periph, no need to alloc a new one. */ 1756 xpt_path_unlock(path); 1757 xpt_free_path(path); 1758 free(lun_softc, M_CTLFE); 1759 return; 1760 } 1761 lun_softc->parent_softc = bus_softc; 1762 lun_softc->flags |= CTLFE_LUN_WILDCARD; 1763 1764 status = cam_periph_alloc(ctlferegister, 1765 ctlfeoninvalidate, 1766 ctlfecleanup, 1767 ctlfestart, 1768 "ctl", 1769 CAM_PERIPH_BIO, 1770 path, 1771 ctlfeasync, 1772 0, 1773 lun_softc); 1774 1775 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1776 const struct cam_status_entry *entry; 1777 1778 entry = cam_fetch_status_entry(status); 1779 printf("%s: CAM error %s (%#x) returned from " 1780 "cam_periph_alloc()\n", __func__, (entry != NULL) ? 1781 entry->status_text : "Unknown", status); 1782 free(lun_softc, M_CTLFE); 1783 } else { 1784 mtx_lock(&bus_softc->lun_softc_mtx); 1785 STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, lun_softc, links); 1786 mtx_unlock(&bus_softc->lun_softc_mtx); 1787 ctlfe_onoffline(arg, /*online*/ 1); 1788 } 1789 1790 xpt_path_unlock(path); 1791 xpt_free_path(path); 1792 } 1793 1794 static void 1795 ctlfe_offline(void *arg) 1796 { 1797 struct ctlfe_softc *bus_softc; 1798 struct cam_path *path; 1799 cam_status status; 1800 struct cam_periph *periph; 1801 1802 bus_softc = (struct ctlfe_softc *)arg; 1803 1804 /* 1805 * Disable the wildcard LUN for this port now that we have taken 1806 * the port offline. 1807 */ 1808 status = xpt_create_path(&path, /*periph*/ NULL, 1809 bus_softc->path_id, CAM_TARGET_WILDCARD, 1810 CAM_LUN_WILDCARD); 1811 if (status != CAM_REQ_CMP) { 1812 printf("%s: unable to create path for wildcard periph\n", 1813 __func__); 1814 return; 1815 } 1816 1817 xpt_path_lock(path); 1818 1819 ctlfe_onoffline(arg, /*online*/ 0); 1820 1821 if ((periph = cam_periph_find(path, "ctl")) != NULL) 1822 cam_periph_invalidate(periph); 1823 1824 xpt_path_unlock(path); 1825 xpt_free_path(path); 1826 } 1827 1828 /* 1829 * This will get called to enable a LUN on every bus that is attached to 1830 * CTL. So we only need to create a path/periph for this particular bus. 1831 */ 1832 static int 1833 ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) 1834 { 1835 struct ctlfe_softc *bus_softc; 1836 struct ctlfe_lun_softc *softc; 1837 struct cam_path *path; 1838 struct cam_periph *periph; 1839 cam_status status; 1840 1841 bus_softc = (struct ctlfe_softc *)arg; 1842 1843 status = xpt_create_path(&path, /*periph*/ NULL, 1844 bus_softc->path_id, 1845 targ_id.id, lun_id); 1846 /* XXX KDM need some way to return status to CTL here? */ 1847 if (status != CAM_REQ_CMP) { 1848 printf("%s: could not create path, status %#x\n", __func__, 1849 status); 1850 return (1); 1851 } 1852 1853 softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); 1854 xpt_path_lock(path); 1855 periph = cam_periph_find(path, "ctl"); 1856 if (periph != NULL) { 1857 /* We've already got a periph, no need to alloc a new one. */ 1858 xpt_path_unlock(path); 1859 xpt_free_path(path); 1860 free(softc, M_CTLFE); 1861 return (0); 1862 } 1863 softc->parent_softc = bus_softc; 1864 1865 status = cam_periph_alloc(ctlferegister, 1866 ctlfeoninvalidate, 1867 ctlfecleanup, 1868 ctlfestart, 1869 "ctl", 1870 CAM_PERIPH_BIO, 1871 path, 1872 ctlfeasync, 1873 0, 1874 softc); 1875 1876 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1877 const struct cam_status_entry *entry; 1878 1879 entry = cam_fetch_status_entry(status); 1880 printf("%s: CAM error %s (%#x) returned from " 1881 "cam_periph_alloc()\n", __func__, (entry != NULL) ? 1882 entry->status_text : "Unknown", status); 1883 free(softc, M_CTLFE); 1884 } else { 1885 mtx_lock(&bus_softc->lun_softc_mtx); 1886 STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); 1887 mtx_unlock(&bus_softc->lun_softc_mtx); 1888 } 1889 1890 xpt_path_unlock(path); 1891 xpt_free_path(path); 1892 return (0); 1893 } 1894 1895 /* 1896 * This will get called when the user removes a LUN to disable that LUN 1897 * on every bus that is attached to CTL. 1898 */ 1899 static int 1900 ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id) 1901 { 1902 struct ctlfe_softc *softc; 1903 struct ctlfe_lun_softc *lun_softc; 1904 1905 softc = (struct ctlfe_softc *)arg; 1906 1907 mtx_lock(&softc->lun_softc_mtx); 1908 STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) { 1909 struct cam_path *path; 1910 1911 path = lun_softc->periph->path; 1912 1913 if ((xpt_path_target_id(path) == targ_id.id) 1914 && (xpt_path_lun_id(path) == lun_id)) { 1915 break; 1916 } 1917 } 1918 if (lun_softc == NULL) { 1919 mtx_unlock(&softc->lun_softc_mtx); 1920 printf("%s: can't find target %d lun %d\n", __func__, 1921 targ_id.id, lun_id); 1922 return (1); 1923 } 1924 cam_periph_acquire(lun_softc->periph); 1925 mtx_unlock(&softc->lun_softc_mtx); 1926 1927 cam_periph_lock(lun_softc->periph); 1928 cam_periph_invalidate(lun_softc->periph); 1929 cam_periph_unlock(lun_softc->periph); 1930 cam_periph_release(lun_softc->periph); 1931 return (0); 1932 } 1933 1934 static void 1935 ctlfe_dump_sim(struct cam_sim *sim) 1936 { 1937 1938 printf("%s%d: max tagged openings: %d, max dev openings: %d\n", 1939 sim->sim_name, sim->unit_number, 1940 sim->max_tagged_dev_openings, sim->max_dev_openings); 1941 } 1942 1943 /* 1944 * Assumes that the SIM lock is held. 1945 */ 1946 static void 1947 ctlfe_dump_queue(struct ctlfe_lun_softc *softc) 1948 { 1949 struct ccb_hdr *hdr; 1950 struct cam_periph *periph; 1951 int num_items; 1952 1953 periph = softc->periph; 1954 num_items = 0; 1955 1956 TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) { 1957 union ctl_io *io = hdr->io_ptr; 1958 1959 num_items++; 1960 1961 /* 1962 * Only regular SCSI I/O is put on the work 1963 * queue, so we can print sense here. There may be no 1964 * sense if it's no the queue for a DMA, but this serves to 1965 * print out the CCB as well. 1966 * 1967 * XXX KDM switch this over to scsi_sense_print() when 1968 * CTL is merged in with CAM. 1969 */ 1970 ctl_io_error_print(io, NULL); 1971 1972 /* 1973 * Print DMA status if we are DMA_QUEUED. 1974 */ 1975 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { 1976 xpt_print(periph->path, 1977 "Total %u, Current %u, Resid %u\n", 1978 io->scsiio.kern_total_len, 1979 io->scsiio.kern_data_len, 1980 io->scsiio.kern_data_resid); 1981 } 1982 } 1983 1984 xpt_print(periph->path, "%d requests total waiting for CCBs\n", 1985 num_items); 1986 xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju " 1987 "freed)\n", (uintmax_t)(softc->ccbs_alloced - 1988 softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced, 1989 (uintmax_t)softc->ccbs_freed); 1990 xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju " 1991 "returned\n", (uintmax_t)(softc->ctios_sent - 1992 softc->ctios_returned), softc->ctios_sent, 1993 softc->ctios_returned); 1994 } 1995 1996 /* 1997 * Datamove/done routine called by CTL. Put ourselves on the queue to 1998 * receive a CCB from CAM so we can queue the continue I/O request down 1999 * to the adapter. 2000 */ 2001 static void 2002 ctlfe_datamove(union ctl_io *io) 2003 { 2004 union ccb *ccb; 2005 struct cam_periph *periph; 2006 struct ctlfe_lun_softc *softc; 2007 2008 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 2009 ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type)); 2010 2011 ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 2012 periph = xpt_path_periph(ccb->ccb_h.path); 2013 cam_periph_lock(periph); 2014 softc = (struct ctlfe_lun_softc *)periph->softc; 2015 io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; 2016 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) 2017 io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; 2018 TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, 2019 periph_links.tqe); 2020 xpt_schedule(periph, /*priority*/ 1); 2021 cam_periph_unlock(periph); 2022 } 2023 2024 static void 2025 ctlfe_done(union ctl_io *io) 2026 { 2027 union ccb *ccb; 2028 struct cam_periph *periph; 2029 struct ctlfe_lun_softc *softc; 2030 2031 ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 2032 periph = xpt_path_periph(ccb->ccb_h.path); 2033 cam_periph_lock(periph); 2034 softc = (struct ctlfe_lun_softc *)periph->softc; 2035 2036 if (io->io_hdr.io_type == CTL_IO_TASK) { 2037 /* 2038 * Task management commands don't require any further 2039 * communication back to the adapter. Requeue the CCB 2040 * to the adapter, and free the CTL I/O. 2041 */ 2042 xpt_print(ccb->ccb_h.path, "%s: returning task I/O " 2043 "tag %#x seq %#x\n", __func__, 2044 ccb->cin1.tag_id, ccb->cin1.seq_id); 2045 /* 2046 * Send the notify acknowledge down to the SIM, to let it 2047 * know we processed the task management command. 2048 */ 2049 ccb->ccb_h.status = CAM_REQ_INPROG; 2050 ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; 2051 xpt_action(ccb); 2052 } else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) { 2053 if (softc->flags & CTLFE_LUN_WILDCARD) { 2054 ccb->ccb_h.target_id = CAM_TARGET_WILDCARD; 2055 ccb->ccb_h.target_lun = CAM_LUN_WILDCARD; 2056 } 2057 if (periph->flags & CAM_PERIPH_INVALID) { 2058 ctlfe_free_ccb(periph, ccb); 2059 } else { 2060 softc->atios_sent++; 2061 cam_periph_unlock(periph); 2062 xpt_action(ccb); 2063 return; 2064 } 2065 } else { 2066 io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; 2067 TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, 2068 periph_links.tqe); 2069 xpt_schedule(periph, /*priority*/ 1); 2070 } 2071 2072 cam_periph_unlock(periph); 2073 } 2074 2075 static void 2076 ctlfe_dump(void) 2077 { 2078 struct ctlfe_softc *bus_softc; 2079 struct ctlfe_lun_softc *lun_softc; 2080 2081 STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { 2082 ctlfe_dump_sim(bus_softc->sim); 2083 STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) 2084 ctlfe_dump_queue(lun_softc); 2085 } 2086 } 2087