1 /*- 2 * Copyright (c) 2008, 2009 Silicon Graphics International Corp. 3 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification. 12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 13 * substantially similar to the "NO WARRANTY" disclaimer below 14 * ("Disclaimer") and any redistribution must be conditioned upon 15 * including a substantially similar Disclaimer requirement for further 16 * binary redistribution. 17 * 18 * NO WARRANTY 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 27 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 28 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGES. 30 * 31 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ 32 */ 33 /* 34 * Peripheral driver interface between CAM and CTL (CAM Target Layer). 35 * 36 * Author: Ken Merry <ken@FreeBSD.org> 37 */ 38 39 #include <sys/cdefs.h> 40 __FBSDID("$FreeBSD$"); 41 42 #include <sys/param.h> 43 #include <sys/queue.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/lock.h> 47 #include <sys/mutex.h> 48 #include <sys/condvar.h> 49 #include <sys/malloc.h> 50 #include <sys/bus.h> 51 #include <sys/endian.h> 52 #include <sys/sbuf.h> 53 #include <sys/sysctl.h> 54 #include <sys/types.h> 55 #include <sys/systm.h> 56 #include <machine/bus.h> 57 58 #include <cam/cam.h> 59 #include <cam/cam_ccb.h> 60 #include <cam/cam_periph.h> 61 #include <cam/cam_queue.h> 62 #include <cam/cam_xpt_periph.h> 63 #include <cam/cam_debug.h> 64 #include <cam/cam_sim.h> 65 #include <cam/cam_xpt.h> 66 67 #include <cam/scsi/scsi_all.h> 68 #include <cam/scsi/scsi_message.h> 69 70 #include <cam/ctl/ctl_io.h> 71 #include <cam/ctl/ctl.h> 72 #include <cam/ctl/ctl_frontend.h> 73 #include <cam/ctl/ctl_util.h> 74 #include <cam/ctl/ctl_error.h> 75 76 struct ctlfe_softc { 77 struct ctl_port port; 78 path_id_t path_id; 79 target_id_t target_id; 80 uint32_t hba_misc; 81 u_int maxio; 82 struct cam_sim *sim; 83 char port_name[DEV_IDLEN]; 84 struct mtx lun_softc_mtx; 85 STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; 86 STAILQ_ENTRY(ctlfe_softc) links; 87 }; 88 89 STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; 90 struct mtx ctlfe_list_mtx; 91 static char ctlfe_mtx_desc[] = "ctlfelist"; 92 #ifdef CTLFE_INIT_ENABLE 93 static int ctlfe_max_targets = 1; 94 static int ctlfe_num_targets = 0; 95 #endif 96 97 typedef enum { 98 CTLFE_LUN_NONE = 0x00, 99 CTLFE_LUN_WILDCARD = 0x01 100 } ctlfe_lun_flags; 101 102 struct ctlfe_lun_softc { 103 struct ctlfe_softc *parent_softc; 104 struct cam_periph *periph; 105 ctlfe_lun_flags flags; 106 uint64_t ccbs_alloced; 107 uint64_t ccbs_freed; 108 uint64_t ctios_sent; 109 uint64_t ctios_returned; 110 uint64_t atios_alloced; 111 uint64_t atios_freed; 112 uint64_t inots_alloced; 113 uint64_t inots_freed; 114 /* bus_dma_tag_t dma_tag; */ 115 TAILQ_HEAD(, ccb_hdr) work_queue; 116 STAILQ_ENTRY(ctlfe_lun_softc) links; 117 }; 118 119 typedef enum { 120 CTLFE_CMD_NONE = 0x00, 121 CTLFE_CMD_PIECEWISE = 0x01 122 } ctlfe_cmd_flags; 123 124 struct ctlfe_cmd_info { 125 int cur_transfer_index; 126 size_t cur_transfer_off; 127 ctlfe_cmd_flags flags; 128 /* 129 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 130 * bytes on amd64. So with 32 elements, this is 256 bytes on 131 * i386 and 512 bytes on amd64. 132 */ 133 #define CTLFE_MAX_SEGS 32 134 bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS]; 135 }; 136 137 /* 138 * When we register the adapter/bus, request that this many ctl_ios be 139 * allocated. This should be the maximum supported by the adapter, but we 140 * currently don't have a way to get that back from the path inquiry. 141 * XXX KDM add that to the path inquiry. 142 */ 143 #define CTLFE_REQ_CTL_IO 4096 144 /* 145 * Number of Accept Target I/O CCBs to allocate and queue down to the 146 * adapter per LUN. 147 * XXX KDM should this be controlled by CTL? 148 */ 149 #define CTLFE_ATIO_PER_LUN 1024 150 /* 151 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to 152 * allocate and queue down to the adapter per LUN. 153 * XXX KDM should this be controlled by CTL? 154 */ 155 #define CTLFE_IN_PER_LUN 1024 156 157 /* 158 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending 159 * status to the initiator. The SIM is expected to have its own timeouts, 160 * so we're not putting this timeout around the CCB execution time. The 161 * SIM should timeout and let us know if it has an issue. 162 */ 163 #define CTLFE_DMA_TIMEOUT 60 164 165 /* 166 * Turn this on to enable extra debugging prints. 167 */ 168 #if 0 169 #define CTLFE_DEBUG 170 #endif 171 172 /* 173 * Use randomly assigned WWNN/WWPN values. This is to work around an issue 174 * in the FreeBSD initiator that makes it unable to rescan the target if 175 * the target gets rebooted and the WWNN/WWPN stay the same. 176 */ 177 #if 0 178 #define RANDOM_WWNN 179 #endif 180 181 MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); 182 183 #define io_ptr ppriv_ptr0 184 185 /* This is only used in the CTIO */ 186 #define ccb_atio ppriv_ptr1 187 188 int ctlfeinitialize(void); 189 void ctlfeshutdown(void); 190 static periph_init_t ctlfeperiphinit; 191 static void ctlfeasync(void *callback_arg, uint32_t code, 192 struct cam_path *path, void *arg); 193 static periph_ctor_t ctlferegister; 194 static periph_oninv_t ctlfeoninvalidate; 195 static periph_dtor_t ctlfecleanup; 196 static periph_start_t ctlfestart; 197 static void ctlfedone(struct cam_periph *periph, 198 union ccb *done_ccb); 199 200 static void ctlfe_onoffline(void *arg, int online); 201 static void ctlfe_online(void *arg); 202 static void ctlfe_offline(void *arg); 203 static int ctlfe_lun_enable(void *arg, int lun_id); 204 static int ctlfe_lun_disable(void *arg, int lun_id); 205 static void ctlfe_dump_sim(struct cam_sim *sim); 206 static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); 207 static void ctlfe_datamove(union ctl_io *io); 208 static void ctlfe_done(union ctl_io *io); 209 static void ctlfe_dump(void); 210 211 static struct periph_driver ctlfe_driver = 212 { 213 ctlfeperiphinit, "ctl", 214 TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0, 215 CAM_PERIPH_DRV_EARLY 216 }; 217 218 static struct ctl_frontend ctlfe_frontend = 219 { 220 .name = "camtgt", 221 .init = ctlfeinitialize, 222 .fe_dump = ctlfe_dump, 223 .shutdown = ctlfeshutdown, 224 }; 225 CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend); 226 227 void 228 ctlfeshutdown(void) 229 { 230 return; 231 } 232 233 int 234 ctlfeinitialize(void) 235 { 236 237 STAILQ_INIT(&ctlfe_softc_list); 238 mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); 239 periphdriver_register(&ctlfe_driver); 240 return (0); 241 } 242 243 void 244 ctlfeperiphinit(void) 245 { 246 cam_status status; 247 248 status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | 249 AC_CONTRACT, ctlfeasync, NULL, NULL); 250 if (status != CAM_REQ_CMP) { 251 printf("ctl: Failed to attach async callback due to CAM " 252 "status 0x%x!\n", status); 253 } 254 } 255 256 static void 257 ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) 258 { 259 struct ctlfe_softc *softc; 260 261 #ifdef CTLFEDEBUG 262 printf("%s: entered\n", __func__); 263 #endif 264 265 mtx_lock(&ctlfe_list_mtx); 266 STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { 267 if (softc->path_id == xpt_path_path_id(path)) 268 break; 269 } 270 mtx_unlock(&ctlfe_list_mtx); 271 272 /* 273 * When a new path gets registered, and it is capable of target 274 * mode, go ahead and attach. Later on, we may need to be more 275 * selective, but for now this will be sufficient. 276 */ 277 switch (code) { 278 case AC_PATH_REGISTERED: { 279 struct ctl_port *port; 280 struct ccb_pathinq *cpi; 281 int retval; 282 283 cpi = (struct ccb_pathinq *)arg; 284 285 /* Don't attach if it doesn't support target mode */ 286 if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { 287 #ifdef CTLFEDEBUG 288 printf("%s: SIM %s%d doesn't support target mode\n", 289 __func__, cpi->dev_name, cpi->unit_number); 290 #endif 291 break; 292 } 293 294 if (softc != NULL) { 295 #ifdef CTLFEDEBUG 296 printf("%s: CTL port for CAM path %u already exists\n", 297 __func__, xpt_path_path_id(path)); 298 #endif 299 break; 300 } 301 302 #ifdef CTLFE_INIT_ENABLE 303 if (ctlfe_num_targets >= ctlfe_max_targets) { 304 union ccb *ccb; 305 306 ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, 307 M_NOWAIT | M_ZERO); 308 if (ccb == NULL) { 309 printf("%s: unable to malloc CCB!\n", __func__); 310 return; 311 } 312 xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); 313 314 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 315 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 316 ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR; 317 318 xpt_action(ccb); 319 320 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != 321 CAM_REQ_CMP) { 322 printf("%s: SIM %s%d (path id %d) initiator " 323 "enable failed with status %#x\n", 324 __func__, cpi->dev_name, 325 cpi->unit_number, cpi->ccb_h.path_id, 326 ccb->ccb_h.status); 327 } else { 328 printf("%s: SIM %s%d (path id %d) initiator " 329 "enable succeeded\n", 330 __func__, cpi->dev_name, 331 cpi->unit_number, cpi->ccb_h.path_id); 332 } 333 334 free(ccb, M_TEMP); 335 336 break; 337 } else { 338 ctlfe_num_targets++; 339 } 340 341 printf("%s: ctlfe_num_targets = %d\n", __func__, 342 ctlfe_num_targets); 343 #endif /* CTLFE_INIT_ENABLE */ 344 345 /* 346 * We're in an interrupt context here, so we have to 347 * use M_NOWAIT. Of course this means trouble if we 348 * can't allocate memory. 349 */ 350 softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO); 351 if (softc == NULL) { 352 printf("%s: unable to malloc %zd bytes for softc\n", 353 __func__, sizeof(*softc)); 354 return; 355 } 356 357 softc->path_id = cpi->ccb_h.path_id; 358 softc->target_id = cpi->initiator_id; 359 softc->sim = xpt_path_sim(path); 360 softc->hba_misc = cpi->hba_misc; 361 if (cpi->maxio != 0) 362 softc->maxio = cpi->maxio; 363 else 364 softc->maxio = DFLTPHYS; 365 mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF); 366 STAILQ_INIT(&softc->lun_softc_list); 367 368 port = &softc->port; 369 port->frontend = &ctlfe_frontend; 370 371 /* 372 * XXX KDM should we be more accurate here ? 373 */ 374 if (cpi->transport == XPORT_FC) 375 port->port_type = CTL_PORT_FC; 376 else if (cpi->transport == XPORT_SAS) 377 port->port_type = CTL_PORT_SAS; 378 else 379 port->port_type = CTL_PORT_SCSI; 380 381 /* XXX KDM what should the real number be here? */ 382 port->num_requested_ctl_io = 4096; 383 snprintf(softc->port_name, sizeof(softc->port_name), 384 "%s%d", cpi->dev_name, cpi->unit_number); 385 /* 386 * XXX KDM it would be nice to allocate storage in the 387 * frontend structure itself. 388 */ 389 port->port_name = softc->port_name; 390 port->physical_port = cpi->bus_id; 391 port->virtual_port = 0; 392 port->port_online = ctlfe_online; 393 port->port_offline = ctlfe_offline; 394 port->onoff_arg = softc; 395 port->lun_enable = ctlfe_lun_enable; 396 port->lun_disable = ctlfe_lun_disable; 397 port->targ_lun_arg = softc; 398 port->fe_datamove = ctlfe_datamove; 399 port->fe_done = ctlfe_done; 400 /* 401 * XXX KDM the path inquiry doesn't give us the maximum 402 * number of targets supported. 403 */ 404 port->max_targets = cpi->max_target; 405 port->max_target_id = cpi->max_target; 406 port->targ_port = -1; 407 408 /* 409 * XXX KDM need to figure out whether we're the master or 410 * slave. 411 */ 412 #ifdef CTLFEDEBUG 413 printf("%s: calling ctl_port_register() for %s%d\n", 414 __func__, cpi->dev_name, cpi->unit_number); 415 #endif 416 retval = ctl_port_register(port); 417 if (retval != 0) { 418 printf("%s: ctl_port_register() failed with " 419 "error %d!\n", __func__, retval); 420 mtx_destroy(&softc->lun_softc_mtx); 421 free(softc, M_CTLFE); 422 break; 423 } else { 424 mtx_lock(&ctlfe_list_mtx); 425 STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links); 426 mtx_unlock(&ctlfe_list_mtx); 427 } 428 429 break; 430 } 431 case AC_PATH_DEREGISTERED: { 432 433 if (softc != NULL) { 434 /* 435 * XXX KDM are we certain at this point that there 436 * are no outstanding commands for this frontend? 437 */ 438 mtx_lock(&ctlfe_list_mtx); 439 STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc, 440 links); 441 mtx_unlock(&ctlfe_list_mtx); 442 ctl_port_deregister(&softc->port); 443 mtx_destroy(&softc->lun_softc_mtx); 444 free(softc, M_CTLFE); 445 } 446 break; 447 } 448 case AC_CONTRACT: { 449 struct ac_contract *ac; 450 451 ac = (struct ac_contract *)arg; 452 453 switch (ac->contract_number) { 454 case AC_CONTRACT_DEV_CHG: { 455 struct ac_device_changed *dev_chg; 456 int retval; 457 458 dev_chg = (struct ac_device_changed *)ac->contract_data; 459 460 printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", 461 __func__, dev_chg->wwpn, dev_chg->port, 462 xpt_path_path_id(path), dev_chg->target, 463 (dev_chg->arrived == 0) ? "left" : "arrived"); 464 465 if (softc == NULL) { 466 printf("%s: CTL port for CAM path %u not " 467 "found!\n", __func__, 468 xpt_path_path_id(path)); 469 break; 470 } 471 if (dev_chg->arrived != 0) { 472 retval = ctl_add_initiator(&softc->port, 473 dev_chg->target, dev_chg->wwpn, NULL); 474 } else { 475 retval = ctl_remove_initiator(&softc->port, 476 dev_chg->target); 477 } 478 479 if (retval < 0) { 480 printf("%s: could not %s port %d iid %u " 481 "WWPN %#jx!\n", __func__, 482 (dev_chg->arrived != 0) ? "add" : 483 "remove", softc->port.targ_port, 484 dev_chg->target, 485 (uintmax_t)dev_chg->wwpn); 486 } 487 break; 488 } 489 default: 490 printf("%s: unsupported contract number %ju\n", 491 __func__, (uintmax_t)ac->contract_number); 492 break; 493 } 494 break; 495 } 496 default: 497 break; 498 } 499 } 500 501 static cam_status 502 ctlferegister(struct cam_periph *periph, void *arg) 503 { 504 struct ctlfe_softc *bus_softc; 505 struct ctlfe_lun_softc *softc; 506 union ccb en_lun_ccb; 507 cam_status status; 508 int i; 509 510 softc = (struct ctlfe_lun_softc *)arg; 511 bus_softc = softc->parent_softc; 512 513 TAILQ_INIT(&softc->work_queue); 514 softc->periph = periph; 515 periph->softc = softc; 516 517 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 518 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 519 en_lun_ccb.cel.grp6_len = 0; 520 en_lun_ccb.cel.grp7_len = 0; 521 en_lun_ccb.cel.enable = 1; 522 xpt_action(&en_lun_ccb); 523 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 524 if (status != CAM_REQ_CMP) { 525 xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", 526 __func__, en_lun_ccb.ccb_h.status); 527 return (status); 528 } 529 530 status = CAM_REQ_CMP; 531 532 for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { 533 union ccb *new_ccb; 534 union ctl_io *new_io; 535 struct ctlfe_cmd_info *cmd_info; 536 537 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 538 M_ZERO|M_NOWAIT); 539 if (new_ccb == NULL) { 540 status = CAM_RESRC_UNAVAIL; 541 break; 542 } 543 new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); 544 if (new_io == NULL) { 545 free(new_ccb, M_CTLFE); 546 status = CAM_RESRC_UNAVAIL; 547 break; 548 } 549 cmd_info = malloc(sizeof(*cmd_info), M_CTLFE, 550 M_ZERO | M_NOWAIT); 551 if (cmd_info == NULL) { 552 ctl_free_io(new_io); 553 free(new_ccb, M_CTLFE); 554 status = CAM_RESRC_UNAVAIL; 555 break; 556 } 557 new_io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr = cmd_info; 558 softc->atios_alloced++; 559 new_ccb->ccb_h.io_ptr = new_io; 560 561 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 562 new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; 563 new_ccb->ccb_h.cbfcnp = ctlfedone; 564 new_ccb->ccb_h.flags |= CAM_UNLOCKED; 565 xpt_action(new_ccb); 566 status = new_ccb->ccb_h.status; 567 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 568 free(cmd_info, M_CTLFE); 569 ctl_free_io(new_io); 570 free(new_ccb, M_CTLFE); 571 break; 572 } 573 } 574 575 status = cam_periph_acquire(periph); 576 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 577 xpt_print(periph->path, "%s: could not acquire reference " 578 "count, status = %#x\n", __func__, status); 579 return (status); 580 } 581 582 if (i == 0) { 583 xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " 584 "status 0x%x\n", __func__, status); 585 return (CAM_REQ_CMP_ERR); 586 } 587 588 for (i = 0; i < CTLFE_IN_PER_LUN; i++) { 589 union ccb *new_ccb; 590 union ctl_io *new_io; 591 592 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 593 M_ZERO|M_NOWAIT); 594 if (new_ccb == NULL) { 595 status = CAM_RESRC_UNAVAIL; 596 break; 597 } 598 new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); 599 if (new_io == NULL) { 600 free(new_ccb, M_CTLFE); 601 status = CAM_RESRC_UNAVAIL; 602 break; 603 } 604 softc->inots_alloced++; 605 new_ccb->ccb_h.io_ptr = new_io; 606 607 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 608 new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 609 new_ccb->ccb_h.cbfcnp = ctlfedone; 610 new_ccb->ccb_h.flags |= CAM_UNLOCKED; 611 xpt_action(new_ccb); 612 status = new_ccb->ccb_h.status; 613 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 614 /* 615 * Note that we don't free the CCB here. If the 616 * status is not CAM_REQ_INPROG, then we're 617 * probably talking to a SIM that says it is 618 * target-capable but doesn't support the 619 * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the 620 * older API. In that case, it'll call xpt_done() 621 * on the CCB, and we need to free it in our done 622 * routine as a result. 623 */ 624 break; 625 } 626 } 627 if ((i == 0) 628 || (status != CAM_REQ_INPROG)) { 629 xpt_print(periph->path, "%s: could not allocate immediate " 630 "notify CCBs, status 0x%x\n", __func__, status); 631 return (CAM_REQ_CMP_ERR); 632 } 633 mtx_lock(&bus_softc->lun_softc_mtx); 634 STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); 635 mtx_unlock(&bus_softc->lun_softc_mtx); 636 return (CAM_REQ_CMP); 637 } 638 639 static void 640 ctlfeoninvalidate(struct cam_periph *periph) 641 { 642 union ccb en_lun_ccb; 643 cam_status status; 644 struct ctlfe_softc *bus_softc; 645 struct ctlfe_lun_softc *softc; 646 647 softc = (struct ctlfe_lun_softc *)periph->softc; 648 649 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 650 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 651 en_lun_ccb.cel.grp6_len = 0; 652 en_lun_ccb.cel.grp7_len = 0; 653 en_lun_ccb.cel.enable = 0; 654 xpt_action(&en_lun_ccb); 655 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 656 if (status != CAM_REQ_CMP) { 657 xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", 658 __func__, en_lun_ccb.ccb_h.status); 659 /* 660 * XXX KDM what do we do now? 661 */ 662 } 663 664 bus_softc = softc->parent_softc; 665 mtx_lock(&bus_softc->lun_softc_mtx); 666 STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); 667 mtx_unlock(&bus_softc->lun_softc_mtx); 668 } 669 670 static void 671 ctlfecleanup(struct cam_periph *periph) 672 { 673 struct ctlfe_lun_softc *softc; 674 675 softc = (struct ctlfe_lun_softc *)periph->softc; 676 677 KASSERT(softc->ccbs_freed == softc->ccbs_alloced, ("%s: " 678 "ccbs_freed %ju != ccbs_alloced %ju", __func__, 679 softc->ccbs_freed, softc->ccbs_alloced)); 680 KASSERT(softc->ctios_returned == softc->ctios_sent, ("%s: " 681 "ctios_returned %ju != ctios_sent %ju", __func__, 682 softc->ctios_returned, softc->ctios_sent)); 683 KASSERT(softc->atios_freed == softc->atios_alloced, ("%s: " 684 "atios_freed %ju != atios_alloced %ju", __func__, 685 softc->atios_freed, softc->atios_alloced)); 686 KASSERT(softc->inots_freed == softc->inots_alloced, ("%s: " 687 "inots_freed %ju != inots_alloced %ju", __func__, 688 softc->inots_freed, softc->inots_alloced)); 689 690 free(softc, M_CTLFE); 691 } 692 693 static void 694 ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io, 695 ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len, 696 u_int16_t *sglist_cnt) 697 { 698 struct ctlfe_softc *bus_softc; 699 struct ctlfe_cmd_info *cmd_info; 700 struct ctl_sg_entry *ctl_sglist; 701 bus_dma_segment_t *cam_sglist; 702 size_t off; 703 int i, idx; 704 705 cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr; 706 bus_softc = softc->parent_softc; 707 708 /* 709 * Set the direction, relative to the initiator. 710 */ 711 *flags &= ~CAM_DIR_MASK; 712 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 713 *flags |= CAM_DIR_IN; 714 else 715 *flags |= CAM_DIR_OUT; 716 717 *flags &= ~CAM_DATA_MASK; 718 idx = cmd_info->cur_transfer_index; 719 off = cmd_info->cur_transfer_off; 720 cmd_info->flags &= ~CTLFE_CMD_PIECEWISE; 721 if (io->scsiio.kern_sg_entries == 0) { 722 /* No S/G list. */ 723 *data_ptr = io->scsiio.kern_data_ptr + off; 724 if (io->scsiio.kern_data_len - off <= bus_softc->maxio) { 725 *dxfer_len = io->scsiio.kern_data_len - off; 726 } else { 727 *dxfer_len = bus_softc->maxio; 728 cmd_info->cur_transfer_index = -1; 729 cmd_info->cur_transfer_off = bus_softc->maxio; 730 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 731 } 732 *sglist_cnt = 0; 733 734 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 735 *flags |= CAM_DATA_PADDR; 736 else 737 *flags |= CAM_DATA_VADDR; 738 } else { 739 /* S/G list with physical or virtual pointers. */ 740 ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 741 cam_sglist = cmd_info->cam_sglist; 742 *dxfer_len = 0; 743 for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) { 744 cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off; 745 if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) { 746 cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off; 747 *dxfer_len += cam_sglist[i].ds_len; 748 } else { 749 cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len; 750 cmd_info->cur_transfer_index = idx + i; 751 cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off; 752 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 753 *dxfer_len += cam_sglist[i].ds_len; 754 if (ctl_sglist[i].len != 0) 755 i++; 756 break; 757 } 758 if (i == (CTLFE_MAX_SEGS - 1) && 759 idx + i < (io->scsiio.kern_sg_entries - 1)) { 760 cmd_info->cur_transfer_index = idx + i + 1; 761 cmd_info->cur_transfer_off = 0; 762 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 763 i++; 764 break; 765 } 766 off = 0; 767 } 768 *sglist_cnt = i; 769 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 770 *flags |= CAM_DATA_SG_PADDR; 771 else 772 *flags |= CAM_DATA_SG; 773 *data_ptr = (uint8_t *)cam_sglist; 774 } 775 } 776 777 static void 778 ctlfestart(struct cam_periph *periph, union ccb *start_ccb) 779 { 780 struct ctlfe_lun_softc *softc; 781 struct ctlfe_cmd_info *cmd_info; 782 struct ccb_hdr *ccb_h; 783 struct ccb_accept_tio *atio; 784 struct ccb_scsiio *csio; 785 uint8_t *data_ptr; 786 uint32_t dxfer_len; 787 ccb_flags flags; 788 union ctl_io *io; 789 uint8_t scsi_status; 790 791 softc = (struct ctlfe_lun_softc *)periph->softc; 792 softc->ccbs_alloced++; 793 794 ccb_h = TAILQ_FIRST(&softc->work_queue); 795 if (ccb_h == NULL) { 796 softc->ccbs_freed++; 797 xpt_release_ccb(start_ccb); 798 return; 799 } 800 801 /* Take the ATIO off the work queue */ 802 TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe); 803 atio = (struct ccb_accept_tio *)ccb_h; 804 io = (union ctl_io *)ccb_h->io_ptr; 805 csio = &start_ccb->csio; 806 807 flags = atio->ccb_h.flags & 808 (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); 809 cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr; 810 cmd_info->cur_transfer_index = 0; 811 cmd_info->cur_transfer_off = 0; 812 cmd_info->flags = 0; 813 814 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { 815 /* 816 * Datamove call, we need to setup the S/G list. 817 */ 818 scsi_status = 0; 819 csio->cdb_len = atio->cdb_len; 820 ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, 821 &csio->sglist_cnt); 822 io->scsiio.ext_data_filled += dxfer_len; 823 if (io->scsiio.ext_data_filled > io->scsiio.kern_total_len) { 824 xpt_print(periph->path, "%s: tag 0x%04x " 825 "fill len %u > total %u\n", 826 __func__, io->scsiio.tag_num, 827 io->scsiio.ext_data_filled, 828 io->scsiio.kern_total_len); 829 } 830 } else { 831 /* 832 * We're done, send status back. 833 */ 834 if ((io->io_hdr.flags & CTL_FLAG_ABORT) && 835 (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) { 836 io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; 837 838 /* 839 * If this command was aborted, we don't 840 * need to send status back to the SIM. 841 * Just free the CTIO and ctl_io, and 842 * recycle the ATIO back to the SIM. 843 */ 844 xpt_print(periph->path, "%s: aborted " 845 "command 0x%04x discarded\n", 846 __func__, io->scsiio.tag_num); 847 /* 848 * For a wildcard attachment, commands can 849 * come in with a specific target/lun. Reset 850 * the target and LUN fields back to the 851 * wildcard values before we send them back 852 * down to the SIM. The SIM has a wildcard 853 * LUN enabled, not whatever target/lun 854 * these happened to be. 855 */ 856 if (softc->flags & CTLFE_LUN_WILDCARD) { 857 atio->ccb_h.target_id = CAM_TARGET_WILDCARD; 858 atio->ccb_h.target_lun = CAM_LUN_WILDCARD; 859 } 860 861 if (atio->ccb_h.func_code != XPT_ACCEPT_TARGET_IO) { 862 xpt_print(periph->path, "%s: func_code " 863 "is %#x\n", __func__, 864 atio->ccb_h.func_code); 865 } 866 start_ccb->ccb_h.func_code = XPT_ABORT; 867 start_ccb->cab.abort_ccb = (union ccb *)atio; 868 869 /* Tell the SIM that we've aborted this ATIO */ 870 xpt_action(start_ccb); 871 softc->ccbs_freed++; 872 xpt_release_ccb(start_ccb); 873 874 /* 875 * Send the ATIO back down to the SIM. 876 */ 877 xpt_action((union ccb *)atio); 878 879 /* 880 * If we still have work to do, ask for 881 * another CCB. Otherwise, deactivate our 882 * callout. 883 */ 884 if (!TAILQ_EMPTY(&softc->work_queue)) 885 xpt_schedule(periph, /*priority*/ 1); 886 return; 887 } 888 data_ptr = NULL; 889 dxfer_len = 0; 890 csio->sglist_cnt = 0; 891 scsi_status = 0; 892 } 893 if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) && 894 (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 && 895 ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 || 896 io->io_hdr.status == CTL_SUCCESS)) { 897 flags |= CAM_SEND_STATUS; 898 scsi_status = io->scsiio.scsi_status; 899 csio->sense_len = io->scsiio.sense_len; 900 #ifdef CTLFEDEBUG 901 printf("%s: tag %04x status %x\n", __func__, 902 atio->tag_id, io->io_hdr.status); 903 #endif 904 if (csio->sense_len != 0) { 905 csio->sense_data = io->scsiio.sense_data; 906 flags |= CAM_SEND_SENSE; 907 } else if (scsi_status == SCSI_STATUS_CHECK_COND) { 908 xpt_print(periph->path, "%s: check condition " 909 "with no sense\n", __func__); 910 } 911 } 912 913 #ifdef CTLFEDEBUG 914 printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, 915 (flags & CAM_SEND_STATUS) ? "done" : "datamove", 916 atio->tag_id, flags, data_ptr, dxfer_len); 917 #endif 918 919 /* 920 * Valid combinations: 921 * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, 922 * sglist_cnt = 0 923 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, 924 * sglist_cnt = 0 925 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, 926 * sglist_cnt != 0 927 */ 928 #ifdef CTLFEDEBUG 929 if (((flags & CAM_SEND_STATUS) 930 && (((flags & CAM_DATA_SG) != 0) 931 || (dxfer_len != 0) 932 || (csio->sglist_cnt != 0))) 933 || (((flags & CAM_SEND_STATUS) == 0) 934 && (dxfer_len == 0)) 935 || ((flags & CAM_DATA_SG) 936 && (csio->sglist_cnt == 0)) 937 || (((flags & CAM_DATA_SG) == 0) 938 && (csio->sglist_cnt != 0))) { 939 printf("%s: tag %04x cdb %02x flags %#x dxfer_len " 940 "%d sg %u\n", __func__, atio->tag_id, 941 atio->cdb_io.cdb_bytes[0], flags, dxfer_len, 942 csio->sglist_cnt); 943 printf("%s: tag %04x io status %#x\n", __func__, 944 atio->tag_id, io->io_hdr.status); 945 } 946 #endif 947 cam_fill_ctio(csio, 948 /*retries*/ 2, 949 ctlfedone, 950 flags, 951 (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, 952 atio->tag_id, 953 atio->init_id, 954 scsi_status, 955 /*data_ptr*/ data_ptr, 956 /*dxfer_len*/ dxfer_len, 957 /*timeout*/ 5 * 1000); 958 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 959 start_ccb->ccb_h.ccb_atio = atio; 960 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 961 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 962 io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED); 963 964 softc->ctios_sent++; 965 966 cam_periph_unlock(periph); 967 xpt_action(start_ccb); 968 cam_periph_lock(periph); 969 970 /* 971 * If we still have work to do, ask for another CCB. 972 */ 973 if (!TAILQ_EMPTY(&softc->work_queue)) 974 xpt_schedule(periph, /*priority*/ 1); 975 } 976 977 static void 978 ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) 979 { 980 struct ctlfe_lun_softc *softc; 981 union ctl_io *io; 982 struct ctlfe_cmd_info *cmd_info; 983 984 softc = (struct ctlfe_lun_softc *)periph->softc; 985 io = ccb->ccb_h.io_ptr; 986 987 switch (ccb->ccb_h.func_code) { 988 case XPT_ACCEPT_TARGET_IO: 989 softc->atios_freed++; 990 cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr; 991 free(cmd_info, M_CTLFE); 992 break; 993 case XPT_IMMEDIATE_NOTIFY: 994 case XPT_NOTIFY_ACKNOWLEDGE: 995 softc->inots_freed++; 996 break; 997 default: 998 break; 999 } 1000 1001 ctl_free_io(io); 1002 free(ccb, M_CTLFE); 1003 1004 KASSERT(softc->atios_freed <= softc->atios_alloced, ("%s: " 1005 "atios_freed %ju > atios_alloced %ju", __func__, 1006 softc->atios_freed, softc->atios_alloced)); 1007 KASSERT(softc->inots_freed <= softc->inots_alloced, ("%s: " 1008 "inots_freed %ju > inots_alloced %ju", __func__, 1009 softc->inots_freed, softc->inots_alloced)); 1010 1011 /* 1012 * If we have received all of our CCBs, we can release our 1013 * reference on the peripheral driver. It will probably go away 1014 * now. 1015 */ 1016 if ((softc->atios_freed == softc->atios_alloced) 1017 && (softc->inots_freed == softc->inots_alloced)) { 1018 cam_periph_release_locked(periph); 1019 } 1020 } 1021 1022 static int 1023 ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) 1024 { 1025 uint64_t lba; 1026 uint32_t num_blocks, nbc; 1027 uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)? 1028 atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes; 1029 1030 nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ 1031 1032 switch (cmdbyt[0]) { 1033 case READ_6: 1034 case WRITE_6: 1035 { 1036 struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; 1037 lba = scsi_3btoul(cdb->addr); 1038 lba &= 0x1fffff; 1039 num_blocks = cdb->length; 1040 if (num_blocks == 0) 1041 num_blocks = 256; 1042 lba += nbc; 1043 num_blocks -= nbc; 1044 scsi_ulto3b(lba, cdb->addr); 1045 cdb->length = num_blocks; 1046 break; 1047 } 1048 case READ_10: 1049 case WRITE_10: 1050 { 1051 struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; 1052 lba = scsi_4btoul(cdb->addr); 1053 num_blocks = scsi_2btoul(cdb->length); 1054 lba += nbc; 1055 num_blocks -= nbc; 1056 scsi_ulto4b(lba, cdb->addr); 1057 scsi_ulto2b(num_blocks, cdb->length); 1058 break; 1059 } 1060 case READ_12: 1061 case WRITE_12: 1062 { 1063 struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; 1064 lba = scsi_4btoul(cdb->addr); 1065 num_blocks = scsi_4btoul(cdb->length); 1066 lba += nbc; 1067 num_blocks -= nbc; 1068 scsi_ulto4b(lba, cdb->addr); 1069 scsi_ulto4b(num_blocks, cdb->length); 1070 break; 1071 } 1072 case READ_16: 1073 case WRITE_16: 1074 { 1075 struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; 1076 lba = scsi_8btou64(cdb->addr); 1077 num_blocks = scsi_4btoul(cdb->length); 1078 lba += nbc; 1079 num_blocks -= nbc; 1080 scsi_u64to8b(lba, cdb->addr); 1081 scsi_ulto4b(num_blocks, cdb->length); 1082 break; 1083 } 1084 default: 1085 return -1; 1086 } 1087 return (0); 1088 } 1089 1090 static void 1091 ctlfedone(struct cam_periph *periph, union ccb *done_ccb) 1092 { 1093 struct ctlfe_lun_softc *softc; 1094 struct ctlfe_softc *bus_softc; 1095 struct ctlfe_cmd_info *cmd_info; 1096 struct ccb_accept_tio *atio = NULL; 1097 union ctl_io *io = NULL; 1098 struct mtx *mtx; 1099 1100 KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0, 1101 ("CCB in ctlfedone() without CAM_UNLOCKED flag")); 1102 #ifdef CTLFE_DEBUG 1103 printf("%s: entered, func_code = %#x\n", __func__, 1104 done_ccb->ccb_h.func_code); 1105 #endif 1106 1107 /* 1108 * At this point CTL has no known use case for device queue freezes. 1109 * In case some SIM think different -- drop its freeze right here. 1110 */ 1111 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1112 cam_release_devq(periph->path, 1113 /*relsim_flags*/0, 1114 /*reduction*/0, 1115 /*timeout*/0, 1116 /*getcount_only*/0); 1117 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1118 } 1119 1120 softc = (struct ctlfe_lun_softc *)periph->softc; 1121 bus_softc = softc->parent_softc; 1122 mtx = cam_periph_mtx(periph); 1123 mtx_lock(mtx); 1124 1125 /* 1126 * If the peripheral is invalid, ATIOs and immediate notify CCBs 1127 * need to be freed. Most of the ATIOs and INOTs that come back 1128 * will be CCBs that are being returned from the SIM as a result of 1129 * our disabling the LUN. 1130 * 1131 * Other CCB types are handled in their respective cases below. 1132 */ 1133 if (periph->flags & CAM_PERIPH_INVALID) { 1134 switch (done_ccb->ccb_h.func_code) { 1135 case XPT_ACCEPT_TARGET_IO: 1136 case XPT_IMMEDIATE_NOTIFY: 1137 case XPT_NOTIFY_ACKNOWLEDGE: 1138 ctlfe_free_ccb(periph, done_ccb); 1139 goto out; 1140 default: 1141 break; 1142 } 1143 1144 } 1145 switch (done_ccb->ccb_h.func_code) { 1146 case XPT_ACCEPT_TARGET_IO: { 1147 1148 atio = &done_ccb->atio; 1149 1150 resubmit: 1151 /* 1152 * Allocate a ctl_io, pass it to CTL, and wait for the 1153 * datamove or done. 1154 */ 1155 mtx_unlock(mtx); 1156 io = done_ccb->ccb_h.io_ptr; 1157 cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr; 1158 ctl_zero_io(io); 1159 1160 /* Save pointers on both sides */ 1161 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb; 1162 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr = cmd_info; 1163 done_ccb->ccb_h.io_ptr = io; 1164 1165 /* 1166 * Only SCSI I/O comes down this path, resets, etc. come 1167 * down the immediate notify path below. 1168 */ 1169 io->io_hdr.io_type = CTL_IO_SCSI; 1170 io->io_hdr.nexus.initid = atio->init_id; 1171 io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; 1172 if (bus_softc->hba_misc & PIM_EXTLUNS) { 1173 io->io_hdr.nexus.targ_lun = ctl_decode_lun( 1174 CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun)); 1175 } else { 1176 io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; 1177 } 1178 io->scsiio.tag_num = atio->tag_id; 1179 switch (atio->tag_action) { 1180 case CAM_TAG_ACTION_NONE: 1181 io->scsiio.tag_type = CTL_TAG_UNTAGGED; 1182 break; 1183 case MSG_SIMPLE_TASK: 1184 io->scsiio.tag_type = CTL_TAG_SIMPLE; 1185 break; 1186 case MSG_HEAD_OF_QUEUE_TASK: 1187 io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; 1188 break; 1189 case MSG_ORDERED_TASK: 1190 io->scsiio.tag_type = CTL_TAG_ORDERED; 1191 break; 1192 case MSG_ACA_TASK: 1193 io->scsiio.tag_type = CTL_TAG_ACA; 1194 break; 1195 default: 1196 io->scsiio.tag_type = CTL_TAG_UNTAGGED; 1197 printf("%s: unhandled tag type %#x!!\n", __func__, 1198 atio->tag_action); 1199 break; 1200 } 1201 if (atio->cdb_len > sizeof(io->scsiio.cdb)) { 1202 printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", 1203 __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); 1204 } 1205 io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); 1206 bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb, 1207 io->scsiio.cdb_len); 1208 1209 #ifdef CTLFEDEBUG 1210 printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__, 1211 io->io_hdr.nexus.initid, 1212 io->io_hdr.nexus.targ_port, 1213 io->io_hdr.nexus.targ_lun, 1214 io->scsiio.tag_num, io->scsiio.cdb[0]); 1215 #endif 1216 1217 ctl_queue(io); 1218 return; 1219 } 1220 case XPT_CONT_TARGET_IO: { 1221 int srr = 0; 1222 uint32_t srr_off = 0; 1223 1224 atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; 1225 io = (union ctl_io *)atio->ccb_h.io_ptr; 1226 1227 softc->ctios_returned++; 1228 #ifdef CTLFEDEBUG 1229 printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", 1230 __func__, atio->tag_id, done_ccb->ccb_h.flags); 1231 #endif 1232 /* 1233 * Handle SRR case were the data pointer is pushed back hack 1234 */ 1235 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV 1236 && done_ccb->csio.msg_ptr != NULL 1237 && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED 1238 && done_ccb->csio.msg_ptr[1] == 5 1239 && done_ccb->csio.msg_ptr[2] == 0) { 1240 srr = 1; 1241 srr_off = 1242 (done_ccb->csio.msg_ptr[3] << 24) 1243 | (done_ccb->csio.msg_ptr[4] << 16) 1244 | (done_ccb->csio.msg_ptr[5] << 8) 1245 | (done_ccb->csio.msg_ptr[6]); 1246 } 1247 1248 if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { 1249 /* 1250 * If status was being sent, the back end data is now 1251 * history. Hack it up and resubmit a new command with 1252 * the CDB adjusted. If the SIM does the right thing, 1253 * all of the resid math should work. 1254 */ 1255 softc->ccbs_freed++; 1256 xpt_release_ccb(done_ccb); 1257 if (ctlfe_adjust_cdb(atio, srr_off) == 0) { 1258 done_ccb = (union ccb *)atio; 1259 goto resubmit; 1260 } 1261 /* 1262 * Fall through to doom.... 1263 */ 1264 } else if (srr) { 1265 /* 1266 * If we have an srr and we're still sending data, we 1267 * should be able to adjust offsets and cycle again. 1268 */ 1269 io->scsiio.kern_rel_offset = 1270 io->scsiio.ext_data_filled = srr_off; 1271 io->scsiio.ext_data_len = io->scsiio.kern_total_len - 1272 io->scsiio.kern_rel_offset; 1273 softc->ccbs_freed++; 1274 io->scsiio.io_hdr.status = CTL_STATUS_NONE; 1275 xpt_release_ccb(done_ccb); 1276 TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, 1277 periph_links.tqe); 1278 xpt_schedule(periph, /*priority*/ 1); 1279 break; 1280 } 1281 1282 if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) && 1283 (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) 1284 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; 1285 1286 /* 1287 * If we were sending status back to the initiator, free up 1288 * resources. If we were doing a datamove, call the 1289 * datamove done routine. 1290 */ 1291 if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { 1292 softc->ccbs_freed++; 1293 xpt_release_ccb(done_ccb); 1294 /* 1295 * For a wildcard attachment, commands can come in 1296 * with a specific target/lun. Reset the target 1297 * and LUN fields back to the wildcard values before 1298 * we send them back down to the SIM. The SIM has 1299 * a wildcard LUN enabled, not whatever target/lun 1300 * these happened to be. 1301 */ 1302 if (softc->flags & CTLFE_LUN_WILDCARD) { 1303 atio->ccb_h.target_id = CAM_TARGET_WILDCARD; 1304 atio->ccb_h.target_lun = CAM_LUN_WILDCARD; 1305 } 1306 if (periph->flags & CAM_PERIPH_INVALID) { 1307 ctlfe_free_ccb(periph, (union ccb *)atio); 1308 } else { 1309 mtx_unlock(mtx); 1310 xpt_action((union ccb *)atio); 1311 return; 1312 } 1313 } else { 1314 struct ctlfe_cmd_info *cmd_info; 1315 struct ccb_scsiio *csio; 1316 1317 csio = &done_ccb->csio; 1318 cmd_info = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr; 1319 1320 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; 1321 1322 io->scsiio.ext_data_len += csio->dxfer_len; 1323 if (io->scsiio.ext_data_len > 1324 io->scsiio.kern_total_len) { 1325 xpt_print(periph->path, "%s: tag 0x%04x " 1326 "done len %u > total %u sent %u\n", 1327 __func__, io->scsiio.tag_num, 1328 io->scsiio.ext_data_len, 1329 io->scsiio.kern_total_len, 1330 io->scsiio.ext_data_filled); 1331 } 1332 /* 1333 * Translate CAM status to CTL status. Success 1334 * does not change the overall, ctl_io status. In 1335 * that case we just set port_status to 0. If we 1336 * have a failure, though, set a data phase error 1337 * for the overall ctl_io. 1338 */ 1339 switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { 1340 case CAM_REQ_CMP: 1341 io->io_hdr.port_status = 0; 1342 break; 1343 default: 1344 /* 1345 * XXX KDM we probably need to figure out a 1346 * standard set of errors that the SIM 1347 * drivers should return in the event of a 1348 * data transfer failure. A data phase 1349 * error will at least point the user to a 1350 * data transfer error of some sort. 1351 * Hopefully the SIM printed out some 1352 * additional information to give the user 1353 * a clue what happened. 1354 */ 1355 io->io_hdr.port_status = 0xbad1; 1356 ctl_set_data_phase_error(&io->scsiio); 1357 /* 1358 * XXX KDM figure out residual. 1359 */ 1360 break; 1361 } 1362 /* 1363 * If we had to break this S/G list into multiple 1364 * pieces, figure out where we are in the list, and 1365 * continue sending pieces if necessary. 1366 */ 1367 if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) 1368 && (io->io_hdr.port_status == 0)) { 1369 ccb_flags flags; 1370 uint8_t scsi_status; 1371 uint8_t *data_ptr; 1372 uint32_t dxfer_len; 1373 1374 flags = atio->ccb_h.flags & 1375 (CAM_DIS_DISCONNECT| 1376 CAM_TAG_ACTION_VALID); 1377 1378 ctlfedata(softc, io, &flags, &data_ptr, 1379 &dxfer_len, &csio->sglist_cnt); 1380 1381 scsi_status = 0; 1382 1383 if (((flags & CAM_SEND_STATUS) == 0) 1384 && (dxfer_len == 0)) { 1385 printf("%s: tag %04x no status or " 1386 "len cdb = %02x\n", __func__, 1387 atio->tag_id, 1388 atio->cdb_io.cdb_bytes[0]); 1389 printf("%s: tag %04x io status %#x\n", 1390 __func__, atio->tag_id, 1391 io->io_hdr.status); 1392 } 1393 1394 cam_fill_ctio(csio, 1395 /*retries*/ 2, 1396 ctlfedone, 1397 flags, 1398 (flags & CAM_TAG_ACTION_VALID) ? 1399 MSG_SIMPLE_Q_TAG : 0, 1400 atio->tag_id, 1401 atio->init_id, 1402 scsi_status, 1403 /*data_ptr*/ data_ptr, 1404 /*dxfer_len*/ dxfer_len, 1405 /*timeout*/ 5 * 1000); 1406 1407 csio->ccb_h.flags |= CAM_UNLOCKED; 1408 csio->resid = 0; 1409 csio->ccb_h.ccb_atio = atio; 1410 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 1411 softc->ctios_sent++; 1412 mtx_unlock(mtx); 1413 xpt_action((union ccb *)csio); 1414 } else { 1415 /* 1416 * Release the CTIO. The ATIO will be sent back 1417 * down to the SIM once we send status. 1418 */ 1419 softc->ccbs_freed++; 1420 xpt_release_ccb(done_ccb); 1421 mtx_unlock(mtx); 1422 1423 /* Call the backend move done callback */ 1424 io->scsiio.be_move_done(io); 1425 } 1426 return; 1427 } 1428 break; 1429 } 1430 case XPT_IMMEDIATE_NOTIFY: { 1431 union ctl_io *io; 1432 struct ccb_immediate_notify *inot; 1433 cam_status status; 1434 int send_ctl_io; 1435 1436 inot = &done_ccb->cin1; 1437 printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x " 1438 "seq %#x\n", __func__, inot->ccb_h.status, 1439 inot->tag_id, inot->seq_id); 1440 1441 io = done_ccb->ccb_h.io_ptr; 1442 ctl_zero_io(io); 1443 1444 send_ctl_io = 1; 1445 1446 io->io_hdr.io_type = CTL_IO_TASK; 1447 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb; 1448 inot->ccb_h.io_ptr = io; 1449 io->io_hdr.nexus.initid = inot->initiator_id; 1450 io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; 1451 if (bus_softc->hba_misc & PIM_EXTLUNS) { 1452 io->io_hdr.nexus.targ_lun = ctl_decode_lun( 1453 CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun)); 1454 } else { 1455 io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; 1456 } 1457 /* XXX KDM should this be the tag_id? */ 1458 io->taskio.tag_num = inot->seq_id; 1459 1460 status = inot->ccb_h.status & CAM_STATUS_MASK; 1461 switch (status) { 1462 case CAM_SCSI_BUS_RESET: 1463 io->taskio.task_action = CTL_TASK_BUS_RESET; 1464 break; 1465 case CAM_BDR_SENT: 1466 io->taskio.task_action = CTL_TASK_TARGET_RESET; 1467 break; 1468 case CAM_MESSAGE_RECV: 1469 switch (inot->arg) { 1470 case MSG_ABORT_TASK_SET: 1471 io->taskio.task_action = 1472 CTL_TASK_ABORT_TASK_SET; 1473 break; 1474 case MSG_TARGET_RESET: 1475 io->taskio.task_action = CTL_TASK_TARGET_RESET; 1476 break; 1477 case MSG_ABORT_TASK: 1478 io->taskio.task_action = CTL_TASK_ABORT_TASK; 1479 break; 1480 case MSG_LOGICAL_UNIT_RESET: 1481 io->taskio.task_action = CTL_TASK_LUN_RESET; 1482 break; 1483 case MSG_CLEAR_TASK_SET: 1484 io->taskio.task_action = 1485 CTL_TASK_CLEAR_TASK_SET; 1486 break; 1487 case MSG_CLEAR_ACA: 1488 io->taskio.task_action = CTL_TASK_CLEAR_ACA; 1489 break; 1490 case MSG_QUERY_TASK: 1491 io->taskio.task_action = CTL_TASK_QUERY_TASK; 1492 break; 1493 case MSG_QUERY_TASK_SET: 1494 io->taskio.task_action = 1495 CTL_TASK_QUERY_TASK_SET; 1496 break; 1497 case MSG_QUERY_ASYNC_EVENT: 1498 io->taskio.task_action = 1499 CTL_TASK_QUERY_ASYNC_EVENT; 1500 break; 1501 case MSG_NOOP: 1502 send_ctl_io = 0; 1503 break; 1504 default: 1505 xpt_print(periph->path, 1506 "%s: unsupported message 0x%x\n", 1507 __func__, inot->arg); 1508 send_ctl_io = 0; 1509 break; 1510 } 1511 break; 1512 case CAM_REQ_ABORTED: 1513 /* 1514 * This request was sent back by the driver. 1515 * XXX KDM what do we do here? 1516 */ 1517 send_ctl_io = 0; 1518 break; 1519 case CAM_REQ_INVALID: 1520 case CAM_PROVIDE_FAIL: 1521 default: 1522 /* 1523 * We should only get here if we're talking 1524 * to a talking to a SIM that is target 1525 * capable but supports the old API. In 1526 * that case, we need to just free the CCB. 1527 * If we actually send a notify acknowledge, 1528 * it will send that back with an error as 1529 * well. 1530 */ 1531 1532 if ((status != CAM_REQ_INVALID) 1533 && (status != CAM_PROVIDE_FAIL)) 1534 xpt_print(periph->path, 1535 "%s: unsupported CAM status 0x%x\n", 1536 __func__, status); 1537 1538 ctlfe_free_ccb(periph, done_ccb); 1539 1540 goto out; 1541 } 1542 if (send_ctl_io != 0) { 1543 ctl_queue(io); 1544 } else { 1545 done_ccb->ccb_h.status = CAM_REQ_INPROG; 1546 done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; 1547 xpt_action(done_ccb); 1548 } 1549 break; 1550 } 1551 case XPT_NOTIFY_ACKNOWLEDGE: 1552 /* 1553 * Queue this back down to the SIM as an immediate notify. 1554 */ 1555 done_ccb->ccb_h.status = CAM_REQ_INPROG; 1556 done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 1557 xpt_action(done_ccb); 1558 break; 1559 case XPT_SET_SIM_KNOB: 1560 case XPT_GET_SIM_KNOB: 1561 case XPT_GET_SIM_KNOB_OLD: 1562 break; 1563 default: 1564 panic("%s: unexpected CCB type %#x", __func__, 1565 done_ccb->ccb_h.func_code); 1566 break; 1567 } 1568 1569 out: 1570 mtx_unlock(mtx); 1571 } 1572 1573 static void 1574 ctlfe_onoffline(void *arg, int online) 1575 { 1576 struct ctlfe_softc *bus_softc; 1577 union ccb *ccb; 1578 cam_status status; 1579 struct cam_path *path; 1580 int set_wwnn; 1581 1582 bus_softc = (struct ctlfe_softc *)arg; 1583 1584 set_wwnn = 0; 1585 1586 status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, 1587 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 1588 if (status != CAM_REQ_CMP) { 1589 printf("%s: unable to create path!\n", __func__); 1590 return; 1591 } 1592 ccb = xpt_alloc_ccb(); 1593 xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); 1594 ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; 1595 xpt_action(ccb); 1596 1597 /* 1598 * Copan WWN format: 1599 * 1600 * Bits 63-60: 0x5 NAA, IEEE registered name 1601 * Bits 59-36: 0x000ED5 IEEE Company name assigned to Copan 1602 * Bits 35-12: Copan SSN (Sequential Serial Number) 1603 * Bits 11-8: Type of port: 1604 * 1 == N-Port 1605 * 2 == F-Port 1606 * 3 == NL-Port 1607 * Bits 7-0: 0 == Node Name, >0 == Port Number 1608 */ 1609 if (online != 0) { 1610 if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ 1611 #ifdef RANDOM_WWNN 1612 uint64_t random_bits; 1613 #endif 1614 1615 printf("%s: %s current WWNN %#jx\n", __func__, 1616 bus_softc->port_name, 1617 ccb->knob.xport_specific.fc.wwnn); 1618 printf("%s: %s current WWPN %#jx\n", __func__, 1619 bus_softc->port_name, 1620 ccb->knob.xport_specific.fc.wwpn); 1621 1622 #ifdef RANDOM_WWNN 1623 arc4rand(&random_bits, sizeof(random_bits), 0); 1624 #endif 1625 1626 /* 1627 * XXX KDM this is a bit of a kludge for now. We 1628 * take the current WWNN/WWPN from the card, and 1629 * replace the company identifier and the NL-Port 1630 * indicator and the port number (for the WWPN). 1631 * This should be replaced later with ddb_GetWWNN, 1632 * or possibly a more centralized scheme. (It 1633 * would be nice to have the WWNN/WWPN for each 1634 * port stored in the ctl_port structure.) 1635 */ 1636 #ifdef RANDOM_WWNN 1637 ccb->knob.xport_specific.fc.wwnn = 1638 (random_bits & 1639 0x0000000fffffff00ULL) | 1640 /* Company ID */ 0x5000ED5000000000ULL | 1641 /* NL-Port */ 0x0300; 1642 ccb->knob.xport_specific.fc.wwpn = 1643 (random_bits & 1644 0x0000000fffffff00ULL) | 1645 /* Company ID */ 0x5000ED5000000000ULL | 1646 /* NL-Port */ 0x3000 | 1647 /* Port Num */ (bus_softc->port.targ_port & 0xff); 1648 1649 /* 1650 * This is a bit of an API break/reversal, but if 1651 * we're doing the random WWNN that's a little 1652 * different anyway. So record what we're actually 1653 * using with the frontend code so it's reported 1654 * accurately. 1655 */ 1656 ctl_port_set_wwns(&bus_softc->port, 1657 true, ccb->knob.xport_specific.fc.wwnn, 1658 true, ccb->knob.xport_specific.fc.wwpn); 1659 set_wwnn = 1; 1660 #else /* RANDOM_WWNN */ 1661 /* 1662 * If the user has specified a WWNN/WWPN, send them 1663 * down to the SIM. Otherwise, record what the SIM 1664 * has reported. 1665 */ 1666 if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn 1667 != ccb->knob.xport_specific.fc.wwnn) { 1668 ccb->knob.xport_specific.fc.wwnn = 1669 bus_softc->port.wwnn; 1670 set_wwnn = 1; 1671 } else { 1672 ctl_port_set_wwns(&bus_softc->port, 1673 true, ccb->knob.xport_specific.fc.wwnn, 1674 false, 0); 1675 } 1676 if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn 1677 != ccb->knob.xport_specific.fc.wwpn) { 1678 ccb->knob.xport_specific.fc.wwpn = 1679 bus_softc->port.wwpn; 1680 set_wwnn = 1; 1681 } else { 1682 ctl_port_set_wwns(&bus_softc->port, 1683 false, 0, 1684 true, ccb->knob.xport_specific.fc.wwpn); 1685 } 1686 #endif /* RANDOM_WWNN */ 1687 1688 1689 if (set_wwnn != 0) { 1690 printf("%s: %s new WWNN %#jx\n", __func__, 1691 bus_softc->port_name, 1692 ccb->knob.xport_specific.fc.wwnn); 1693 printf("%s: %s new WWPN %#jx\n", __func__, 1694 bus_softc->port_name, 1695 ccb->knob.xport_specific.fc.wwpn); 1696 } 1697 } else { 1698 printf("%s: %s has no valid WWNN/WWPN\n", __func__, 1699 bus_softc->port_name); 1700 } 1701 } 1702 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 1703 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 1704 if (set_wwnn != 0) 1705 ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS; 1706 1707 if (online != 0) 1708 ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET; 1709 else 1710 ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET; 1711 1712 xpt_action(ccb); 1713 1714 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1715 printf("%s: SIM %s (path id %d) target %s failed with " 1716 "status %#x\n", 1717 __func__, bus_softc->port_name, bus_softc->path_id, 1718 (online != 0) ? "enable" : "disable", 1719 ccb->ccb_h.status); 1720 } else { 1721 printf("%s: SIM %s (path id %d) target %s succeeded\n", 1722 __func__, bus_softc->port_name, bus_softc->path_id, 1723 (online != 0) ? "enable" : "disable"); 1724 } 1725 1726 xpt_free_path(path); 1727 xpt_free_ccb(ccb); 1728 } 1729 1730 static void 1731 ctlfe_online(void *arg) 1732 { 1733 struct ctlfe_softc *bus_softc; 1734 struct cam_path *path; 1735 cam_status status; 1736 struct ctlfe_lun_softc *lun_softc; 1737 struct cam_periph *periph; 1738 1739 bus_softc = (struct ctlfe_softc *)arg; 1740 1741 /* 1742 * Create the wildcard LUN before bringing the port online. 1743 */ 1744 status = xpt_create_path(&path, /*periph*/ NULL, 1745 bus_softc->path_id, CAM_TARGET_WILDCARD, 1746 CAM_LUN_WILDCARD); 1747 if (status != CAM_REQ_CMP) { 1748 printf("%s: unable to create path for wildcard periph\n", 1749 __func__); 1750 return; 1751 } 1752 1753 lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO); 1754 1755 xpt_path_lock(path); 1756 periph = cam_periph_find(path, "ctl"); 1757 if (periph != NULL) { 1758 /* We've already got a periph, no need to alloc a new one. */ 1759 xpt_path_unlock(path); 1760 xpt_free_path(path); 1761 free(lun_softc, M_CTLFE); 1762 return; 1763 } 1764 lun_softc->parent_softc = bus_softc; 1765 lun_softc->flags |= CTLFE_LUN_WILDCARD; 1766 1767 status = cam_periph_alloc(ctlferegister, 1768 ctlfeoninvalidate, 1769 ctlfecleanup, 1770 ctlfestart, 1771 "ctl", 1772 CAM_PERIPH_BIO, 1773 path, 1774 ctlfeasync, 1775 0, 1776 lun_softc); 1777 1778 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1779 const struct cam_status_entry *entry; 1780 1781 entry = cam_fetch_status_entry(status); 1782 printf("%s: CAM error %s (%#x) returned from " 1783 "cam_periph_alloc()\n", __func__, (entry != NULL) ? 1784 entry->status_text : "Unknown", status); 1785 free(lun_softc, M_CTLFE); 1786 } 1787 1788 xpt_path_unlock(path); 1789 ctlfe_onoffline(arg, /*online*/ 1); 1790 xpt_free_path(path); 1791 } 1792 1793 static void 1794 ctlfe_offline(void *arg) 1795 { 1796 struct ctlfe_softc *bus_softc; 1797 struct cam_path *path; 1798 cam_status status; 1799 struct cam_periph *periph; 1800 1801 bus_softc = (struct ctlfe_softc *)arg; 1802 1803 ctlfe_onoffline(arg, /*online*/ 0); 1804 1805 /* 1806 * Disable the wildcard LUN for this port now that we have taken 1807 * the port offline. 1808 */ 1809 status = xpt_create_path(&path, /*periph*/ NULL, 1810 bus_softc->path_id, CAM_TARGET_WILDCARD, 1811 CAM_LUN_WILDCARD); 1812 if (status != CAM_REQ_CMP) { 1813 printf("%s: unable to create path for wildcard periph\n", 1814 __func__); 1815 return; 1816 } 1817 xpt_path_lock(path); 1818 if ((periph = cam_periph_find(path, "ctl")) != NULL) 1819 cam_periph_invalidate(periph); 1820 xpt_path_unlock(path); 1821 xpt_free_path(path); 1822 } 1823 1824 /* 1825 * This will get called to enable a LUN on every bus that is attached to 1826 * CTL. So we only need to create a path/periph for this particular bus. 1827 */ 1828 static int 1829 ctlfe_lun_enable(void *arg, int lun_id) 1830 { 1831 struct ctlfe_softc *bus_softc; 1832 struct ctlfe_lun_softc *softc; 1833 struct cam_path *path; 1834 struct cam_periph *periph; 1835 cam_status status; 1836 1837 bus_softc = (struct ctlfe_softc *)arg; 1838 if (bus_softc->hba_misc & PIM_EXTLUNS) 1839 lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); 1840 1841 status = xpt_create_path(&path, /*periph*/ NULL, 1842 bus_softc->path_id, bus_softc->target_id, lun_id); 1843 /* XXX KDM need some way to return status to CTL here? */ 1844 if (status != CAM_REQ_CMP) { 1845 printf("%s: could not create path, status %#x\n", __func__, 1846 status); 1847 return (1); 1848 } 1849 1850 softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); 1851 xpt_path_lock(path); 1852 periph = cam_periph_find(path, "ctl"); 1853 if (periph != NULL) { 1854 /* We've already got a periph, no need to alloc a new one. */ 1855 xpt_path_unlock(path); 1856 xpt_free_path(path); 1857 free(softc, M_CTLFE); 1858 return (0); 1859 } 1860 softc->parent_softc = bus_softc; 1861 1862 status = cam_periph_alloc(ctlferegister, 1863 ctlfeoninvalidate, 1864 ctlfecleanup, 1865 ctlfestart, 1866 "ctl", 1867 CAM_PERIPH_BIO, 1868 path, 1869 ctlfeasync, 1870 0, 1871 softc); 1872 1873 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1874 const struct cam_status_entry *entry; 1875 1876 entry = cam_fetch_status_entry(status); 1877 printf("%s: CAM error %s (%#x) returned from " 1878 "cam_periph_alloc()\n", __func__, (entry != NULL) ? 1879 entry->status_text : "Unknown", status); 1880 free(softc, M_CTLFE); 1881 } 1882 1883 xpt_path_unlock(path); 1884 xpt_free_path(path); 1885 return (0); 1886 } 1887 1888 /* 1889 * This will get called when the user removes a LUN to disable that LUN 1890 * on every bus that is attached to CTL. 1891 */ 1892 static int 1893 ctlfe_lun_disable(void *arg, int lun_id) 1894 { 1895 struct ctlfe_softc *softc; 1896 struct ctlfe_lun_softc *lun_softc; 1897 1898 softc = (struct ctlfe_softc *)arg; 1899 if (softc->hba_misc & PIM_EXTLUNS) 1900 lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); 1901 1902 mtx_lock(&softc->lun_softc_mtx); 1903 STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) { 1904 struct cam_path *path; 1905 1906 path = lun_softc->periph->path; 1907 1908 if ((xpt_path_target_id(path) == softc->target_id) 1909 && (xpt_path_lun_id(path) == lun_id)) { 1910 break; 1911 } 1912 } 1913 if (lun_softc == NULL) { 1914 mtx_unlock(&softc->lun_softc_mtx); 1915 printf("%s: can't find lun %d\n", __func__, lun_id); 1916 return (1); 1917 } 1918 cam_periph_acquire(lun_softc->periph); 1919 mtx_unlock(&softc->lun_softc_mtx); 1920 1921 cam_periph_lock(lun_softc->periph); 1922 cam_periph_invalidate(lun_softc->periph); 1923 cam_periph_unlock(lun_softc->periph); 1924 cam_periph_release(lun_softc->periph); 1925 return (0); 1926 } 1927 1928 static void 1929 ctlfe_dump_sim(struct cam_sim *sim) 1930 { 1931 1932 printf("%s%d: max tagged openings: %d, max dev openings: %d\n", 1933 sim->sim_name, sim->unit_number, 1934 sim->max_tagged_dev_openings, sim->max_dev_openings); 1935 } 1936 1937 /* 1938 * Assumes that the SIM lock is held. 1939 */ 1940 static void 1941 ctlfe_dump_queue(struct ctlfe_lun_softc *softc) 1942 { 1943 struct ccb_hdr *hdr; 1944 struct cam_periph *periph; 1945 int num_items; 1946 1947 periph = softc->periph; 1948 num_items = 0; 1949 1950 TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) { 1951 union ctl_io *io = hdr->io_ptr; 1952 1953 num_items++; 1954 1955 /* 1956 * Only regular SCSI I/O is put on the work 1957 * queue, so we can print sense here. There may be no 1958 * sense if it's no the queue for a DMA, but this serves to 1959 * print out the CCB as well. 1960 * 1961 * XXX KDM switch this over to scsi_sense_print() when 1962 * CTL is merged in with CAM. 1963 */ 1964 ctl_io_error_print(io, NULL); 1965 1966 /* 1967 * Print DMA status if we are DMA_QUEUED. 1968 */ 1969 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { 1970 xpt_print(periph->path, 1971 "Total %u, Current %u, Resid %u\n", 1972 io->scsiio.kern_total_len, 1973 io->scsiio.kern_data_len, 1974 io->scsiio.kern_data_resid); 1975 } 1976 } 1977 1978 xpt_print(periph->path, "%d requests total waiting for CCBs\n", 1979 num_items); 1980 xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju " 1981 "freed)\n", (uintmax_t)(softc->ccbs_alloced - 1982 softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced, 1983 (uintmax_t)softc->ccbs_freed); 1984 xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju " 1985 "returned\n", (uintmax_t)(softc->ctios_sent - 1986 softc->ctios_returned), softc->ctios_sent, 1987 softc->ctios_returned); 1988 } 1989 1990 /* 1991 * Datamove/done routine called by CTL. Put ourselves on the queue to 1992 * receive a CCB from CAM so we can queue the continue I/O request down 1993 * to the adapter. 1994 */ 1995 static void 1996 ctlfe_datamove(union ctl_io *io) 1997 { 1998 union ccb *ccb; 1999 struct cam_periph *periph; 2000 struct ctlfe_lun_softc *softc; 2001 2002 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 2003 ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type)); 2004 2005 ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 2006 periph = xpt_path_periph(ccb->ccb_h.path); 2007 cam_periph_lock(periph); 2008 softc = (struct ctlfe_lun_softc *)periph->softc; 2009 io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; 2010 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) 2011 io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; 2012 TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, 2013 periph_links.tqe); 2014 xpt_schedule(periph, /*priority*/ 1); 2015 cam_periph_unlock(periph); 2016 } 2017 2018 static void 2019 ctlfe_done(union ctl_io *io) 2020 { 2021 union ccb *ccb; 2022 struct cam_periph *periph; 2023 struct ctlfe_lun_softc *softc; 2024 2025 ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 2026 periph = xpt_path_periph(ccb->ccb_h.path); 2027 cam_periph_lock(periph); 2028 softc = (struct ctlfe_lun_softc *)periph->softc; 2029 2030 if (io->io_hdr.io_type == CTL_IO_TASK) { 2031 /* 2032 * Task management commands don't require any further 2033 * communication back to the adapter. Requeue the CCB 2034 * to the adapter, and free the CTL I/O. 2035 */ 2036 xpt_print(ccb->ccb_h.path, "%s: returning task I/O " 2037 "tag %#x seq %#x\n", __func__, 2038 ccb->cin1.tag_id, ccb->cin1.seq_id); 2039 /* 2040 * Send the notify acknowledge down to the SIM, to let it 2041 * know we processed the task management command. 2042 */ 2043 ccb->ccb_h.status = CAM_REQ_INPROG; 2044 ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; 2045 switch (io->taskio.task_status) { 2046 case CTL_TASK_FUNCTION_COMPLETE: 2047 ccb->cna2.arg = CAM_RSP_TMF_COMPLETE; 2048 break; 2049 case CTL_TASK_FUNCTION_SUCCEEDED: 2050 ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED; 2051 ccb->ccb_h.flags |= CAM_SEND_STATUS; 2052 break; 2053 case CTL_TASK_FUNCTION_REJECTED: 2054 ccb->cna2.arg = CAM_RSP_TMF_REJECTED; 2055 ccb->ccb_h.flags |= CAM_SEND_STATUS; 2056 break; 2057 case CTL_TASK_LUN_DOES_NOT_EXIST: 2058 ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN; 2059 ccb->ccb_h.flags |= CAM_SEND_STATUS; 2060 break; 2061 case CTL_TASK_FUNCTION_NOT_SUPPORTED: 2062 ccb->cna2.arg = CAM_RSP_TMF_FAILED; 2063 ccb->ccb_h.flags |= CAM_SEND_STATUS; 2064 break; 2065 } 2066 ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8; 2067 xpt_action(ccb); 2068 } else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) { 2069 if (softc->flags & CTLFE_LUN_WILDCARD) { 2070 ccb->ccb_h.target_id = CAM_TARGET_WILDCARD; 2071 ccb->ccb_h.target_lun = CAM_LUN_WILDCARD; 2072 } 2073 if (periph->flags & CAM_PERIPH_INVALID) { 2074 ctlfe_free_ccb(periph, ccb); 2075 } else { 2076 cam_periph_unlock(periph); 2077 xpt_action(ccb); 2078 return; 2079 } 2080 } else { 2081 io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; 2082 TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, 2083 periph_links.tqe); 2084 xpt_schedule(periph, /*priority*/ 1); 2085 } 2086 2087 cam_periph_unlock(periph); 2088 } 2089 2090 static void 2091 ctlfe_dump(void) 2092 { 2093 struct ctlfe_softc *bus_softc; 2094 struct ctlfe_lun_softc *lun_softc; 2095 2096 STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { 2097 ctlfe_dump_sim(bus_softc->sim); 2098 STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) 2099 ctlfe_dump_queue(lun_softc); 2100 } 2101 } 2102