/*-
 * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
 */
/*
 * Peripheral driver interface between CAM and CTL (CAM Target Layer).
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <machine/bus.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_error.h>

struct ctlfe_softc {
	struct ctl_port port;
	path_id_t path_id;
	target_id_t target_id;
	uint32_t hba_misc;
	u_int maxio;
	struct cam_sim *sim;
	char port_name[DEV_IDLEN];
	struct mtx lun_softc_mtx;
	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
	STAILQ_ENTRY(ctlfe_softc) links;
};

STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
struct mtx ctlfe_list_mtx;
static char ctlfe_mtx_desc[] = "ctlfelist";
#ifdef CTLFE_INIT_ENABLE
static int ctlfe_max_targets = 1;
static int ctlfe_num_targets = 0;
#endif

typedef enum {
	CTLFE_LUN_NONE = 0x00,
	CTLFE_LUN_WILDCARD = 0x01
} ctlfe_lun_flags;

struct ctlfe_lun_softc {
	struct ctlfe_softc *parent_softc;
	struct cam_periph *periph;
	ctlfe_lun_flags flags;
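	/*
	 * Allocation/free accounting; ctlfecleanup() asserts that these
	 * counters balance before the softc is destroyed.
	 */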
	uint64_t ccbs_alloced;
	uint64_t ccbs_freed;
	uint64_t ctios_sent;
	uint64_t ctios_returned;
	uint64_t atios_alloced;
	uint64_t atios_freed;
	uint64_t inots_alloced;
	uint64_t inots_freed;
	/* bus_dma_tag_t dma_tag; */
	TAILQ_HEAD(, ccb_hdr) work_queue;
	STAILQ_ENTRY(ctlfe_lun_softc) links;
};

typedef enum {
	CTLFE_CMD_NONE = 0x00,
	CTLFE_CMD_PIECEWISE = 0x01
} ctlfe_cmd_flags;

struct ctlfe_cmd_info {
	int cur_transfer_index;
	size_t cur_transfer_off;
	ctlfe_cmd_flags flags;
	/*
	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
	 * i386 and 512 bytes on amd64.
	 */
#define CTLFE_MAX_SEGS 32
	bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS];
};

/*
 * When we register the adapter/bus, request that this many ctl_ios be
 * allocated.  This should be the maximum supported by the adapter, but we
 * currently don't have a way to get that back from the path inquiry.
 * XXX KDM add that to the path inquiry.
 */
#define CTLFE_REQ_CTL_IO	4096
/*
 * Number of Accept Target I/O CCBs to allocate and queue down to the
 * adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define CTLFE_ATIO_PER_LUN	1024
/*
 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
 * allocate and queue down to the adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define CTLFE_IN_PER_LUN	1024

/*
 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending
 * status to the initiator.  The SIM is expected to have its own timeouts,
 * so we're not putting this timeout around the CCB execution time.  The
 * SIM should timeout and let us know if it has an issue.
 */
#define CTLFE_DMA_TIMEOUT	60

/*
 * Turn this on to enable extra debugging prints.
 */
#if 0
#define CTLFE_DEBUG
#endif

/*
 * Use randomly assigned WWNN/WWPN values.  This is to work around an issue
 * in the FreeBSD initiator that makes it unable to rescan the target if
 * the target gets rebooted and the WWNN/WWPN stay the same.
 */
#if 0
#define RANDOM_WWNN
#endif

MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");

#define io_ptr		ppriv_ptr0

/* This is only used in the CTIO */
#define ccb_atio	ppriv_ptr1

#define PRIV_CCB(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0])
#define PRIV_INFO(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1])

int ctlfeinitialize(void);
void ctlfeshutdown(void);
static periph_init_t ctlfeperiphinit;
static void ctlfeasync(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
static periph_ctor_t ctlferegister;
static periph_oninv_t ctlfeoninvalidate;
static periph_dtor_t ctlfecleanup;
static periph_start_t ctlfestart;
static void ctlfedone(struct cam_periph *periph,
    union ccb *done_ccb);

static void ctlfe_onoffline(void *arg, int online);
static void ctlfe_online(void *arg);
static void ctlfe_offline(void *arg);
static int ctlfe_lun_enable(void *arg, int lun_id);
static int ctlfe_lun_disable(void *arg, int lun_id);
static void ctlfe_dump_sim(struct cam_sim *sim);
static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
static void ctlfe_datamove(union ctl_io *io);
static void ctlfe_done(union ctl_io *io);
static void ctlfe_dump(void);

static struct periph_driver ctlfe_driver =
{
	ctlfeperiphinit, "ctl",
	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0,
	CAM_PERIPH_DRV_EARLY
};

static struct ctl_frontend ctlfe_frontend =
{
	.name = "camtgt",
	.init = ctlfeinitialize,
	.fe_dump = ctlfe_dump,
	.shutdown = ctlfeshutdown,
};
CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);

void
ctlfeshutdown(void)
{
	return;
}

int
ctlfeinitialize(void)
{

	STAILQ_INIT(&ctlfe_softc_list);
	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
	periphdriver_register(&ctlfe_driver);
	return (0);
}

void
ctlfeperiphinit(void)
{
	cam_status status;

	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
	    AC_CONTRACT, ctlfeasync, NULL, NULL);
	if (status != CAM_REQ_CMP) {
		printf("ctl: Failed to attach async callback due to CAM "
		    "status 0x%x!\n", status);
	}
}

static void
ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ctlfe_softc *softc;

#ifdef CTLFEDEBUG
	printf("%s: entered\n", __func__);
#endif

	mtx_lock(&ctlfe_list_mtx);
	STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
		if (softc->path_id == xpt_path_path_id(path))
			break;
	}
	mtx_unlock(&ctlfe_list_mtx);

	/*
	 * When a new path gets registered, and it is capable of target
	 * mode, go ahead and attach.  Later on, we may need to be more
	 * selective, but for now this will be sufficient.
	 */
	switch (code) {
	case AC_PATH_REGISTERED: {
		struct ctl_port *port;
		struct ccb_pathinq *cpi;
		int retval;

		cpi = (struct ccb_pathinq *)arg;

		/* Don't attach if it doesn't support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
#ifdef CTLFEDEBUG
			printf("%s: SIM %s%d doesn't support target mode\n",
			    __func__, cpi->dev_name, cpi->unit_number);
#endif
			break;
		}

		if (softc != NULL) {
#ifdef CTLFEDEBUG
			printf("%s: CTL port for CAM path %u already exists\n",
			    __func__, xpt_path_path_id(path));
#endif
			break;
		}

#ifdef CTLFE_INIT_ENABLE
		if (ctlfe_num_targets >= ctlfe_max_targets) {
			union ccb *ccb;

			ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (ccb == NULL) {
				printf("%s: unable to malloc CCB!\n", __func__);
				return;
			}
			xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);

			ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
			ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
			ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;

			xpt_action(ccb);

			if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
			    CAM_REQ_CMP) {
				printf("%s: SIM %s%d (path id %d) initiator "
				    "enable failed with status %#x\n",
				    __func__, cpi->dev_name,
				    cpi->unit_number, cpi->ccb_h.path_id,
				    ccb->ccb_h.status);
			} else {
				printf("%s: SIM %s%d (path id %d) initiator "
				    "enable succeeded\n",
				    __func__, cpi->dev_name,
				    cpi->unit_number, cpi->ccb_h.path_id);
			}

			free(ccb, M_TEMP);

			break;
		} else {
			ctlfe_num_targets++;
		}

		printf("%s: ctlfe_num_targets = %d\n", __func__,
		    ctlfe_num_targets);
#endif /* CTLFE_INIT_ENABLE */

		/*
		 * We're in an interrupt context here, so we have to
		 * use M_NOWAIT.  Of course this means trouble if we
		 * can't allocate memory.
		 */
		softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO);
		if (softc == NULL) {
			printf("%s: unable to malloc %zd bytes for softc\n",
			    __func__, sizeof(*softc));
			return;
		}

		softc->path_id = cpi->ccb_h.path_id;
		softc->target_id = cpi->initiator_id;
		softc->sim = xpt_path_sim(path);
		softc->hba_misc = cpi->hba_misc;
		if (cpi->maxio != 0)
			softc->maxio = cpi->maxio;
		else
			softc->maxio = DFLTPHYS;
		mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF);
		STAILQ_INIT(&softc->lun_softc_list);

		port = &softc->port;
		port->frontend = &ctlfe_frontend;

		/*
		 * XXX KDM should we be more accurate here ?
		 */
		if (cpi->transport == XPORT_FC)
			port->port_type = CTL_PORT_FC;
		else if (cpi->transport == XPORT_SAS)
			port->port_type = CTL_PORT_SAS;
		else
			port->port_type = CTL_PORT_SCSI;

		/* XXX KDM what should the real number be here? */
		port->num_requested_ctl_io = 4096;
		snprintf(softc->port_name, sizeof(softc->port_name),
		    "%s%d", cpi->dev_name, cpi->unit_number);
		/*
		 * XXX KDM it would be nice to allocate storage in the
		 * frontend structure itself.
		 */
		port->port_name = softc->port_name;
		port->physical_port = cpi->bus_id;
		port->virtual_port = 0;
		port->port_online = ctlfe_online;
		port->port_offline = ctlfe_offline;
		port->onoff_arg = softc;
		port->lun_enable = ctlfe_lun_enable;
		port->lun_disable = ctlfe_lun_disable;
		port->targ_lun_arg = softc;
		port->fe_datamove = ctlfe_datamove;
		port->fe_done = ctlfe_done;
		/*
		 * XXX KDM the path inquiry doesn't give us the maximum
		 * number of targets supported.
		 */
		port->max_targets = cpi->max_target;
		port->max_target_id = cpi->max_target;
		port->targ_port = -1;

		/*
		 * XXX KDM need to figure out whether we're the master or
		 * slave.
		 */
#ifdef CTLFEDEBUG
		printf("%s: calling ctl_port_register() for %s%d\n",
		    __func__, cpi->dev_name, cpi->unit_number);
#endif
		retval = ctl_port_register(port);
		if (retval != 0) {
			printf("%s: ctl_port_register() failed with "
			    "error %d!\n", __func__, retval);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
			break;
		} else {
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links);
			mtx_unlock(&ctlfe_list_mtx);
		}

		break;
	}
	case AC_PATH_DEREGISTERED: {

		if (softc != NULL) {
			/*
			 * XXX KDM are we certain at this point that there
			 * are no outstanding commands for this frontend?
			 */
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc,
			    links);
			mtx_unlock(&ctlfe_list_mtx);
			ctl_port_deregister(&softc->port);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
		}
		break;
	}
	case AC_CONTRACT: {
		struct ac_contract *ac;

		ac = (struct ac_contract *)arg;

		switch (ac->contract_number) {
		case AC_CONTRACT_DEV_CHG: {
			struct ac_device_changed *dev_chg;
			int retval;

			dev_chg = (struct ac_device_changed *)ac->contract_data;

			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
			    __func__, dev_chg->wwpn, dev_chg->port,
			    xpt_path_path_id(path), dev_chg->target,
			    (dev_chg->arrived == 0) ? "left" : "arrived");

			if (softc == NULL) {
				printf("%s: CTL port for CAM path %u not "
				    "found!\n", __func__,
				    xpt_path_path_id(path));
				break;
			}
			if (dev_chg->arrived != 0) {
				retval = ctl_add_initiator(&softc->port,
				    dev_chg->target, dev_chg->wwpn, NULL);
			} else {
				retval = ctl_remove_initiator(&softc->port,
				    dev_chg->target);
			}

			if (retval < 0) {
				printf("%s: could not %s port %d iid %u "
				    "WWPN %#jx!\n", __func__,
				    (dev_chg->arrived != 0) ? "add" :
"add" : 486 "remove", softc->port.targ_port, 487 dev_chg->target, 488 (uintmax_t)dev_chg->wwpn); 489 } 490 break; 491 } 492 default: 493 printf("%s: unsupported contract number %ju\n", 494 __func__, (uintmax_t)ac->contract_number); 495 break; 496 } 497 break; 498 } 499 default: 500 break; 501 } 502 } 503 504 static cam_status 505 ctlferegister(struct cam_periph *periph, void *arg) 506 { 507 struct ctlfe_softc *bus_softc; 508 struct ctlfe_lun_softc *softc; 509 union ccb en_lun_ccb; 510 cam_status status; 511 int i; 512 513 softc = (struct ctlfe_lun_softc *)arg; 514 bus_softc = softc->parent_softc; 515 516 TAILQ_INIT(&softc->work_queue); 517 softc->periph = periph; 518 periph->softc = softc; 519 520 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); 521 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 522 en_lun_ccb.cel.grp6_len = 0; 523 en_lun_ccb.cel.grp7_len = 0; 524 en_lun_ccb.cel.enable = 1; 525 xpt_action(&en_lun_ccb); 526 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 527 if (status != CAM_REQ_CMP) { 528 xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", 529 __func__, en_lun_ccb.ccb_h.status); 530 return (status); 531 } 532 533 status = CAM_REQ_CMP; 534 535 for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { 536 union ccb *new_ccb; 537 union ctl_io *new_io; 538 struct ctlfe_cmd_info *cmd_info; 539 540 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 541 M_ZERO|M_NOWAIT); 542 if (new_ccb == NULL) { 543 status = CAM_RESRC_UNAVAIL; 544 break; 545 } 546 new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); 547 if (new_io == NULL) { 548 free(new_ccb, M_CTLFE); 549 status = CAM_RESRC_UNAVAIL; 550 break; 551 } 552 cmd_info = malloc(sizeof(*cmd_info), M_CTLFE, 553 M_ZERO | M_NOWAIT); 554 if (cmd_info == NULL) { 555 ctl_free_io(new_io); 556 free(new_ccb, M_CTLFE); 557 status = CAM_RESRC_UNAVAIL; 558 break; 559 } 560 PRIV_INFO(new_io) = cmd_info; 561 softc->atios_alloced++; 562 new_ccb->ccb_h.io_ptr = new_io; 563 564 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 565 new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; 566 new_ccb->ccb_h.cbfcnp = ctlfedone; 567 new_ccb->ccb_h.flags |= CAM_UNLOCKED; 568 xpt_action(new_ccb); 569 status = new_ccb->ccb_h.status; 570 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 571 free(cmd_info, M_CTLFE); 572 ctl_free_io(new_io); 573 free(new_ccb, M_CTLFE); 574 break; 575 } 576 } 577 578 status = cam_periph_acquire(periph); 579 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 580 xpt_print(periph->path, "%s: could not acquire reference " 581 "count, status = %#x\n", __func__, status); 582 return (status); 583 } 584 585 if (i == 0) { 586 xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " 587 "status 0x%x\n", __func__, status); 588 return (CAM_REQ_CMP_ERR); 589 } 590 591 for (i = 0; i < CTLFE_IN_PER_LUN; i++) { 592 union ccb *new_ccb; 593 union ctl_io *new_io; 594 595 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 596 M_ZERO|M_NOWAIT); 597 if (new_ccb == NULL) { 598 status = CAM_RESRC_UNAVAIL; 599 break; 600 } 601 new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); 602 if (new_io == NULL) { 603 free(new_ccb, M_CTLFE); 604 status = CAM_RESRC_UNAVAIL; 605 break; 606 } 607 softc->inots_alloced++; 608 new_ccb->ccb_h.io_ptr = new_io; 609 610 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 611 new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 612 new_ccb->ccb_h.cbfcnp = ctlfedone; 613 new_ccb->ccb_h.flags |= CAM_UNLOCKED; 614 xpt_action(new_ccb); 615 status = new_ccb->ccb_h.status; 
		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			/*
			 * Note that we don't free the CCB here.  If the
			 * status is not CAM_REQ_INPROG, then we're
			 * probably talking to a SIM that says it is
			 * target-capable but doesn't support the
			 * XPT_IMMEDIATE_NOTIFY CCB.  i.e. it supports the
			 * older API.  In that case, it'll call xpt_done()
			 * on the CCB, and we need to free it in our done
			 * routine as a result.
			 */
			break;
		}
	}
	if ((i == 0)
	 || (status != CAM_REQ_INPROG)) {
		xpt_print(periph->path, "%s: could not allocate immediate "
		    "notify CCBs, status 0x%x\n", __func__, status);
		return (CAM_REQ_CMP_ERR);
	}
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);
	return (CAM_REQ_CMP);
}

static void
ctlfeoninvalidate(struct cam_periph *periph)
{
	union ccb en_lun_ccb;
	cam_status status;
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
	en_lun_ccb.cel.grp6_len = 0;
	en_lun_ccb.cel.grp7_len = 0;
	en_lun_ccb.cel.enable = 0;
	xpt_action(&en_lun_ccb);
	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
		    __func__, en_lun_ccb.ccb_h.status);
		/*
		 * XXX KDM what do we do now?
		 */
	}

	bus_softc = softc->parent_softc;
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);
}

static void
ctlfecleanup(struct cam_periph *periph)
{
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	KASSERT(softc->ccbs_freed == softc->ccbs_alloced, ("%s: "
	    "ccbs_freed %ju != ccbs_alloced %ju", __func__,
	    softc->ccbs_freed, softc->ccbs_alloced));
	KASSERT(softc->ctios_returned == softc->ctios_sent, ("%s: "
	    "ctios_returned %ju != ctios_sent %ju", __func__,
	    softc->ctios_returned, softc->ctios_sent));
	KASSERT(softc->atios_freed == softc->atios_alloced, ("%s: "
	    "atios_freed %ju != atios_alloced %ju", __func__,
	    softc->atios_freed, softc->atios_alloced));
	KASSERT(softc->inots_freed == softc->inots_alloced, ("%s: "
	    "inots_freed %ju != inots_alloced %ju", __func__,
	    softc->inots_freed, softc->inots_alloced));

	free(softc, M_CTLFE);
}

static void
ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
    ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len,
    u_int16_t *sglist_cnt)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ctl_sg_entry *ctl_sglist;
	bus_dma_segment_t *cam_sglist;
	size_t off;
	int i, idx;

	cmd_info = PRIV_INFO(io);
	bus_softc = softc->parent_softc;

	/*
	 * Set the direction, relative to the initiator.
	 */
	*flags &= ~CAM_DIR_MASK;
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		*flags |= CAM_DIR_IN;
	else
		*flags |= CAM_DIR_OUT;

	*flags &= ~CAM_DATA_MASK;
	idx = cmd_info->cur_transfer_index;
	off = cmd_info->cur_transfer_off;
	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
	if (io->scsiio.kern_sg_entries == 0) {
		/* No S/G list. */
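		/*
		 * Single contiguous buffer: hand the SIM as much as maxio
		 * allows and remember our offset if the transfer has to be
		 * split into pieces.
		 */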
		*data_ptr = io->scsiio.kern_data_ptr + off;
		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
			*dxfer_len = io->scsiio.kern_data_len - off;
		} else {
			*dxfer_len = bus_softc->maxio;
			cmd_info->cur_transfer_index = -1;
			cmd_info->cur_transfer_off = bus_softc->maxio;
			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
		}
		*sglist_cnt = 0;

		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_PADDR;
		else
			*flags |= CAM_DATA_VADDR;
	} else {
		/* S/G list with physical or virtual pointers. */
		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		cam_sglist = cmd_info->cam_sglist;
		*dxfer_len = 0;
		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
			cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off;
			if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) {
				cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off;
				*dxfer_len += cam_sglist[i].ds_len;
			} else {
				cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len;
				cmd_info->cur_transfer_index = idx + i;
				cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				*dxfer_len += cam_sglist[i].ds_len;
				if (ctl_sglist[i].len != 0)
					i++;
				break;
			}
			if (i == (CTLFE_MAX_SEGS - 1) &&
			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
				cmd_info->cur_transfer_index = idx + i + 1;
				cmd_info->cur_transfer_off = 0;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				i++;
				break;
			}
			off = 0;
		}
		*sglist_cnt = i;
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_SG_PADDR;
		else
			*flags |= CAM_DATA_SG;
		*data_ptr = (uint8_t *)cam_sglist;
	}
}

static void
ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ccb_hdr *ccb_h;
	struct ccb_accept_tio *atio;
	struct ccb_scsiio *csio;
	uint8_t *data_ptr;
	uint32_t dxfer_len;
	ccb_flags flags;
	union ctl_io *io;
	uint8_t scsi_status;

	softc = (struct ctlfe_lun_softc *)periph->softc;
	softc->ccbs_alloced++;

	ccb_h = TAILQ_FIRST(&softc->work_queue);
	if (ccb_h == NULL) {
		softc->ccbs_freed++;
		xpt_release_ccb(start_ccb);
		return;
	}

	/* Take the ATIO off the work queue */
	TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
	atio = (struct ccb_accept_tio *)ccb_h;
	io = (union ctl_io *)ccb_h->io_ptr;
	csio = &start_ccb->csio;

	flags = atio->ccb_h.flags &
	    (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
	cmd_info = PRIV_INFO(io);
	cmd_info->cur_transfer_index = 0;
	cmd_info->cur_transfer_off = 0;
	cmd_info->flags = 0;

	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
		/*
		 * Datamove call, we need to setup the S/G list.
		 */
		scsi_status = 0;
		csio->cdb_len = atio->cdb_len;
		ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
		    &csio->sglist_cnt);
		io->scsiio.ext_data_filled += dxfer_len;
		if (io->scsiio.ext_data_filled > io->scsiio.kern_total_len) {
			xpt_print(periph->path, "%s: tag 0x%04x "
			    "fill len %u > total %u\n",
			    __func__, io->scsiio.tag_num,
			    io->scsiio.ext_data_filled,
			    io->scsiio.kern_total_len);
		}
	} else {
		/*
		 * We're done, send status back.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
		    (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
			io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;

			/*
			 * If this command was aborted, we don't
			 * need to send status back to the SIM.
			 * Just free the CTIO and ctl_io, and
			 * recycle the ATIO back to the SIM.
			 */
			xpt_print(periph->path, "%s: aborted "
			    "command 0x%04x discarded\n",
			    __func__, io->scsiio.tag_num);
			/*
			 * For a wildcard attachment, commands can
			 * come in with a specific target/lun.  Reset
			 * the target and LUN fields back to the
			 * wildcard values before we send them back
			 * down to the SIM.  The SIM has a wildcard
			 * LUN enabled, not whatever target/lun
			 * these happened to be.
			 */
			if (softc->flags & CTLFE_LUN_WILDCARD) {
				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
			}

			if (atio->ccb_h.func_code != XPT_ACCEPT_TARGET_IO) {
				xpt_print(periph->path, "%s: func_code "
				    "is %#x\n", __func__,
				    atio->ccb_h.func_code);
			}
			start_ccb->ccb_h.func_code = XPT_ABORT;
			start_ccb->cab.abort_ccb = (union ccb *)atio;

			/* Tell the SIM that we've aborted this ATIO */
			xpt_action(start_ccb);
			softc->ccbs_freed++;
			xpt_release_ccb(start_ccb);

			/*
			 * Send the ATIO back down to the SIM.
			 */
			xpt_action((union ccb *)atio);

			/*
			 * If we still have work to do, ask for
			 * another CCB.  Otherwise, deactivate our
			 * callout.
			 */
			if (!TAILQ_EMPTY(&softc->work_queue))
				xpt_schedule(periph, /*priority*/ 1);
			return;
		}
		data_ptr = NULL;
		dxfer_len = 0;
		csio->sglist_cnt = 0;
		scsi_status = 0;
	}
	if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
	    (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
	    ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
	    io->io_hdr.status == CTL_SUCCESS)) {
		flags |= CAM_SEND_STATUS;
		scsi_status = io->scsiio.scsi_status;
		csio->sense_len = io->scsiio.sense_len;
#ifdef CTLFEDEBUG
		printf("%s: tag %04x status %x\n", __func__,
		    atio->tag_id, io->io_hdr.status);
#endif
		if (csio->sense_len != 0) {
			csio->sense_data = io->scsiio.sense_data;
			flags |= CAM_SEND_SENSE;
		} else if (scsi_status == SCSI_STATUS_CHECK_COND) {
			xpt_print(periph->path, "%s: check condition "
			    "with no sense\n", __func__);
		}
	}

#ifdef CTLFEDEBUG
	printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
	    (flags & CAM_SEND_STATUS) ? "done" : "datamove",
"done" : "datamove", 919 atio->tag_id, flags, data_ptr, dxfer_len); 920 #endif 921 922 /* 923 * Valid combinations: 924 * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, 925 * sglist_cnt = 0 926 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, 927 * sglist_cnt = 0 928 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, 929 * sglist_cnt != 0 930 */ 931 #ifdef CTLFEDEBUG 932 if (((flags & CAM_SEND_STATUS) 933 && (((flags & CAM_DATA_SG) != 0) 934 || (dxfer_len != 0) 935 || (csio->sglist_cnt != 0))) 936 || (((flags & CAM_SEND_STATUS) == 0) 937 && (dxfer_len == 0)) 938 || ((flags & CAM_DATA_SG) 939 && (csio->sglist_cnt == 0)) 940 || (((flags & CAM_DATA_SG) == 0) 941 && (csio->sglist_cnt != 0))) { 942 printf("%s: tag %04x cdb %02x flags %#x dxfer_len " 943 "%d sg %u\n", __func__, atio->tag_id, 944 atio->cdb_io.cdb_bytes[0], flags, dxfer_len, 945 csio->sglist_cnt); 946 printf("%s: tag %04x io status %#x\n", __func__, 947 atio->tag_id, io->io_hdr.status); 948 } 949 #endif 950 cam_fill_ctio(csio, 951 /*retries*/ 2, 952 ctlfedone, 953 flags, 954 (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, 955 atio->tag_id, 956 atio->init_id, 957 scsi_status, 958 /*data_ptr*/ data_ptr, 959 /*dxfer_len*/ dxfer_len, 960 /*timeout*/ 5 * 1000); 961 start_ccb->ccb_h.flags |= CAM_UNLOCKED; 962 start_ccb->ccb_h.ccb_atio = atio; 963 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 964 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 965 io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED); 966 967 softc->ctios_sent++; 968 969 cam_periph_unlock(periph); 970 xpt_action(start_ccb); 971 cam_periph_lock(periph); 972 973 /* 974 * If we still have work to do, ask for another CCB. 975 */ 976 if (!TAILQ_EMPTY(&softc->work_queue)) 977 xpt_schedule(periph, /*priority*/ 1); 978 } 979 980 static void 981 ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) 982 { 983 struct ctlfe_lun_softc *softc; 984 union ctl_io *io; 985 struct ctlfe_cmd_info *cmd_info; 986 987 softc = (struct ctlfe_lun_softc *)periph->softc; 988 io = ccb->ccb_h.io_ptr; 989 990 switch (ccb->ccb_h.func_code) { 991 case XPT_ACCEPT_TARGET_IO: 992 softc->atios_freed++; 993 cmd_info = PRIV_INFO(io); 994 free(cmd_info, M_CTLFE); 995 break; 996 case XPT_IMMEDIATE_NOTIFY: 997 case XPT_NOTIFY_ACKNOWLEDGE: 998 softc->inots_freed++; 999 break; 1000 default: 1001 break; 1002 } 1003 1004 ctl_free_io(io); 1005 free(ccb, M_CTLFE); 1006 1007 KASSERT(softc->atios_freed <= softc->atios_alloced, ("%s: " 1008 "atios_freed %ju > atios_alloced %ju", __func__, 1009 softc->atios_freed, softc->atios_alloced)); 1010 KASSERT(softc->inots_freed <= softc->inots_alloced, ("%s: " 1011 "inots_freed %ju > inots_alloced %ju", __func__, 1012 softc->inots_freed, softc->inots_alloced)); 1013 1014 /* 1015 * If we have received all of our CCBs, we can release our 1016 * reference on the peripheral driver. It will probably go away 1017 * now. 1018 */ 1019 if ((softc->atios_freed == softc->atios_alloced) 1020 && (softc->inots_freed == softc->inots_alloced)) { 1021 cam_periph_release_locked(periph); 1022 } 1023 } 1024 1025 static int 1026 ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) 1027 { 1028 uint64_t lba; 1029 uint32_t num_blocks, nbc; 1030 uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)? 
	    atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes;

	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */

	switch (cmdbyt[0]) {
	case READ_6:
	case WRITE_6:
	{
		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
		lba = scsi_3btoul(cdb->addr);
		lba &= 0x1fffff;
		num_blocks = cdb->length;
		if (num_blocks == 0)
			num_blocks = 256;
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto3b(lba, cdb->addr);
		cdb->length = num_blocks;
		break;
	}
	case READ_10:
	case WRITE_10:
	{
		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto2b(num_blocks, cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12:
	{
		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16:
	{
		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_u64to8b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	default:
		return -1;
	}
	return (0);
}

static void
ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_softc *bus_softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ccb_accept_tio *atio = NULL;
	union ctl_io *io = NULL;
	struct mtx *mtx;

	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
#ifdef CTLFE_DEBUG
	printf("%s: entered, func_code = %#x\n", __func__,
	    done_ccb->ccb_h.func_code);
#endif

	/*
	 * At this point CTL has no known use case for device queue freezes.
	 * In case some SIM thinks differently, drop its freeze right here.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(periph->path,
				 /*relsim_flags*/0,
				 /*reduction*/0,
				 /*timeout*/0,
				 /*getcount_only*/0);
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	softc = (struct ctlfe_lun_softc *)periph->softc;
	bus_softc = softc->parent_softc;
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	/*
	 * If the peripheral is invalid, ATIOs and immediate notify CCBs
	 * need to be freed.  Most of the ATIOs and INOTs that come back
	 * will be CCBs that are being returned from the SIM as a result of
	 * our disabling the LUN.
	 *
	 * Other CCB types are handled in their respective cases below.
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		switch (done_ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMEDIATE_NOTIFY:
		case XPT_NOTIFY_ACKNOWLEDGE:
			ctlfe_free_ccb(periph, done_ccb);
			goto out;
		default:
			break;
		}

	}
	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO: {

		atio = &done_ccb->atio;

resubmit:
		/*
		 * Allocate a ctl_io, pass it to CTL, and wait for the
		 * datamove or done.
		 */
		mtx_unlock(mtx);
		io = done_ccb->ccb_h.io_ptr;
		cmd_info = PRIV_INFO(io);
		ctl_zero_io(io);

		/* Save pointers on both sides */
		PRIV_CCB(io) = done_ccb;
		PRIV_INFO(io) = cmd_info;
		done_ccb->ccb_h.io_ptr = io;

		/*
		 * Only SCSI I/O comes down this path, resets, etc. come
		 * down the immediate notify path below.
		 */
		io->io_hdr.io_type = CTL_IO_SCSI;
		io->io_hdr.nexus.initid = atio->init_id;
		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
		if (bus_softc->hba_misc & PIM_EXTLUNS) {
			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
			    CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun));
		} else {
			io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
		}
		io->scsiio.tag_num = atio->tag_id;
		switch (atio->tag_action) {
		case CAM_TAG_ACTION_NONE:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			break;
		case MSG_SIMPLE_TASK:
			io->scsiio.tag_type = CTL_TAG_SIMPLE;
			break;
		case MSG_HEAD_OF_QUEUE_TASK:
			io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
			break;
		case MSG_ORDERED_TASK:
			io->scsiio.tag_type = CTL_TAG_ORDERED;
			break;
		case MSG_ACA_TASK:
			io->scsiio.tag_type = CTL_TAG_ACA;
			break;
		default:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			printf("%s: unhandled tag type %#x!!\n", __func__,
			    atio->tag_action);
			break;
		}
		if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
			    __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
		}
		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
		bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
		    io->scsiio.cdb_len);

#ifdef CTLFEDEBUG
		printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__,
		    io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun,
		    io->scsiio.tag_num, io->scsiio.cdb[0]);
#endif

		ctl_queue(io);
		return;
	}
	case XPT_CONT_TARGET_IO: {
		int srr = 0;
		uint32_t srr_off = 0;

		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
		io = (union ctl_io *)atio->ccb_h.io_ptr;

		softc->ctios_returned++;
#ifdef CTLFEDEBUG
		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
		    __func__, atio->tag_id, done_ccb->ccb_h.flags);
#endif
		/*
		 * Handle the SRR case, where the data pointer is pushed
		 * back by the initiator.
		 */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
		 && done_ccb->csio.msg_ptr != NULL
		 && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
		 && done_ccb->csio.msg_ptr[1] == 5
		 && done_ccb->csio.msg_ptr[2] == 0) {
			srr = 1;
			srr_off =
			    (done_ccb->csio.msg_ptr[3] << 24)
			  | (done_ccb->csio.msg_ptr[4] << 16)
			  | (done_ccb->csio.msg_ptr[5] << 8)
			  | (done_ccb->csio.msg_ptr[6]);
		}

		if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
			/*
			 * If status was being sent, the back end data is now
			 * history.  Hack it up and resubmit a new command with
			 * the CDB adjusted.  If the SIM does the right thing,
			 * all of the resid math should work.
			 */
			softc->ccbs_freed++;
			xpt_release_ccb(done_ccb);
			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
				done_ccb = (union ccb *)atio;
				goto resubmit;
			}
			/*
			 * Fall through to doom....
			 */
		} else if (srr) {
			/*
			 * If we have an srr and we're still sending data, we
			 * should be able to adjust offsets and cycle again.
			 */
			io->scsiio.kern_rel_offset =
			    io->scsiio.ext_data_filled = srr_off;
			io->scsiio.ext_data_len = io->scsiio.kern_total_len -
			    io->scsiio.kern_rel_offset;
			softc->ccbs_freed++;
			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
			xpt_release_ccb(done_ccb);
			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
			    periph_links.tqe);
			xpt_schedule(periph, /*priority*/ 1);
			break;
		}

		if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;

		/*
		 * If we were sending status back to the initiator, free up
		 * resources.  If we were doing a datamove, call the
		 * datamove done routine.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
			softc->ccbs_freed++;
			xpt_release_ccb(done_ccb);
			/*
			 * For a wildcard attachment, commands can come in
			 * with a specific target/lun.  Reset the target
			 * and LUN fields back to the wildcard values before
			 * we send them back down to the SIM.  The SIM has
			 * a wildcard LUN enabled, not whatever target/lun
			 * these happened to be.
			 */
			if (softc->flags & CTLFE_LUN_WILDCARD) {
				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
			}
			if (periph->flags & CAM_PERIPH_INVALID) {
				ctlfe_free_ccb(periph, (union ccb *)atio);
			} else {
				mtx_unlock(mtx);
				xpt_action((union ccb *)atio);
				return;
			}
		} else {
			struct ctlfe_cmd_info *cmd_info;
			struct ccb_scsiio *csio;

			csio = &done_ccb->csio;
			cmd_info = PRIV_INFO(io);

			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;

			io->scsiio.ext_data_len += csio->dxfer_len;
			if (io->scsiio.ext_data_len >
			    io->scsiio.kern_total_len) {
				xpt_print(periph->path, "%s: tag 0x%04x "
				    "done len %u > total %u sent %u\n",
				    __func__, io->scsiio.tag_num,
				    io->scsiio.ext_data_len,
				    io->scsiio.kern_total_len,
				    io->scsiio.ext_data_filled);
			}
			/*
			 * Translate CAM status to CTL status.  Success
			 * does not change the overall ctl_io status.  In
			 * that case we just set port_status to 0.  If we
			 * have a failure, though, set a data phase error
			 * for the overall ctl_io.
			 */
			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
			case CAM_REQ_CMP:
				io->io_hdr.port_status = 0;
				break;
			default:
				/*
				 * XXX KDM we probably need to figure out a
				 * standard set of errors that the SIM
				 * drivers should return in the event of a
				 * data transfer failure.  A data phase
				 * error will at least point the user to a
				 * data transfer error of some sort.
				 * Hopefully the SIM printed out some
				 * additional information to give the user
				 * a clue what happened.
				 */
				io->io_hdr.port_status = 0xbad1;
				ctl_set_data_phase_error(&io->scsiio);
				/*
				 * XXX KDM figure out residual.
				 */
				break;
			}
			/*
			 * If we had to break this S/G list into multiple
			 * pieces, figure out where we are in the list, and
			 * continue sending pieces if necessary.
			 */
			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
			 && (io->io_hdr.port_status == 0)) {
				ccb_flags flags;
				uint8_t scsi_status;
				uint8_t *data_ptr;
				uint32_t dxfer_len;

				flags = atio->ccb_h.flags &
				    (CAM_DIS_DISCONNECT|
				     CAM_TAG_ACTION_VALID);

				ctlfedata(softc, io, &flags, &data_ptr,
				    &dxfer_len, &csio->sglist_cnt);

				scsi_status = 0;

				if (((flags & CAM_SEND_STATUS) == 0)
				 && (dxfer_len == 0)) {
					printf("%s: tag %04x no status or "
					    "len cdb = %02x\n", __func__,
					    atio->tag_id,
					    atio->cdb_io.cdb_bytes[0]);
					printf("%s: tag %04x io status %#x\n",
					    __func__, atio->tag_id,
					    io->io_hdr.status);
				}

				cam_fill_ctio(csio,
				    /*retries*/ 2,
				    ctlfedone,
				    flags,
				    (flags & CAM_TAG_ACTION_VALID) ?
				    MSG_SIMPLE_Q_TAG : 0,
				    atio->tag_id,
				    atio->init_id,
				    scsi_status,
				    /*data_ptr*/ data_ptr,
				    /*dxfer_len*/ dxfer_len,
				    /*timeout*/ 5 * 1000);

				csio->ccb_h.flags |= CAM_UNLOCKED;
				csio->resid = 0;
				csio->ccb_h.ccb_atio = atio;
				io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
				softc->ctios_sent++;
				mtx_unlock(mtx);
				xpt_action((union ccb *)csio);
			} else {
				/*
				 * Release the CTIO.  The ATIO will be sent back
				 * down to the SIM once we send status.
				 */
				softc->ccbs_freed++;
				xpt_release_ccb(done_ccb);
				mtx_unlock(mtx);

				/* Call the backend move done callback */
				io->scsiio.be_move_done(io);
			}
			return;
		}
		break;
	}
	case XPT_IMMEDIATE_NOTIFY: {
		union ctl_io *io;
		struct ccb_immediate_notify *inot;
		cam_status status;
		int send_ctl_io;

		inot = &done_ccb->cin1;
		printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x "
		    "seq %#x\n", __func__, inot->ccb_h.status,
		    inot->tag_id, inot->seq_id);

		io = done_ccb->ccb_h.io_ptr;
		ctl_zero_io(io);

		send_ctl_io = 1;

		io->io_hdr.io_type = CTL_IO_TASK;
		PRIV_CCB(io) = done_ccb;
		inot->ccb_h.io_ptr = io;
		io->io_hdr.nexus.initid = inot->initiator_id;
		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
		if (bus_softc->hba_misc & PIM_EXTLUNS) {
			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
			    CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun));
		} else {
			io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
		}
		/* XXX KDM should this be the tag_id? */
		io->taskio.tag_num = inot->seq_id;

		status = inot->ccb_h.status & CAM_STATUS_MASK;
		switch (status) {
		case CAM_SCSI_BUS_RESET:
			io->taskio.task_action = CTL_TASK_BUS_RESET;
			break;
		case CAM_BDR_SENT:
			io->taskio.task_action = CTL_TASK_TARGET_RESET;
			break;
		case CAM_MESSAGE_RECV:
			switch (inot->arg) {
			case MSG_ABORT_TASK_SET:
				io->taskio.task_action =
				    CTL_TASK_ABORT_TASK_SET;
				break;
			case MSG_TARGET_RESET:
				io->taskio.task_action = CTL_TASK_TARGET_RESET;
				break;
			case MSG_ABORT_TASK:
				io->taskio.task_action = CTL_TASK_ABORT_TASK;
				break;
			case MSG_LOGICAL_UNIT_RESET:
				io->taskio.task_action = CTL_TASK_LUN_RESET;
				break;
			case MSG_CLEAR_TASK_SET:
				io->taskio.task_action =
				    CTL_TASK_CLEAR_TASK_SET;
				break;
			case MSG_CLEAR_ACA:
				io->taskio.task_action = CTL_TASK_CLEAR_ACA;
				break;
			case MSG_QUERY_TASK:
				io->taskio.task_action = CTL_TASK_QUERY_TASK;
				break;
			case MSG_QUERY_TASK_SET:
				io->taskio.task_action =
				    CTL_TASK_QUERY_TASK_SET;
				break;
			case MSG_QUERY_ASYNC_EVENT:
				io->taskio.task_action =
				    CTL_TASK_QUERY_ASYNC_EVENT;
				break;
			case MSG_NOOP:
				send_ctl_io = 0;
				break;
			default:
				xpt_print(periph->path,
				    "%s: unsupported message 0x%x\n",
				    __func__, inot->arg);
				send_ctl_io = 0;
				break;
			}
			break;
		case CAM_REQ_ABORTED:
			/*
			 * This request was sent back by the driver.
			 * XXX KDM what do we do here?
			 */
			send_ctl_io = 0;
			break;
		case CAM_REQ_INVALID:
		case CAM_PROVIDE_FAIL:
		default:
			/*
			 * We should only get here if we're talking to a
			 * SIM that is target-capable but supports the
			 * old API.  In that case, we need to just free
			 * the CCB.  If we actually send a notify
			 * acknowledge, it will send that back with an
			 * error as well.
			 */

			if ((status != CAM_REQ_INVALID)
			 && (status != CAM_PROVIDE_FAIL))
				xpt_print(periph->path,
				    "%s: unsupported CAM status 0x%x\n",
				    __func__, status);

			ctlfe_free_ccb(periph, done_ccb);

			goto out;
		}
		if (send_ctl_io != 0) {
			ctl_queue(io);
		} else {
			done_ccb->ccb_h.status = CAM_REQ_INPROG;
			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
			xpt_action(done_ccb);
		}
		break;
	}
	case XPT_NOTIFY_ACKNOWLEDGE:
		/*
		 * Queue this back down to the SIM as an immediate notify.
		 */
		done_ccb->ccb_h.status = CAM_REQ_INPROG;
		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		xpt_action(done_ccb);
		break;
	case XPT_SET_SIM_KNOB:
	case XPT_GET_SIM_KNOB:
	case XPT_GET_SIM_KNOB_OLD:
		break;
	default:
		panic("%s: unexpected CCB type %#x", __func__,
		    done_ccb->ccb_h.func_code);
		break;
	}

out:
	mtx_unlock(mtx);
}

static void
ctlfe_onoffline(void *arg, int online)
{
	struct ctlfe_softc *bus_softc;
	union ccb *ccb;
	cam_status status;
	struct cam_path *path;
	int set_wwnn;

	bus_softc = (struct ctlfe_softc *)arg;

	set_wwnn = 0;

	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path!\n", __func__);
		return;
	}
	ccb = xpt_alloc_ccb();
	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);
	ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;
	xpt_action(ccb);

	/*
	 * Copan WWN format:
	 *
	 * Bits 63-60:	0x5		NAA, IEEE registered name
	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
	 * Bits 35-12:	Copan SSN (Sequential Serial Number)
	 * Bits 11-8:	Type of port:
	 *			1 == N-Port
	 *			2 == F-Port
	 *			3 == NL-Port
	 * Bits 7-0:	0 == Node Name, >0 == Port Number
	 */
	if (online != 0) {
		if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0) {
#ifdef RANDOM_WWNN
			uint64_t random_bits;
#endif

			printf("%s: %s current WWNN %#jx\n", __func__,
			    bus_softc->port_name,
			    ccb->knob.xport_specific.fc.wwnn);
			printf("%s: %s current WWPN %#jx\n", __func__,
			    bus_softc->port_name,
			    ccb->knob.xport_specific.fc.wwpn);

#ifdef RANDOM_WWNN
			arc4rand(&random_bits, sizeof(random_bits), 0);
#endif

			/*
			 * XXX KDM this is a bit of a kludge for now.  We
			 * take the current WWNN/WWPN from the card, and
			 * replace the company identifier and the NL-Port
			 * indicator and the port number (for the WWPN).
			 * This should be replaced later with ddb_GetWWNN,
			 * or possibly a more centralized scheme.  (It
			 * would be nice to have the WWNN/WWPN for each
			 * port stored in the ctl_port structure.)
			 */
#ifdef RANDOM_WWNN
			ccb->knob.xport_specific.fc.wwnn =
			    (random_bits &
			    0x0000000fffffff00ULL) |
			    /* Company ID */ 0x5000ED5000000000ULL |
			    /* NL-Port */ 0x0300;
			ccb->knob.xport_specific.fc.wwpn =
			    (random_bits &
			    0x0000000fffffff00ULL) |
			    /* Company ID */ 0x5000ED5000000000ULL |
			    /* NL-Port */ 0x3000 |
			    /* Port Num */ (bus_softc->port.targ_port & 0xff);

			/*
			 * This is a bit of an API break/reversal, but if
			 * we're doing the random WWNN that's a little
			 * different anyway.  So record what we're actually
			 * using with the frontend code so it's reported
			 * accurately.
			 */
			ctl_port_set_wwns(&bus_softc->port,
			    true, ccb->knob.xport_specific.fc.wwnn,
			    true, ccb->knob.xport_specific.fc.wwpn);
			set_wwnn = 1;
#else /* RANDOM_WWNN */
			/*
			 * If the user has specified a WWNN/WWPN, send them
			 * down to the SIM.  Otherwise, record what the SIM
			 * has reported.
			 */
			if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn
			    != ccb->knob.xport_specific.fc.wwnn) {
				ccb->knob.xport_specific.fc.wwnn =
				    bus_softc->port.wwnn;
				set_wwnn = 1;
			} else {
				ctl_port_set_wwns(&bus_softc->port,
				    true, ccb->knob.xport_specific.fc.wwnn,
				    false, 0);
			}
			if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn
			    != ccb->knob.xport_specific.fc.wwpn) {
				ccb->knob.xport_specific.fc.wwpn =
				    bus_softc->port.wwpn;
				set_wwnn = 1;
			} else {
				ctl_port_set_wwns(&bus_softc->port,
				    false, 0,
				    true, ccb->knob.xport_specific.fc.wwpn);
			}
#endif /* RANDOM_WWNN */


			if (set_wwnn != 0) {
				printf("%s: %s new WWNN %#jx\n", __func__,
				    bus_softc->port_name,
				    ccb->knob.xport_specific.fc.wwnn);
				printf("%s: %s new WWPN %#jx\n", __func__,
				    bus_softc->port_name,
				    ccb->knob.xport_specific.fc.wwpn);
			}
		} else {
			printf("%s: %s has no valid WWNN/WWPN\n", __func__,
			    bus_softc->port_name);
		}
	}
	ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
	ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
	if (set_wwnn != 0)
		ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;

	if (online != 0)
		ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET;
	else
		ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET;

	xpt_action(ccb);

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		printf("%s: SIM %s (path id %d) target %s failed with "
		    "status %#x\n",
		    __func__, bus_softc->port_name, bus_softc->path_id,
		    (online != 0) ? "enable" : "disable",
		    ccb->ccb_h.status);
	} else {
		printf("%s: SIM %s (path id %d) target %s succeeded\n",
		    __func__, bus_softc->port_name, bus_softc->path_id,
		    (online != 0) ? "enable" : "disable");
	}

	xpt_free_path(path);
	xpt_free_ccb(ccb);
}

static void
ctlfe_online(void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct cam_path *path;
	cam_status status;
	struct ctlfe_lun_softc *lun_softc;
	struct cam_periph *periph;

	bus_softc = (struct ctlfe_softc *)arg;

	/*
	 * Create the wildcard LUN before bringing the port online.
	 */
	status = xpt_create_path(&path, /*periph*/ NULL,
	    bus_softc->path_id, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path for wildcard periph\n",
		    __func__);
		return;
	}

	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO);

	xpt_path_lock(path);
	periph = cam_periph_find(path, "ctl");
	if (periph != NULL) {
		/* We've already got a periph, no need to alloc a new one. */
		xpt_path_unlock(path);
		xpt_free_path(path);
		free(lun_softc, M_CTLFE);
		return;
	}
	lun_softc->parent_softc = bus_softc;
	lun_softc->flags |= CTLFE_LUN_WILDCARD;

	status = cam_periph_alloc(ctlferegister,
				  ctlfeoninvalidate,
				  ctlfecleanup,
				  ctlfestart,
				  "ctl",
				  CAM_PERIPH_BIO,
				  path,
				  ctlfeasync,
				  0,
				  lun_softc);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		const struct cam_status_entry *entry;

		entry = cam_fetch_status_entry(status);
		printf("%s: CAM error %s (%#x) returned from "
		    "cam_periph_alloc()\n", __func__, (entry != NULL) ?
		    entry->status_text : "Unknown", status);
		free(lun_softc, M_CTLFE);
	}

	xpt_path_unlock(path);
	ctlfe_onoffline(arg, /*online*/ 1);
	xpt_free_path(path);
}

static void
ctlfe_offline(void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct cam_path *path;
	cam_status status;
	struct cam_periph *periph;

	bus_softc = (struct ctlfe_softc *)arg;

	ctlfe_onoffline(arg, /*online*/ 0);

	/*
	 * Disable the wildcard LUN for this port now that we have taken
	 * the port offline.
	 */
	status = xpt_create_path(&path, /*periph*/ NULL,
	    bus_softc->path_id, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path for wildcard periph\n",
		    __func__);
		return;
	}
	xpt_path_lock(path);
	if ((periph = cam_periph_find(path, "ctl")) != NULL)
		cam_periph_invalidate(periph);
	xpt_path_unlock(path);
	xpt_free_path(path);
}

/*
 * This will get called to enable a LUN on every bus that is attached to
 * CTL.  So we only need to create a path/periph for this particular bus.
 */
static int
ctlfe_lun_enable(void *arg, int lun_id)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;
	struct cam_path *path;
	struct cam_periph *periph;
	cam_status status;

	bus_softc = (struct ctlfe_softc *)arg;
	if (bus_softc->hba_misc & PIM_EXTLUNS)
		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));

	status = xpt_create_path(&path, /*periph*/ NULL,
	    bus_softc->path_id, bus_softc->target_id, lun_id);
	/* XXX KDM need some way to return status to CTL here? */
	if (status != CAM_REQ_CMP) {
		printf("%s: could not create path, status %#x\n", __func__,
		    status);
		return (1);
	}

	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
	xpt_path_lock(path);
	periph = cam_periph_find(path, "ctl");
	if (periph != NULL) {
		/* We've already got a periph, no need to alloc a new one. */
		xpt_path_unlock(path);
		xpt_free_path(path);
		free(softc, M_CTLFE);
		return (0);
	}
	softc->parent_softc = bus_softc;

	status = cam_periph_alloc(ctlferegister,
				  ctlfeoninvalidate,
				  ctlfecleanup,
				  ctlfestart,
				  "ctl",
				  CAM_PERIPH_BIO,
				  path,
				  ctlfeasync,
				  0,
				  softc);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		const struct cam_status_entry *entry;

		entry = cam_fetch_status_entry(status);
		printf("%s: CAM error %s (%#x) returned from "
		    "cam_periph_alloc()\n", __func__, (entry != NULL) ?
		    entry->status_text : "Unknown", status);
		free(softc, M_CTLFE);
	}

	xpt_path_unlock(path);
	xpt_free_path(path);
	return (0);
}

/*
 * This will get called when the user removes a LUN to disable that LUN
 * on every bus that is attached to CTL.
 */
static int
ctlfe_lun_disable(void *arg, int lun_id)
{
	struct ctlfe_softc *softc;
	struct ctlfe_lun_softc *lun_softc;

	softc = (struct ctlfe_softc *)arg;
	if (softc->hba_misc & PIM_EXTLUNS)
		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));

	mtx_lock(&softc->lun_softc_mtx);
	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
		struct cam_path *path;

		path = lun_softc->periph->path;

		if ((xpt_path_target_id(path) == softc->target_id)
		 && (xpt_path_lun_id(path) == lun_id)) {
			break;
		}
	}
	if (lun_softc == NULL) {
		mtx_unlock(&softc->lun_softc_mtx);
		printf("%s: can't find lun %d\n", __func__, lun_id);
		return (1);
	}
	cam_periph_acquire(lun_softc->periph);
	mtx_unlock(&softc->lun_softc_mtx);

	cam_periph_lock(lun_softc->periph);
	cam_periph_invalidate(lun_softc->periph);
	cam_periph_unlock(lun_softc->periph);
	cam_periph_release(lun_softc->periph);
	return (0);
}

static void
ctlfe_dump_sim(struct cam_sim *sim)
{

	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
	    sim->sim_name, sim->unit_number,
	    sim->max_tagged_dev_openings, sim->max_dev_openings);
}

/*
 * Assumes that the SIM lock is held.
 */
static void
ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
{
	struct ccb_hdr *hdr;
	struct cam_periph *periph;
	int num_items;

	periph = softc->periph;
	num_items = 0;

	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
		union ctl_io *io = hdr->io_ptr;

		num_items++;

		/*
		 * Only regular SCSI I/O is put on the work
		 * queue, so we can print sense here.  There may be no
		 * sense if it's on the queue for a DMA, but this serves to
		 * print out the CCB as well.
		 *
		 * XXX KDM switch this over to scsi_sense_print() when
		 * CTL is merged in with CAM.
		 */
		ctl_io_error_print(io, NULL);

		/*
		 * Print DMA status if we are DMA_QUEUED.
		 */
		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
			xpt_print(periph->path,
			    "Total %u, Current %u, Resid %u\n",
			    io->scsiio.kern_total_len,
			    io->scsiio.kern_data_len,
			    io->scsiio.kern_data_resid);
		}
	}

	xpt_print(periph->path, "%d requests total waiting for CCBs\n",
	    num_items);
	xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
	    "freed)\n", (uintmax_t)(softc->ccbs_alloced -
	    softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
	    (uintmax_t)softc->ccbs_freed);
	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
	    "returned)\n", (uintmax_t)(softc->ctios_sent -
	    softc->ctios_returned), (uintmax_t)softc->ctios_sent,
	    (uintmax_t)softc->ctios_returned);
}

/*
 * Datamove/done routine called by CTL.  Put ourselves on the queue to
 * receive a CCB from CAM so we can queue the continue I/O request down
 * to the adapter.
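 * The actual CTIO is built in ctlfestart() once CAM hands us a CCB.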
 */
static void
ctlfe_datamove(union ctl_io *io)
{
	union ccb *ccb;
	struct cam_periph *periph;
	struct ctlfe_lun_softc *softc;

	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
	    ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));

	ccb = PRIV_CCB(io);
	periph = xpt_path_periph(ccb->ccb_h.path);
	cam_periph_lock(periph);
	softc = (struct ctlfe_lun_softc *)periph->softc;
	io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
	TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
	    periph_links.tqe);
	xpt_schedule(periph, /*priority*/ 1);
	cam_periph_unlock(periph);
}

static void
ctlfe_done(union ctl_io *io)
{
	union ccb *ccb;
	struct cam_periph *periph;
	struct ctlfe_lun_softc *softc;

	ccb = PRIV_CCB(io);
	periph = xpt_path_periph(ccb->ccb_h.path);
	cam_periph_lock(periph);
	softc = (struct ctlfe_lun_softc *)periph->softc;

	if (io->io_hdr.io_type == CTL_IO_TASK) {
		/*
		 * Task management commands don't require any further
		 * communication back to the adapter.  Requeue the CCB
		 * to the adapter, and free the CTL I/O.
		 */
		xpt_print(ccb->ccb_h.path, "%s: returning task I/O "
		    "tag %#x seq %#x\n", __func__,
		    ccb->cin1.tag_id, ccb->cin1.seq_id);
		/*
		 * Send the notify acknowledge down to the SIM, to let it
		 * know we processed the task management command.
		 */
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
		switch (io->taskio.task_status) {
		case CTL_TASK_FUNCTION_COMPLETE:
			ccb->cna2.arg = CAM_RSP_TMF_COMPLETE;
			break;
		case CTL_TASK_FUNCTION_SUCCEEDED:
			ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		case CTL_TASK_FUNCTION_REJECTED:
			ccb->cna2.arg = CAM_RSP_TMF_REJECTED;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		case CTL_TASK_LUN_DOES_NOT_EXIST:
			ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		case CTL_TASK_FUNCTION_NOT_SUPPORTED:
			ccb->cna2.arg = CAM_RSP_TMF_FAILED;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		}
		ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8;
		xpt_action(ccb);
	} else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) {
		if (softc->flags & CTLFE_LUN_WILDCARD) {
			ccb->ccb_h.target_id = CAM_TARGET_WILDCARD;
			ccb->ccb_h.target_lun = CAM_LUN_WILDCARD;
		}
		if (periph->flags & CAM_PERIPH_INVALID) {
			ctlfe_free_ccb(periph, ccb);
		} else {
			cam_periph_unlock(periph);
			xpt_action(ccb);
			return;
		}
	} else {
		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
		TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
		    periph_links.tqe);
		xpt_schedule(periph, /*priority*/ 1);
	}

	cam_periph_unlock(periph);
}

static void
ctlfe_dump(void)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *lun_softc;

	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
		ctlfe_dump_sim(bus_softc->sim);
		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links)
			ctlfe_dump_queue(lun_softc);
	}
}