/*-
 * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
 */
/*
 * Peripheral driver interface between CAM and CTL (CAM Target Layer).
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <machine/bus.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
	CTLFE_CCB_DEFAULT	= 0x00
} ctlfe_ccb_types;

struct ctlfe_softc {
	struct ctl_frontend fe;
	path_id_t path_id;
	u_int	maxio;
	struct cam_sim *sim;
	char port_name[DEV_IDLEN];
	struct mtx lun_softc_mtx;
	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
	STAILQ_ENTRY(ctlfe_softc) links;
};

STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
struct mtx ctlfe_list_mtx;
static char ctlfe_mtx_desc[] = "ctlfelist";
static int ctlfe_dma_enabled = 1;
#ifdef CTLFE_INIT_ENABLE
static int ctlfe_max_targets = 1;
static int ctlfe_num_targets = 0;
#endif

typedef enum {
	CTLFE_LUN_NONE		= 0x00,
	CTLFE_LUN_WILDCARD	= 0x01
} ctlfe_lun_flags;

struct ctlfe_lun_softc {
	struct ctlfe_softc *parent_softc;
	struct cam_periph *periph;
	ctlfe_lun_flags flags;
	struct callout dma_callout;
	uint64_t ccbs_alloced;
	uint64_t ccbs_freed;
	uint64_t ctios_sent;
	uint64_t ctios_returned;
	uint64_t atios_sent;
	uint64_t atios_returned;
	uint64_t inots_sent;
	uint64_t inots_returned;
	/* bus_dma_tag_t dma_tag; */
	TAILQ_HEAD(, ccb_hdr) work_queue;
	STAILQ_ENTRY(ctlfe_lun_softc) links;
};

typedef enum {
	CTLFE_CMD_NONE		= 0x00,
	CTLFE_CMD_PIECEWISE	= 0x01
} ctlfe_cmd_flags;

/*
 * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h.
 * Currently that is 600 bytes.
 */
struct ctlfe_lun_cmd_info {
	int cur_transfer_index;
	size_t cur_transfer_off;
	ctlfe_cmd_flags flags;
	/*
	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
	 * i386 and 512 bytes on amd64.
	 */
#define CTLFE_MAX_SEGS	32
	bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS];
};

/*
 * When we register the adapter/bus, request that this many ctl_ios be
 * allocated.  This should be the maximum supported by the adapter, but we
 * currently don't have a way to get that back from the path inquiry.
 * XXX KDM add that to the path inquiry.
 */
#define	CTLFE_REQ_CTL_IO	4096
/*
 * Number of Accept Target I/O CCBs to allocate and queue down to the
 * adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_ATIO_PER_LUN	1024
/*
 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
 * allocate and queue down to the adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_IN_PER_LUN	1024

/*
 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending
 * status to the initiator.  The SIM is expected to have its own timeouts,
 * so we're not putting this timeout around the CCB execution time.  The
 * SIM should timeout and let us know if it has an issue.
 */
#define	CTLFE_DMA_TIMEOUT	60

/*
 * Turn this on to enable extra debugging prints.
 */
#if 0
#define	CTLFE_DEBUG
#endif

/*
 * Use randomly assigned WWNN/WWPN values.  This is to work around an issue
 * in the FreeBSD initiator that makes it unable to rescan the target if
 * the target gets rebooted and the WWNN/WWPN stay the same.
 */
#if 0
#define	RANDOM_WWNN
#endif

SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW,
	   &ctlfe_dma_enabled, 0, "DMA enabled");
MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");

#define	ccb_type	ppriv_field0
/* This is only used in the ATIO */
#define	io_ptr		ppriv_ptr1

/* This is only used in the CTIO */
#define	ccb_atio	ppriv_ptr1

int ctlfeinitialize(void);
void ctlfeshutdown(void);
static periph_init_t ctlfeinit;
static void ctlfeasync(void *callback_arg, uint32_t code,
		       struct cam_path *path, void *arg);
static periph_ctor_t ctlferegister;
static periph_oninv_t ctlfeoninvalidate;
static periph_dtor_t ctlfecleanup;
static periph_start_t ctlfestart;
static void ctlfedone(struct cam_periph *periph,
		      union ccb *done_ccb);

static void ctlfe_onoffline(void *arg, int online);
static void ctlfe_online(void *arg);
static void ctlfe_offline(void *arg);
static int ctlfe_targ_enable(void *arg, struct ctl_id targ_id);
static int ctlfe_targ_disable(void *arg, struct ctl_id targ_id);
static int ctlfe_lun_enable(void *arg, struct ctl_id targ_id,
			    int lun_id);
static int ctlfe_lun_disable(void *arg, struct ctl_id targ_id,
			     int lun_id);
static void ctlfe_dump_sim(struct cam_sim *sim);
static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
static void ctlfe_dma_timeout(void *arg);
static void ctlfe_datamove_done(union ctl_io *io);
static void ctlfe_dump(void);

static struct periph_driver ctlfe_driver =
{
	ctlfeinit, "ctl",
	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0
};

static int ctlfe_module_event_handler(module_t, int /*modeventtype_t*/, void *);

/*
 * We're not using PERIPHDRIVER_DECLARE(), because it runs at SI_SUB_DRIVERS,
 * and that happens before CTL gets initialised.
 */
static moduledata_t ctlfe_moduledata = {
	"ctlfe",
	ctlfe_module_event_handler,
	NULL
};

DECLARE_MODULE(ctlfe, ctlfe_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
MODULE_VERSION(ctlfe, 1);
MODULE_DEPEND(ctlfe, ctl, 1, 1, 1);
MODULE_DEPEND(ctlfe, cam, 1, 1, 1);

extern struct ctl_softc *control_softc;

void
ctlfeshutdown(void)
{
	return;
}

void
ctlfeinit(void)
{
	cam_status status;

	STAILQ_INIT(&ctlfe_softc_list);

	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);

	KASSERT(control_softc != NULL, ("CTL is not initialized!"));

	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
				    AC_CONTRACT, ctlfeasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("ctl: Failed to attach async callback due to CAM "
		       "status 0x%x!\n", status);
	}
}

static int
ctlfe_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		periphdriver_register(&ctlfe_driver);
		return (0);
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}
}

static void
ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{

#ifdef CTLFEDEBUG
	printf("%s: entered\n", __func__);
#endif

	/*
	 * When a new path gets registered, and it is capable of target
	 * mode, go ahead and attach.  Later on, we may need to be more
	 * selective, but for now this will be sufficient.
	 */
	switch (code) {
	case AC_PATH_REGISTERED: {
		struct ctl_frontend *fe;
		struct ctlfe_softc *bus_softc;
		struct ccb_pathinq *cpi;
		int retval;

		cpi = (struct ccb_pathinq *)arg;

		/* Don't attach if it doesn't support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
#ifdef CTLFEDEBUG
			printf("%s: SIM %s%d doesn't support target mode\n",
			       __func__, cpi->dev_name, cpi->unit_number);
#endif
			break;
		}

#ifdef CTLFE_INIT_ENABLE
		if (ctlfe_num_targets >= ctlfe_max_targets) {
			union ccb *ccb;

			ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP,
						  M_NOWAIT | M_ZERO);
			if (ccb == NULL) {
				printf("%s: unable to malloc CCB!\n", __func__);
				return;
			}
			xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);

			ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
			ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
			ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR;

			xpt_action(ccb);

			if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
			     CAM_REQ_CMP) {
				printf("%s: SIM %s%d (path id %d) initiator "
				       "enable failed with status %#x\n",
				       __func__, cpi->dev_name,
				       cpi->unit_number, cpi->ccb_h.path_id,
				       ccb->ccb_h.status);
			} else {
				printf("%s: SIM %s%d (path id %d) initiator "
				       "enable succeeded\n",
				       __func__, cpi->dev_name,
				       cpi->unit_number, cpi->ccb_h.path_id);
			}

			free(ccb, M_TEMP);

			break;
		} else {
			ctlfe_num_targets++;
		}

		printf("%s: ctlfe_num_targets = %d\n", __func__,
		       ctlfe_num_targets);
#endif /* CTLFE_INIT_ENABLE */

		/*
		 * We're in an interrupt context here, so we have to
		 * use M_NOWAIT.  Of course this means trouble if we
		 * can't allocate memory.
		 */
		bus_softc = malloc(sizeof(*bus_softc), M_CTLFE,
				   M_NOWAIT | M_ZERO);
		if (bus_softc == NULL) {
			printf("%s: unable to malloc %zd bytes for softc\n",
			       __func__, sizeof(*bus_softc));
			return;
		}

		bus_softc->path_id = cpi->ccb_h.path_id;
		bus_softc->sim = xpt_path_sim(path);
		if (cpi->maxio != 0)
			bus_softc->maxio = cpi->maxio;
		else
			bus_softc->maxio = DFLTPHYS;
		mtx_init(&bus_softc->lun_softc_mtx, "LUN softc mtx", NULL,
		    MTX_DEF);
		STAILQ_INIT(&bus_softc->lun_softc_list);

		fe = &bus_softc->fe;

		/*
		 * XXX KDM should we be more accurate here ?
		 */
		if (cpi->transport == XPORT_FC)
			fe->port_type = CTL_PORT_FC;
		else
			fe->port_type = CTL_PORT_SCSI;

		/* XXX KDM what should the real number be here? */
		fe->num_requested_ctl_io = 4096;
		snprintf(bus_softc->port_name, sizeof(bus_softc->port_name),
			 "%s%d", cpi->dev_name, cpi->unit_number);
		/*
		 * XXX KDM it would be nice to allocate storage in the
		 * frontend structure itself.
		 */
		fe->port_name = bus_softc->port_name;
		fe->physical_port = cpi->unit_number;
		fe->virtual_port = cpi->bus_id;
		fe->port_online = ctlfe_online;
		fe->port_offline = ctlfe_offline;
		fe->onoff_arg = bus_softc;
		fe->targ_enable = ctlfe_targ_enable;
		fe->targ_disable = ctlfe_targ_disable;
		fe->lun_enable = ctlfe_lun_enable;
		fe->lun_disable = ctlfe_lun_disable;
		fe->targ_lun_arg = bus_softc;
		fe->fe_datamove = ctlfe_datamove_done;
		fe->fe_done = ctlfe_datamove_done;
		fe->fe_dump = ctlfe_dump;
		/*
		 * XXX KDM the path inquiry doesn't give us the maximum
		 * number of targets supported.
		 */
		fe->max_targets = cpi->max_target;
		fe->max_target_id = cpi->max_target;

		/*
		 * XXX KDM need to figure out whether we're the master or
		 * slave.
		 */
#ifdef CTLFEDEBUG
		printf("%s: calling ctl_frontend_register() for %s%d\n",
		       __func__, cpi->dev_name, cpi->unit_number);
#endif
		retval = ctl_frontend_register(fe, /*master_SC*/ 1);
		if (retval != 0) {
			printf("%s: ctl_frontend_register() failed with "
			       "error %d!\n", __func__, retval);
			mtx_destroy(&bus_softc->lun_softc_mtx);
			free(bus_softc, M_CTLFE);
			break;
		} else {
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_INSERT_TAIL(&ctlfe_softc_list, bus_softc, links);
			mtx_unlock(&ctlfe_list_mtx);
		}

		break;
	}
	case AC_PATH_DEREGISTERED: {
		struct ctlfe_softc *softc = NULL;

		mtx_lock(&ctlfe_list_mtx);
		STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
			if (softc->path_id == xpt_path_path_id(path)) {
				STAILQ_REMOVE(&ctlfe_softc_list, softc,
				    ctlfe_softc, links);
				break;
			}
		}
		mtx_unlock(&ctlfe_list_mtx);

		if (softc != NULL) {
			/*
			 * XXX KDM are we certain at this point that there
			 * are no outstanding commands for this frontend?
			 */
			ctl_frontend_deregister(&softc->fe);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
		}
		break;
	}
	case AC_CONTRACT: {
		struct ac_contract *ac;

		ac = (struct ac_contract *)arg;

		switch (ac->contract_number) {
		case AC_CONTRACT_DEV_CHG: {
			struct ac_device_changed *dev_chg;
			struct ctlfe_softc *softc;
			int retval, found;

			dev_chg = (struct ac_device_changed *)ac->contract_data;

			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
			       __func__, dev_chg->wwpn, dev_chg->port,
			       xpt_path_path_id(path), dev_chg->target,
			       (dev_chg->arrived == 0) ? "left" : "arrived");

			found = 0;

			mtx_lock(&ctlfe_list_mtx);
			STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
				if (softc->path_id == xpt_path_path_id(path)) {
					found = 1;
					break;
				}
			}
			mtx_unlock(&ctlfe_list_mtx);

			if (found == 0) {
				printf("%s: CTL port for CAM path %u not "
				       "found!\n", __func__,
				       xpt_path_path_id(path));
				break;
			}
			if (dev_chg->arrived != 0) {
				retval = ctl_add_initiator(dev_chg->wwpn,
				    softc->fe.targ_port, dev_chg->target);
			} else {
				retval = ctl_remove_initiator(
				    softc->fe.targ_port, dev_chg->target);
			}

			if (retval != 0) {
				printf("%s: could not %s port %d iid %u "
				       "WWPN %#jx!\n", __func__,
				       (dev_chg->arrived != 0) ? "add" :
				       "remove", softc->fe.targ_port,
				       dev_chg->target,
				       (uintmax_t)dev_chg->wwpn);
			}
			break;
		}
		default:
			printf("%s: unsupported contract number %ju\n",
			       __func__, (uintmax_t)ac->contract_number);
			break;
		}
		break;
	}
	default:
		break;
	}
}

static cam_status
ctlferegister(struct cam_periph *periph, void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;
	union ccb en_lun_ccb;
	cam_status status;
	int i;

	softc = (struct ctlfe_lun_softc *)arg;
	bus_softc = softc->parent_softc;

	TAILQ_INIT(&softc->work_queue);
	softc->periph = periph;

	callout_init_mtx(&softc->dma_callout, xpt_path_mtx(periph->path),
	    /*flags*/ 0);
	periph->softc = softc;

	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
	en_lun_ccb.cel.grp6_len = 0;
	en_lun_ccb.cel.grp7_len = 0;
	en_lun_ccb.cel.enable = 1;
	xpt_action(&en_lun_ccb);
	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n",
			  __func__, en_lun_ccb.ccb_h.status);
		return (status);
	}

	status = CAM_REQ_CMP;

	for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
		union ccb *new_ccb;

		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
					      M_ZERO|M_NOWAIT);
		if (new_ccb == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
		new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		new_ccb->ccb_h.cbfcnp = ctlfedone;
		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
		xpt_action(new_ccb);
		softc->atios_sent++;
		status = new_ccb->ccb_h.status;
		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			free(new_ccb, M_CTLFE);
			break;
		}
	}

	status = cam_periph_acquire(periph);
	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: could not acquire reference "
			  "count, status = %#x\n", __func__, status);
		return (status);
	}

	if (i == 0) {
		xpt_print(periph->path, "%s: could not allocate ATIO CCBs, "
			  "status 0x%x\n", __func__, status);
		return (CAM_REQ_CMP_ERR);
	}

	for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
		union ccb *new_ccb;

		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
					      M_ZERO|M_NOWAIT);
		if (new_ccb == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
		new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		new_ccb->ccb_h.cbfcnp = ctlfedone;
		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
		xpt_action(new_ccb);
		softc->inots_sent++;
		status = new_ccb->ccb_h.status;
		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			/*
			 * Note that we don't free the CCB here.  If the
			 * status is not CAM_REQ_INPROG, then we're
			 * probably talking to a SIM that says it is
			 * target-capable but doesn't support the
			 * XPT_IMMEDIATE_NOTIFY CCB.  i.e. it supports the
			 * older API.  In that case, it'll call xpt_done()
			 * on the CCB, and we need to free it in our done
			 * routine as a result.
			 */
			break;
		}
	}
	if ((i == 0)
	 || (status != CAM_REQ_INPROG)) {
		xpt_print(periph->path, "%s: could not allocate immediate "
			  "notify CCBs, status 0x%x\n", __func__, status);
		return (CAM_REQ_CMP_ERR);
	}
	return (CAM_REQ_CMP);
}

static void
ctlfeoninvalidate(struct cam_periph *periph)
{
	union ccb en_lun_ccb;
	cam_status status;
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
	en_lun_ccb.cel.grp6_len = 0;
	en_lun_ccb.cel.grp7_len = 0;
	en_lun_ccb.cel.enable = 0;
	xpt_action(&en_lun_ccb);
	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
			  __func__, en_lun_ccb.ccb_h.status);
		/*
		 * XXX KDM what do we do now?
		 */
	}
	xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju "
		  "INOTs outstanding, %d refs\n", softc->atios_sent -
		  softc->atios_returned, softc->inots_sent -
		  softc->inots_returned, periph->refcount);

	bus_softc = softc->parent_softc;
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);
}

static void
ctlfecleanup(struct cam_periph *periph)
{
	struct ctlfe_lun_softc *softc;

	xpt_print(periph->path, "%s: Called\n", __func__);

	softc = (struct ctlfe_lun_softc *)periph->softc;

	/*
	 * XXX KDM is there anything else that needs to be done here?
	 */

	callout_stop(&softc->dma_callout);

	free(softc, M_CTLFE);
}

static void
ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
    ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len,
    u_int16_t *sglist_cnt)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_cmd_info *cmd_info;
	struct ctl_sg_entry *ctl_sglist;
	bus_dma_segment_t *cam_sglist;
	size_t off;
	int i, idx;

	cmd_info = (struct ctlfe_lun_cmd_info *)io->io_hdr.port_priv;
	bus_softc = softc->parent_softc;

	/*
	 * Set the direction, relative to the initiator.
	 */
	*flags &= ~CAM_DIR_MASK;
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		*flags |= CAM_DIR_IN;
	else
		*flags |= CAM_DIR_OUT;

	*flags &= ~CAM_DATA_MASK;
	idx = cmd_info->cur_transfer_index;
	off = cmd_info->cur_transfer_off;
	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
	if (io->scsiio.kern_sg_entries == 0) {
		/* No S/G list. */
		*data_ptr = io->scsiio.kern_data_ptr + off;
		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
			*dxfer_len = io->scsiio.kern_data_len - off;
		} else {
			*dxfer_len = bus_softc->maxio;
			cmd_info->cur_transfer_index = -1;
			cmd_info->cur_transfer_off = bus_softc->maxio;
			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
		}
		*sglist_cnt = 0;

		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_PADDR;
		else
			*flags |= CAM_DATA_VADDR;
	} else {
		/* S/G list with physical or virtual pointers. */
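		/*
		 * Walk the CTL S/G list starting at the saved transfer
		 * index, applying any partial-entry offset left over from
		 * a previous pass.  The total transfer is clamped to the
		 * SIM's maxio; if the remaining data does not fit (or we
		 * run out of CTLFE_MAX_SEGS CAM segments), the command is
		 * marked CTLFE_CMD_PIECEWISE so the rest goes out on a
		 * later CTIO.
		 */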
		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		cam_sglist = cmd_info->cam_sglist;
		*dxfer_len = 0;
		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
			cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off;
			if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) {
				cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off;
				*dxfer_len += cam_sglist[i].ds_len;
			} else {
				cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len;
				cmd_info->cur_transfer_index = idx + i;
				cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				*dxfer_len += cam_sglist[i].ds_len;
				if (ctl_sglist[i].len != 0)
					i++;
				break;
			}
			if (i == (CTLFE_MAX_SEGS - 1) &&
			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
				cmd_info->cur_transfer_index = idx + i + 1;
				cmd_info->cur_transfer_off = 0;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				i++;
				break;
			}
			off = 0;
		}
		*sglist_cnt = i;
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_SG_PADDR;
		else
			*flags |= CAM_DATA_SG;
		*data_ptr = (uint8_t *)cam_sglist;
	}
}

static void
ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ccb_hdr *ccb_h;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	softc->ccbs_alloced++;

	start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT;

	ccb_h = TAILQ_FIRST(&softc->work_queue);
	if (ccb_h == NULL) {
		softc->ccbs_freed++;
		xpt_release_ccb(start_ccb);
	} else {
		struct ccb_accept_tio *atio;
		struct ccb_scsiio *csio;
		uint8_t *data_ptr;
		uint32_t dxfer_len;
		ccb_flags flags;
		union ctl_io *io;
		uint8_t scsi_status;

		/* Take the ATIO off the work queue */
		TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe);
		atio = (struct ccb_accept_tio *)ccb_h;
		io = (union ctl_io *)ccb_h->io_ptr;
		csio = &start_ccb->csio;

		flags = atio->ccb_h.flags &
			(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		if ((io == NULL)
		 || (io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
			/*
			 * We're done, send status back.
			 */
			flags |= CAM_SEND_STATUS;
			if (io == NULL) {
				scsi_status = SCSI_STATUS_BUSY;
				csio->sense_len = 0;
			} else if ((io->io_hdr.status & CTL_STATUS_MASK) ==
				   CTL_CMD_ABORTED) {
				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;

				/*
				 * If this command was aborted, we don't
				 * need to send status back to the SIM.
				 * Just free the CTIO and ctl_io, and
				 * recycle the ATIO back to the SIM.
				 */
				xpt_print(periph->path, "%s: aborted "
					  "command 0x%04x discarded\n",
					  __func__, io->scsiio.tag_num);
				ctl_free_io(io);
				/*
				 * For a wildcard attachment, commands can
				 * come in with a specific target/lun.  Reset
				 * the target and LUN fields back to the
				 * wildcard values before we send them back
				 * down to the SIM.  The SIM has a wildcard
				 * LUN enabled, not whatever target/lun
				 * these happened to be.
				 */
				if (softc->flags & CTLFE_LUN_WILDCARD) {
					atio->ccb_h.target_id =
					    CAM_TARGET_WILDCARD;
					atio->ccb_h.target_lun =
					    CAM_LUN_WILDCARD;
				}

				if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
					cam_release_devq(periph->path,
							 /*relsim_flags*/0,
							 /*reduction*/0,
							 /*timeout*/0,
							 /*getcount_only*/0);
					atio->ccb_h.status &= ~CAM_DEV_QFRZN;
				}

				ccb_h = TAILQ_FIRST(&softc->work_queue);

				if (atio->ccb_h.func_code !=
				    XPT_ACCEPT_TARGET_IO) {
					xpt_print(periph->path, "%s: func_code "
						  "is %#x\n", __func__,
						  atio->ccb_h.func_code);
				}
				start_ccb->ccb_h.func_code = XPT_ABORT;
				start_ccb->cab.abort_ccb = (union ccb *)atio;

				/* Tell the SIM that we've aborted this ATIO */
				xpt_action(start_ccb);
				softc->ccbs_freed++;
				xpt_release_ccb(start_ccb);

				/*
				 * Send the ATIO back down to the SIM.
				 */
				xpt_action((union ccb *)atio);
				softc->atios_sent++;

				/*
				 * If we still have work to do, ask for
				 * another CCB.  Otherwise, deactivate our
				 * callout.
				 */
				if (ccb_h != NULL)
					xpt_schedule(periph, /*priority*/ 1);
				else
					callout_stop(&softc->dma_callout);

				return;
			} else {
				io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
				scsi_status = io->scsiio.scsi_status;
				csio->sense_len = io->scsiio.sense_len;
			}
			data_ptr = NULL;
			dxfer_len = 0;
			if (io == NULL) {
				printf("%s: tag %04x io is NULL\n", __func__,
				       atio->tag_id);
			} else {
#ifdef CTLFEDEBUG
				printf("%s: tag %04x status %x\n", __func__,
				       atio->tag_id, io->io_hdr.status);
#endif
			}
			csio->sglist_cnt = 0;
			if (csio->sense_len != 0) {
				csio->sense_data = io->scsiio.sense_data;
				flags |= CAM_SEND_SENSE;
			} else if (scsi_status == SCSI_STATUS_CHECK_COND) {
				xpt_print(periph->path, "%s: check condition "
					  "with no sense\n", __func__);
			}
		} else {
			struct ctlfe_lun_cmd_info *cmd_info;

			/*
			 * Datamove call, we need to setup the S/G list.
			 */

			cmd_info = (struct ctlfe_lun_cmd_info *)
				io->io_hdr.port_priv;

			KASSERT(sizeof(*cmd_info) < CTL_PORT_PRIV_SIZE,
				("%s: sizeof(struct ctlfe_lun_cmd_info) %zd < "
				 "CTL_PORT_PRIV_SIZE %d", __func__,
				 sizeof(*cmd_info), CTL_PORT_PRIV_SIZE));
			io->io_hdr.flags &= ~CTL_FLAG_DMA_QUEUED;

			/*
			 * Need to zero this, in case it has been used for
			 * a previous datamove for this particular I/O.
			 */
			bzero(cmd_info, sizeof(*cmd_info));
			scsi_status = 0;

			csio->cdb_len = atio->cdb_len;

			ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
			    &csio->sglist_cnt);

			io->scsiio.ext_data_filled += dxfer_len;

			if (io->scsiio.ext_data_filled >
			    io->scsiio.kern_total_len) {
				xpt_print(periph->path, "%s: tag 0x%04x "
					  "fill len %u > total %u\n",
					  __func__, io->scsiio.tag_num,
					  io->scsiio.ext_data_filled,
					  io->scsiio.kern_total_len);
			}
		}

#ifdef CTLFEDEBUG
		printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
		       (flags & CAM_SEND_STATUS) ? "done" : "datamove",
		       atio->tag_id, flags, data_ptr, dxfer_len);
#endif

		/*
		 * Valid combinations:
		 *  - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0,
		 *    sglist_cnt = 0
		 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0,
		 *    sglist_cnt = 0
		 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0,
		 *    sglist_cnt != 0
		 */
#ifdef CTLFEDEBUG
		if (((flags & CAM_SEND_STATUS)
		  && (((flags & CAM_DATA_SG) != 0)
		   || (dxfer_len != 0)
		   || (csio->sglist_cnt != 0)))
		 || (((flags & CAM_SEND_STATUS) == 0)
		  && (dxfer_len == 0))
		 || ((flags & CAM_DATA_SG)
		  && (csio->sglist_cnt == 0))
		 || (((flags & CAM_DATA_SG) == 0)
		  && (csio->sglist_cnt != 0))) {
			printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
			       "%d sg %u\n", __func__, atio->tag_id,
			       atio->cdb_io.cdb_bytes[0], flags, dxfer_len,
			       csio->sglist_cnt);
			if (io != NULL) {
				printf("%s: tag %04x io status %#x\n", __func__,
				       atio->tag_id, io->io_hdr.status);
			} else {
				printf("%s: tag %04x no associated io\n",
				       __func__, atio->tag_id);
			}
		}
#endif
		cam_fill_ctio(csio,
			      /*retries*/ 2,
			      ctlfedone,
			      flags,
			      (flags & CAM_TAG_ACTION_VALID) ?
			       MSG_SIMPLE_Q_TAG : 0,
			      atio->tag_id,
			      atio->init_id,
			      scsi_status,
			      /*data_ptr*/ data_ptr,
			      /*dxfer_len*/ dxfer_len,
			      /*timeout*/ 5 * 1000);
		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
		start_ccb->ccb_h.ccb_atio = atio;
		if (((flags & CAM_SEND_STATUS) == 0)
		 && (io != NULL))
			io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;

		softc->ctios_sent++;

		cam_periph_unlock(periph);
		xpt_action(start_ccb);
		cam_periph_lock(periph);

		if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			cam_release_devq(periph->path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
			atio->ccb_h.status &= ~CAM_DEV_QFRZN;
		}

		ccb_h = TAILQ_FIRST(&softc->work_queue);
	}
	/*
	 * If we still have work to do, ask for another CCB.  Otherwise,
	 * deactivate our callout.
	 */
	if (ccb_h != NULL)
		xpt_schedule(periph, /*priority*/ 1);
	else
		callout_stop(&softc->dma_callout);
}

static void
ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
{
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	switch (ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
		softc->atios_returned++;
		break;
	case XPT_IMMEDIATE_NOTIFY:
	case XPT_NOTIFY_ACKNOWLEDGE:
		softc->inots_returned++;
		break;
	default:
		break;
	}

	free(ccb, M_CTLFE);

	KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: "
		"atios_returned %ju > atios_sent %ju", __func__,
		softc->atios_returned, softc->atios_sent));
	KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: "
		"inots_returned %ju > inots_sent %ju", __func__,
		softc->inots_returned, softc->inots_sent));

	/*
	 * If we have received all of our CCBs, we can release our
	 * reference on the peripheral driver.  It will probably go away
	 * now.
	 */
	if ((softc->atios_returned == softc->atios_sent)
	 && (softc->inots_returned == softc->inots_sent)) {
		cam_periph_release_locked(periph);
	}
}

static int
ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
{
	uint64_t lba;
	uint32_t num_blocks, nbc;
	uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER) ?
	    atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes;

	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */

	switch (cmdbyt[0]) {
	case READ_6:
	case WRITE_6:
	{
		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
		lba = scsi_3btoul(cdb->addr);
		lba &= 0x1fffff;
		num_blocks = cdb->length;
		if (num_blocks == 0)
			num_blocks = 256;
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto3b(lba, cdb->addr);
		cdb->length = num_blocks;
		break;
	}
	case READ_10:
	case WRITE_10:
	{
		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto2b(num_blocks, cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12:
	{
		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16:
	{
		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_u64to8b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	default:
		return -1;
	}
	return (0);
}

static void
ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_softc *bus_softc;
	struct ccb_accept_tio *atio = NULL;
	union ctl_io *io = NULL;
	struct mtx *mtx;

	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
#ifdef CTLFE_DEBUG
	printf("%s: entered, func_code = %#x, type = %#lx\n", __func__,
	       done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type);
#endif

	softc = (struct ctlfe_lun_softc *)periph->softc;
	bus_softc = softc->parent_softc;
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	/*
	 * If the peripheral is invalid, ATIOs and immediate notify CCBs
	 * need to be freed.  Most of the ATIOs and INOTs that come back
	 * will be CCBs that are being returned from the SIM as a result of
	 * our disabling the LUN.
	 *
	 * Other CCB types are handled in their respective cases below.
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		switch (done_ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMEDIATE_NOTIFY:
		case XPT_NOTIFY_ACKNOWLEDGE:
			ctlfe_free_ccb(periph, done_ccb);
			goto out;
		default:
			break;
		}

	}
	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO: {

		atio = &done_ccb->atio;

		softc->atios_returned++;

resubmit:
		/*
		 * Allocate a ctl_io, pass it to CTL, and wait for the
		 * datamove or done.
		 */
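		/*
		 * Note that this point is also reached via the resubmit
		 * label above when an SRR pushes the data pointer back
		 * while status was being sent: the CDB has already been
		 * adjusted by ctlfe_adjust_cdb(), and the ATIO is simply
		 * run through CTL again as a fresh command.
		 */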
		io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
		if (io == NULL) {
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;

			printf("%s: ctl_alloc_io failed!\n", __func__);

			/*
			 * XXX KDM need to set SCSI_STATUS_BUSY, but there
			 * is no field in the ATIO structure to do that,
			 * and we aren't able to allocate a ctl_io here.
			 * What to do?
			 */
			atio->sense_len = 0;
			done_ccb->ccb_h.io_ptr = NULL;
			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			xpt_schedule(periph, /*priority*/ 1);
			break;
		}
		mtx_unlock(mtx);
		ctl_zero_io(io);

		/* Save pointers on both sides */
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
		done_ccb->ccb_h.io_ptr = io;

		/*
		 * Only SCSI I/O comes down this path, resets, etc. come
		 * down the immediate notify path below.
		 */
		io->io_hdr.io_type = CTL_IO_SCSI;
		io->io_hdr.nexus.initid.id = atio->init_id;
		io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
		io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id;
		io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
		io->scsiio.tag_num = atio->tag_id;
		switch (atio->tag_action) {
		case CAM_TAG_ACTION_NONE:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			break;
		case MSG_SIMPLE_TASK:
			io->scsiio.tag_type = CTL_TAG_SIMPLE;
			break;
		case MSG_HEAD_OF_QUEUE_TASK:
			io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
			break;
		case MSG_ORDERED_TASK:
			io->scsiio.tag_type = CTL_TAG_ORDERED;
			break;
		case MSG_ACA_TASK:
			io->scsiio.tag_type = CTL_TAG_ACA;
			break;
		default:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			printf("%s: unhandled tag type %#x!!\n", __func__,
			       atio->tag_action);
			break;
		}
		if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
			       __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
		}
		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
		bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
		      io->scsiio.cdb_len);

#ifdef CTLFEDEBUG
		printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__,
		       (uintmax_t)io->io_hdr.nexus.initid.id,
		       io->io_hdr.nexus.targ_port,
		       (uintmax_t)io->io_hdr.nexus.targ_target.id,
		       io->io_hdr.nexus.targ_lun,
		       io->scsiio.tag_num, io->scsiio.cdb[0]);
#endif

		ctl_queue(io);
		return;
	}
	case XPT_CONT_TARGET_IO: {
		int srr = 0;
		uint32_t srr_off = 0;

		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
		io = (union ctl_io *)atio->ccb_h.io_ptr;

		softc->ctios_returned++;
#ifdef CTLFEDEBUG
		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
		       __func__, atio->tag_id, done_ccb->ccb_h.flags);
#endif
		/*
		 * Handle the SRR case, where the data pointer is pushed
		 * back by the initiator.
		 */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
		 && done_ccb->csio.msg_ptr != NULL
		 && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
		 && done_ccb->csio.msg_ptr[1] == 5
		 && done_ccb->csio.msg_ptr[2] == 0) {
			srr = 1;
			srr_off =
			    (done_ccb->csio.msg_ptr[3] << 24)
			  | (done_ccb->csio.msg_ptr[4] << 16)
			  | (done_ccb->csio.msg_ptr[5] << 8)
			  | (done_ccb->csio.msg_ptr[6]);
		}

		if (srr && (done_ccb->ccb_h.flags & CAM_SEND_STATUS)) {
			/*
			 * If status was being sent, the back end data is
			 * now history.  Hack it up and resubmit a new
			 * command with the CDB adjusted.  If the SIM does
			 * the right thing, all of the resid math should work.
			 */
			softc->ccbs_freed++;
			xpt_release_ccb(done_ccb);
			ctl_free_io(io);
			if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
				done_ccb = (union ccb *)atio;
				goto resubmit;
			}
			/*
			 * Fall through to doom....
			 */
		} else if (srr) {
			/*
			 * If we have an srr and we're still sending data, we
			 * should be able to adjust offsets and cycle again.
			 */
			io->scsiio.kern_rel_offset =
			    io->scsiio.ext_data_filled = srr_off;
			io->scsiio.ext_data_len = io->scsiio.kern_total_len -
			    io->scsiio.kern_rel_offset;
			softc->ccbs_freed++;
			io->scsiio.io_hdr.status = CTL_STATUS_NONE;
			xpt_release_ccb(done_ccb);
			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			xpt_schedule(periph, /*priority*/ 1);
			break;
		}

		/*
		 * If we were sending status back to the initiator, free up
		 * resources.  If we were doing a datamove, call the
		 * datamove done routine.
		 */
		if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) {
			softc->ccbs_freed++;
			xpt_release_ccb(done_ccb);
			ctl_free_io(io);
			/*
			 * For a wildcard attachment, commands can come in
			 * with a specific target/lun.  Reset the target
			 * and LUN fields back to the wildcard values before
			 * we send them back down to the SIM.  The SIM has
			 * a wildcard LUN enabled, not whatever target/lun
			 * these happened to be.
			 */
			if (softc->flags & CTLFE_LUN_WILDCARD) {
				atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
				atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
			}
			if (periph->flags & CAM_PERIPH_INVALID) {
				ctlfe_free_ccb(periph, (union ccb *)atio);
			} else {
				softc->atios_sent++;
				mtx_unlock(mtx);
				xpt_action((union ccb *)atio);
				return;
			}
		} else {
			struct ctlfe_lun_cmd_info *cmd_info;
			struct ccb_scsiio *csio;

			csio = &done_ccb->csio;
			cmd_info = (struct ctlfe_lun_cmd_info *)
				io->io_hdr.port_priv;

			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;

			io->scsiio.ext_data_len += csio->dxfer_len;
			if (io->scsiio.ext_data_len >
			    io->scsiio.kern_total_len) {
				xpt_print(periph->path, "%s: tag 0x%04x "
					  "done len %u > total %u sent %u\n",
					  __func__, io->scsiio.tag_num,
					  io->scsiio.ext_data_len,
					  io->scsiio.kern_total_len,
					  io->scsiio.ext_data_filled);
			}
			/*
			 * Translate CAM status to CTL status.  Success
			 * does not change the overall ctl_io status.  In
			 * that case we just set port_status to 0.  If we
			 * have a failure, though, set a data phase error
			 * for the overall ctl_io.
			 */
			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
			case CAM_REQ_CMP:
				io->io_hdr.port_status = 0;
				break;
			default:
				/*
				 * XXX KDM we probably need to figure out a
				 * standard set of errors that the SIM
				 * drivers should return in the event of a
				 * data transfer failure.  A data phase
				 * error will at least point the user to a
				 * data transfer error of some sort.
				 * Hopefully the SIM printed out some
				 * additional information to give the user
				 * a clue what happened.
				 */
				io->io_hdr.port_status = 0xbad1;
				ctl_set_data_phase_error(&io->scsiio);
				/*
				 * XXX KDM figure out residual.
				 */
				break;
			}
			/*
			 * If we had to break this S/G list into multiple
			 * pieces, figure out where we are in the list, and
			 * continue sending pieces if necessary.
			 */
			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
			 && (io->io_hdr.port_status == 0)) {
				ccb_flags flags;
				uint8_t scsi_status;
				uint8_t *data_ptr;
				uint32_t dxfer_len;

				flags = atio->ccb_h.flags &
					(CAM_DIS_DISCONNECT|
					 CAM_TAG_ACTION_VALID);

				ctlfedata(softc, io, &flags, &data_ptr,
				    &dxfer_len, &csio->sglist_cnt);

				scsi_status = 0;

				if (((flags & CAM_SEND_STATUS) == 0)
				 && (dxfer_len == 0)) {
					printf("%s: tag %04x no status or "
					       "len cdb = %02x\n", __func__,
					       atio->tag_id,
					       atio->cdb_io.cdb_bytes[0]);
					printf("%s: tag %04x io status %#x\n",
					       __func__, atio->tag_id,
					       io->io_hdr.status);
				}

				cam_fill_ctio(csio,
					      /*retries*/ 2,
					      ctlfedone,
					      flags,
					      (flags & CAM_TAG_ACTION_VALID) ?
					       MSG_SIMPLE_Q_TAG : 0,
					      atio->tag_id,
					      atio->init_id,
					      scsi_status,
					      /*data_ptr*/ data_ptr,
					      /*dxfer_len*/ dxfer_len,
					      /*timeout*/ 5 * 1000);

				csio->ccb_h.flags |= CAM_UNLOCKED;
				csio->resid = 0;
				csio->ccb_h.ccb_atio = atio;
				io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
				softc->ctios_sent++;
				mtx_unlock(mtx);
				xpt_action((union ccb *)csio);
			} else {
				/*
				 * Release the CTIO.  The ATIO will be sent back
				 * down to the SIM once we send status.
				 */
				softc->ccbs_freed++;
				xpt_release_ccb(done_ccb);
				mtx_unlock(mtx);

				/* Call the backend move done callback */
				io->scsiio.be_move_done(io);
			}
			return;
		}
		break;
	}
	case XPT_IMMEDIATE_NOTIFY: {
		union ctl_io *io;
		struct ccb_immediate_notify *inot;
		cam_status status;
		int frozen;

		inot = &done_ccb->cin1;

		softc->inots_returned++;

		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;

		printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x "
		       "seq %#x\n", __func__, inot->ccb_h.status,
		       inot->tag_id, inot->seq_id);

		io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
		if (io != NULL) {
			int send_ctl_io;

			send_ctl_io = 1;

			ctl_zero_io(io);
			io->io_hdr.io_type = CTL_IO_TASK;
			io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
			inot->ccb_h.io_ptr = io;
			io->io_hdr.nexus.initid.id = inot->initiator_id;
			io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
			io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id;
			io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
			/* XXX KDM should this be the tag_id? */
			io->taskio.tag_num = inot->seq_id;

			status = inot->ccb_h.status & CAM_STATUS_MASK;
			switch (status) {
			case CAM_SCSI_BUS_RESET:
				io->taskio.task_action = CTL_TASK_BUS_RESET;
				break;
			case CAM_BDR_SENT:
				io->taskio.task_action = CTL_TASK_TARGET_RESET;
				break;
			case CAM_MESSAGE_RECV:
				switch (inot->arg) {
				case MSG_ABORT_TASK_SET:
					/*
					 * XXX KDM this isn't currently
					 * supported by CTL.  It ends up
					 * being a no-op.
					 */
					io->taskio.task_action =
						CTL_TASK_ABORT_TASK_SET;
					break;
				case MSG_TARGET_RESET:
					io->taskio.task_action =
						CTL_TASK_TARGET_RESET;
					break;
				case MSG_ABORT_TASK:
					io->taskio.task_action =
						CTL_TASK_ABORT_TASK;
					break;
				case MSG_LOGICAL_UNIT_RESET:
					io->taskio.task_action =
						CTL_TASK_LUN_RESET;
					break;
				case MSG_CLEAR_TASK_SET:
					/*
					 * XXX KDM this isn't currently
					 * supported by CTL.  It ends up
					 * being a no-op.
					 */
					io->taskio.task_action =
						CTL_TASK_CLEAR_TASK_SET;
					break;
				case MSG_CLEAR_ACA:
					io->taskio.task_action =
						CTL_TASK_CLEAR_ACA;
					break;
				case MSG_NOOP:
					send_ctl_io = 0;
					break;
				default:
					xpt_print(periph->path, "%s: "
						  "unsupported message 0x%x\n",
						  __func__, inot->arg);
					send_ctl_io = 0;
					break;
				}
				break;
			case CAM_REQ_ABORTED:
				/*
				 * This request was sent back by the driver.
				 * XXX KDM what do we do here?
				 */
				send_ctl_io = 0;
				break;
			case CAM_REQ_INVALID:
			case CAM_PROVIDE_FAIL:
			default:
				/*
				 * We should only get here if we're talking
				 * to a SIM that is target capable but
				 * supports the old API.  In that case, we
				 * need to just free the CCB.  If we actually
				 * send a notify acknowledge, it will send
				 * that back with an error as well.
				 */

				if ((status != CAM_REQ_INVALID)
				 && (status != CAM_PROVIDE_FAIL))
					xpt_print(periph->path, "%s: "
						  "unsupported CAM status "
						  "0x%x\n", __func__, status);

				ctl_free_io(io);
				ctlfe_free_ccb(periph, done_ccb);

				goto out;
			}
			if (send_ctl_io != 0) {
				ctl_queue(io);
			} else {
				ctl_free_io(io);
				done_ccb->ccb_h.status = CAM_REQ_INPROG;
				done_ccb->ccb_h.func_code =
					XPT_NOTIFY_ACKNOWLEDGE;
				xpt_action(done_ccb);
			}
		} else {
			xpt_print(periph->path, "%s: could not allocate "
				  "ctl_io for immediate notify!\n", __func__);
			/* requeue this to the adapter */
			done_ccb->ccb_h.status = CAM_REQ_INPROG;
			done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
			xpt_action(done_ccb);
		}

		if (frozen != 0) {
			cam_release_devq(periph->path,
					 /*relsim_flags*/ 0,
					 /*opening reduction*/ 0,
					 /*timeout*/ 0,
					 /*getcount_only*/ 0);
		}
		break;
	}
	case XPT_NOTIFY_ACKNOWLEDGE:
		/*
		 * Queue this back down to the SIM as an immediate notify.
		 */
		done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		xpt_action(done_ccb);
		softc->inots_sent++;
		break;
	case XPT_SET_SIM_KNOB:
	case XPT_GET_SIM_KNOB:
		break;
	default:
		panic("%s: unexpected CCB type %#x", __func__,
		      done_ccb->ccb_h.func_code);
		break;
	}

out:
	mtx_unlock(mtx);
}

static void
ctlfe_onoffline(void *arg, int online)
{
	struct ctlfe_softc *bus_softc;
	union ccb *ccb;
	cam_status status;
	struct cam_path *path;
	int set_wwnn;

	bus_softc = (struct ctlfe_softc *)arg;

	set_wwnn = 0;

	status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path!\n", __func__);
		return;
	}
	ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO);
	if (ccb == NULL) {
		printf("%s: unable to malloc CCB!\n", __func__);
		xpt_free_path(path);
		return;
	}
	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE);

	/*
	 * Copan WWN format:
	 *
	 * Bits 63-60:	0x5		NAA, IEEE registered name
	 * Bits 59-36:	0x000ED5	IEEE Company name assigned to Copan
	 * Bits 35-12:	Copan SSN (Sequential Serial Number)
	 * Bits 11-8:	Type of port:
	 *			1 == N-Port
	 *			2 == F-Port
	 *			3 == NL-Port
	 * Bits 7-0:	0 == Node Name, >0 == Port Number
	 */

	if (online != 0) {

		ccb->ccb_h.func_code = XPT_GET_SIM_KNOB;

		xpt_action(ccb);

		if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0) {
#ifdef RANDOM_WWNN
			uint64_t random_bits;
#endif

			printf("%s: %s current WWNN %#jx\n", __func__,
			       bus_softc->port_name,
			       ccb->knob.xport_specific.fc.wwnn);
			printf("%s: %s current WWPN %#jx\n", __func__,
			       bus_softc->port_name,
			       ccb->knob.xport_specific.fc.wwpn);

#ifdef RANDOM_WWNN
			arc4rand(&random_bits, sizeof(random_bits), 0);
#endif

			/*
			 * XXX KDM this is a bit of a kludge for now.  We
			 * take the current WWNN/WWPN from the card, and
			 * replace the company identifier and the NL-Port
			 * indicator and the port number (for the WWPN).
			 * This should be replaced later with ddb_GetWWNN,
			 * or possibly a more centralized scheme.  (It
			 * would be nice to have the WWNN/WWPN for each
			 * port stored in the ctl_frontend structure.)
			 */
#ifdef RANDOM_WWNN
			ccb->knob.xport_specific.fc.wwnn =
				(random_bits &
				 0x0000000fffffff00ULL) |
				/* Company ID */ 0x5000ED5000000000ULL |
				/* NL-Port */	 0x0300;
			ccb->knob.xport_specific.fc.wwpn =
				(random_bits &
				 0x0000000fffffff00ULL) |
				/* Company ID */ 0x5000ED5000000000ULL |
				/* NL-Port */	 0x3000 |
				/* Port Num */ (bus_softc->fe.targ_port & 0xff);

			/*
			 * This is a bit of an API break/reversal, but if
			 * we're doing the random WWNN that's a little
			 * different anyway.  So record what we're actually
			 * using with the frontend code so it's reported
			 * accurately.
			 */
			bus_softc->fe.wwnn =
				ccb->knob.xport_specific.fc.wwnn;
			bus_softc->fe.wwpn =
				ccb->knob.xport_specific.fc.wwpn;
			set_wwnn = 1;
#else /* RANDOM_WWNN */
			/*
			 * If the user has specified a WWNN/WWPN, send them
			 * down to the SIM.  Otherwise, record what the SIM
			 * has reported.
			 */
			if ((bus_softc->fe.wwnn != 0)
			 && (bus_softc->fe.wwpn != 0)) {
				ccb->knob.xport_specific.fc.wwnn =
					bus_softc->fe.wwnn;
				ccb->knob.xport_specific.fc.wwpn =
					bus_softc->fe.wwpn;
				set_wwnn = 1;
			} else {
				bus_softc->fe.wwnn =
					ccb->knob.xport_specific.fc.wwnn;
				bus_softc->fe.wwpn =
					ccb->knob.xport_specific.fc.wwpn;
			}
#endif /* RANDOM_WWNN */

			if (set_wwnn != 0) {
				printf("%s: %s new WWNN %#jx\n", __func__,
				       bus_softc->port_name,
				       ccb->knob.xport_specific.fc.wwnn);
				printf("%s: %s new WWPN %#jx\n", __func__,
				       bus_softc->port_name,
				       ccb->knob.xport_specific.fc.wwpn);
			}
		} else {
			printf("%s: %s has no valid WWNN/WWPN\n", __func__,
			       bus_softc->port_name);
		}
	}
	ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
	ccb->knob.xport_specific.valid = KNOB_VALID_ROLE;
	if (set_wwnn != 0)
		ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS;

	if (online != 0)
		ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET;
	else
		ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE;

	xpt_action(ccb);

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		printf("%s: SIM %s (path id %d) target %s failed with "
		       "status %#x\n",
		       __func__, bus_softc->port_name, bus_softc->path_id,
		       (online != 0) ? "enable" : "disable",
		       ccb->ccb_h.status);
	} else {
		printf("%s: SIM %s (path id %d) target %s succeeded\n",
		       __func__, bus_softc->port_name, bus_softc->path_id,
		       (online != 0) ? "enable" : "disable");
	}

	xpt_free_path(path);

	free(ccb, M_TEMP);

	return;
}

static void
ctlfe_online(void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct cam_path *path;
	cam_status status;
	struct ctlfe_lun_softc *lun_softc;

	bus_softc = (struct ctlfe_softc *)arg;

	/*
	 * Create the wildcard LUN before bringing the port online.
	 */
	status = xpt_create_path(&path, /*periph*/ NULL,
				 bus_softc->path_id, CAM_TARGET_WILDCARD,
				 CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path for wildcard periph\n",
		       __func__);
		return;
	}

	lun_softc = malloc(sizeof(*lun_softc), M_CTLFE,
			   M_NOWAIT | M_ZERO);
	if (lun_softc == NULL) {
		xpt_print(path, "%s: unable to allocate softc for "
			  "wildcard periph\n", __func__);
		xpt_free_path(path);
		return;
	}

	xpt_path_lock(path);
	lun_softc->parent_softc = bus_softc;
	lun_softc->flags |= CTLFE_LUN_WILDCARD;

	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, lun_softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);

	status = cam_periph_alloc(ctlferegister,
				  ctlfeoninvalidate,
				  ctlfecleanup,
				  ctlfestart,
				  "ctl",
				  CAM_PERIPH_BIO,
				  path,
				  ctlfeasync,
				  0,
				  lun_softc);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		const struct cam_status_entry *entry;

		entry = cam_fetch_status_entry(status);

		printf("%s: CAM error %s (%#x) returned from "
		       "cam_periph_alloc()\n", __func__, (entry != NULL) ?
		       entry->status_text : "Unknown", status);
	}

	ctlfe_onoffline(arg, /*online*/ 1);

	xpt_path_unlock(path);
	xpt_free_path(path);
}

static void
ctlfe_offline(void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct cam_path *path;
	cam_status status;
	struct cam_periph *periph;

	bus_softc = (struct ctlfe_softc *)arg;

	/*
	 * Disable the wildcard LUN for this port now that we have taken
	 * the port offline.
	 */
	status = xpt_create_path(&path, /*periph*/ NULL,
				 bus_softc->path_id, CAM_TARGET_WILDCARD,
				 CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("%s: unable to create path for wildcard periph\n",
		       __func__);
		return;
	}

	xpt_path_lock(path);

	ctlfe_onoffline(arg, /*online*/ 0);

	if ((periph = cam_periph_find(path, "ctl")) != NULL)
		cam_periph_invalidate(periph);

	xpt_path_unlock(path);
	xpt_free_path(path);
}

static int
ctlfe_targ_enable(void *arg, struct ctl_id targ_id)
{
	return (0);
}

static int
ctlfe_targ_disable(void *arg, struct ctl_id targ_id)
{
	return (0);
}

/*
 * This will get called to enable a LUN on every bus that is attached to
 * CTL.  So we only need to create a path/periph for this particular bus.
 */
static int
ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;
	struct cam_path *path;
	struct cam_periph *periph;
	cam_status status;

	bus_softc = (struct ctlfe_softc *)arg;

	status = xpt_create_path(&path, /*periph*/ NULL,
				 bus_softc->path_id,
				 targ_id.id, lun_id);
	/* XXX KDM need some way to return status to CTL here? */
	if (status != CAM_REQ_CMP) {
		printf("%s: could not create path, status %#x\n", __func__,
		       status);
		return (1);
	}

	softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO);
	xpt_path_lock(path);
	periph = cam_periph_find(path, "ctl");
	if (periph != NULL) {
		/* We've already got a periph, no need to alloc a new one. */
		xpt_path_unlock(path);
		xpt_free_path(path);
		free(softc, M_CTLFE);
		return (0);
	}

	softc->parent_softc = bus_softc;
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);

	status = cam_periph_alloc(ctlferegister,
				  ctlfeoninvalidate,
				  ctlfecleanup,
				  ctlfestart,
				  "ctl",
				  CAM_PERIPH_BIO,
				  path,
				  ctlfeasync,
				  0,
				  softc);

	xpt_path_unlock(path);
	xpt_free_path(path);
	return (0);
}

/*
 * This will get called when the user removes a LUN to disable that LUN
 * on every bus that is attached to CTL.
/*
 * This will get called when the user removes a LUN to disable that LUN
 * on every bus that is attached to CTL.
 */
static int
ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
	struct ctlfe_softc *softc;
	struct ctlfe_lun_softc *lun_softc;

	softc = (struct ctlfe_softc *)arg;

	mtx_lock(&softc->lun_softc_mtx);
	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
		struct cam_path *path;

		path = lun_softc->periph->path;

		if ((xpt_path_target_id(path) == targ_id.id)
		 && (xpt_path_lun_id(path) == lun_id)) {
			break;
		}
	}
	if (lun_softc == NULL) {
		mtx_unlock(&softc->lun_softc_mtx);
		printf("%s: can't find target %d lun %d\n", __func__,
		       targ_id.id, lun_id);
		return (1);
	}
	cam_periph_acquire(lun_softc->periph);
	mtx_unlock(&softc->lun_softc_mtx);

	cam_periph_lock(lun_softc->periph);
	cam_periph_invalidate(lun_softc->periph);
	cam_periph_unlock(lun_softc->periph);
	cam_periph_release(lun_softc->periph);
	return (0);
}

/*
 * Print the SIM's queue depth settings for debugging.
 */
static void
ctlfe_dump_sim(struct cam_sim *sim)
{

	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
	       sim->sim_name, sim->unit_number,
	       sim->max_tagged_dev_openings, sim->max_dev_openings);
	printf("\n");
}

/*
 * Dump every CCB on this LUN's work queue, along with the CCB and CTIO
 * counters.  Assumes that the SIM lock is held.
 */
static void
ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
{
	struct ccb_hdr *hdr;
	struct cam_periph *periph;
	int num_items;

	periph = softc->periph;
	num_items = 0;

	TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) {
		union ctl_io *io;

		io = hdr->io_ptr;

		num_items++;

		/*
		 * This can happen when we get an ATIO but can't allocate
		 * a ctl_io.  See the XPT_ACCEPT_TARGET_IO case in
		 * ctlfedone().
		 */
		if (io == NULL) {
			struct ccb_scsiio *csio;

			csio = (struct ccb_scsiio *)hdr;

			xpt_print(periph->path, "CCB %#x ctl_io allocation "
				  "failed\n", csio->tag_id);
			continue;
		}

		/*
		 * Only regular SCSI I/O is put on the work queue, so we
		 * can print sense here.  There may be no sense if the CCB
		 * is on the queue for a DMA, but this serves to print out
		 * the CCB as well.
		 *
		 * XXX KDM switch this over to scsi_sense_print() when
		 * CTL is merged in with CAM.
		 */
		ctl_io_error_print(io, NULL);

		/*
		 * If status has been set, we're sending it back to the
		 * initiator, so we're on the queue waiting for a CTIO to
		 * do that.
		 */
		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
			continue;

		/*
		 * Otherwise, we're on the queue waiting to do a data
		 * transfer.
		 */
		xpt_print(periph->path, "Total %u, Current %u, Resid %u\n",
			  io->scsiio.kern_total_len, io->scsiio.kern_data_len,
			  io->scsiio.kern_data_resid);
	}

	xpt_print(periph->path, "%d requests total waiting for CCBs\n",
		  num_items);
	xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
		  "freed)\n", (uintmax_t)(softc->ccbs_alloced -
		  softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
		  (uintmax_t)softc->ccbs_freed);
	xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
		  "returned)\n", (uintmax_t)(softc->ctios_sent -
		  softc->ctios_returned), (uintmax_t)softc->ctios_sent,
		  (uintmax_t)softc->ctios_returned);
}

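/*
 * How the timeout below gets armed (for reference): each time
 * ctlfe_datamove_done() queues a DMA or status request on the work
 * queue, it resets the per-LUN callout along the lines of:
 *
 *	callout_reset(&softc->dma_callout, CTLFE_DMA_TIMEOUT * hz,
 *		      ctlfe_dma_timeout, softc);
 *
 * so the handler only runs when CTLFE_DMA_TIMEOUT seconds elapse without
 * the callout being reset (or stopped) again.
 */
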
/*
 * This function is called when we fail to get a CCB for a DMA or for a
 * status return to the initiator within the specified time period.
 *
 * The callout code should ensure that we hold the SIM mutex here.
 */
static void
ctlfe_dma_timeout(void *arg)
{
	struct ctlfe_lun_softc *softc;
	struct cam_periph *periph;
	struct cam_sim *sim;
	int num_queued;

	softc = (struct ctlfe_lun_softc *)arg;
	periph = softc->periph;
	sim = xpt_path_sim(periph->path);
	num_queued = 0;

	/*
	 * Nothing to do...
	 */
	if (TAILQ_FIRST(&softc->work_queue) == NULL) {
		xpt_print(periph->path, "TIMEOUT triggered after %d "
			  "seconds, but nothing on work queue??\n",
			  CTLFE_DMA_TIMEOUT);
		return;
	}

	xpt_print(periph->path, "TIMEOUT (%d seconds) waiting for DMA to "
		  "start\n", CTLFE_DMA_TIMEOUT);

	ctlfe_dump_queue(softc);

	ctlfe_dump_sim(sim);

	xpt_print(periph->path, "calling xpt_schedule() to attempt to "
		  "unstick our queue\n");

	xpt_schedule(periph, /*priority*/ 1);

	xpt_print(periph->path, "xpt_schedule() call complete\n");
}

/*
 * Datamove/done routine called by CTL.  Put ourselves on the queue to
 * receive a CCB from CAM so we can queue the continue I/O request down
 * to the adapter.
 */
static void
ctlfe_datamove_done(union ctl_io *io)
{
	union ccb *ccb;
	struct cam_periph *periph;
	struct ctlfe_lun_softc *softc;

	ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	periph = xpt_path_periph(ccb->ccb_h.path);
	cam_periph_lock(periph);

	softc = (struct ctlfe_lun_softc *)periph->softc;

	if (io->io_hdr.io_type == CTL_IO_TASK) {
		/*
		 * Task management commands don't require any further
		 * communication back to the adapter.  Requeue the CCB
		 * to the adapter, and free the CTL I/O.
		 */
		xpt_print(ccb->ccb_h.path, "%s: returning task I/O "
			  "tag %#x seq %#x\n", __func__,
			  ccb->cin1.tag_id, ccb->cin1.seq_id);
		/*
		 * Send the notify acknowledge down to the SIM, to let it
		 * know we processed the task management command.
		 */
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
		xpt_action(ccb);
		ctl_free_io(io);
	} else {
		if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
			io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
		else
			io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;

		TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
				  periph_links.tqe);

		/*
		 * Reset the timeout for our latest active DMA.
		 */
		callout_reset(&softc->dma_callout,
			      CTLFE_DMA_TIMEOUT * hz,
			      ctlfe_dma_timeout, softc);
		/*
		 * Ask the CAM transport layer to send us a CCB to do the
		 * DMA or send status, unless ctlfe_dma_enabled is set to 0.
		 */
		if (ctlfe_dma_enabled != 0)
			xpt_schedule(periph, /*priority*/ 1);
	}

	cam_periph_unlock(periph);
}

/*
 * Dump the work queue of every LUN on every bus registered with this
 * frontend, for debugging.
 */
static void
ctlfe_dump(void)
{
	struct ctlfe_softc *bus_softc;

	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
		struct ctlfe_lun_softc *lun_softc;

		ctlfe_dump_sim(bus_softc->sim);

		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) {
			ctlfe_dump_queue(lun_softc);
		}
	}
}

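/*
 * Sketch of how these entry points are assumed to be handed to CTL when a
 * bus registers with the frontend (the field names and registration call
 * below are assumptions; see ctl_frontend.h and the registration path
 * earlier in this file for the real wiring):
 *
 *	fe->port_online  = ctlfe_online;
 *	fe->port_offline = ctlfe_offline;
 *	fe->onoff_arg    = bus_softc;
 *	fe->fe_datamove  = ctlfe_datamove_done;
 *	fe->fe_done      = ctlfe_datamove_done;
 *	fe->fe_dump      = ctlfe_dump;
 *	ctl_frontend_register(fe, master_shelf);
 *
 * Once registered, CTL drives port state, LUN state, data movement, and
 * debugging dumps entirely through these callbacks.
 */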