1 /*- 2 * Copyright (c) 2008, 2009 Silicon Graphics International Corp. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * substantially similar to the "NO WARRANTY" disclaimer below 13 * ("Disclaimer") and any redistribution must be conditioned upon 14 * including a substantially similar Disclaimer requirement for further 15 * binary redistribution. 16 * 17 * NO WARRANTY 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGES. 29 * 30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ 31 */ 32 /* 33 * Peripheral driver interface between CAM and CTL (CAM Target Layer). 34 * 35 * Author: Ken Merry <ken@FreeBSD.org> 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 #include <sys/param.h> 42 #include <sys/queue.h> 43 #include <sys/systm.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/mutex.h> 47 #include <sys/condvar.h> 48 #include <sys/malloc.h> 49 #include <sys/bus.h> 50 #include <sys/endian.h> 51 #include <sys/sbuf.h> 52 #include <sys/sysctl.h> 53 #include <sys/types.h> 54 #include <sys/systm.h> 55 #include <machine/bus.h> 56 57 #include <cam/cam.h> 58 #include <cam/cam_ccb.h> 59 #include <cam/cam_periph.h> 60 #include <cam/cam_queue.h> 61 #include <cam/cam_xpt_periph.h> 62 #include <cam/cam_debug.h> 63 #include <cam/cam_sim.h> 64 #include <cam/cam_xpt.h> 65 66 #include <cam/scsi/scsi_all.h> 67 #include <cam/scsi/scsi_message.h> 68 69 #include <cam/ctl/ctl_io.h> 70 #include <cam/ctl/ctl.h> 71 #include <cam/ctl/ctl_frontend.h> 72 #include <cam/ctl/ctl_util.h> 73 #include <cam/ctl/ctl_error.h> 74 75 typedef enum { 76 CTLFE_CCB_WAITING = 0x01 77 } ctlfe_ccb_types; 78 79 struct ctlfe_softc { 80 struct ctl_frontend fe; 81 path_id_t path_id; 82 struct cam_sim *sim; 83 char port_name[DEV_IDLEN]; 84 STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; 85 STAILQ_ENTRY(ctlfe_softc) links; 86 }; 87 88 STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; 89 struct mtx ctlfe_list_mtx; 90 static char ctlfe_mtx_desc[] = "ctlfelist"; 91 static int ctlfe_dma_enabled = 1; 92 #ifdef CTLFE_INIT_ENABLE 93 static int ctlfe_max_targets = 1; 94 static int ctlfe_num_targets = 0; 95 #endif 96 97 typedef enum { 98 CTLFE_LUN_NONE = 0x00, 99 CTLFE_LUN_WILDCARD = 0x01 100 } ctlfe_lun_flags; 101 102 struct ctlfe_lun_softc { 103 struct ctlfe_softc *parent_softc; 104 struct cam_periph *periph; 105 ctlfe_lun_flags flags; 106 struct callout dma_callout; 107 uint64_t ccbs_alloced; 108 uint64_t 
ccbs_freed; 109 uint64_t ctios_sent; 110 uint64_t ctios_returned; 111 uint64_t atios_sent; 112 uint64_t atios_returned; 113 uint64_t inots_sent; 114 uint64_t inots_returned; 115 /* bus_dma_tag_t dma_tag; */ 116 TAILQ_HEAD(, ccb_hdr) work_queue; 117 STAILQ_ENTRY(ctlfe_lun_softc) links; 118 }; 119 120 typedef enum { 121 CTLFE_CMD_NONE = 0x00, 122 CTLFE_CMD_PIECEWISE = 0x01 123 } ctlfe_cmd_flags; 124 125 /* 126 * The size limit of this structure is CTL_PORT_PRIV_SIZE, from ctl_io.h. 127 * Currently that is 600 bytes. 128 */ 129 struct ctlfe_lun_cmd_info { 130 int cur_transfer_index; 131 ctlfe_cmd_flags flags; 132 /* 133 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 134 * bytes on amd64. So with 32 elements, this is 256 bytes on 135 * i386 and 512 bytes on amd64. 136 */ 137 bus_dma_segment_t cam_sglist[32]; 138 }; 139 140 /* 141 * When we register the adapter/bus, request that this many ctl_ios be 142 * allocated. This should be the maximum supported by the adapter, but we 143 * currently don't have a way to get that back from the path inquiry. 144 * XXX KDM add that to the path inquiry. 145 */ 146 #define CTLFE_REQ_CTL_IO 4096 147 /* 148 * Number of Accept Target I/O CCBs to allocate and queue down to the 149 * adapter per LUN. 150 * XXX KDM should this be controlled by CTL? 151 */ 152 #define CTLFE_ATIO_PER_LUN 1024 153 /* 154 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to 155 * allocate and queue down to the adapter per LUN. 156 * XXX KDM should this be controlled by CTL? 157 */ 158 #define CTLFE_IN_PER_LUN 1024 159 160 /* 161 * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending 162 * status to the initiator. The SIM is expected to have its own timeouts, 163 * so we're not putting this timeout around the CCB execution time. The 164 * SIM should timeout and let us know if it has an issue. 165 */ 166 #define CTLFE_DMA_TIMEOUT 60 167 168 /* 169 * Turn this on to enable extra debugging prints. 170 */ 171 #if 0 172 #define CTLFE_DEBUG 173 #endif 174 175 /* 176 * Use randomly assigned WWNN/WWPN values. This is to work around an issue 177 * in the FreeBSD initiator that makes it unable to rescan the target if 178 * the target gets rebooted and the WWNN/WWPN stay the same. 
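 * (To enable this, change the "#if 0" below to "#if 1"; ctlfe_onoffline()
 * will then generate a random WWNN/WWPN, keeping the Copan company ID bits,
 * each time the port is brought online.)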
179 */ 180 #if 0 181 #define RANDOM_WWNN 182 #endif 183 184 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW, 185 &ctlfe_dma_enabled, 0, "DMA enabled"); 186 MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); 187 188 #define ccb_type ppriv_field0 189 /* This is only used in the ATIO */ 190 #define io_ptr ppriv_ptr1 191 192 /* This is only used in the CTIO */ 193 #define ccb_atio ppriv_ptr1 194 195 int ctlfeinitialize(void); 196 void ctlfeshutdown(void); 197 static periph_init_t ctlfeinit; 198 static void ctlfeasync(void *callback_arg, uint32_t code, 199 struct cam_path *path, void *arg); 200 static periph_ctor_t ctlferegister; 201 static periph_oninv_t ctlfeoninvalidate; 202 static periph_dtor_t ctlfecleanup; 203 static periph_start_t ctlfestart; 204 static void ctlfedone(struct cam_periph *periph, 205 union ccb *done_ccb); 206 207 static void ctlfe_onoffline(void *arg, int online); 208 static void ctlfe_online(void *arg); 209 static void ctlfe_offline(void *arg); 210 static int ctlfe_targ_enable(void *arg, struct ctl_id targ_id); 211 static int ctlfe_targ_disable(void *arg, struct ctl_id targ_id); 212 static int ctlfe_lun_enable(void *arg, struct ctl_id targ_id, 213 int lun_id); 214 static int ctlfe_lun_disable(void *arg, struct ctl_id targ_id, 215 int lun_id); 216 static void ctlfe_dump_sim(struct cam_sim *sim); 217 static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); 218 static void ctlfe_dma_timeout(void *arg); 219 static void ctlfe_datamove_done(union ctl_io *io); 220 static void ctlfe_dump(void); 221 222 static struct periph_driver ctlfe_driver = 223 { 224 ctlfeinit, "ctl", 225 TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0 226 }; 227 PERIPHDRIVER_DECLARE(ctl, ctlfe_driver); 228 229 extern struct ctl_softc *control_softc; 230 extern int ctl_disable; 231 232 int 233 ctlfeinitialize(void) 234 { 235 cam_status status; 236 237 /* Don't initialize if we're disabled */ 238 if (ctl_disable != 0) 239 return (0); 240 241 STAILQ_INIT(&ctlfe_softc_list); 242 243 mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); 244 245 xpt_lock_buses(); 246 periphdriver_register(&ctlfe_driver); 247 xpt_unlock_buses(); 248 249 status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | 250 AC_CONTRACT, ctlfeasync, NULL, NULL); 251 252 if (status != CAM_REQ_CMP) { 253 printf("ctl: Failed to attach async callback due to CAM " 254 "status 0x%x!\n", status); 255 } 256 257 return (0); 258 } 259 260 void 261 ctlfeshutdown(void) 262 { 263 return; 264 } 265 266 void 267 ctlfeinit(void) 268 { 269 cam_status status; 270 271 /* Don't initialize if we're disabled */ 272 if (ctl_disable != 0) 273 return; 274 275 STAILQ_INIT(&ctlfe_softc_list); 276 277 mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); 278 279 KASSERT(control_softc != NULL, ("CTL is not initialized!")); 280 281 status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | 282 AC_CONTRACT, ctlfeasync, NULL, NULL); 283 284 if (status != CAM_REQ_CMP) { 285 printf("ctl: Failed to attach async callback due to CAM " 286 "status 0x%x!\n", status); 287 } 288 } 289 290 static void 291 ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) 292 { 293 294 #ifdef CTLFEDEBUG 295 printf("%s: entered\n", __func__); 296 #endif 297 298 /* 299 * When a new path gets registered, and it is capable of target 300 * mode, go ahead and attach. Later on, we may need to be more 301 * selective, but for now this will be sufficient. 
302 */ 303 switch (code) { 304 case AC_PATH_REGISTERED: { 305 struct ctl_frontend *fe; 306 struct ctlfe_softc *bus_softc; 307 struct ctlfe_lun_softc *lun_softc; 308 struct cam_path *path; 309 struct ccb_pathinq *cpi; 310 cam_status status; 311 int retval; 312 313 cpi = (struct ccb_pathinq *)arg; 314 315 /* Don't attach if it doesn't support target mode */ 316 if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { 317 #ifdef CTLFEDEBUG 318 printf("%s: SIM %s%d doesn't support target mode\n", 319 __func__, cpi->dev_name, cpi->unit_number); 320 #endif 321 break; 322 } 323 324 #ifdef CTLFE_INIT_ENABLE 325 if (ctlfe_num_targets >= ctlfe_max_targets) { 326 union ccb *ccb; 327 struct cam_sim *sim; 328 329 ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, 330 M_NOWAIT | M_ZERO); 331 if (ccb == NULL) { 332 printf("%s: unable to malloc CCB!\n", __func__); 333 xpt_free_path(path); 334 return; 335 } 336 xpt_setup_ccb(&ccb->ccb_h, cpi->ccb_h.path, 337 /*priority*/ 1); 338 339 sim = xpt_path_sim(cpi->ccb_h.path); 340 341 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 342 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 343 ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR; 344 345 /* We should hold the SIM lock here */ 346 mtx_assert(sim->mtx, MA_OWNED); 347 348 xpt_action(ccb); 349 350 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != 351 CAM_REQ_CMP) { 352 printf("%s: SIM %s%d (path id %d) initiator " 353 "enable failed with status %#x\n", 354 __func__, cpi->dev_name, 355 cpi->unit_number, cpi->ccb_h.path_id, 356 ccb->ccb_h.status); 357 } else { 358 printf("%s: SIM %s%d (path id %d) initiator " 359 "enable succeeded\n", 360 __func__, cpi->dev_name, 361 cpi->unit_number, cpi->ccb_h.path_id); 362 } 363 364 free(ccb, M_TEMP); 365 366 break; 367 } else { 368 ctlfe_num_targets++; 369 } 370 371 printf("%s: ctlfe_num_targets = %d\n", __func__, 372 ctlfe_num_targets); 373 #endif /* CTLFE_INIT_ENABLE */ 374 375 /* 376 * We're in an interrupt context here, so we have to 377 * use M_NOWAIT. Of course this means trouble if we 378 * can't allocate memory. 379 */ 380 bus_softc = malloc(sizeof(*bus_softc), M_CTLFE, 381 M_NOWAIT | M_ZERO); 382 if (bus_softc == NULL) { 383 printf("%s: unable to malloc %zd bytes for softc\n", 384 __func__, sizeof(*bus_softc)); 385 return; 386 } 387 388 bus_softc->path_id = cpi->ccb_h.path_id; 389 bus_softc->sim = xpt_path_sim(cpi->ccb_h.path); 390 STAILQ_INIT(&bus_softc->lun_softc_list); 391 392 fe = &bus_softc->fe; 393 394 /* 395 * XXX KDM should we be more accurate here ? 396 */ 397 if (cpi->transport == XPORT_FC) 398 fe->port_type = CTL_PORT_FC; 399 else 400 fe->port_type = CTL_PORT_SCSI; 401 402 /* XXX KDM what should the real number be here? */ 403 fe->num_requested_ctl_io = 4096; 404 snprintf(bus_softc->port_name, sizeof(bus_softc->port_name), 405 "%s%d", cpi->dev_name, cpi->unit_number); 406 /* 407 * XXX KDM it would be nice to allocate storage in the 408 * frontend structure itself. 
409 */ 410 fe->port_name = bus_softc->port_name; 411 fe->physical_port = cpi->unit_number; 412 fe->virtual_port = cpi->bus_id; 413 fe->port_online = ctlfe_online; 414 fe->port_offline = ctlfe_offline; 415 fe->onoff_arg = bus_softc; 416 fe->targ_enable = ctlfe_targ_enable; 417 fe->targ_disable = ctlfe_targ_disable; 418 fe->lun_enable = ctlfe_lun_enable; 419 fe->lun_disable = ctlfe_lun_disable; 420 fe->targ_lun_arg = bus_softc; 421 fe->fe_datamove = ctlfe_datamove_done; 422 fe->fe_done = ctlfe_datamove_done; 423 fe->fe_dump = ctlfe_dump; 424 /* 425 * XXX KDM the path inquiry doesn't give us the maximum 426 * number of targets supported. 427 */ 428 fe->max_targets = cpi->max_target; 429 fe->max_target_id = cpi->max_target; 430 431 /* 432 * XXX KDM need to figure out whether we're the master or 433 * slave. 434 */ 435 #ifdef CTLFEDEBUG 436 printf("%s: calling ctl_frontend_register() for %s%d\n", 437 __func__, cpi->dev_name, cpi->unit_number); 438 #endif 439 retval = ctl_frontend_register(fe, /*master_SC*/ 1); 440 if (retval != 0) { 441 printf("%s: ctl_frontend_register() failed with " 442 "error %d!\n", __func__, retval); 443 free(bus_softc, M_CTLFE); 444 break; 445 } else { 446 mtx_lock(&ctlfe_list_mtx); 447 STAILQ_INSERT_TAIL(&ctlfe_softc_list, bus_softc, links); 448 mtx_unlock(&ctlfe_list_mtx); 449 } 450 451 status = xpt_create_path(&path, /*periph*/ NULL, 452 bus_softc->path_id,CAM_TARGET_WILDCARD, 453 CAM_LUN_WILDCARD); 454 if (status != CAM_REQ_CMP) { 455 printf("%s: unable to create path for wildcard " 456 "periph\n", __func__); 457 break; 458 } 459 lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, 460 M_NOWAIT | M_ZERO); 461 if (lun_softc == NULL) { 462 xpt_print(path, "%s: unable to allocate softc for " 463 "wildcard periph\n", __func__); 464 xpt_free_path(path); 465 break; 466 } 467 468 lun_softc->parent_softc = bus_softc; 469 lun_softc->flags |= CTLFE_LUN_WILDCARD; 470 471 status = cam_periph_alloc(ctlferegister, 472 ctlfeoninvalidate, 473 ctlfecleanup, 474 ctlfestart, 475 "ctl", 476 CAM_PERIPH_BIO, 477 path, 478 ctlfeasync, 479 0, 480 lun_softc); 481 482 xpt_free_path(path); 483 484 break; 485 } 486 case AC_PATH_DEREGISTERED: 487 /* ctl_frontend_deregister() */ 488 break; 489 case AC_CONTRACT: { 490 struct ac_contract *ac; 491 492 ac = (struct ac_contract *)arg; 493 494 switch (ac->contract_number) { 495 case AC_CONTRACT_DEV_CHG: { 496 struct ac_device_changed *dev_chg; 497 struct ctlfe_softc *softc; 498 int retval, found; 499 500 dev_chg = (struct ac_device_changed *)ac->contract_data; 501 502 printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", 503 __func__, dev_chg->wwpn, dev_chg->port, 504 xpt_path_path_id(path), dev_chg->target, 505 (dev_chg->arrived == 0) ? "left" : "arrived"); 506 507 found = 0; 508 509 mtx_lock(&ctlfe_list_mtx); 510 STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { 511 if (softc->path_id == xpt_path_path_id(path)) { 512 found = 1; 513 break; 514 } 515 } 516 mtx_unlock(&ctlfe_list_mtx); 517 518 if (found == 0) { 519 printf("%s: CTL port for CAM path %u not " 520 "found!\n", __func__, 521 xpt_path_path_id(path)); 522 break; 523 } 524 if (dev_chg->arrived != 0) { 525 retval = ctl_add_initiator(dev_chg->wwpn, 526 softc->fe.targ_port, dev_chg->target); 527 } else { 528 retval = ctl_remove_initiator( 529 softc->fe.targ_port, dev_chg->target); 530 } 531 532 if (retval != 0) { 533 printf("%s: could not %s port %d iid %u " 534 "WWPN %#jx!\n", __func__, 535 (dev_chg->arrived != 0) ? 
"add" : 536 "remove", softc->fe.targ_port, 537 dev_chg->target, 538 (uintmax_t)dev_chg->wwpn); 539 } 540 break; 541 } 542 default: 543 printf("%s: unsupported contract number %ju\n", 544 __func__, (uintmax_t)ac->contract_number); 545 break; 546 } 547 break; 548 } 549 default: 550 break; 551 } 552 } 553 554 static cam_status 555 ctlferegister(struct cam_periph *periph, void *arg) 556 { 557 struct ctlfe_softc *bus_softc; 558 struct ctlfe_lun_softc *softc; 559 struct cam_sim *sim; 560 union ccb en_lun_ccb; 561 cam_status status; 562 int i; 563 564 softc = (struct ctlfe_lun_softc *)arg; 565 bus_softc = softc->parent_softc; 566 sim = xpt_path_sim(periph->path); 567 568 TAILQ_INIT(&softc->work_queue); 569 softc->periph = periph; 570 571 callout_init_mtx(&softc->dma_callout, sim->mtx, /*flags*/ 0); 572 periph->softc = softc; 573 574 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, /*priority*/ 1); 575 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 576 en_lun_ccb.cel.grp6_len = 0; 577 en_lun_ccb.cel.grp7_len = 0; 578 en_lun_ccb.cel.enable = 1; 579 xpt_action(&en_lun_ccb); 580 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 581 if (status != CAM_REQ_CMP) { 582 xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", 583 __func__, en_lun_ccb.ccb_h.status); 584 return (status); 585 } 586 587 status = CAM_REQ_CMP; 588 589 for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { 590 union ccb *new_ccb; 591 592 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 593 M_ZERO|M_NOWAIT); 594 if (new_ccb == NULL) { 595 status = CAM_RESRC_UNAVAIL; 596 break; 597 } 598 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 599 new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; 600 new_ccb->ccb_h.cbfcnp = ctlfedone; 601 xpt_action(new_ccb); 602 softc->atios_sent++; 603 status = new_ccb->ccb_h.status; 604 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 605 free(new_ccb, M_CTLFE); 606 break; 607 } 608 } 609 610 status = cam_periph_acquire(periph); 611 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 612 xpt_print(periph->path, "%s: could not acquire reference " 613 "count, status = %#x\n", __func__, status); 614 return (status); 615 } 616 617 if (i == 0) { 618 xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " 619 "status 0x%x\n", __func__, status); 620 return (CAM_REQ_CMP_ERR); 621 } 622 623 for (i = 0; i < CTLFE_IN_PER_LUN; i++) { 624 union ccb *new_ccb; 625 626 new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, 627 M_ZERO|M_NOWAIT); 628 if (new_ccb == NULL) { 629 status = CAM_RESRC_UNAVAIL; 630 break; 631 } 632 633 xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); 634 new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 635 new_ccb->ccb_h.cbfcnp = ctlfedone; 636 xpt_action(new_ccb); 637 softc->inots_sent++; 638 status = new_ccb->ccb_h.status; 639 if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 640 /* 641 * Note that we don't free the CCB here. If the 642 * status is not CAM_REQ_INPROG, then we're 643 * probably talking to a SIM that says it is 644 * target-capable but doesn't support the 645 * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the 646 * older API. In that case, it'll call xpt_done() 647 * on the CCB, and we need to free it in our done 648 * routine as a result. 
649 */ 650 break; 651 } 652 } 653 if ((i == 0) 654 || (status != CAM_REQ_INPROG)) { 655 xpt_print(periph->path, "%s: could not allocate immediate " 656 "notify CCBs, status 0x%x\n", __func__, status); 657 return (CAM_REQ_CMP_ERR); 658 } 659 return (CAM_REQ_CMP); 660 } 661 662 static void 663 ctlfeoninvalidate(struct cam_periph *periph) 664 { 665 union ccb en_lun_ccb; 666 cam_status status; 667 struct ctlfe_lun_softc *softc; 668 669 softc = (struct ctlfe_lun_softc *)periph->softc; 670 671 xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, /*priority*/ 1); 672 en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; 673 en_lun_ccb.cel.grp6_len = 0; 674 en_lun_ccb.cel.grp7_len = 0; 675 en_lun_ccb.cel.enable = 0; 676 xpt_action(&en_lun_ccb); 677 status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); 678 if (status != CAM_REQ_CMP) { 679 xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", 680 __func__, en_lun_ccb.ccb_h.status); 681 /* 682 * XXX KDM what do we do now? 683 */ 684 } 685 xpt_print(periph->path, "LUN removed, %ju ATIOs outstanding, %ju " 686 "INOTs outstanding, %d refs\n", softc->atios_sent - 687 softc->atios_returned, softc->inots_sent - 688 softc->inots_returned, periph->refcount); 689 } 690 691 static void 692 ctlfecleanup(struct cam_periph *periph) 693 { 694 struct ctlfe_lun_softc *softc; 695 struct ctlfe_softc *bus_softc; 696 697 xpt_print(periph->path, "%s: Called\n", __func__); 698 699 softc = (struct ctlfe_lun_softc *)periph->softc; 700 bus_softc = softc->parent_softc; 701 702 STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc,links); 703 704 /* 705 * XXX KDM is there anything else that needs to be done here? 706 */ 707 free(softc, M_CTLFE); 708 } 709 710 static void 711 ctlfestart(struct cam_periph *periph, union ccb *start_ccb) 712 { 713 struct ctlfe_lun_softc *softc; 714 struct ccb_hdr *ccb_h; 715 716 softc = (struct ctlfe_lun_softc *)periph->softc; 717 718 softc->ccbs_alloced++; 719 720 ccb_h = TAILQ_FIRST(&softc->work_queue); 721 if (periph->immediate_priority <= periph->pinfo.priority) { 722 panic("shouldn't get to the CCB waiting case!"); 723 start_ccb->ccb_h.ccb_type = CTLFE_CCB_WAITING; 724 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, 725 periph_links.sle); 726 periph->immediate_priority = CAM_PRIORITY_NONE; 727 wakeup(&periph->ccb_list); 728 } else if (ccb_h == NULL) { 729 softc->ccbs_freed++; 730 xpt_release_ccb(start_ccb); 731 } else { 732 struct ccb_accept_tio *atio; 733 struct ccb_scsiio *csio; 734 uint8_t *data_ptr; 735 uint32_t dxfer_len; 736 ccb_flags flags; 737 union ctl_io *io; 738 uint8_t scsi_status; 739 740 /* Take the ATIO off the work queue */ 741 TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe); 742 atio = (struct ccb_accept_tio *)ccb_h; 743 io = (union ctl_io *)ccb_h->io_ptr; 744 csio = &start_ccb->csio; 745 746 flags = atio->ccb_h.flags & 747 (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); 748 749 if ((io == NULL) 750 || (io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 751 /* 752 * We're done, send status back. 753 */ 754 flags |= CAM_SEND_STATUS; 755 if (io == NULL) { 756 scsi_status = SCSI_STATUS_BUSY; 757 csio->sense_len = 0; 758 } else if ((io->io_hdr.status & CTL_STATUS_MASK) == 759 CTL_CMD_ABORTED) { 760 io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; 761 762 /* 763 * If this command was aborted, we don't 764 * need to send status back to the SIM. 765 * Just free the CTIO and ctl_io, and 766 * recycle the ATIO back to the SIM. 
767 */ 768 xpt_print(periph->path, "%s: aborted " 769 "command 0x%04x discarded\n", 770 __func__, io->scsiio.tag_num); 771 ctl_free_io(io); 772 /* 773 * For a wildcard attachment, commands can 774 * come in with a specific target/lun. Reset 775 * the target and LUN fields back to the 776 * wildcard values before we send them back 777 * down to the SIM. The SIM has a wildcard 778 * LUN enabled, not whatever target/lun 779 * these happened to be. 780 */ 781 if (softc->flags & CTLFE_LUN_WILDCARD) { 782 atio->ccb_h.target_id = 783 CAM_TARGET_WILDCARD; 784 atio->ccb_h.target_lun = 785 CAM_LUN_WILDCARD; 786 } 787 788 if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { 789 cam_release_devq(periph->path, 790 /*relsim_flags*/0, 791 /*reduction*/0, 792 /*timeout*/0, 793 /*getcount_only*/0); 794 atio->ccb_h.status &= ~CAM_DEV_QFRZN; 795 } 796 797 ccb_h = TAILQ_FIRST(&softc->work_queue); 798 799 if (atio->ccb_h.func_code != 800 XPT_ACCEPT_TARGET_IO) { 801 xpt_print(periph->path, "%s: func_code " 802 "is %#x\n", __func__, 803 atio->ccb_h.func_code); 804 } 805 start_ccb->ccb_h.func_code = XPT_ABORT; 806 start_ccb->cab.abort_ccb = (union ccb *)atio; 807 start_ccb->ccb_h.cbfcnp = ctlfedone; 808 809 /* Tell the SIM that we've aborted this ATIO */ 810 xpt_action(start_ccb); 811 softc->ccbs_freed++; 812 xpt_release_ccb(start_ccb); 813 814 /* 815 * Send the ATIO back down to the SIM. 816 */ 817 xpt_action((union ccb *)atio); 818 softc->atios_sent++; 819 820 /* 821 * If we still have work to do, ask for 822 * another CCB. Otherwise, deactivate our 823 * callout. 824 */ 825 if (ccb_h != NULL) 826 xpt_schedule(periph, /*priority*/ 1); 827 else 828 callout_stop(&softc->dma_callout); 829 830 return; 831 } else { 832 io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; 833 scsi_status = io->scsiio.scsi_status; 834 csio->sense_len = io->scsiio.sense_len; 835 } 836 data_ptr = NULL; 837 dxfer_len = 0; 838 if (io == NULL) { 839 printf("%s: tag %04x io is NULL\n", __func__, 840 atio->tag_id); 841 } else { 842 #ifdef CTLFEDEBUG 843 printf("%s: tag %04x status %x\n", __func__, 844 atio->tag_id, io->io_hdr.status); 845 #endif 846 } 847 csio->sglist_cnt = 0; 848 if (csio->sense_len != 0) { 849 csio->sense_data = io->scsiio.sense_data; 850 flags |= CAM_SEND_SENSE; 851 } else if (scsi_status == SCSI_STATUS_CHECK_COND) { 852 xpt_print(periph->path, "%s: check condition " 853 "with no sense\n", __func__); 854 } 855 } else { 856 struct ctlfe_lun_cmd_info *cmd_info; 857 858 /* 859 * Datamove call, we need to setup the S/G list. 860 * If we pass in a S/G list, the isp(4) driver at 861 * least expects physical/bus addresses. 862 */ 863 864 cmd_info = (struct ctlfe_lun_cmd_info *) 865 io->io_hdr.port_priv; 866 867 KASSERT(sizeof(*cmd_info) < CTL_PORT_PRIV_SIZE, 868 ("%s: sizeof(struct ctlfe_lun_cmd_info) %zd < " 869 "CTL_PORT_PRIV_SIZE %d", __func__, 870 sizeof(*cmd_info), CTL_PORT_PRIV_SIZE)); 871 io->io_hdr.flags &= ~CTL_FLAG_DMA_QUEUED; 872 873 /* 874 * Need to zero this, in case it has been used for 875 * a previous datamove for this particular I/O. 876 */ 877 bzero(cmd_info, sizeof(*cmd_info)); 878 scsi_status = 0; 879 880 /* 881 * Set the direction, relative to the initiator. 
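                         * (CTL_FLAG_DATA_IN, i.e. a read from the
                         * initiator's point of view, maps to CAM_DIR_IN on
                         * the CTIO below; everything else is sent
                         * CAM_DIR_OUT.)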
882 */ 883 flags &= ~CAM_DIR_MASK; 884 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 885 CTL_FLAG_DATA_IN) 886 flags |= CAM_DIR_IN; 887 else 888 flags |= CAM_DIR_OUT; 889 890 csio->cdb_len = atio->cdb_len; 891 892 if (io->scsiio.kern_sg_entries == 0) { 893 /* No S/G list */ 894 data_ptr = io->scsiio.kern_data_ptr; 895 dxfer_len = io->scsiio.kern_data_len; 896 csio->sglist_cnt = 0; 897 898 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 899 flags |= CAM_DATA_PHYS; 900 } else if (io->scsiio.kern_sg_entries <= 901 (sizeof(cmd_info->cam_sglist)/ 902 sizeof(cmd_info->cam_sglist[0]))) { 903 /* 904 * S/G list with physical or virtual pointers. 905 * Just populate the CAM S/G list with the 906 * pointers. 907 */ 908 int i; 909 struct ctl_sg_entry *ctl_sglist; 910 bus_dma_segment_t *cam_sglist; 911 912 ctl_sglist = (struct ctl_sg_entry *) 913 io->scsiio.kern_data_ptr; 914 cam_sglist = cmd_info->cam_sglist; 915 916 for (i = 0; i < io->scsiio.kern_sg_entries;i++){ 917 cam_sglist[i].ds_addr = 918 (bus_addr_t)ctl_sglist[i].addr; 919 cam_sglist[i].ds_len = 920 ctl_sglist[i].len; 921 } 922 csio->sglist_cnt = io->scsiio.kern_sg_entries; 923 flags |= CAM_SCATTER_VALID; 924 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) 925 flags |= CAM_SG_LIST_PHYS; 926 else 927 flags &= ~CAM_SG_LIST_PHYS; 928 data_ptr = (uint8_t *)cam_sglist; 929 dxfer_len = io->scsiio.kern_data_len; 930 } else { 931 /* S/G list with virtual pointers */ 932 struct ctl_sg_entry *sglist; 933 int *ti; 934 935 /* 936 * XXX KDM this is a temporary hack. The 937 * isp(4) driver can't deal with S/G lists 938 * with virtual pointers, so we need to 939 * go through and send down one virtual 940 * pointer at a time. 941 */ 942 sglist = (struct ctl_sg_entry *) 943 io->scsiio.kern_data_ptr; 944 ti = &cmd_info->cur_transfer_index; 945 data_ptr = sglist[*ti].addr; 946 dxfer_len = sglist[*ti].len; 947 csio->sglist_cnt = 0; 948 cmd_info->flags |= CTLFE_CMD_PIECEWISE; 949 (*ti)++; 950 } 951 952 io->scsiio.ext_data_filled += dxfer_len; 953 954 if (io->scsiio.ext_data_filled > 955 io->scsiio.kern_total_len) { 956 xpt_print(periph->path, "%s: tag 0x%04x " 957 "fill len %u > total %u\n", 958 __func__, io->scsiio.tag_num, 959 io->scsiio.ext_data_filled, 960 io->scsiio.kern_total_len); 961 } 962 } 963 964 #ifdef CTLFEDEBUG 965 printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, 966 (flags & CAM_SEND_STATUS) ? 
"done" : "datamove", 967 atio->tag_id, flags, data_ptr, dxfer_len); 968 #endif 969 970 /* 971 * Valid combinations: 972 * - CAM_SEND_STATUS, SCATTER_VALID = 0, dxfer_len = 0, 973 * sglist_cnt = 0 974 * - CAM_SEND_STATUS = 0, SCATTER_VALID = 0, dxfer_len != 0, 975 * sglist_cnt = 0 976 * - CAM_SEND_STATUS = 0, SCATTER_VALID, dxfer_len != 0, 977 * sglist_cnt != 0 978 */ 979 #ifdef CTLFEDEBUG 980 if (((flags & CAM_SEND_STATUS) 981 && (((flags & CAM_SCATTER_VALID) != 0) 982 || (dxfer_len != 0) 983 || (csio->sglist_cnt != 0))) 984 || (((flags & CAM_SEND_STATUS) == 0) 985 && (dxfer_len == 0)) 986 || ((flags & CAM_SCATTER_VALID) 987 && (csio->sglist_cnt == 0)) 988 || (((flags & CAM_SCATTER_VALID) == 0) 989 && (csio->sglist_cnt != 0))) { 990 printf("%s: tag %04x cdb %02x flags %#x dxfer_len " 991 "%d sg %u\n", __func__, atio->tag_id, 992 atio->cdb_io.cdb_bytes[0], flags, dxfer_len, 993 csio->sglist_cnt); 994 if (io != NULL) { 995 printf("%s: tag %04x io status %#x\n", __func__, 996 atio->tag_id, io->io_hdr.status); 997 } else { 998 printf("%s: tag %04x no associated io\n", 999 __func__, atio->tag_id); 1000 } 1001 } 1002 #endif 1003 cam_fill_ctio(csio, 1004 /*retries*/ 2, 1005 ctlfedone, 1006 flags, 1007 (flags & CAM_TAG_ACTION_VALID) ? 1008 MSG_SIMPLE_Q_TAG : 0, 1009 atio->tag_id, 1010 atio->init_id, 1011 scsi_status, 1012 /*data_ptr*/ data_ptr, 1013 /*dxfer_len*/ dxfer_len, 1014 /*timeout*/ 5 * 1000); 1015 start_ccb->ccb_h.ccb_atio = atio; 1016 if (((flags & CAM_SEND_STATUS) == 0) 1017 && (io != NULL)) 1018 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 1019 1020 softc->ctios_sent++; 1021 1022 xpt_action(start_ccb); 1023 1024 if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1025 cam_release_devq(periph->path, 1026 /*relsim_flags*/0, 1027 /*reduction*/0, 1028 /*timeout*/0, 1029 /*getcount_only*/0); 1030 atio->ccb_h.status &= ~CAM_DEV_QFRZN; 1031 } 1032 1033 ccb_h = TAILQ_FIRST(&softc->work_queue); 1034 } 1035 /* 1036 * If we still have work to do, ask for another CCB. Otherwise, 1037 * deactivate our callout. 1038 */ 1039 if (ccb_h != NULL) 1040 xpt_schedule(periph, /*priority*/ 1); 1041 else 1042 callout_stop(&softc->dma_callout); 1043 } 1044 1045 static void 1046 ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) 1047 { 1048 struct ctlfe_lun_softc *softc; 1049 1050 softc = (struct ctlfe_lun_softc *)periph->softc; 1051 1052 switch (ccb->ccb_h.func_code) { 1053 case XPT_ACCEPT_TARGET_IO: 1054 softc->atios_returned++; 1055 break; 1056 case XPT_IMMEDIATE_NOTIFY: 1057 case XPT_NOTIFY_ACKNOWLEDGE: 1058 softc->inots_returned++; 1059 break; 1060 default: 1061 break; 1062 } 1063 1064 free(ccb, M_CTLFE); 1065 1066 KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: " 1067 "atios_returned %ju > atios_sent %ju", __func__, 1068 softc->atios_returned, softc->atios_sent)); 1069 KASSERT(softc->inots_returned <= softc->inots_sent, ("%s: " 1070 "inots_returned %ju > inots_sent %ju", __func__, 1071 softc->inots_returned, softc->inots_sent)); 1072 1073 /* 1074 * If we have received all of our CCBs, we can release our 1075 * reference on the peripheral driver. It will probably go away 1076 * now. 1077 */ 1078 if ((softc->atios_returned == softc->atios_sent) 1079 && (softc->inots_returned == softc->inots_sent)) { 1080 cam_periph_release_locked(periph); 1081 } 1082 } 1083 1084 static int 1085 ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) 1086 { 1087 uint64_t lba; 1088 uint32_t num_blocks, nbc; 1089 uint8_t *cmdbyt = (atio->ccb_h.flags & CAM_CDB_POINTER)? 
1090 atio->cdb_io.cdb_ptr : atio->cdb_io.cdb_bytes; 1091 1092 nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ 1093 1094 switch (cmdbyt[0]) { 1095 case READ_6: 1096 case WRITE_6: 1097 { 1098 struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; 1099 lba = scsi_3btoul(cdb->addr); 1100 lba &= 0x1fffff; 1101 num_blocks = cdb->length; 1102 if (num_blocks == 0) 1103 num_blocks = 256; 1104 lba += nbc; 1105 num_blocks -= nbc; 1106 scsi_ulto3b(lba, cdb->addr); 1107 cdb->length = num_blocks; 1108 break; 1109 } 1110 case READ_10: 1111 case WRITE_10: 1112 { 1113 struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; 1114 lba = scsi_4btoul(cdb->addr); 1115 num_blocks = scsi_2btoul(cdb->length); 1116 lba += nbc; 1117 num_blocks -= nbc; 1118 scsi_ulto4b(lba, cdb->addr); 1119 scsi_ulto2b(num_blocks, cdb->length); 1120 break; 1121 } 1122 case READ_12: 1123 case WRITE_12: 1124 { 1125 struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; 1126 lba = scsi_4btoul(cdb->addr); 1127 num_blocks = scsi_4btoul(cdb->length); 1128 lba += nbc; 1129 num_blocks -= nbc; 1130 scsi_ulto4b(lba, cdb->addr); 1131 scsi_ulto4b(num_blocks, cdb->length); 1132 break; 1133 } 1134 case READ_16: 1135 case WRITE_16: 1136 { 1137 struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; 1138 lba = scsi_8btou64(cdb->addr); 1139 num_blocks = scsi_4btoul(cdb->length); 1140 lba += nbc; 1141 num_blocks -= nbc; 1142 scsi_u64to8b(lba, cdb->addr); 1143 scsi_ulto4b(num_blocks, cdb->length); 1144 break; 1145 } 1146 default: 1147 return -1; 1148 } 1149 return (0); 1150 } 1151 1152 static void 1153 ctlfedone(struct cam_periph *periph, union ccb *done_ccb) 1154 { 1155 struct ctlfe_lun_softc *softc; 1156 struct ctlfe_softc *bus_softc; 1157 struct ccb_accept_tio *atio = NULL; 1158 union ctl_io *io = NULL; 1159 1160 #ifdef CTLFE_DEBUG 1161 printf("%s: entered, func_code = %#x, type = %#lx\n", __func__, 1162 done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type); 1163 #endif 1164 1165 softc = (struct ctlfe_lun_softc *)periph->softc; 1166 bus_softc = softc->parent_softc; 1167 1168 if (done_ccb->ccb_h.ccb_type == CTLFE_CCB_WAITING) { 1169 panic("shouldn't get to the CCB waiting case!"); 1170 wakeup(&done_ccb->ccb_h.cbfcnp); 1171 return; 1172 } 1173 1174 /* 1175 * If the peripheral is invalid, ATIOs and immediate notify CCBs 1176 * need to be freed. Most of the ATIOs and INOTs that come back 1177 * will be CCBs that are being returned from the SIM as a result of 1178 * our disabling the LUN. 1179 * 1180 * Other CCB types are handled in their respective cases below. 1181 */ 1182 if (periph->flags & CAM_PERIPH_INVALID) { 1183 switch (done_ccb->ccb_h.func_code) { 1184 case XPT_ACCEPT_TARGET_IO: 1185 case XPT_IMMEDIATE_NOTIFY: 1186 case XPT_NOTIFY_ACKNOWLEDGE: 1187 ctlfe_free_ccb(periph, done_ccb); 1188 return; 1189 default: 1190 break; 1191 } 1192 1193 } 1194 switch (done_ccb->ccb_h.func_code) { 1195 case XPT_ACCEPT_TARGET_IO: { 1196 1197 atio = &done_ccb->atio; 1198 1199 softc->atios_returned++; 1200 1201 resubmit: 1202 /* 1203 * Allocate a ctl_io, pass it to CTL, and wait for the 1204 * datamove or done. 1205 */ 1206 io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref); 1207 if (io == NULL) { 1208 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1209 atio->ccb_h.flags |= CAM_DIR_NONE; 1210 1211 printf("%s: ctl_alloc_io failed!\n", __func__); 1212 1213 /* 1214 * XXX KDM need to set SCSI_STATUS_BUSY, but there 1215 * is no field in the ATIO structure to do that, 1216 * and we aren't able to allocate a ctl_io here. 1217 * What to do? 
                         */
                        atio->sense_len = 0;
                        done_ccb->ccb_h.io_ptr = NULL;
                        TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
                            periph_links.tqe);
                        xpt_schedule(periph, /*priority*/ 1);
                        break;
                }
                ctl_zero_io(io);

                /* Save pointers on both sides */
                io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = done_ccb;
                done_ccb->ccb_h.io_ptr = io;

                /*
                 * Only SCSI I/O comes down this path; resets, etc. come
                 * down the immediate notify path below.
                 */
                io->io_hdr.io_type = CTL_IO_SCSI;
                io->io_hdr.nexus.initid.id = atio->init_id;
                io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
                io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id;
                io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
                io->scsiio.tag_num = atio->tag_id;
                switch (atio->tag_action) {
                case CAM_TAG_ACTION_NONE:
                        io->scsiio.tag_type = CTL_TAG_UNTAGGED;
                        break;
                case MSG_SIMPLE_TASK:
                        io->scsiio.tag_type = CTL_TAG_SIMPLE;
                        break;
                case MSG_HEAD_OF_QUEUE_TASK:
                        io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
                        break;
                case MSG_ORDERED_TASK:
                        io->scsiio.tag_type = CTL_TAG_ORDERED;
                        break;
                case MSG_ACA_TASK:
                        io->scsiio.tag_type = CTL_TAG_ACA;
                        break;
                default:
                        io->scsiio.tag_type = CTL_TAG_UNTAGGED;
                        printf("%s: unhandled tag type %#x!!\n", __func__,
                            atio->tag_action);
                        break;
                }
                if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
                        printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
                            __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
                }
                io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
                bcopy(atio->cdb_io.cdb_bytes, io->scsiio.cdb,
                    io->scsiio.cdb_len);

#ifdef CTLFEDEBUG
                printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__,
                    (uintmax_t)io->io_hdr.nexus.initid.id,
                    io->io_hdr.nexus.targ_port,
                    (uintmax_t)io->io_hdr.nexus.targ_target.id,
                    io->io_hdr.nexus.targ_lun,
                    io->scsiio.tag_num, io->scsiio.cdb[0]);
#endif

                ctl_queue(io);
                break;
        }
        case XPT_CONT_TARGET_IO: {
                int srr = 0;
                uint32_t srr_off = 0;

                atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
                io = (union ctl_io *)atio->ccb_h.io_ptr;

                softc->ctios_returned++;
#ifdef CTLFEDEBUG
                printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
                    __func__, atio->tag_id, done_ccb->ccb_h.flags);
#endif
                /*
                 * Handle the SRR case, where the data pointer has been
                 * pushed back and part of the transfer has to be redone.
                 */
                if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
                 && done_ccb->csio.msg_ptr != NULL
                 && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
                 && done_ccb->csio.msg_ptr[1] == 5
                 && done_ccb->csio.msg_ptr[2] == 0) {
                        srr = 1;
                        srr_off =
                            (done_ccb->csio.msg_ptr[3] << 24)
                          | (done_ccb->csio.msg_ptr[4] << 16)
                          | (done_ccb->csio.msg_ptr[5] << 8)
                          | (done_ccb->csio.msg_ptr[6]);
                }

                if (srr && (done_ccb->ccb_h.flags & CAM_SEND_STATUS)) {
                        /*
                         * If status was being sent, the back end data is now
                         * history.  Hack it up and resubmit a new command
                         * with the CDB adjusted.  If the SIM does the right
                         * thing, all of the resid math should work.
                         */
                        softc->ccbs_freed++;
                        xpt_release_ccb(done_ccb);
                        ctl_free_io(io);
                        if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
                                done_ccb = (union ccb *)atio;
                                goto resubmit;
                        }
                        /*
                         * Fall through to doom....
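                         * (At this point the CTIO has already been released
                         * and the ctl_io freed, so there is little more we
                         * can safely do with this command.)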
                         */
                } else if (srr) {
                        /*
                         * If we have an srr and we're still sending data, we
                         * should be able to adjust offsets and cycle again.
                         */
                        io->scsiio.kern_rel_offset =
                            io->scsiio.ext_data_filled = srr_off;
                        io->scsiio.ext_data_len = io->scsiio.kern_total_len -
                            io->scsiio.kern_rel_offset;
                        softc->ccbs_freed++;
                        io->scsiio.io_hdr.status = CTL_STATUS_NONE;
                        xpt_release_ccb(done_ccb);
                        TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
                            periph_links.tqe);
                        xpt_schedule(periph, /*priority*/ 1);
                        return;
                }

                /*
                 * If we were sending status back to the initiator, free up
                 * resources.  If we were doing a datamove, call the
                 * datamove done routine.
                 */
                if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) {
                        softc->ccbs_freed++;
                        xpt_release_ccb(done_ccb);
                        ctl_free_io(io);
                        /*
                         * For a wildcard attachment, commands can come in
                         * with a specific target/lun.  Reset the target
                         * and LUN fields back to the wildcard values before
                         * we send them back down to the SIM.  The SIM has
                         * a wildcard LUN enabled, not whatever target/lun
                         * these happened to be.
                         */
                        if (softc->flags & CTLFE_LUN_WILDCARD) {
                                atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
                                atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
                        }
                        if (periph->flags & CAM_PERIPH_INVALID) {
                                ctlfe_free_ccb(periph, (union ccb *)atio);
                                return;
                        } else {
                                xpt_action((union ccb *)atio);
                                softc->atios_sent++;
                        }
                } else {
                        struct ctlfe_lun_cmd_info *cmd_info;
                        struct ccb_scsiio *csio;

                        csio = &done_ccb->csio;
                        cmd_info = (struct ctlfe_lun_cmd_info *)
                            io->io_hdr.port_priv;

                        io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;

                        io->scsiio.ext_data_len += csio->dxfer_len;
                        if (io->scsiio.ext_data_len >
                            io->scsiio.kern_total_len) {
                                xpt_print(periph->path, "%s: tag 0x%04x "
                                    "done len %u > total %u sent %u\n",
                                    __func__, io->scsiio.tag_num,
                                    io->scsiio.ext_data_len,
                                    io->scsiio.kern_total_len,
                                    io->scsiio.ext_data_filled);
                        }
                        /*
                         * Translate CAM status to CTL status.  Success
                         * does not change the overall ctl_io status.  In
                         * that case we just set port_status to 0.  If we
                         * have a failure, though, set a data phase error
                         * for the overall ctl_io.
                         */
                        switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
                        case CAM_REQ_CMP:
                                io->io_hdr.port_status = 0;
                                break;
                        default:
                                /*
                                 * XXX KDM the isp(4) driver doesn't really
                                 * seem to send errors back for data
                                 * transfers that I can tell.  There is one
                                 * case where it'll send CAM_REQ_CMP_ERR,
                                 * but probably not that many more cases.
                                 * So set a generic data phase error here,
                                 * like the SXP driver sets.
                                 */
                                io->io_hdr.port_status = 0xbad1;
                                ctl_set_data_phase_error(&io->scsiio);
                                /*
                                 * XXX KDM figure out residual.
                                 */
                                break;
                        }
                        /*
                         * If we had to break this S/G list into multiple
                         * pieces, figure out where we are in the list, and
                         * continue sending pieces if necessary.
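                         * (CTLFE_CMD_PIECEWISE is set in ctlfestart() when
                         * the CTL S/G list holds virtual pointers that the
                         * SIM cannot take in one CTIO; cur_transfer_index
                         * records the next segment to send, one CTIO per
                         * segment.)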
1427 */ 1428 if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) 1429 && (io->io_hdr.port_status == 0) 1430 && (cmd_info->cur_transfer_index < 1431 io->scsiio.kern_sg_entries)) { 1432 struct ctl_sg_entry *sglist; 1433 ccb_flags flags; 1434 uint8_t scsi_status; 1435 uint8_t *data_ptr; 1436 uint32_t dxfer_len; 1437 int *ti; 1438 1439 sglist = (struct ctl_sg_entry *) 1440 io->scsiio.kern_data_ptr; 1441 ti = &cmd_info->cur_transfer_index; 1442 flags = atio->ccb_h.flags & 1443 (CAM_DIS_DISCONNECT| 1444 CAM_TAG_ACTION_VALID| 1445 CAM_DIR_MASK); 1446 1447 /* 1448 * Set the direction, relative to the initiator. 1449 */ 1450 flags &= ~CAM_DIR_MASK; 1451 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 1452 CTL_FLAG_DATA_IN) 1453 flags |= CAM_DIR_IN; 1454 else 1455 flags |= CAM_DIR_OUT; 1456 1457 data_ptr = sglist[*ti].addr; 1458 dxfer_len = sglist[*ti].len; 1459 (*ti)++; 1460 1461 scsi_status = 0; 1462 1463 if (((flags & CAM_SEND_STATUS) == 0) 1464 && (dxfer_len == 0)) { 1465 printf("%s: tag %04x no status or " 1466 "len cdb = %02x\n", __func__, 1467 atio->tag_id, 1468 atio->cdb_io.cdb_bytes[0]); 1469 printf("%s: tag %04x io status %#x\n", 1470 __func__, atio->tag_id, 1471 io->io_hdr.status); 1472 } 1473 1474 cam_fill_ctio(csio, 1475 /*retries*/ 2, 1476 ctlfedone, 1477 flags, 1478 (flags & CAM_TAG_ACTION_VALID) ? 1479 MSG_SIMPLE_Q_TAG : 0, 1480 atio->tag_id, 1481 atio->init_id, 1482 scsi_status, 1483 /*data_ptr*/ data_ptr, 1484 /*dxfer_len*/ dxfer_len, 1485 /*timeout*/ 5 * 1000); 1486 1487 csio->resid = 0; 1488 csio->ccb_h.ccb_atio = atio; 1489 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 1490 softc->ctios_sent++; 1491 xpt_action((union ccb *)csio); 1492 } else { 1493 /* 1494 * Release the CTIO. The ATIO will be sent back 1495 * down to the SIM once we send status. 1496 */ 1497 softc->ccbs_freed++; 1498 xpt_release_ccb(done_ccb); 1499 1500 /* Call the backend move done callback */ 1501 io->scsiio.be_move_done(io); 1502 } 1503 } 1504 break; 1505 } 1506 case XPT_IMMEDIATE_NOTIFY: { 1507 union ctl_io *io; 1508 struct ccb_immediate_notify *inot; 1509 cam_status status; 1510 int frozen; 1511 1512 inot = &done_ccb->cin1; 1513 1514 softc->inots_returned++; 1515 1516 frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 1517 1518 printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x " 1519 "seq %#x\n", __func__, inot->ccb_h.status, 1520 inot->tag_id, inot->seq_id); 1521 1522 io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref); 1523 if (io != NULL) { 1524 int send_ctl_io; 1525 1526 send_ctl_io = 1; 1527 1528 ctl_zero_io(io); 1529 io->io_hdr.io_type = CTL_IO_TASK; 1530 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb; 1531 inot->ccb_h.io_ptr = io; 1532 io->io_hdr.nexus.initid.id = inot->initiator_id; 1533 io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port; 1534 io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id; 1535 io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; 1536 /* XXX KDM should this be the tag_id? */ 1537 io->taskio.tag_num = inot->seq_id; 1538 1539 status = inot->ccb_h.status & CAM_STATUS_MASK; 1540 switch (status) { 1541 case CAM_SCSI_BUS_RESET: 1542 io->taskio.task_action = CTL_TASK_BUS_RESET; 1543 break; 1544 case CAM_BDR_SENT: 1545 io->taskio.task_action = CTL_TASK_TARGET_RESET; 1546 break; 1547 case CAM_MESSAGE_RECV: 1548 switch (inot->arg) { 1549 case MSG_ABORT_TASK_SET: 1550 /* 1551 * XXX KDM this isn't currently 1552 * supported by CTL. It ends up 1553 * being a no-op. 
                                         */
                                        io->taskio.task_action =
                                            CTL_TASK_ABORT_TASK_SET;
                                        break;
                                case MSG_TARGET_RESET:
                                        io->taskio.task_action =
                                            CTL_TASK_TARGET_RESET;
                                        break;
                                case MSG_ABORT_TASK:
                                        io->taskio.task_action =
                                            CTL_TASK_ABORT_TASK;
                                        break;
                                case MSG_LOGICAL_UNIT_RESET:
                                        io->taskio.task_action =
                                            CTL_TASK_LUN_RESET;
                                        break;
                                case MSG_CLEAR_TASK_SET:
                                        /*
                                         * XXX KDM this isn't currently
                                         * supported by CTL.  It ends up
                                         * being a no-op.
                                         */
                                        io->taskio.task_action =
                                            CTL_TASK_CLEAR_TASK_SET;
                                        break;
                                case MSG_CLEAR_ACA:
                                        io->taskio.task_action =
                                            CTL_TASK_CLEAR_ACA;
                                        break;
                                case MSG_NOOP:
                                        send_ctl_io = 0;
                                        break;
                                default:
                                        xpt_print(periph->path, "%s: "
                                            "unsupported message 0x%x\n",
                                            __func__, inot->arg);
                                        send_ctl_io = 0;
                                        break;
                                }
                                break;
                        case CAM_REQ_ABORTED:
                                /*
                                 * This request was sent back by the driver.
                                 * XXX KDM what do we do here?
                                 */
                                send_ctl_io = 0;
                                break;
                        case CAM_REQ_INVALID:
                        case CAM_PROVIDE_FAIL:
                        default:
                                /*
                                 * We should only get here if we're talking
                                 * to a SIM that is target capable but
                                 * supports the old API.  In that case, we
                                 * need to just free the CCB.  If we
                                 * actually send a notify acknowledge, it
                                 * will send that back with an error as
                                 * well.
                                 */

                                if ((status != CAM_REQ_INVALID)
                                 && (status != CAM_PROVIDE_FAIL))
                                        xpt_print(periph->path, "%s: "
                                            "unsupported CAM status "
                                            "0x%x\n", __func__, status);

                                ctl_free_io(io);
                                ctlfe_free_ccb(periph, done_ccb);

                                return;
                        }
                        if (send_ctl_io != 0) {
                                ctl_queue(io);
                        } else {
                                ctl_free_io(io);
                                done_ccb->ccb_h.status = CAM_REQ_INPROG;
                                done_ccb->ccb_h.func_code =
                                    XPT_NOTIFY_ACKNOWLEDGE;
                                xpt_action(done_ccb);
                        }
                } else {
                        xpt_print(periph->path, "%s: could not allocate "
                            "ctl_io for immediate notify!\n", __func__);
                        /* requeue this to the adapter */
                        done_ccb->ccb_h.status = CAM_REQ_INPROG;
                        done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
                        xpt_action(done_ccb);
                }

                if (frozen != 0) {
                        cam_release_devq(periph->path,
                            /*relsim_flags*/ 0,
                            /*opening reduction*/ 0,
                            /*timeout*/ 0,
                            /*getcount_only*/ 0);
                }
                break;
        }
        case XPT_NOTIFY_ACKNOWLEDGE:
                /*
                 * Queue this back down to the SIM as an immediate notify.
                 */
                done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
                xpt_action(done_ccb);
                softc->inots_sent++;
                break;
        case XPT_ABORT:
                /*
                 * XPT_ABORT is an immediate CCB; we shouldn't get here.
1663 */ 1664 panic("%s: XPT_ABORT CCB returned!", __func__); 1665 break; 1666 case XPT_SET_SIM_KNOB: 1667 case XPT_GET_SIM_KNOB: 1668 break; 1669 default: 1670 panic("%s: unexpected CCB type %#x", __func__, 1671 done_ccb->ccb_h.func_code); 1672 break; 1673 } 1674 } 1675 1676 static void 1677 ctlfe_onoffline(void *arg, int online) 1678 { 1679 struct ctlfe_softc *bus_softc; 1680 union ccb *ccb; 1681 cam_status status; 1682 struct cam_path *path; 1683 struct cam_sim *sim; 1684 int set_wwnn; 1685 1686 bus_softc = (struct ctlfe_softc *)arg; 1687 1688 set_wwnn = 0; 1689 1690 status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, 1691 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 1692 if (status != CAM_REQ_CMP) { 1693 printf("%s: unable to create path!\n", __func__); 1694 return; 1695 } 1696 ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_WAITOK | M_ZERO); 1697 xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/ 1); 1698 1699 sim = xpt_path_sim(path); 1700 1701 /* 1702 * Copan WWN format: 1703 * 1704 * Bits 63-60: 0x5 NAA, IEEE registered name 1705 * Bits 59-36: 0x000ED5 IEEE Company name assigned to Copan 1706 * Bits 35-12: Copan SSN (Sequential Serial Number) 1707 * Bits 11-8: Type of port: 1708 * 1 == N-Port 1709 * 2 == F-Port 1710 * 3 == NL-Port 1711 * Bits 7-0: 0 == Node Name, >0 == Port Number 1712 */ 1713 1714 if (online != 0) { 1715 1716 ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; 1717 1718 CAM_SIM_LOCK(sim); 1719 1720 xpt_action(ccb); 1721 1722 CAM_SIM_UNLOCK(sim); 1723 1724 if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ 1725 #ifdef RANDOM_WWNN 1726 uint64_t random_bits; 1727 #endif 1728 1729 printf("%s: %s current WWNN %#jx\n", __func__, 1730 bus_softc->port_name, 1731 ccb->knob.xport_specific.fc.wwnn); 1732 printf("%s: %s current WWPN %#jx\n", __func__, 1733 bus_softc->port_name, 1734 ccb->knob.xport_specific.fc.wwpn); 1735 1736 #ifdef RANDOM_WWNN 1737 arc4rand(&random_bits, sizeof(random_bits), 0); 1738 #endif 1739 1740 /* 1741 * XXX KDM this is a bit of a kludge for now. We 1742 * take the current WWNN/WWPN from the card, and 1743 * replace the company identifier and the NL-Port 1744 * indicator and the port number (for the WWPN). 1745 * This should be replaced later with ddb_GetWWNN, 1746 * or possibly a more centralized scheme. (It 1747 * would be nice to have the WWNN/WWPN for each 1748 * port stored in the ctl_frontend structure.) 1749 */ 1750 #ifdef RANDOM_WWNN 1751 ccb->knob.xport_specific.fc.wwnn = 1752 (random_bits & 1753 0x0000000fffffff00ULL) | 1754 /* Company ID */ 0x5000ED5000000000ULL | 1755 /* NL-Port */ 0x0300; 1756 ccb->knob.xport_specific.fc.wwpn = 1757 (random_bits & 1758 0x0000000fffffff00ULL) | 1759 /* Company ID */ 0x5000ED5000000000ULL | 1760 /* NL-Port */ 0x3000 | 1761 /* Port Num */ (bus_softc->fe.targ_port & 0xff); 1762 1763 /* 1764 * This is a bit of an API break/reversal, but if 1765 * we're doing the random WWNN that's a little 1766 * different anyway. So record what we're actually 1767 * using with the frontend code so it's reported 1768 * accurately. 1769 */ 1770 bus_softc->fe.wwnn = 1771 ccb->knob.xport_specific.fc.wwnn; 1772 bus_softc->fe.wwpn = 1773 ccb->knob.xport_specific.fc.wwpn; 1774 set_wwnn = 1; 1775 #else /* RANDOM_WWNN */ 1776 /* 1777 * If the user has specified a WWNN/WWPN, send them 1778 * down to the SIM. Otherwise, record what the SIM 1779 * has reported. 
1780 */ 1781 if ((bus_softc->fe.wwnn != 0) 1782 && (bus_softc->fe.wwpn != 0)) { 1783 ccb->knob.xport_specific.fc.wwnn = 1784 bus_softc->fe.wwnn; 1785 ccb->knob.xport_specific.fc.wwpn = 1786 bus_softc->fe.wwpn; 1787 set_wwnn = 1; 1788 } else { 1789 bus_softc->fe.wwnn = 1790 ccb->knob.xport_specific.fc.wwnn; 1791 bus_softc->fe.wwpn = 1792 ccb->knob.xport_specific.fc.wwpn; 1793 } 1794 #endif /* RANDOM_WWNN */ 1795 1796 1797 if (set_wwnn != 0) { 1798 printf("%s: %s new WWNN %#jx\n", __func__, 1799 bus_softc->port_name, 1800 ccb->knob.xport_specific.fc.wwnn); 1801 printf("%s: %s new WWPN %#jx\n", __func__, 1802 bus_softc->port_name, 1803 ccb->knob.xport_specific.fc.wwpn); 1804 } 1805 } else { 1806 printf("%s: %s has no valid WWNN/WWPN\n", __func__, 1807 bus_softc->port_name); 1808 } 1809 } 1810 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 1811 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 1812 if (set_wwnn != 0) 1813 ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS; 1814 1815 if (online != 0) 1816 ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET; 1817 else 1818 ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE; 1819 1820 1821 CAM_SIM_LOCK(sim); 1822 1823 xpt_action(ccb); 1824 1825 CAM_SIM_UNLOCK(sim); 1826 1827 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1828 printf("%s: SIM %s (path id %d) target %s failed with " 1829 "status %#x\n", 1830 __func__, bus_softc->port_name, bus_softc->path_id, 1831 (online != 0) ? "enable" : "disable", 1832 ccb->ccb_h.status); 1833 } else { 1834 printf("%s: SIM %s (path id %d) target %s succeeded\n", 1835 __func__, bus_softc->port_name, bus_softc->path_id, 1836 (online != 0) ? "enable" : "disable"); 1837 } 1838 1839 free(ccb, M_TEMP); 1840 xpt_free_path(path); 1841 1842 return; 1843 } 1844 1845 static void 1846 ctlfe_online(void *arg) 1847 { 1848 ctlfe_onoffline(arg, /*online*/ 1); 1849 } 1850 1851 static void 1852 ctlfe_offline(void *arg) 1853 { 1854 ctlfe_onoffline(arg, /*online*/ 0); 1855 } 1856 1857 static int 1858 ctlfe_targ_enable(void *arg, struct ctl_id targ_id) 1859 { 1860 return (0); 1861 } 1862 1863 static int 1864 ctlfe_targ_disable(void *arg, struct ctl_id targ_id) 1865 { 1866 return (0); 1867 } 1868 1869 /* 1870 * This will get called to enable a LUN on every bus that is attached to 1871 * CTL. So we only need to create a path/periph for this particular bus. 1872 */ 1873 static int 1874 ctlfe_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) 1875 { 1876 struct ctlfe_softc *bus_softc; 1877 struct ctlfe_lun_softc *softc; 1878 struct cam_path *path; 1879 struct cam_periph *periph; 1880 struct cam_sim *sim; 1881 cam_status status; 1882 1883 1884 bus_softc = (struct ctlfe_softc *)arg; 1885 1886 status = xpt_create_path_unlocked(&path, /*periph*/ NULL, 1887 bus_softc->path_id, 1888 targ_id.id, 1889 lun_id); 1890 /* XXX KDM need some way to return status to CTL here? */ 1891 if (status != CAM_REQ_CMP) { 1892 printf("%s: could not create path, status %#x\n", __func__, 1893 status); 1894 return (1); 1895 } 1896 1897 softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); 1898 sim = xpt_path_sim(path); 1899 mtx_lock(sim->mtx); 1900 periph = cam_periph_find(path, "ctl"); 1901 if (periph != NULL) { 1902 /* We've already got a periph, no need to alloc a new one. 
*/ 1903 xpt_free_path(path); 1904 free(softc, M_CTLFE); 1905 mtx_unlock(sim->mtx); 1906 return (0); 1907 } 1908 1909 softc->parent_softc = bus_softc; 1910 STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); 1911 1912 status = cam_periph_alloc(ctlferegister, 1913 ctlfeoninvalidate, 1914 ctlfecleanup, 1915 ctlfestart, 1916 "ctl", 1917 CAM_PERIPH_BIO, 1918 path, 1919 ctlfeasync, 1920 0, 1921 softc); 1922 1923 mtx_unlock(sim->mtx); 1924 1925 xpt_free_path(path); 1926 1927 return (0); 1928 } 1929 1930 /* 1931 * XXX KDM we disable LUN removal here. The problem is that the isp(4) 1932 * driver doesn't currently handle LUN removal properly. We need to keep 1933 * enough state here at the peripheral level even after LUNs have been 1934 * removed inside CTL. 1935 * 1936 * Once the isp(4) driver is fixed, this can be re-enabled. 1937 */ 1938 static int 1939 ctlfe_lun_disable(void *arg, struct ctl_id targ_id, int lun_id) 1940 { 1941 #ifdef NOTYET 1942 struct ctlfe_softc *softc; 1943 struct ctlfe_lun_softc *lun_softc; 1944 1945 softc = (struct ctlfe_softc *)arg; 1946 1947 mtx_lock(softc->sim->mtx); 1948 STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) { 1949 struct cam_path *path; 1950 1951 path = lun_softc->periph->path; 1952 1953 if ((xpt_path_target_id(path) == targ_id.id) 1954 && (xpt_path_lun_id(path) == lun_id)) { 1955 break; 1956 } 1957 } 1958 if (lun_softc == NULL) { 1959 mtx_unlock(softc->sim->mtx); 1960 printf("%s: can't find target %d lun %d\n", __func__, 1961 targ_id.id, lun_id); 1962 return (1); 1963 } 1964 1965 cam_periph_invalidate(lun_softc->periph); 1966 1967 mtx_unlock(softc->sim->mtx); 1968 #endif 1969 1970 return (0); 1971 } 1972 1973 static void 1974 ctlfe_dump_sim(struct cam_sim *sim) 1975 { 1976 int i; 1977 1978 printf("%s%d: max tagged openings: %d, max dev openings: %d\n", 1979 sim->sim_name, sim->unit_number, 1980 sim->max_tagged_dev_openings, sim->max_dev_openings); 1981 printf("%s%d: max_ccbs: %u, ccb_count: %u\n", 1982 sim->sim_name, sim->unit_number, 1983 sim->max_ccbs, sim->ccb_count); 1984 printf("%s%d: ccb_freeq is %sempty\n", 1985 sim->sim_name, sim->unit_number, 1986 (SLIST_FIRST(&sim->ccb_freeq) == NULL) ? "" : "NOT "); 1987 printf("%s%d: alloc_queue.entries %d, alloc_openings %d\n", 1988 sim->sim_name, sim->unit_number, 1989 sim->devq->alloc_queue.entries, sim->devq->alloc_openings); 1990 printf("%s%d: qfrozen_cnt:", sim->sim_name, sim->unit_number); 1991 for (i = 0; i < CAM_RL_VALUES; i++) { 1992 printf("%s%u", (i != 0) ? ":" : "", 1993 sim->devq->alloc_queue.qfrozen_cnt[i]); 1994 } 1995 printf("\n"); 1996 } 1997 1998 /* 1999 * Assumes that the SIM lock is held. 2000 */ 2001 static void 2002 ctlfe_dump_queue(struct ctlfe_lun_softc *softc) 2003 { 2004 struct ccb_hdr *hdr; 2005 struct cam_periph *periph; 2006 int num_items; 2007 2008 periph = softc->periph; 2009 num_items = 0; 2010 2011 TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) { 2012 union ctl_io *io; 2013 2014 io = hdr->io_ptr; 2015 2016 num_items++; 2017 2018 /* 2019 * This can happen when we get an ATIO but can't allocate 2020 * a ctl_io. See the XPT_ACCEPT_TARGET_IO case in ctlfedone(). 2021 */ 2022 if (io == NULL) { 2023 struct ccb_scsiio *csio; 2024 2025 csio = (struct ccb_scsiio *)hdr; 2026 2027 xpt_print(periph->path, "CCB %#x ctl_io allocation " 2028 "failed\n", csio->tag_id); 2029 continue; 2030 } 2031 2032 /* 2033 * Only regular SCSI I/O is put on the work 2034 * queue, so we can print sense here. 
                 * There may be no sense if it's on the queue for a DMA,
                 * but this serves to print out the CCB as well.
                 *
                 * XXX KDM switch this over to scsi_sense_print() when
                 * CTL is merged in with CAM.
                 */
                ctl_io_error_print(io, NULL);

                /*
                 * We're sending status back to the
                 * initiator, so we're on the queue waiting
                 * for a CTIO to do that.
                 */
                if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
                        continue;

                /*
                 * Otherwise, we're on the queue waiting to
                 * do a data transfer.
                 */
                xpt_print(periph->path, "Total %u, Current %u, Resid %u\n",
                    io->scsiio.kern_total_len, io->scsiio.kern_data_len,
                    io->scsiio.kern_data_resid);
        }

        xpt_print(periph->path, "%d requests total waiting for CCBs\n",
            num_items);
        xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju "
            "freed)\n", (uintmax_t)(softc->ccbs_alloced -
            softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced,
            (uintmax_t)softc->ccbs_freed);
        xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju "
            "returned)\n", (uintmax_t)(softc->ctios_sent -
            softc->ctios_returned), (uintmax_t)softc->ctios_sent,
            (uintmax_t)softc->ctios_returned);
}

/*
 * This function is called when we fail to get a CCB for a DMA or status
 * return to the initiator within the specified time period.
 *
 * The callout code should ensure that we hold the sim mutex here.
 */
static void
ctlfe_dma_timeout(void *arg)
{
        struct ctlfe_lun_softc *softc;
        struct cam_periph *periph;
        struct cam_sim *sim;
        int num_queued;

        softc = (struct ctlfe_lun_softc *)arg;
        periph = softc->periph;
        sim = xpt_path_sim(periph->path);
        num_queued = 0;

        /*
         * Nothing to do...
         */
        if (TAILQ_FIRST(&softc->work_queue) == NULL) {
                xpt_print(periph->path, "TIMEOUT triggered after %d "
                    "seconds, but nothing on work queue??\n",
                    CTLFE_DMA_TIMEOUT);
                return;
        }

        xpt_print(periph->path, "TIMEOUT (%d seconds) waiting for DMA to "
            "start\n", CTLFE_DMA_TIMEOUT);

        ctlfe_dump_queue(softc);

        ctlfe_dump_sim(sim);

        xpt_print(periph->path, "calling xpt_schedule() to attempt to "
            "unstick our queue\n");

        xpt_schedule(periph, /*priority*/ 1);

        xpt_print(periph->path, "xpt_schedule() call complete\n");
}

/*
 * Datamove/done routine called by CTL.  Put ourselves on the queue to
 * receive a CCB from CAM so we can queue the continue I/O request down
 * to the adapter.
 */
static void
ctlfe_datamove_done(union ctl_io *io)
{
        union ccb *ccb;
        struct cam_sim *sim;
        struct cam_periph *periph;
        struct ctlfe_lun_softc *softc;

        ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

        sim = xpt_path_sim(ccb->ccb_h.path);

        mtx_lock(sim->mtx);

        periph = xpt_path_periph(ccb->ccb_h.path);

        softc = (struct ctlfe_lun_softc *)periph->softc;

        if (io->io_hdr.io_type == CTL_IO_TASK) {
                /*
                 * Task management commands don't require any further
                 * communication back to the adapter.  Requeue the CCB
                 * to the adapter, and free the CTL I/O.
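                 * (The notify acknowledge sent below completes the
                 * immediate notify that delivered this task management
                 * request.)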
2144 */ 2145 xpt_print(ccb->ccb_h.path, "%s: returning task I/O " 2146 "tag %#x seq %#x\n", __func__, 2147 ccb->cin1.tag_id, ccb->cin1.seq_id); 2148 /* 2149 * Send the notify acknowledge down to the SIM, to let it 2150 * know we processed the task management command. 2151 */ 2152 ccb->ccb_h.status = CAM_REQ_INPROG; 2153 ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; 2154 xpt_action(ccb); 2155 ctl_free_io(io); 2156 } else { 2157 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) 2158 io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; 2159 else 2160 io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; 2161 2162 TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, 2163 periph_links.tqe); 2164 2165 /* 2166 * Reset the timeout for our latest active DMA. 2167 */ 2168 callout_reset(&softc->dma_callout, 2169 CTLFE_DMA_TIMEOUT * hz, 2170 ctlfe_dma_timeout, softc); 2171 /* 2172 * Ask for the CAM transport layer to send us a CCB to do 2173 * the DMA or send status, unless ctlfe_dma_enabled is set 2174 * to 0. 2175 */ 2176 if (ctlfe_dma_enabled != 0) 2177 xpt_schedule(periph, /*priority*/ 1); 2178 } 2179 2180 mtx_unlock(sim->mtx); 2181 } 2182 2183 static void 2184 ctlfe_dump(void) 2185 { 2186 struct ctlfe_softc *bus_softc; 2187 2188 STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { 2189 struct ctlfe_lun_softc *lun_softc; 2190 2191 ctlfe_dump_sim(bus_softc->sim); 2192 2193 STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) { 2194 ctlfe_dump_queue(lun_softc); 2195 } 2196 } 2197 } 2198