/*-
 * Implementation of the Target Mode 'Black Hole device' for CAM.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1999 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

static MALLOC_DEFINE(M_SCSIBH, "SCSI bh", "SCSI blackhole buffers");

typedef enum {
	TARGBH_STATE_NORMAL,
	TARGBH_STATE_EXCEPTION,
	TARGBH_STATE_TEARDOWN
} targbh_state;

typedef enum {
	TARGBH_FLAG_NONE	= 0x00,
	TARGBH_FLAG_LUN_ENABLED	= 0x01
} targbh_flags;

typedef enum {
	TARGBH_CCB_WORKQ
} targbh_ccb_types;

#define MAX_ACCEPT	8
#define MAX_IMMEDIATE	16
#define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */

/* Offsets into our private CCB area for storing accept information */
#define ccb_type	ppriv_field0
#define ccb_descr	ppriv_ptr1

/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
#define ccb_atio	ppriv_ptr1

TAILQ_HEAD(ccb_queue, ccb_hdr);

struct targbh_softc {
	struct ccb_queue	 pending_queue;
	struct ccb_queue	 work_queue;
	struct ccb_queue	 unknown_atio_queue;
	struct devstat		 device_stats;
	targbh_state		 state;
	targbh_flags		 flags;
	u_int			 init_level;
	u_int			 inq_data_len;
	struct ccb_accept_tio	*accept_tio_list;
	struct ccb_hdr_slist	 immed_notify_slist;
};

struct targbh_cmd_desc {
	struct ccb_accept_tio	*atio_link;
	u_int	  data_resid;	/* How much left to transfer */
	u_int	  data_increment;/* Amount to send before next disconnect */
	void	 *data;		/* The data. Can be from backing_store or not */
	void	 *backing_store;/* Backing store allocated for this descriptor */
	u_int	  max_size;	/* Size of backing_store */
	uint32_t  timeout;
	uint8_t	  status;	/* Status to return to initiator */
};

static struct scsi_inquiry_data no_lun_inq_data =
{
	T_NODEVICE | (SID_QUAL_BAD_LU << 5), 0,
	/* version */2, /* format version */2
};

static struct scsi_sense_data_fixed no_lun_sense_data =
{
	SSD_CURRENT_ERROR|SSD_ERRCODE_VALID,
	0,
	SSD_KEY_NOT_READY,
	{ 0, 0, 0, 0 },
	/*extra_len*/offsetof(struct scsi_sense_data_fixed, fru)
		   - offsetof(struct scsi_sense_data_fixed, extra_len),
	{ 0, 0, 0, 0 },
	/* Logical Unit Not Supported */
	/*ASC*/0x25, /*ASCQ*/0
};

static const int request_sense_size = offsetof(struct scsi_sense_data_fixed, fru);

static periph_init_t	targbhinit;
static void		targbhasync(void *callback_arg, uint32_t code,
				    struct cam_path *path, void *arg);
static cam_status	targbhenlun(struct cam_periph *periph);
static cam_status	targbhdislun(struct cam_periph *periph);
static periph_ctor_t	targbhctor;
static periph_dtor_t	targbhdtor;
static periph_start_t	targbhstart;
static void		targbhdone(struct cam_periph *periph,
				   union ccb *done_ccb);
#ifdef NOTYET
static int		targbherror(union ccb *ccb, uint32_t cam_flags,
				    uint32_t sense_flags);
#endif
static struct targbh_cmd_desc*	targbhallocdescr(void);
static void		targbhfreedescr(struct targbh_cmd_desc *buf);

static struct periph_driver targbhdriver =
{
	targbhinit, "targbh",
	TAILQ_HEAD_INITIALIZER(targbhdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(targbh, targbhdriver);

static void
targbhinit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new path registered".
	 */
	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED,
				    targbhasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("targbh: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	}
}

static void
targbhasync(void *callback_arg, uint32_t code,
	    struct cam_path *path, void *arg)
{
	struct cam_path *new_path;
	struct ccb_pathinq *cpi;
	path_id_t bus_path_id;
	cam_status status;

	cpi = (struct ccb_pathinq *)arg;
	if (code == AC_PATH_REGISTERED)
		bus_path_id = cpi->ccb_h.path_id;
	else
		bus_path_id = xpt_path_path_id(path);
	/*
	 * Allocate a peripheral instance for
	 * this target instance.
	 */
	status = xpt_create_path(&new_path, NULL,
				 bus_path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("targbhasync: Unable to create path "
		       "due to status 0x%x\n", status);
		return;
	}

	switch (code) {
	case AC_PATH_REGISTERED:
	{
		/* Only attach to controllers that support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0)
			break;

		status = cam_periph_alloc(targbhctor, NULL, targbhdtor,
					  targbhstart,
					  "targbh", CAM_PERIPH_BIO,
					  new_path, targbhasync,
					  AC_PATH_REGISTERED,
					  cpi);
		break;
	}
	case AC_PATH_DEREGISTERED:
	{
		struct cam_periph *periph;

		if ((periph = cam_periph_find(new_path, "targbh")) != NULL)
			cam_periph_invalidate(periph);
		break;
	}
	default:
		break;
	}
	xpt_free_path(new_path);
}

/* Attempt to enable our lun */
static cam_status
targbhenlun(struct cam_periph *periph)
{
	union ccb immed_ccb;
	struct targbh_softc *softc;
	cam_status status;
	int i;

	softc = (struct targbh_softc *)periph->softc;

	if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) != 0)
		return (CAM_REQ_CMP);

	memset(&immed_ccb, 0, sizeof(immed_ccb));
	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	immed_ccb.ccb_h.func_code = XPT_EN_LUN;

	/* Don't need support for any vendor specific commands */
	immed_ccb.cel.grp6_len = 0;
	immed_ccb.cel.grp7_len = 0;
	immed_ccb.cel.enable = 1;
	xpt_action(&immed_ccb);
	status = immed_ccb.ccb_h.status;
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path,
		    "targbhenlun - Enable Lun Rejected with status 0x%x\n",
		    status);
		return (status);
	}

	softc->flags |= TARGBH_FLAG_LUN_ENABLED;

	/*
	 * Build up a buffer of accept target I/O
	 * operations for incoming selections.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_accept_tio *atio;

		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_SCSIBH,
						      M_ZERO | M_NOWAIT);
		if (atio == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		atio->ccb_h.ccb_descr = targbhallocdescr();

		if (atio->ccb_h.ccb_descr == NULL) {
			free(atio, M_SCSIBH);
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&atio->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.cbfcnp = targbhdone;
		((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
		    softc->accept_tio_list;
		softc->accept_tio_list = atio;
		xpt_action((union ccb *)atio);
		status = atio->ccb_h.status;
		if (status != CAM_REQ_INPROG)
			break;
	}

	if (i == 0) {
		xpt_print(periph->path,
		    "targbhenlun - Could not allocate accept tio CCBs: status "
		    "= 0x%x\n", status);
		targbhdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Build up a buffer of immediate notify CCBs
	 * so the SIM can tell us of asynchronous target mode events.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_immediate_notify *inot;

		inot = (struct ccb_immediate_notify*)malloc(sizeof(*inot),
							    M_SCSIBH,
							    M_ZERO | M_NOWAIT);

		if (inot == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&inot->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		inot->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		inot->ccb_h.cbfcnp = targbhdone;
		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
				  periph_links.sle);
		xpt_action((union ccb *)inot);
		status = inot->ccb_h.status;
		if (status != CAM_REQ_INPROG)
			break;
	}

	if (i == 0) {
		xpt_print(periph->path,
		    "targbhenlun - Could not allocate immediate notify "
		    "CCBs: status = 0x%x\n", status);
		targbhdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	return (CAM_REQ_CMP);
}

static cam_status
targbhdislun(struct cam_periph *periph)
{
	union ccb ccb;
	struct targbh_softc *softc;
	struct ccb_accept_tio *atio;
	struct ccb_hdr *ccb_h;

	softc = (struct targbh_softc *)periph->softc;
	if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) == 0)
		return CAM_REQ_CMP;

	memset(&ccb, 0, sizeof(ccb));

	/* XXX Block for Continue I/O completion */

	/* Kill off all ACCEPT and IMMEDIATE CCBs */
	while ((atio = softc->accept_tio_list) != NULL) {
		softc->accept_tio_list =
		    ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)atio;
		xpt_action(&ccb);
	}

	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)ccb_h;
		xpt_action(&ccb);
	}

	/*
	 * Disable this lun.
	 */
	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.enable = 0;
	xpt_action(&ccb);

	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
		printf("targbhdislun - Disabling lun on controller failed "
		       "with status 0x%x\n", ccb.cel.ccb_h.status);
	else
		softc->flags &= ~TARGBH_FLAG_LUN_ENABLED;
	return (ccb.cel.ccb_h.status);
}

static cam_status
targbhctor(struct cam_periph *periph, void *arg)
{
	struct targbh_softc *softc;

	/* Allocate our per-instance private storage */
	softc = (struct targbh_softc *)malloc(sizeof(*softc),
					      M_SCSIBH, M_NOWAIT);
	if (softc == NULL) {
		printf("targbhctor: unable to malloc softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	TAILQ_INIT(&softc->pending_queue);
	TAILQ_INIT(&softc->work_queue);
	softc->accept_tio_list = NULL;
	SLIST_INIT(&softc->immed_notify_slist);
	softc->state = TARGBH_STATE_NORMAL;
	periph->softc = softc;
	softc->init_level++;

	if (targbhenlun(periph) != CAM_REQ_CMP)
		cam_periph_invalidate(periph);
	return (CAM_REQ_CMP);
}

static void
targbhdtor(struct cam_periph *periph)
{
	struct targbh_softc *softc;

	softc = (struct targbh_softc *)periph->softc;

	softc->state = TARGBH_STATE_TEARDOWN;

	targbhdislun(periph);

	switch (softc->init_level) {
	case 0:
		panic("targbhdtor - impossible init level");
	case 1:
		/* FALLTHROUGH */
	default:
		/* XXX Wait for callback of targbhdislun() */
		cam_periph_sleep(periph, softc, PRIBIO, "targbh", hz/2);
		free(softc, M_SCSIBH);
		break;
	}
}

static void
targbhstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct targbh_softc *softc;
	struct ccb_hdr *ccbh;
	struct ccb_accept_tio *atio;
	struct targbh_cmd_desc *desc;
	struct ccb_scsiio *csio;
	ccb_flags flags;

	softc = (struct targbh_softc *)periph->softc;

	ccbh = TAILQ_FIRST(&softc->work_queue);
	if (ccbh == NULL) {
		xpt_release_ccb(start_ccb);
	} else {
		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
		TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh,
				  periph_links.tqe);
		atio = (struct ccb_accept_tio*)ccbh;
		desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr;

		/* Is this a tagged request? */
		flags = atio->ccb_h.flags &
		    (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		csio = &start_ccb->csio;
		/*
		 * If we are done with the transaction, tell the
		 * controller to send status and perform a CMD_CMPLT.
		 * If we have associated sense data, see if we can
		 * send that too.
		 */
		if (desc->data_resid == desc->data_increment) {
			flags |= CAM_SEND_STATUS;
			if (atio->sense_len) {
				csio->sense_len = atio->sense_len;
				csio->sense_data = atio->sense_data;
				flags |= CAM_SEND_SENSE;
			}
		}

		cam_fill_ctio(csio,
			      /*retries*/2,
			      targbhdone,
			      flags,
			      (flags & CAM_TAG_ACTION_VALID) ?
				  MSG_SIMPLE_Q_TAG : 0,
			      atio->tag_id,
			      atio->init_id,
			      desc->status,
			      /*data_ptr*/desc->data_increment == 0
					  ? NULL : desc->data,
			      /*dxfer_len*/desc->data_increment,
			      /*timeout*/desc->timeout);

		/* Override our wildcard attachment */
		start_ccb->ccb_h.target_id = atio->ccb_h.target_id;
		start_ccb->ccb_h.target_lun = atio->ccb_h.target_lun;

		start_ccb->ccb_h.ccb_type = TARGBH_CCB_WORKQ;
		start_ccb->ccb_h.ccb_atio = atio;
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Sending a CTIO\n"));
		xpt_action(start_ccb);
		/*
		 * If the queue was frozen waiting for the response
		 * to this ATIO (for instance disconnection was disallowed),
		 * then release it now that our response has been queued.
		 */
		if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			cam_release_devq(periph->path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
			atio->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
		ccbh = TAILQ_FIRST(&softc->work_queue);
	}
	if (ccbh != NULL)
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
}

static void
targbhdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct targbh_softc *softc;

	softc = (struct targbh_softc *)periph->softc;

	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targbh_cmd_desc *descr;
		uint8_t *cdb;
		int priority;

		atio = &done_ccb->atio;
		descr = (struct targbh_cmd_desc*)atio->ccb_h.ccb_descr;
		cdb = atio->cdb_io.cdb_bytes;
		if (softc->state == TARGBH_STATE_TEARDOWN
		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
			targbhfreedescr(descr);
			xpt_free_ccb(done_ccb);
			return;
		}

		/*
		 * Determine the type of incoming command and
		 * set up our buffer for a response.
		 */
		switch (cdb[0]) {
		case INQUIRY:
		{
			struct scsi_inquiry *inq;

			inq = (struct scsi_inquiry *)cdb;
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("Saw an inquiry!\n"));
			/*
			 * Validate the command.  We don't
			 * support any VPD pages, so complain
			 * if EVPD is set.
			 */
			if ((inq->byte2 & SI_EVPD) != 0
			 || inq->page_code != 0) {
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				atio->ccb_h.flags |= CAM_DIR_NONE;
				/*
				 * This needs to have other than a
				 * no_lun_sense_data response.
				 */
				bcopy(&no_lun_sense_data, &atio->sense_data,
				      min(sizeof(no_lun_sense_data),
					  sizeof(atio->sense_data)));
				atio->sense_len = sizeof(no_lun_sense_data);
				descr->data_resid = 0;
				descr->data_increment = 0;
				descr->status = SCSI_STATUS_CHECK_COND;
				break;
			}
			/*
			 * Direction is always relative
			 * to the initiator.
			 */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_IN;
			descr->data = &no_lun_inq_data;
			descr->data_resid = MIN(sizeof(no_lun_inq_data),
						scsi_2btoul(inq->length));
			descr->data_increment = descr->data_resid;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_OK;
			break;
		}
		case REQUEST_SENSE:
		{
			struct scsi_request_sense *rsense;

			rsense = (struct scsi_request_sense *)cdb;
			/* Refer to static sense data */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_IN;
			descr->data = &no_lun_sense_data;
			descr->data_resid = request_sense_size;
			descr->data_resid = MIN(descr->data_resid,
						SCSI_CDB6_LEN(rsense->length));
			descr->data_increment = descr->data_resid;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_OK;
			break;
		}
		default:
			/* Constant CA, tell initiator */
			/* Direction is always relative to the initiator */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
			bcopy(&no_lun_sense_data, &atio->sense_data,
			      min(sizeof(no_lun_sense_data),
				  sizeof(atio->sense_data)));
			atio->sense_len = sizeof(no_lun_sense_data);
			descr->data_resid = 0;
			descr->data_increment = 0;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_CHECK_COND;
			break;
		}

		/* Queue us up to receive a Continue Target I/O ccb. */
		if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) {
			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			priority = 0;
		} else {
			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			priority = CAM_PRIORITY_NORMAL;
		}
		xpt_schedule(periph, priority);
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targbh_cmd_desc *desc;

		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Received completed CTIO\n"));
		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
		desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr;

		TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h,
			     periph_links.tqe);

		/*
		 * We could check for CAM_SENT_SENSE being set here,
		 * but since we're not maintaining any CA/UA state,
		 * there's no point.
		 */
		atio->sense_len = 0;
		done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;

		/*
		 * Any errors will not change the data we return,
		 * so make sure the queue is not left frozen.
		 * XXX - At some point there may be errors that
		 *	 leave us in a connected state with the
		 *	 initiator...
		 */
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			printf("Releasing Queue\n");
			cam_release_devq(done_ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
		desc->data_resid -= desc->data_increment;
		xpt_release_ccb(done_ccb);
		if (softc->state != TARGBH_STATE_TEARDOWN) {
			/*
			 * Send the original accept TIO back to the
			 * controller to handle more work.
			 */
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("Returning ATIO to target\n"));
			/* Restore wildcards */
			atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
			atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
			xpt_action((union ccb *)atio);
			break;
		} else {
			targbhfreedescr(desc);
			free(atio, M_SCSIBH);
		}
		break;
	}
	case XPT_IMMEDIATE_NOTIFY:
	{
		int frozen;

		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
		if (softc->state == TARGBH_STATE_TEARDOWN
		 || done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
			printf("Freed an immediate notify\n");
			xpt_free_ccb(done_ccb);
		} else {
			/* Requeue for another immediate event */
			xpt_action(done_ccb);
		}
		if (frozen != 0)
			cam_release_devq(periph->path,
					 /*relsim_flags*/0,
					 /*opening reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		break;
	}
	default:
		panic("targbhdone: Unexpected ccb opcode");
		break;
	}
}

#ifdef NOTYET
static int
targbherror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags)
{
	return 0;
}
#endif

static struct targbh_cmd_desc*
targbhallocdescr(void)
{
	struct targbh_cmd_desc *descr;

	/* Allocate the targbh_descr structure */
	descr = (struct targbh_cmd_desc *)malloc(sizeof(*descr),
						 M_SCSIBH, M_NOWAIT);
	if (descr == NULL)
		return (NULL);

	bzero(descr, sizeof(*descr));

	/* Allocate buffer backing store */
	descr->backing_store = malloc(MAX_BUF_SIZE, M_SCSIBH, M_NOWAIT);
	if (descr->backing_store == NULL) {
		free(descr, M_SCSIBH);
		return (NULL);
	}
	descr->max_size = MAX_BUF_SIZE;
	return (descr);
}

static void
targbhfreedescr(struct targbh_cmd_desc *descr)
{
	free(descr->backing_store, M_SCSIBH);
	free(descr, M_SCSIBH);
}