/*-
 * Implementation of the Target Mode 'Black Hole device' for CAM.
 *
 * Copyright (c) 1999 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

static MALLOC_DEFINE(M_SCSIBH, "SCSI bh", "SCSI blackhole buffers");

typedef enum {
	TARGBH_STATE_NORMAL,
	TARGBH_STATE_EXCEPTION,
	TARGBH_STATE_TEARDOWN
} targbh_state;

typedef enum {
	TARGBH_FLAG_NONE	= 0x00,
	TARGBH_FLAG_LUN_ENABLED	= 0x01
} targbh_flags;

typedef enum {
	TARGBH_CCB_WORKQ
} targbh_ccb_types;

#define MAX_ACCEPT	8
#define MAX_IMMEDIATE	16
#define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */

/* Offsets into our private CCB area for storing accept information */
#define ccb_type	ppriv_field0
#define ccb_descr	ppriv_ptr1

/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
#define ccb_atio	ppriv_ptr1

TAILQ_HEAD(ccb_queue, ccb_hdr);

struct targbh_softc {
	struct		ccb_queue pending_queue;
	struct		ccb_queue work_queue;
	struct		ccb_queue unknown_atio_queue;
	struct		devstat device_stats;
	targbh_state	state;
	targbh_flags	flags;
	u_int		init_level;
	u_int		inq_data_len;
	struct		ccb_accept_tio *accept_tio_list;
	struct		ccb_hdr_slist immed_notify_slist;
};

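/*
 * Per-command state hung off of each accept TIO.  It tracks how much of
 * the canned response data is left to transfer, where that data lives,
 * and the SCSI status to report to the initiator when the command
 * completes.
 */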
struct targbh_cmd_desc {
	struct	  ccb_accept_tio* atio_link;
	u_int	  data_resid;	/* How much left to transfer */
	u_int	  data_increment;/* Amount to send before next disconnect */
	void*	  data;		/* The data. Can be from backing_store or not */
	void*	  backing_store;/* Backing store allocated for this descriptor*/
	u_int	  max_size;	/* Size of backing_store */
	u_int32_t timeout;
	u_int8_t  status;	/* Status to return to initiator */
};

static struct scsi_inquiry_data no_lun_inq_data =
{
	T_NODEVICE | (SID_QUAL_BAD_LU << 5), 0,
	/* version */2, /* format version */2
};

static struct scsi_sense_data_fixed no_lun_sense_data =
{
	SSD_CURRENT_ERROR|SSD_ERRCODE_VALID,
	0,
	SSD_KEY_NOT_READY,
	{ 0, 0, 0, 0 },
	/*extra_len*/offsetof(struct scsi_sense_data_fixed, fru)
		   - offsetof(struct scsi_sense_data_fixed, extra_len),
	{ 0, 0, 0, 0 },
	/* Logical Unit Not Supported */
	/*ASC*/0x25, /*ASCQ*/0
};

static const int request_sense_size = offsetof(struct scsi_sense_data_fixed, fru);

static periph_init_t	targbhinit;
static void		targbhasync(void *callback_arg, u_int32_t code,
				    struct cam_path *path, void *arg);
static cam_status	targbhenlun(struct cam_periph *periph);
static cam_status	targbhdislun(struct cam_periph *periph);
static periph_ctor_t	targbhctor;
static periph_dtor_t	targbhdtor;
static periph_start_t	targbhstart;
static void		targbhdone(struct cam_periph *periph,
				   union ccb *done_ccb);
#ifdef NOTYET
static int		targbherror(union ccb *ccb, u_int32_t cam_flags,
				    u_int32_t sense_flags);
#endif
static struct targbh_cmd_desc*	targbhallocdescr(void);
static void		targbhfreedescr(struct targbh_cmd_desc *buf);

static struct periph_driver targbhdriver =
{
	targbhinit, "targbh",
	TAILQ_HEAD_INITIALIZER(targbhdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(targbh, targbhdriver);

static void
targbhinit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new path registered".
	 */
	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED,
				    targbhasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("targbh: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	}
}

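/*
 * Handle asynchronous path events.  When a new bus is registered, attach
 * an instance of this driver (wildcard target and lun) to any controller
 * that advertises processor-device target mode support; when a bus is
 * deregistered, invalidate the existing instance.
 */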
static void
targbhasync(void *callback_arg, u_int32_t code,
	    struct cam_path *path, void *arg)
{
	struct cam_path *new_path;
	struct ccb_pathinq *cpi;
	path_id_t bus_path_id;
	cam_status status;

	cpi = (struct ccb_pathinq *)arg;
	if (code == AC_PATH_REGISTERED)
		bus_path_id = cpi->ccb_h.path_id;
	else
		bus_path_id = xpt_path_path_id(path);
	/*
	 * Allocate a peripheral instance for
	 * this target instance.
	 */
	status = xpt_create_path(&new_path, NULL,
				 bus_path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("targbhasync: Unable to create path "
		       "due to status 0x%x\n", status);
		return;
	}

	switch (code) {
	case AC_PATH_REGISTERED:
	{
		/* Only attach to controllers that support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0)
			break;

		status = cam_periph_alloc(targbhctor, NULL, targbhdtor,
					  targbhstart,
					  "targbh", CAM_PERIPH_BIO,
					  new_path, targbhasync,
					  AC_PATH_REGISTERED,
					  cpi);
		break;
	}
	case AC_PATH_DEREGISTERED:
	{
		struct cam_periph *periph;

		if ((periph = cam_periph_find(new_path, "targbh")) != NULL)
			cam_periph_invalidate(periph);
		break;
	}
	default:
		break;
	}
	xpt_free_path(new_path);
}

/* Attempt to enable our lun */
static cam_status
targbhenlun(struct cam_periph *periph)
{
	union ccb immed_ccb;
	struct targbh_softc *softc;
	cam_status status;
	int i;

	softc = (struct targbh_softc *)periph->softc;

	if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) != 0)
		return (CAM_REQ_CMP);

	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	immed_ccb.ccb_h.func_code = XPT_EN_LUN;

	/* Don't need support for any vendor specific commands */
	immed_ccb.cel.grp6_len = 0;
	immed_ccb.cel.grp7_len = 0;
	immed_ccb.cel.enable = 1;
	xpt_action(&immed_ccb);
	status = immed_ccb.ccb_h.status;
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path,
		    "targbhenlun - Enable Lun Rejected with status 0x%x\n",
		    status);
		return (status);
	}

	softc->flags |= TARGBH_FLAG_LUN_ENABLED;

	/*
	 * Build up a buffer of accept target I/O
	 * operations for incoming selections.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_accept_tio *atio;

		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_SCSIBH,
						      M_NOWAIT);
		if (atio == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		atio->ccb_h.ccb_descr = targbhallocdescr();

		if (atio->ccb_h.ccb_descr == NULL) {
			free(atio, M_SCSIBH);
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&atio->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.cbfcnp = targbhdone;
		((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
		    softc->accept_tio_list;
		softc->accept_tio_list = atio;
		xpt_action((union ccb *)atio);
		status = atio->ccb_h.status;
		if (status != CAM_REQ_INPROG)
			break;
	}

	if (i == 0) {
		xpt_print(periph->path,
		    "targbhenlun - Could not allocate accept tio CCBs: status "
		    "= 0x%x\n", status);
		targbhdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Build up a buffer of immediate notify CCBs
	 * so the SIM can tell us of asynchronous target mode events.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_immediate_notify *inot;

		inot = (struct ccb_immediate_notify*)malloc(sizeof(*inot),
			    M_SCSIBH, M_NOWAIT);

		if (inot == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&inot->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		inot->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		inot->ccb_h.cbfcnp = targbhdone;
		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
				  periph_links.sle);
		xpt_action((union ccb *)inot);
		status = inot->ccb_h.status;
		if (status != CAM_REQ_INPROG)
			break;
	}

	if (i == 0) {
		xpt_print(periph->path,
		    "targbhenlun - Could not allocate immediate notify "
		    "CCBs: status = 0x%x\n", status);
		targbhdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	return (CAM_REQ_CMP);
}

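/*
 * Disable our lun: abort any outstanding accept TIO and immediate notify
 * CCBs, then ask the SIM to stop accepting selections on this lun.
 */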
static cam_status
targbhdislun(struct cam_periph *periph)
{
	union ccb ccb;
	struct targbh_softc *softc;
	struct ccb_accept_tio* atio;
	struct ccb_hdr *ccb_h;

	softc = (struct targbh_softc *)periph->softc;
	if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) == 0)
		return CAM_REQ_CMP;

	/* XXX Block for Continue I/O completion */

	/* Kill off all ACCEPT and IMMEDIATE CCBs */
	while ((atio = softc->accept_tio_list) != NULL) {

		softc->accept_tio_list =
		    ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)atio;
		xpt_action(&ccb);
	}

	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)ccb_h;
		xpt_action(&ccb);
	}

	/*
	 * Disable this lun.
	 */
	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.enable = 0;
	xpt_action(&ccb);

	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
		printf("targbhdislun - Disabling lun on controller failed "
		       "with status 0x%x\n", ccb.cel.ccb_h.status);
	else
		softc->flags &= ~TARGBH_FLAG_LUN_ENABLED;
	return (ccb.cel.ccb_h.status);
}

static cam_status
targbhctor(struct cam_periph *periph, void *arg)
{
	struct targbh_softc *softc;

	/* Allocate our per-instance private storage */
	softc = (struct targbh_softc *)malloc(sizeof(*softc),
					      M_SCSIBH, M_NOWAIT);
	if (softc == NULL) {
		printf("targctor: unable to malloc softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	TAILQ_INIT(&softc->pending_queue);
	TAILQ_INIT(&softc->work_queue);
	softc->accept_tio_list = NULL;
	SLIST_INIT(&softc->immed_notify_slist);
	softc->state = TARGBH_STATE_NORMAL;
	periph->softc = softc;
	softc->init_level++;

	if (targbhenlun(periph) != CAM_REQ_CMP)
		cam_periph_invalidate(periph);
	return (CAM_REQ_CMP);
}

static void
targbhdtor(struct cam_periph *periph)
{
	struct targbh_softc *softc;

	softc = (struct targbh_softc *)periph->softc;

	softc->state = TARGBH_STATE_TEARDOWN;

	targbhdislun(periph);

	switch (softc->init_level) {
	case 0:
		panic("targdtor - impossible init level");
	case 1:
		/* FALLTHROUGH */
	default:
		/* XXX Wait for callback of targbhdislun() */
		cam_periph_sleep(periph, softc, PRIBIO, "targbh", hz/2);
		free(softc, M_SCSIBH);
		break;
	}
}

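/*
 * Process the next ATIO on our work queue.  A continue target I/O is
 * built from the command descriptor attached to the ATIO and handed to
 * the controller, transferring any canned response data and, on the
 * final transfer, the completion status (and sense data if present).
 */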
static void
targbhstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct targbh_softc *softc;
	struct ccb_hdr *ccbh;
	struct ccb_accept_tio *atio;
	struct targbh_cmd_desc *desc;
	struct ccb_scsiio *csio;
	ccb_flags flags;

	softc = (struct targbh_softc *)periph->softc;

	ccbh = TAILQ_FIRST(&softc->work_queue);
	if (ccbh == NULL) {
		xpt_release_ccb(start_ccb);
	} else {
		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
		TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh,
				  periph_links.tqe);
		atio = (struct ccb_accept_tio*)ccbh;
		desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr;

		/* Is this a tagged request? */
		flags = atio->ccb_h.flags &
		    (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		csio = &start_ccb->csio;
		/*
		 * If we are done with the transaction, tell the
		 * controller to send status and perform a CMD_CMPLT.
		 * If we have associated sense data, see if we can
		 * send that too.
		 */
		if (desc->data_resid == desc->data_increment) {
			flags |= CAM_SEND_STATUS;
			if (atio->sense_len) {
				csio->sense_len = atio->sense_len;
				csio->sense_data = atio->sense_data;
				flags |= CAM_SEND_SENSE;
			}

		}

		cam_fill_ctio(csio,
			      /*retries*/2,
			      targbhdone,
			      flags,
			      (flags & CAM_TAG_ACTION_VALID)?
				  MSG_SIMPLE_Q_TAG : 0,
			      atio->tag_id,
			      atio->init_id,
			      desc->status,
			      /*data_ptr*/desc->data_increment == 0
					  ? NULL : desc->data,
			      /*dxfer_len*/desc->data_increment,
			      /*timeout*/desc->timeout);

		/* Override our wildcard attachment */
		start_ccb->ccb_h.target_id = atio->ccb_h.target_id;
		start_ccb->ccb_h.target_lun = atio->ccb_h.target_lun;

		start_ccb->ccb_h.ccb_type = TARGBH_CCB_WORKQ;
		start_ccb->ccb_h.ccb_atio = atio;
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Sending a CTIO\n"));
		xpt_action(start_ccb);
		/*
		 * If the queue was frozen waiting for the response
		 * to this ATIO (for instance disconnection was disallowed),
		 * then release it now that our response has been queued.
		 */
		if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			cam_release_devq(periph->path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
			atio->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
		ccbh = TAILQ_FIRST(&softc->work_queue);
	}
	if (ccbh != NULL)
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
}

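/*
 * Completion handler for all of our CCBs.  New ATIOs are given a canned
 * response (inquiry data, sense data, or just CHECK CONDITION status)
 * and queued for a CTIO; completed CTIOs return their ATIO to the
 * controller for reuse; immediate notifies are simply requeued.  During
 * teardown the resources are freed instead.
 */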
static void
targbhdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct targbh_softc *softc;

	softc = (struct targbh_softc *)periph->softc;

	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targbh_cmd_desc *descr;
		u_int8_t *cdb;
		int priority;

		atio = &done_ccb->atio;
		descr = (struct targbh_cmd_desc*)atio->ccb_h.ccb_descr;
		cdb = atio->cdb_io.cdb_bytes;
		if (softc->state == TARGBH_STATE_TEARDOWN
		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
			targbhfreedescr(descr);
			xpt_free_ccb(done_ccb);
			return;
		}

		/*
		 * Determine the type of incoming command and
		 * setup our buffer for a response.
		 */
		switch (cdb[0]) {
		case INQUIRY:
		{
			struct scsi_inquiry *inq;

			inq = (struct scsi_inquiry *)cdb;
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("Saw an inquiry!\n"));
			/*
			 * Validate the command.  We don't
			 * support any VPD pages, so complain
			 * if EVPD is set.
			 */
			if ((inq->byte2 & SI_EVPD) != 0
			 || inq->page_code != 0) {
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				atio->ccb_h.flags |= CAM_DIR_NONE;
				/*
				 * This ought to get a response other
				 * than no_lun_sense_data.
				 */
				bcopy(&no_lun_sense_data, &atio->sense_data,
				      min(sizeof(no_lun_sense_data),
					  sizeof(atio->sense_data)));
				atio->sense_len = sizeof(no_lun_sense_data);
				descr->data_resid = 0;
				descr->data_increment = 0;
				descr->status = SCSI_STATUS_CHECK_COND;
				break;
			}
			/*
			 * Direction is always relative
			 * to the initiator.
			 */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_IN;
			descr->data = &no_lun_inq_data;
			descr->data_resid = MIN(sizeof(no_lun_inq_data),
						scsi_2btoul(inq->length));
			descr->data_increment = descr->data_resid;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_OK;
			break;
		}
		case REQUEST_SENSE:
		{
			struct scsi_request_sense *rsense;

			rsense = (struct scsi_request_sense *)cdb;
			/* Refer to static sense data */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_IN;
			descr->data = &no_lun_sense_data;
			descr->data_resid = request_sense_size;
			descr->data_resid = MIN(descr->data_resid,
						SCSI_CDB6_LEN(rsense->length));
			descr->data_increment = descr->data_resid;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_OK;
			break;
		}
		default:
			/* Constant CA, tell initiator */
			/* Direction is always relative to the initiator */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
			bcopy(&no_lun_sense_data, &atio->sense_data,
			      min(sizeof(no_lun_sense_data),
				  sizeof(atio->sense_data)));
			atio->sense_len = sizeof (no_lun_sense_data);
			descr->data_resid = 0;
			descr->data_increment = 0;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_CHECK_COND;
			break;
		}

		/* Queue us up to receive a Continue Target I/O ccb. */
		if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) {
			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			priority = 0;
		} else {
			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			priority = CAM_PRIORITY_NORMAL;
		}
		xpt_schedule(periph, priority);
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targbh_cmd_desc *desc;

		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Received completed CTIO\n"));
		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
		desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr;

		TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h,
			     periph_links.tqe);

		/*
		 * We could check for CAM_SENT_SENSE being set here,
		 * but since we're not maintaining any CA/UA state,
		 * there's no point.
		 */
		atio->sense_len = 0;
		done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;

		/*
		 * Any errors will not change the data we return,
		 * so make sure the queue is not left frozen.
		 * XXX - At some point there may be errors that
		 *       leave us in a connected state with the
		 *       initiator...
		 */
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			printf("Releasing Queue\n");
			cam_release_devq(done_ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
		desc->data_resid -= desc->data_increment;
		xpt_release_ccb(done_ccb);
		if (softc->state != TARGBH_STATE_TEARDOWN) {

			/*
			 * Send the original accept TIO back to the
			 * controller to handle more work.
			 */
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("Returning ATIO to target\n"));
			/* Restore wildcards */
			atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
			atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
			xpt_action((union ccb *)atio);
			break;
		} else {
			targbhfreedescr(desc);
			free(atio, M_SCSIBH);
		}
		break;
	}
	case XPT_IMMEDIATE_NOTIFY:
	{
		int frozen;

		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
		if (softc->state == TARGBH_STATE_TEARDOWN
		 || done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
			printf("Freed an immediate notify\n");
			xpt_free_ccb(done_ccb);
		} else {
			/* Requeue for another immediate event */
			xpt_action(done_ccb);
		}
		if (frozen != 0)
			cam_release_devq(periph->path,
					 /*relsim_flags*/0,
					 /*opening reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		break;
	}
	default:
		panic("targbhdone: Unexpected ccb opcode");
		break;
	}
}

#ifdef NOTYET
static int
targbherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	return 0;
}
#endif

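/*
 * Allocate a zeroed command descriptor along with a MAX_BUF_SIZE backing
 * store buffer that responses may be built in.
 */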
static struct targbh_cmd_desc*
targbhallocdescr()
{
	struct targbh_cmd_desc* descr;

	/* Allocate the targbh_descr structure */
	descr = (struct targbh_cmd_desc *)malloc(sizeof(*descr),
						 M_SCSIBH, M_NOWAIT);
	if (descr == NULL)
		return (NULL);

	bzero(descr, sizeof(*descr));

	/* Allocate buffer backing store */
	descr->backing_store = malloc(MAX_BUF_SIZE, M_SCSIBH, M_NOWAIT);
	if (descr->backing_store == NULL) {
		free(descr, M_SCSIBH);
		return (NULL);
	}
	descr->max_size = MAX_BUF_SIZE;
	return (descr);
}

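/* Release a command descriptor and its backing store. */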
static void
targbhfreedescr(struct targbh_cmd_desc *descr)
{
	free(descr->backing_store, M_SCSIBH);
	free(descr, M_SCSIBH);
}