/*-
 * Implementation of the Target Mode 'Black Hole device' for CAM.
 *
 * Copyright (c) 1999 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

MALLOC_DEFINE(M_SCSIBH, "SCSI bh", "SCSI blackhole buffers");

typedef enum {
        TARGBH_STATE_NORMAL,
        TARGBH_STATE_EXCEPTION,
        TARGBH_STATE_TEARDOWN
} targbh_state;

typedef enum {
        TARGBH_FLAG_NONE        = 0x00,
        TARGBH_FLAG_LUN_ENABLED = 0x01
} targbh_flags;

typedef enum {
        TARGBH_CCB_WORKQ,
        TARGBH_CCB_WAITING
} targbh_ccb_types;

#define MAX_ACCEPT      8
#define MAX_IMMEDIATE   16
#define MAX_BUF_SIZE    256     /* Max inquiry/sense/mode page transfer */

/* Offsets into our private CCB area for storing accept information */
#define ccb_type        ppriv_field0
#define ccb_descr       ppriv_ptr1

/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
#define ccb_atio        ppriv_ptr1

TAILQ_HEAD(ccb_queue, ccb_hdr);

struct targbh_softc {
        struct ccb_queue        pending_queue;
        struct ccb_queue        work_queue;
        struct ccb_queue        unknown_atio_queue;
        struct devstat          device_stats;
        targbh_state            state;
        targbh_flags            flags;
        u_int                   init_level;
        u_int                   inq_data_len;
        struct ccb_accept_tio  *accept_tio_list;
        struct ccb_hdr_slist    immed_notify_slist;
};

struct targbh_cmd_desc {
        struct ccb_accept_tio  *atio_link;
        u_int           data_resid;     /* How much left to transfer */
        u_int           data_increment; /* Amount to send before next disconnect */
        void           *data;           /* The data. Can be from backing_store or not */
        void           *backing_store;  /* Backing store allocated for this descriptor */
        u_int           max_size;       /* Size of backing_store */
        u_int32_t       timeout;
        u_int8_t        status;         /* Status to return to initiator */
};

static struct scsi_inquiry_data no_lun_inq_data =
{
        T_NODEVICE | (SID_QUAL_BAD_LU << 5), 0,
        /* version */2, /* format version */2
};

static struct scsi_sense_data no_lun_sense_data =
{
        SSD_CURRENT_ERROR|SSD_ERRCODE_VALID,
        0,
        SSD_KEY_NOT_READY,
        { 0, 0, 0, 0 },
        /*extra_len*/offsetof(struct scsi_sense_data, fru)
                   - offsetof(struct scsi_sense_data, extra_len),
        { 0, 0, 0, 0 },
        /* Logical Unit Not Supported */
        /*ASC*/0x25, /*ASCQ*/0
};

static const int request_sense_size = offsetof(struct scsi_sense_data, fru);

static periph_init_t    targbhinit;
static void             targbhasync(void *callback_arg, u_int32_t code,
                                    struct cam_path *path, void *arg);
static cam_status       targbhenlun(struct cam_periph *periph);
static cam_status       targbhdislun(struct cam_periph *periph);
static periph_ctor_t    targbhctor;
static periph_dtor_t    targbhdtor;
static periph_start_t   targbhstart;
static void             targbhdone(struct cam_periph *periph,
                                   union ccb *done_ccb);
#ifdef NOTYET
static int              targbherror(union ccb *ccb, u_int32_t cam_flags,
                                    u_int32_t sense_flags);
#endif
static struct targbh_cmd_desc*  targbhallocdescr(void);
static void             targbhfreedescr(struct targbh_cmd_desc *buf);

static struct periph_driver targbhdriver =
{
        targbhinit, "targbh",
        TAILQ_HEAD_INITIALIZER(targbhdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(targbh, targbhdriver);

static void
targbhinit(void)
{
        cam_status status;

        /*
         * Install a global async callback.  This callback will
         * receive async callbacks like "new path registered".
         */
        status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED,
                                    targbhasync, NULL, NULL);

        if (status != CAM_REQ_CMP) {
                printf("targbh: Failed to attach master async callback "
                       "due to status 0x%x!\n", status);
        }
}

static void
targbhasync(void *callback_arg, u_int32_t code,
            struct cam_path *path, void *arg)
{
        struct cam_path *new_path;
        struct ccb_pathinq *cpi;
        path_id_t bus_path_id;
        cam_status status;

        cpi = (struct ccb_pathinq *)arg;
        if (code == AC_PATH_REGISTERED)
                bus_path_id = cpi->ccb_h.path_id;
        else
                bus_path_id = xpt_path_path_id(path);
        /*
         * Allocate a peripheral instance for
         * this target instance.
         */
        status = xpt_create_path(&new_path, NULL,
                                 bus_path_id,
                                 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
        if (status != CAM_REQ_CMP) {
                printf("targbhasync: Unable to create path "
                       "due to status 0x%x\n", status);
                return;
        }

        switch (code) {
        case AC_PATH_REGISTERED:
        {
                /* Only attach to controllers that support target mode */
                if ((cpi->target_sprt & PIT_PROCESSOR) == 0)
                        break;

                status = cam_periph_alloc(targbhctor, NULL, targbhdtor,
                                          targbhstart,
                                          "targbh", CAM_PERIPH_BIO,
                                          new_path, targbhasync,
                                          AC_PATH_REGISTERED,
                                          cpi);
                break;
        }
        case AC_PATH_DEREGISTERED:
        {
                struct cam_periph *periph;

                if ((periph = cam_periph_find(new_path, "targbh")) != NULL)
                        cam_periph_invalidate(periph);
                break;
        }
        default:
                break;
        }
        xpt_free_path(new_path);
}

/* Attempt to enable our lun */
static cam_status
targbhenlun(struct cam_periph *periph)
{
        union ccb immed_ccb;
        struct targbh_softc *softc;
        cam_status status;
        int i;

        softc = (struct targbh_softc *)periph->softc;

        if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) != 0)
                return (CAM_REQ_CMP);

        xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
        immed_ccb.ccb_h.func_code = XPT_EN_LUN;

        /* Don't need support for any vendor specific commands */
        immed_ccb.cel.grp6_len = 0;
        immed_ccb.cel.grp7_len = 0;
        immed_ccb.cel.enable = 1;
        xpt_action(&immed_ccb);
        status = immed_ccb.ccb_h.status;
        if (status != CAM_REQ_CMP) {
                xpt_print(periph->path,
                    "targbhenlun - Enable Lun Rejected with status 0x%x\n",
                    status);
                return (status);
        }

        softc->flags |= TARGBH_FLAG_LUN_ENABLED;

        /*
         * Build up a buffer of accept target I/O
         * operations for incoming selections.
         */
        for (i = 0; i < MAX_ACCEPT; i++) {
                struct ccb_accept_tio *atio;

                atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_SCSIBH,
                                                      M_NOWAIT);
                if (atio == NULL) {
                        status = CAM_RESRC_UNAVAIL;
                        break;
                }

                atio->ccb_h.ccb_descr = targbhallocdescr();

                if (atio->ccb_h.ccb_descr == NULL) {
                        free(atio, M_SCSIBH);
                        status = CAM_RESRC_UNAVAIL;
                        break;
                }

                xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
                atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
                atio->ccb_h.cbfcnp = targbhdone;
                xpt_action((union ccb *)atio);
                status = atio->ccb_h.status;
                if (status != CAM_REQ_INPROG) {
                        targbhfreedescr(atio->ccb_h.ccb_descr);
                        free(atio, M_SCSIBH);
                        break;
                }
                ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
                    softc->accept_tio_list;
                softc->accept_tio_list = atio;
        }

        if (i == 0) {
                xpt_print(periph->path,
                    "targbhenlun - Could not allocate accept tio CCBs: status "
                    "= 0x%x\n", status);
                targbhdislun(periph);
                return (CAM_REQ_CMP_ERR);
        }

        /*
         * Build up a buffer of immediate notify CCBs
         * so the SIM can tell us of asynchronous target mode events.
         */
        for (i = 0; i < MAX_ACCEPT; i++) {
                struct ccb_immed_notify *inot;

                inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_SCSIBH,
                                                        M_NOWAIT);
                if (inot == NULL) {
                        status = CAM_RESRC_UNAVAIL;
                        break;
                }

                xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
                inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
                inot->ccb_h.cbfcnp = targbhdone;
                xpt_action((union ccb *)inot);
                status = inot->ccb_h.status;
                if (status != CAM_REQ_INPROG) {
                        free(inot, M_SCSIBH);
                        break;
                }
                SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
                                  periph_links.sle);
        }

        if (i == 0) {
                xpt_print(periph->path,
                    "targbhenlun - Could not allocate immediate notify "
                    "CCBs: status = 0x%x\n", status);
                targbhdislun(periph);
                return (CAM_REQ_CMP_ERR);
        }

        return (CAM_REQ_CMP);
}

static cam_status
targbhdislun(struct cam_periph *periph)
{
        union ccb ccb;
        struct targbh_softc *softc;
        struct ccb_accept_tio* atio;
        struct ccb_hdr *ccb_h;

        softc = (struct targbh_softc *)periph->softc;
        if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) == 0)
                return CAM_REQ_CMP;

        /* XXX Block for Continue I/O completion */

        /* Kill off all ACCEPT and IMMEDIATE CCBs */
        while ((atio = softc->accept_tio_list) != NULL) {
                softc->accept_tio_list =
                    ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
                xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
                ccb.cab.ccb_h.func_code = XPT_ABORT;
                ccb.cab.abort_ccb = (union ccb *)atio;
                xpt_action(&ccb);
        }

        while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
                SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
                xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
                ccb.cab.ccb_h.func_code = XPT_ABORT;
                ccb.cab.abort_ccb = (union ccb *)ccb_h;
                xpt_action(&ccb);
        }

        /*
         * Disable this lun.
         */
        xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
        ccb.cel.ccb_h.func_code = XPT_EN_LUN;
        ccb.cel.enable = 0;
        xpt_action(&ccb);

        if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
                printf("targbhdislun - Disabling lun on controller failed "
                       "with status 0x%x\n", ccb.cel.ccb_h.status);
        else
                softc->flags &= ~TARGBH_FLAG_LUN_ENABLED;
        return (ccb.cel.ccb_h.status);
}

static cam_status
targbhctor(struct cam_periph *periph, void *arg)
{
        struct targbh_softc *softc;

        /* Allocate our per-instance private storage */
        softc = (struct targbh_softc *)malloc(sizeof(*softc),
                                              M_SCSIBH, M_NOWAIT);
        if (softc == NULL) {
                printf("targbhctor: unable to malloc softc\n");
                return (CAM_REQ_CMP_ERR);
        }

        bzero(softc, sizeof(*softc));
        TAILQ_INIT(&softc->pending_queue);
        TAILQ_INIT(&softc->work_queue);
        softc->accept_tio_list = NULL;
        SLIST_INIT(&softc->immed_notify_slist);
        softc->state = TARGBH_STATE_NORMAL;
        periph->softc = softc;
        softc->init_level++;

        return (targbhenlun(periph));
}

static void
targbhdtor(struct cam_periph *periph)
{
        struct targbh_softc *softc;

        softc = (struct targbh_softc *)periph->softc;

        softc->state = TARGBH_STATE_TEARDOWN;

        targbhdislun(periph);

        switch (softc->init_level) {
        case 0:
                panic("targbhdtor - impossible init level");
        case 1:
                /* FALLTHROUGH */
        default:
                /* XXX Wait for callback of targbhdislun() */
                msleep(softc, periph->sim->mtx, PRIBIO, "targbh", hz/2);
                free(softc, M_SCSIBH);
                break;
        }
}

/* Turn queued accept TIOs into continue target I/O (CTIO) responses. */
static void
targbhstart(struct cam_periph *periph, union ccb *start_ccb)
{
        struct targbh_softc *softc;
        struct ccb_hdr *ccbh;
        struct ccb_accept_tio *atio;
        struct targbh_cmd_desc *desc;
        struct ccb_scsiio *csio;
        ccb_flags flags;

        softc = (struct targbh_softc *)periph->softc;

        ccbh = TAILQ_FIRST(&softc->work_queue);
        if (periph->immediate_priority <= periph->pinfo.priority) {
                start_ccb->ccb_h.ccb_type = TARGBH_CCB_WAITING;
                SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
                                  periph_links.sle);
                periph->immediate_priority = CAM_PRIORITY_NONE;
                wakeup(&periph->ccb_list);
        } else if (ccbh == NULL) {
                xpt_release_ccb(start_ccb);
        } else {
                TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
                TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh,
                                  periph_links.tqe);
                atio = (struct ccb_accept_tio*)ccbh;
                desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr;

                /* Is this a tagged request? */
                flags = atio->ccb_h.flags &
                    (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

                csio = &start_ccb->csio;
                /*
                 * If we are done with the transaction, tell the
                 * controller to send status and perform a CMD_CMPLT.
                 * If we have associated sense data, see if we can
                 * send that too.
                 */
                if (desc->data_resid == desc->data_increment) {
                        flags |= CAM_SEND_STATUS;
                        if (atio->sense_len) {
                                csio->sense_len = atio->sense_len;
                                csio->sense_data = atio->sense_data;
                                flags |= CAM_SEND_SENSE;
                        }
                }

                cam_fill_ctio(csio,
                              /*retries*/2,
                              targbhdone,
                              flags,
                              (flags & CAM_TAG_ACTION_VALID) ?
                                  MSG_SIMPLE_Q_TAG : 0,
                              atio->tag_id,
                              atio->init_id,
                              desc->status,
                              /*data_ptr*/desc->data_increment == 0 ?
                                  NULL : desc->data,
                              /*dxfer_len*/desc->data_increment,
                              /*timeout*/desc->timeout);

                /* Override our wildcard attachment */
                start_ccb->ccb_h.target_id = atio->ccb_h.target_id;
                start_ccb->ccb_h.target_lun = atio->ccb_h.target_lun;

                start_ccb->ccb_h.ccb_type = TARGBH_CCB_WORKQ;
                start_ccb->ccb_h.ccb_atio = atio;
                CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
                          ("Sending a CTIO\n"));
                xpt_action(start_ccb);
                /*
                 * If the queue was frozen waiting for the response
                 * to this ATIO (for instance disconnection was disallowed),
                 * then release it now that our response has been queued.
                 */
                if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
                        cam_release_devq(periph->path,
                                         /*relsim_flags*/0,
                                         /*reduction*/0,
                                         /*timeout*/0,
                                         /*getcount_only*/0);
                        atio->ccb_h.status &= ~CAM_DEV_QFRZN;
                }
                ccbh = TAILQ_FIRST(&softc->work_queue);
        }
        if (ccbh != NULL)
                xpt_schedule(periph, /*priority*/1);
}

/* Completion handler for the ATIO, CTIO, and immediate notify CCBs we issue. */
static void
targbhdone(struct cam_periph *periph, union ccb *done_ccb)
{
        struct targbh_softc *softc;

        softc = (struct targbh_softc *)periph->softc;

        if (done_ccb->ccb_h.ccb_type == TARGBH_CCB_WAITING) {
                /* Caller will release the CCB */
                wakeup(&done_ccb->ccb_h.cbfcnp);
                return;
        }

        switch (done_ccb->ccb_h.func_code) {
        case XPT_ACCEPT_TARGET_IO:
        {
                struct ccb_accept_tio *atio;
                struct targbh_cmd_desc *descr;
                u_int8_t *cdb;
                int priority;

                atio = &done_ccb->atio;
                descr = (struct targbh_cmd_desc*)atio->ccb_h.ccb_descr;
                cdb = atio->cdb_io.cdb_bytes;
                if (softc->state == TARGBH_STATE_TEARDOWN
                 || atio->ccb_h.status == CAM_REQ_ABORTED) {
                        targbhfreedescr(descr);
                        xpt_free_ccb(done_ccb);
                        return;
                }

                /*
                 * Determine the type of incoming command and
                 * setup our buffer for a response.
                 */
                switch (cdb[0]) {
                case INQUIRY:
                {
                        struct scsi_inquiry *inq;

                        inq = (struct scsi_inquiry *)cdb;
                        CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
                                  ("Saw an inquiry!\n"));
                        /*
                         * Validate the command.  We don't
                         * support any VPD pages, so complain
                         * if EVPD is set.
                         */
                        if ((inq->byte2 & SI_EVPD) != 0
                         || inq->page_code != 0) {
                                atio->ccb_h.flags &= ~CAM_DIR_MASK;
                                atio->ccb_h.flags |= CAM_DIR_NONE;
                                /*
                                 * This needs to have other than a
                                 * no_lun_sense_data response.
                                 */
                                atio->sense_data = no_lun_sense_data;
                                atio->sense_len = sizeof(no_lun_sense_data);
                                descr->data_resid = 0;
                                descr->data_increment = 0;
                                descr->status = SCSI_STATUS_CHECK_COND;
                                break;
                        }
                        /*
                         * Direction is always relative
                         * to the initiator.
                         */
                        atio->ccb_h.flags &= ~CAM_DIR_MASK;
                        atio->ccb_h.flags |= CAM_DIR_IN;
                        descr->data = &no_lun_inq_data;
                        descr->data_resid = MIN(sizeof(no_lun_inq_data),
                                                SCSI_CDB6_LEN(inq->length));
                        descr->data_increment = descr->data_resid;
                        descr->timeout = 5 * 1000;
                        descr->status = SCSI_STATUS_OK;
                        break;
                }
                case REQUEST_SENSE:
                {
                        struct scsi_request_sense *rsense;

                        rsense = (struct scsi_request_sense *)cdb;
                        /* Refer to static sense data */
                        atio->ccb_h.flags &= ~CAM_DIR_MASK;
                        atio->ccb_h.flags |= CAM_DIR_IN;
                        descr->data = &no_lun_sense_data;
                        descr->data_resid = request_sense_size;
                        descr->data_resid = MIN(descr->data_resid,
                                                SCSI_CDB6_LEN(rsense->length));
                        descr->data_increment = descr->data_resid;
                        descr->timeout = 5 * 1000;
                        descr->status = SCSI_STATUS_OK;
                        break;
                }
                default:
                        /* Constant CA, tell initiator */
                        /* Direction is always relative to the initiator */
                        atio->ccb_h.flags &= ~CAM_DIR_MASK;
                        atio->ccb_h.flags |= CAM_DIR_NONE;
                        atio->sense_data = no_lun_sense_data;
                        atio->sense_len = sizeof (no_lun_sense_data);
                        descr->data_resid = 0;
                        descr->data_increment = 0;
                        descr->timeout = 5 * 1000;
                        descr->status = SCSI_STATUS_CHECK_COND;
                        break;
                }

                /* Queue us up to receive a Continue Target I/O ccb. */
                if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) {
                        TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
                                          periph_links.tqe);
                        priority = 0;
                } else {
                        TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
                                          periph_links.tqe);
                        priority = 1;
                }
                xpt_schedule(periph, priority);
                break;
        }
        case XPT_CONT_TARGET_IO:
        {
                struct ccb_accept_tio *atio;
                struct targbh_cmd_desc *desc;

                CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
                          ("Received completed CTIO\n"));
                atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
                desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr;

                TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h,
                             periph_links.tqe);

                /*
                 * We could check for CAM_SENT_SENSE being set here,
                 * but since we're not maintaining any CA/UA state,
                 * there's no point.
                 */
                atio->sense_len = 0;
                done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
                done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;

                /*
                 * Any errors will not change the data we return,
                 * so make sure the queue is not left frozen.
                 * XXX - At some point there may be errors that
                 *       leave us in a connected state with the
                 *       initiator...
                 */
                if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
                        printf("Releasing Queue\n");
                        cam_release_devq(done_ccb->ccb_h.path,
                                         /*relsim_flags*/0,
                                         /*reduction*/0,
                                         /*timeout*/0,
                                         /*getcount_only*/0);
                        done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
                }
                desc->data_resid -= desc->data_increment;
                xpt_release_ccb(done_ccb);
                if (softc->state != TARGBH_STATE_TEARDOWN) {
                        /*
                         * Send the original accept TIO back to the
                         * controller to handle more work.
                         */
                        CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
                                  ("Returning ATIO to target\n"));
                        /* Restore wildcards */
                        atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
                        atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
                        xpt_action((union ccb *)atio);
                        break;
                } else {
                        targbhfreedescr(desc);
                        free(atio, M_SCSIBH);
                }
                break;
        }
        case XPT_IMMED_NOTIFY:
        {
                int frozen;

                frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
                if (softc->state == TARGBH_STATE_TEARDOWN
                 || done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
                        printf("Freed an immediate notify\n");
                        xpt_free_ccb(done_ccb);
                } else {
                        /* Requeue for another immediate event */
                        xpt_action(done_ccb);
                }
                if (frozen != 0)
                        cam_release_devq(periph->path,
                                         /*relsim_flags*/0,
                                         /*opening reduction*/0,
                                         /*timeout*/0,
                                         /*getcount_only*/0);
                break;
        }
        default:
                panic("targbhdone: Unexpected ccb opcode");
                break;
        }
}

#ifdef NOTYET
static int
targbherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
        return 0;
}
#endif

static struct targbh_cmd_desc*
targbhallocdescr()
{
        struct targbh_cmd_desc* descr;

        /* Allocate the targbh_descr structure */
        descr = (struct targbh_cmd_desc *)malloc(sizeof(*descr),
                                                 M_SCSIBH, M_NOWAIT);
        if (descr == NULL)
                return (NULL);

        bzero(descr, sizeof(*descr));

        /* Allocate buffer backing store */
        descr->backing_store = malloc(MAX_BUF_SIZE, M_SCSIBH, M_NOWAIT);
        if (descr->backing_store == NULL) {
                free(descr, M_SCSIBH);
                return (NULL);
        }
        descr->max_size = MAX_BUF_SIZE;
        return (descr);
}

static void
targbhfreedescr(struct targbh_cmd_desc *descr)
{
        free(descr->backing_store, M_SCSIBH);
        free(descr, M_SCSIBH);
}