/*-
 * Implementation of the Target Mode 'Black Hole device' for CAM.
 *
 * Copyright (c) 1999 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

static MALLOC_DEFINE(M_SCSIBH, "SCSI bh", "SCSI blackhole buffers");

typedef enum {
	TARGBH_STATE_NORMAL,
	TARGBH_STATE_EXCEPTION,
	TARGBH_STATE_TEARDOWN
} targbh_state;

typedef enum {
	TARGBH_FLAG_NONE	= 0x00,
	TARGBH_FLAG_LUN_ENABLED	= 0x01
} targbh_flags;

typedef enum {
	TARGBH_CCB_WORKQ,
	TARGBH_CCB_WAITING
} targbh_ccb_types;

#define MAX_ACCEPT	8
#define MAX_IMMEDIATE	16
#define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */

/* Offsets into our private CCB area for storing accept information */
#define ccb_type	ppriv_field0
#define ccb_descr	ppriv_ptr1

/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
#define ccb_atio	ppriv_ptr1

TAILQ_HEAD(ccb_queue, ccb_hdr);

struct targbh_softc {
	struct		ccb_queue pending_queue;
	struct		ccb_queue work_queue;
	struct		ccb_queue unknown_atio_queue;
	struct		devstat device_stats;
	targbh_state	state;
	targbh_flags	flags;
	u_int		init_level;
	u_int		inq_data_len;
	struct		ccb_accept_tio *accept_tio_list;
	struct		ccb_hdr_slist immed_notify_slist;
};

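/*
 * Per-command descriptor attached to each accepted target I/O.  It records
 * how much data remains to be returned, where that data lives, and the
 * SCSI status to report to the initiator when the transfer completes.
 */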
struct targbh_cmd_desc {
	struct	  ccb_accept_tio *atio_link;
	u_int	  data_resid;	/* How much left to transfer */
	u_int	  data_increment;/* Amount to send before next disconnect */
	void	 *data;		/* The data. Can be from backing_store or not */
	void	 *backing_store;/* Backing store allocated for this descriptor */
	u_int	  max_size;	/* Size of backing_store */
	u_int32_t timeout;
	u_int8_t  status;	/* Status to return to initiator */
};

static struct scsi_inquiry_data no_lun_inq_data =
{
	T_NODEVICE | (SID_QUAL_BAD_LU << 5), 0,
	/* version */2, /* format version */2
};

static struct scsi_sense_data_fixed no_lun_sense_data =
{
	SSD_CURRENT_ERROR|SSD_ERRCODE_VALID,
	0,
	SSD_KEY_NOT_READY,
	{ 0, 0, 0, 0 },
	/*extra_len*/offsetof(struct scsi_sense_data_fixed, fru)
		   - offsetof(struct scsi_sense_data_fixed, extra_len),
	{ 0, 0, 0, 0 },
	/* Logical Unit Not Supported */
	/*ASC*/0x25, /*ASCQ*/0
};

static const int request_sense_size = offsetof(struct scsi_sense_data_fixed, fru);

static periph_init_t	targbhinit;
static void		targbhasync(void *callback_arg, u_int32_t code,
				    struct cam_path *path, void *arg);
static cam_status	targbhenlun(struct cam_periph *periph);
static cam_status	targbhdislun(struct cam_periph *periph);
static periph_ctor_t	targbhctor;
static periph_dtor_t	targbhdtor;
static periph_start_t	targbhstart;
static void		targbhdone(struct cam_periph *periph,
				   union ccb *done_ccb);
#ifdef NOTYET
static int		targbherror(union ccb *ccb, u_int32_t cam_flags,
				    u_int32_t sense_flags);
#endif
static struct targbh_cmd_desc*	targbhallocdescr(void);
static void		targbhfreedescr(struct targbh_cmd_desc *buf);

static struct periph_driver targbhdriver =
{
	targbhinit, "targbh",
	TAILQ_HEAD_INITIALIZER(targbhdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(targbh, targbhdriver);

static void
targbhinit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new path registered".
	 */
	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED,
				    targbhasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("targbh: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	}
}

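/*
 * Async event handler.  When a new bus is registered, attach a wildcard
 * (any target, any LUN) instance of this driver to it, provided the
 * controller advertises processor-mode target support.  When a bus goes
 * away, invalidate the instance that was attached to it.
 */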
static void
targbhasync(void *callback_arg, u_int32_t code,
	    struct cam_path *path, void *arg)
{
	struct cam_path *new_path;
	struct ccb_pathinq *cpi;
	path_id_t bus_path_id;
	cam_status status;

	cpi = (struct ccb_pathinq *)arg;
	if (code == AC_PATH_REGISTERED)
		bus_path_id = cpi->ccb_h.path_id;
	else
		bus_path_id = xpt_path_path_id(path);
	/*
	 * Allocate a peripheral instance for
	 * this target instance.
	 */
	status = xpt_create_path(&new_path, NULL,
				 bus_path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		printf("targbhasync: Unable to create path "
		       "due to status 0x%x\n", status);
		return;
	}

	switch (code) {
	case AC_PATH_REGISTERED:
	{
		/* Only attach to controllers that support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0)
			break;

		status = cam_periph_alloc(targbhctor, NULL, targbhdtor,
					  targbhstart,
					  "targbh", CAM_PERIPH_BIO,
					  new_path, targbhasync,
					  AC_PATH_REGISTERED,
					  cpi);
		break;
	}
	case AC_PATH_DEREGISTERED:
	{
		struct cam_periph *periph;

		if ((periph = cam_periph_find(new_path, "targbh")) != NULL)
			cam_periph_invalidate(periph);
		break;
	}
	default:
		break;
	}
	xpt_free_path(new_path);
}

/* Attempt to enable our lun */
static cam_status
targbhenlun(struct cam_periph *periph)
{
	union ccb immed_ccb;
	struct targbh_softc *softc;
	cam_status status;
	int i;

	softc = (struct targbh_softc *)periph->softc;

	if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) != 0)
		return (CAM_REQ_CMP);

	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	immed_ccb.ccb_h.func_code = XPT_EN_LUN;

	/* Don't need support for any vendor specific commands */
	immed_ccb.cel.grp6_len = 0;
	immed_ccb.cel.grp7_len = 0;
	immed_ccb.cel.enable = 1;
	xpt_action(&immed_ccb);
	status = immed_ccb.ccb_h.status;
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path,
		    "targbhenlun - Enable Lun Rejected with status 0x%x\n",
		    status);
		return (status);
	}

	softc->flags |= TARGBH_FLAG_LUN_ENABLED;

	/*
	 * Build up a buffer of accept target I/O
	 * operations for incoming selections.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_accept_tio *atio;

		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_SCSIBH,
						      M_NOWAIT);
		if (atio == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		atio->ccb_h.ccb_descr = targbhallocdescr();

		if (atio->ccb_h.ccb_descr == NULL) {
			free(atio, M_SCSIBH);
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&atio->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.cbfcnp = targbhdone;
		((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
		    softc->accept_tio_list;
		softc->accept_tio_list = atio;
		xpt_action((union ccb *)atio);
		status = atio->ccb_h.status;
		if (status != CAM_REQ_INPROG)
			break;
	}

	if (i == 0) {
		xpt_print(periph->path,
		    "targbhenlun - Could not allocate accept tio CCBs: status "
		    "= 0x%x\n", status);
		targbhdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Build up a buffer of immediate notify CCBs
	 * so the SIM can tell us of asynchronous target mode events.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_immediate_notify *inot;

		inot = (struct ccb_immediate_notify*)malloc(sizeof(*inot),
			    M_SCSIBH, M_NOWAIT);

		if (inot == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&inot->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		inot->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		inot->ccb_h.cbfcnp = targbhdone;
		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
				  periph_links.sle);
		xpt_action((union ccb *)inot);
		status = inot->ccb_h.status;
		if (status != CAM_REQ_INPROG)
			break;
	}

	if (i == 0) {
		xpt_print(periph->path,
		    "targbhenlun - Could not allocate immediate notify "
		    "CCBs: status = 0x%x\n", status);
		targbhdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	return (CAM_REQ_CMP);
}

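/*
 * Disable our lun.  Abort any accept TIO and immediate notify CCBs still
 * queued with the SIM, then issue an XPT_EN_LUN request with enable set
 * to zero.
 */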
static cam_status
targbhdislun(struct cam_periph *periph)
{
	union ccb ccb;
	struct targbh_softc *softc;
	struct ccb_accept_tio *atio;
	struct ccb_hdr *ccb_h;

	softc = (struct targbh_softc *)periph->softc;
	if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) == 0)
		return CAM_REQ_CMP;

	/* XXX Block for Continue I/O completion */

	/* Kill off all ACCEPT and IMMEDIATE CCBs */
	while ((atio = softc->accept_tio_list) != NULL) {

		softc->accept_tio_list =
		    ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)atio;
		xpt_action(&ccb);
	}

	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)ccb_h;
		xpt_action(&ccb);
	}

	/*
	 * Disable this lun.
	 */
	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.enable = 0;
	xpt_action(&ccb);

	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
		printf("targbhdislun - Disabling lun on controller failed "
		       "with status 0x%x\n", ccb.cel.ccb_h.status);
	else
		softc->flags &= ~TARGBH_FLAG_LUN_ENABLED;
	return (ccb.cel.ccb_h.status);
}

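/*
 * Peripheral constructor.  Allocate and initialize the per-instance softc
 * and enable the wildcard lun; the instance is invalidated immediately if
 * the lun cannot be enabled.
 */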
static cam_status
targbhctor(struct cam_periph *periph, void *arg)
{
	struct targbh_softc *softc;

	/* Allocate our per-instance private storage */
	softc = (struct targbh_softc *)malloc(sizeof(*softc),
					      M_SCSIBH, M_NOWAIT);
	if (softc == NULL) {
		printf("targctor: unable to malloc softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	TAILQ_INIT(&softc->pending_queue);
	TAILQ_INIT(&softc->work_queue);
	softc->accept_tio_list = NULL;
	SLIST_INIT(&softc->immed_notify_slist);
	softc->state = TARGBH_STATE_NORMAL;
	periph->softc = softc;
	softc->init_level++;

	if (targbhenlun(periph) != CAM_REQ_CMP)
		cam_periph_invalidate(periph);
	return (CAM_REQ_CMP);
}

static void
targbhdtor(struct cam_periph *periph)
{
	struct targbh_softc *softc;

	softc = (struct targbh_softc *)periph->softc;

	softc->state = TARGBH_STATE_TEARDOWN;

	targbhdislun(periph);

	switch (softc->init_level) {
	case 0:
		panic("targdtor - impossible init level");
	case 1:
		/* FALLTHROUGH */
	default:
		/* XXX Wait for callback of targbhdislun() */
		msleep(softc, periph->sim->mtx, PRIBIO, "targbh", hz/2);
		free(softc, M_SCSIBH);
		break;
	}
}

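/*
 * Start routine.  Take the next accepted command off the work queue and
 * issue a Continue Target I/O for it, asking the controller to send status
 * (and any queued sense data) once the final data increment has moved.
 */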
static void
targbhstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct targbh_softc *softc;
	struct ccb_hdr *ccbh;
	struct ccb_accept_tio *atio;
	struct targbh_cmd_desc *desc;
	struct ccb_scsiio *csio;
	ccb_flags flags;

	softc = (struct targbh_softc *)periph->softc;

	ccbh = TAILQ_FIRST(&softc->work_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		start_ccb->ccb_h.ccb_type = TARGBH_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		wakeup(&periph->ccb_list);
	} else if (ccbh == NULL) {
		xpt_release_ccb(start_ccb);
	} else {
		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
		TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh,
				  periph_links.tqe);
		atio = (struct ccb_accept_tio*)ccbh;
		desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr;

		/* Is this a tagged request? */
		flags = atio->ccb_h.flags &
		    (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		csio = &start_ccb->csio;
		/*
		 * If we are done with the transaction, tell the
		 * controller to send status and perform a CMD_CMPLT.
		 * If we have associated sense data, see if we can
		 * send that too.
		 */
		if (desc->data_resid == desc->data_increment) {
			flags |= CAM_SEND_STATUS;
			if (atio->sense_len) {
				csio->sense_len = atio->sense_len;
				csio->sense_data = atio->sense_data;
				flags |= CAM_SEND_SENSE;
			}

		}

		cam_fill_ctio(csio,
			      /*retries*/2,
			      targbhdone,
			      flags,
			      (flags & CAM_TAG_ACTION_VALID)?
				MSG_SIMPLE_Q_TAG : 0,
			      atio->tag_id,
			      atio->init_id,
			      desc->status,
			      /*data_ptr*/desc->data_increment == 0
					  ? NULL : desc->data,
			      /*dxfer_len*/desc->data_increment,
			      /*timeout*/desc->timeout);

		/* Override our wildcard attachment */
		start_ccb->ccb_h.target_id = atio->ccb_h.target_id;
		start_ccb->ccb_h.target_lun = atio->ccb_h.target_lun;

		start_ccb->ccb_h.ccb_type = TARGBH_CCB_WORKQ;
		start_ccb->ccb_h.ccb_atio = atio;
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Sending a CTIO\n"));
		xpt_action(start_ccb);
		/*
		 * If the queue was frozen waiting for the response
		 * to this ATIO (for instance disconnection was disallowed),
		 * then release it now that our response has been queued.
		 */
		if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			cam_release_devq(periph->path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
			atio->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
		ccbh = TAILQ_FIRST(&softc->work_queue);
	}
	if (ccbh != NULL)
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
}

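/*
 * Completion handler for every CCB this driver issues.  Newly accepted
 * commands get a canned INQUIRY, REQUEST SENSE, or CHECK CONDITION
 * response queued for targbhstart(); completed CTIOs recycle their
 * originating ATIO back to the controller; immediate notifies are simply
 * requeued.
 */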
static void
targbhdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct targbh_softc *softc;

	softc = (struct targbh_softc *)periph->softc;

	if (done_ccb->ccb_h.ccb_type == TARGBH_CCB_WAITING) {
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}

	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targbh_cmd_desc *descr;
		u_int8_t *cdb;
		int priority;

		atio = &done_ccb->atio;
		descr = (struct targbh_cmd_desc*)atio->ccb_h.ccb_descr;
		cdb = atio->cdb_io.cdb_bytes;
		if (softc->state == TARGBH_STATE_TEARDOWN
		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
			targbhfreedescr(descr);
			xpt_free_ccb(done_ccb);
			return;
		}

		/*
		 * Determine the type of incoming command and
		 * setup our buffer for a response.
		 */
		switch (cdb[0]) {
		case INQUIRY:
		{
			struct scsi_inquiry *inq;

			inq = (struct scsi_inquiry *)cdb;
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("Saw an inquiry!\n"));
			/*
			 * Validate the command.  We don't
			 * support any VPD pages, so complain
			 * if EVPD is set.
			 */
			if ((inq->byte2 & SI_EVPD) != 0
			 || inq->page_code != 0) {
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				atio->ccb_h.flags |= CAM_DIR_NONE;
				/*
				 * This needs to have other than a
				 * no_lun_sense_data response.
				 */
				bcopy(&no_lun_sense_data, &atio->sense_data,
				      min(sizeof(no_lun_sense_data),
					  sizeof(atio->sense_data)));
				atio->sense_len = sizeof(no_lun_sense_data);
				descr->data_resid = 0;
				descr->data_increment = 0;
				descr->status = SCSI_STATUS_CHECK_COND;
				break;
			}
			/*
			 * Direction is always relative
			 * to the initiator.
			 */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_IN;
			descr->data = &no_lun_inq_data;
			descr->data_resid = MIN(sizeof(no_lun_inq_data),
						scsi_2btoul(inq->length));
			descr->data_increment = descr->data_resid;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_OK;
			break;
		}
		case REQUEST_SENSE:
		{
			struct scsi_request_sense *rsense;

			rsense = (struct scsi_request_sense *)cdb;
			/* Refer to static sense data */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_IN;
			descr->data = &no_lun_sense_data;
			descr->data_resid = request_sense_size;
			descr->data_resid = MIN(descr->data_resid,
						SCSI_CDB6_LEN(rsense->length));
			descr->data_increment = descr->data_resid;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_OK;
			break;
		}
		default:
			/* Constant CA, tell initiator */
			/* Direction is always relative to the initiator */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
			bcopy(&no_lun_sense_data, &atio->sense_data,
			      min(sizeof(no_lun_sense_data),
				  sizeof(atio->sense_data)));
			atio->sense_len = sizeof (no_lun_sense_data);
			descr->data_resid = 0;
			descr->data_increment = 0;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_CHECK_COND;
			break;
		}

		/* Queue us up to receive a Continue Target I/O ccb. */
		if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) {
			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			priority = 0;
		} else {
			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			priority = CAM_PRIORITY_NORMAL;
		}
		xpt_schedule(periph, priority);
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targbh_cmd_desc *desc;

		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Received completed CTIO\n"));
		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
		desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr;

		TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h,
			     periph_links.tqe);

		/*
		 * We could check for CAM_SENT_SENSE being set here,
		 * but since we're not maintaining any CA/UA state,
		 * there's no point.
		 */
		atio->sense_len = 0;
		done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;

		/*
		 * Any errors will not change the data we return,
		 * so make sure the queue is not left frozen.
		 * XXX - At some point there may be errors that
		 *       leave us in a connected state with the
		 *       initiator...
		 */
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			printf("Releasing Queue\n");
			cam_release_devq(done_ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
		desc->data_resid -= desc->data_increment;
		xpt_release_ccb(done_ccb);
		if (softc->state != TARGBH_STATE_TEARDOWN) {

			/*
			 * Send the original accept TIO back to the
			 * controller to handle more work.
			 */
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("Returning ATIO to target\n"));
			/* Restore wildcards */
			atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
			atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
			xpt_action((union ccb *)atio);
			break;
		} else {
			targbhfreedescr(desc);
			free(atio, M_SCSIBH);
		}
		break;
	}
	case XPT_IMMEDIATE_NOTIFY:
	{
		int frozen;

		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
		if (softc->state == TARGBH_STATE_TEARDOWN
		 || done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
			printf("Freed an immediate notify\n");
			xpt_free_ccb(done_ccb);
		} else {
			/* Requeue for another immediate event */
			xpt_action(done_ccb);
		}
		if (frozen != 0)
			cam_release_devq(periph->path,
					 /*relsim_flags*/0,
					 /*opening reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		break;
	}
	default:
		panic("targbhdone: Unexpected ccb opcode");
		break;
	}
}

#ifdef NOTYET
static int
targbherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	return 0;
}
#endif

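/*
 * Allocate a zeroed command descriptor along with a MAX_BUF_SIZE backing
 * store for response data.  targbhfreedescr() releases both.
 */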
static struct targbh_cmd_desc*
targbhallocdescr()
{
	struct targbh_cmd_desc *descr;

	/* Allocate the targbh_cmd_desc structure */
	descr = (struct targbh_cmd_desc *)malloc(sizeof(*descr),
						 M_SCSIBH, M_NOWAIT);
	if (descr == NULL)
		return (NULL);

	bzero(descr, sizeof(*descr));

	/* Allocate buffer backing store */
	descr->backing_store = malloc(MAX_BUF_SIZE, M_SCSIBH, M_NOWAIT);
	if (descr->backing_store == NULL) {
		free(descr, M_SCSIBH);
		return (NULL);
	}
	descr->max_size = MAX_BUF_SIZE;
	return (descr);
}

static void
targbhfreedescr(struct targbh_cmd_desc *descr)
{
	free(descr->backing_store, M_SCSIBH);
	free(descr, M_SCSIBH);
}