/*
 * Implementation of a simple Target Mode SCSI Processor Target driver for CAM.
 *
 * Copyright (c) 1998, 1999 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <stddef.h>	/* For offsetof */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/select.h>	/* For struct selinfo.
*/ 43 #include <sys/uio.h> 44 45 #include <cam/cam.h> 46 #include <cam/cam_ccb.h> 47 #include <cam/cam_extend.h> 48 #include <cam/cam_periph.h> 49 #include <cam/cam_queue.h> 50 #include <cam/cam_xpt_periph.h> 51 #include <cam/cam_debug.h> 52 53 #include <cam/scsi/scsi_all.h> 54 #include <cam/scsi/scsi_pt.h> 55 #include <cam/scsi/scsi_targetio.h> 56 #include <cam/scsi/scsi_message.h> 57 58 typedef enum { 59 TARG_STATE_NORMAL, 60 TARG_STATE_EXCEPTION, 61 TARG_STATE_TEARDOWN 62 } targ_state; 63 64 typedef enum { 65 TARG_FLAG_NONE = 0x00, 66 TARG_FLAG_SEND_EOF = 0x01, 67 TARG_FLAG_RECEIVE_EOF = 0x02, 68 TARG_FLAG_LUN_ENABLED = 0x04 69 } targ_flags; 70 71 typedef enum { 72 TARG_CCB_NONE = 0x00, 73 TARG_CCB_WAITING = 0x01, 74 TARG_CCB_HELDQ = 0x02, 75 TARG_CCB_ABORT_TO_HELDQ = 0x04 76 } targ_ccb_flags; 77 78 #define MAX_ACCEPT 16 79 #define MAX_IMMEDIATE 16 80 #define MAX_BUF_SIZE 256 /* Max inquiry/sense/mode page transfer */ 81 #define MAX_INITIATORS 16 /* XXX More for Fibre-Channel */ 82 83 #define MIN(a, b) ((a > b) ? b : a) 84 85 #define TARG_CONTROL_UNIT 0xffff00ff 86 #define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT) 87 88 #define TARG_TAG_WILDCARD ((u_int)~0) 89 90 /* Offsets into our private CCB area for storing accept information */ 91 #define ccb_flags ppriv_field0 92 #define ccb_descr ppriv_ptr1 93 94 /* We stick a pointer to the originating accept TIO in each continue I/O CCB */ 95 #define ccb_atio ppriv_ptr1 96 97 struct targ_softc { 98 /* CTIOs pending on the controller */ 99 struct ccb_queue pending_queue; 100 101 /* ATIOs awaiting CTIO resources from the XPT */ 102 struct ccb_queue work_queue; 103 104 /* 105 * ATIOs for SEND operations waiting for 'write' 106 * buffer resources from our userland daemon. 107 */ 108 struct ccb_queue snd_ccb_queue; 109 110 /* 111 * ATIOs for RCV operations waiting for 'read' 112 * buffer resources from our userland daemon. 
113 */ 114 struct ccb_queue rcv_ccb_queue; 115 116 /* 117 * ATIOs for commands unknown to the kernel driver. 118 * These are queued for the userland daemon to 119 * consume. 120 */ 121 struct ccb_queue unknown_atio_queue; 122 123 /* 124 * Userland buffers for SEND commands waiting for 125 * SEND ATIOs to be queued by an initiator. 126 */ 127 struct buf_queue_head snd_buf_queue; 128 129 /* 130 * Userland buffers for RCV commands waiting for 131 * RCV ATIOs to be queued by an initiator. 132 */ 133 struct buf_queue_head rcv_buf_queue; 134 struct devstat device_stats; 135 struct selinfo snd_select; 136 struct selinfo rcv_select; 137 targ_state state; 138 targ_flags flags; 139 targ_exception exceptions; 140 u_int init_level; 141 u_int inq_data_len; 142 struct scsi_inquiry_data *inq_data; 143 struct ccb_accept_tio *accept_tio_list; 144 struct ccb_hdr_slist immed_notify_slist; 145 struct initiator_state istate[MAX_INITIATORS]; 146 }; 147 148 struct targ_cmd_desc { 149 struct ccb_accept_tio* atio_link; 150 u_int data_resid; /* How much left to transfer */ 151 u_int data_increment;/* Amount to send before next disconnect */ 152 void* data; /* The data. 
Can be from backing_store or not */ 153 void* backing_store;/* Backing store allocated for this descriptor*/ 154 struct buf *bp; /* Buffer for this transfer */ 155 u_int max_size; /* Size of backing_store */ 156 u_int32_t timeout; 157 u_int8_t status; /* Status to return to initiator */ 158 }; 159 160 static d_open_t targopen; 161 static d_close_t targclose; 162 static d_read_t targread; 163 static d_write_t targwrite; 164 static d_ioctl_t targioctl; 165 static d_poll_t targpoll; 166 static d_strategy_t targstrategy; 167 168 #define TARG_CDEV_MAJOR 65 169 static struct cdevsw targ_cdevsw = { 170 /* open */ targopen, 171 /* close */ targclose, 172 /* read */ targread, 173 /* write */ targwrite, 174 /* ioctl */ targioctl, 175 /* poll */ targpoll, 176 /* mmap */ nommap, 177 /* strategy */ targstrategy, 178 /* name */ "targ", 179 /* maj */ TARG_CDEV_MAJOR, 180 /* dump */ nodump, 181 /* psize */ nopsize, 182 /* flags */ 0, 183 /* bmaj */ -1 184 }; 185 186 static int targsendccb(struct cam_periph *periph, union ccb *ccb, 187 union ccb *inccb); 188 static periph_init_t targinit; 189 static void targasync(void *callback_arg, u_int32_t code, 190 struct cam_path *path, void *arg); 191 static int targallocinstance(struct ioc_alloc_unit *alloc_unit); 192 static int targfreeinstance(struct ioc_alloc_unit *alloc_unit); 193 static cam_status targenlun(struct cam_periph *periph); 194 static cam_status targdislun(struct cam_periph *periph); 195 static periph_ctor_t targctor; 196 static periph_dtor_t targdtor; 197 static void targrunqueue(struct cam_periph *periph, 198 struct targ_softc *softc); 199 static periph_start_t targstart; 200 static void targdone(struct cam_periph *periph, 201 union ccb *done_ccb); 202 static void targfireexception(struct cam_periph *periph, 203 struct targ_softc *softc); 204 static void targinoterror(struct cam_periph *periph, 205 struct targ_softc *softc, 206 struct ccb_immed_notify *inot); 207 static int targerror(union ccb *ccb, u_int32_t cam_flags, 
				  u_int32_t sense_flags);
static struct targ_cmd_desc*	allocdescr(void);
static void		freedescr(struct targ_cmd_desc *buf);
static void		fill_sense(struct targ_softc *softc,
				   u_int initiator_id, u_int error_code,
				   u_int sense_key, u_int asc, u_int ascq);
static void		copy_sense(struct targ_softc *softc,
				   struct ccb_scsiio *csio);
static void	set_unit_attention_cond(struct cam_periph *periph,
					u_int initiator_id, ua_types ua);
static void	set_contingent_allegiance_cond(struct cam_periph *periph,
					       u_int initiator_id, ca_types ca);
static void	abort_pending_transactions(struct cam_periph *periph,
					   u_int initiator_id, u_int tag_id,
					   int errno, int to_held_queue);

static struct periph_driver targdriver =
{
	targinit, "targ",
	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, targdriver);

/* Maps minor numbers to the cam_periph instances we attach to. */
static struct extend_array *targperiphs;

/* One-time driver initialization: allocate the unit map and register devsw. */
static void
targinit(void)
{

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	targperiphs = cam_extend_new();
	if (targperiphs == NULL) {
		printf("targ: Failed to alloc extend array!\n");
		return;
	}

	/* If we were successful, register our devsw */
	cdevsw_add(&targ_cdevsw);
}

/* Async event callback registered with the XPT; currently a stub. */
static void
targasync(void *callback_arg, u_int32_t code,
	  struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct targ_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	softc = (struct targ_softc *)periph->softc;
	switch (code) {
	case AC_PATH_DEREGISTERED:
	{
		/* XXX Implement */
		break;
	}
	default:
		break;
	}
}

/* Attempt to enable our lun */
static cam_status
targenlun(struct cam_periph *periph)
{
	union ccb immed_ccb;
	struct targ_softc *softc;
	cam_status status;
	int i;

	softc = (struct targ_softc *)periph->softc;

	/* Already enabled; nothing to do. */
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
		return (CAM_REQ_CMP);

	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
	immed_ccb.ccb_h.func_code = XPT_EN_LUN;

	/* Don't need support for any vendor specific commands */
	immed_ccb.cel.grp6_len = 0;
	immed_ccb.cel.grp7_len = 0;
	immed_ccb.cel.enable = 1;
	xpt_action(&immed_ccb);
	status = immed_ccb.ccb_h.status;
	if (status != CAM_REQ_CMP) {
		xpt_print_path(periph->path);
		printf("targenlun - Enable Lun Rejected for status 0x%x\n",
		       status);
		return (status);
	}

	softc->flags |= TARG_FLAG_LUN_ENABLED;

	/*
	 * Build up a buffer of accept target I/O
	 * operations for incoming selections.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_accept_tio *atio;

		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
						      M_NOWAIT);
		if (atio == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		atio->ccb_h.ccb_descr = allocdescr();

		if (atio->ccb_h.ccb_descr == NULL) {
			free(atio, M_DEVBUF);
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.cbfcnp = targdone;
		xpt_action((union ccb *)atio);
		status = atio->ccb_h.status;
		if (status != CAM_REQ_INPROG) {
			xpt_print_path(periph->path);
			printf("Queue of atio failed\n");
			freedescr(atio->ccb_h.ccb_descr);
			free(atio, M_DEVBUF);
			break;
		}
		/* Link onto the list so targdislun can abort them later. */
		((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
		    softc->accept_tio_list;
		softc->accept_tio_list = atio;
	}

	if (i == 0) {
		xpt_print_path(periph->path);
		printf("targenlun - Could not allocate accept tio CCBs: "
		       "status = 0x%x\n", status);
		targdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Build up a buffer of immediate notify CCBs
	 * so the SIM can tell us of asynchronous target mode events.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_immed_notify *inot;

		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
							M_NOWAIT);

		if (inot == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		inot->ccb_h.cbfcnp = targdone;
		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
				  periph_links.sle);
		xpt_action((union ccb *)inot);
	}

	if (i == 0) {
		xpt_print_path(periph->path);
		printf("targenlun - Could not allocate immediate notify CCBs: "
		       "status = 0x%x\n", status);
		targdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	return (CAM_REQ_CMP);
}

/* Abort outstanding accept/notify CCBs and disable our lun. */
static cam_status
targdislun(struct cam_periph *periph)
{
	union ccb ccb;
	struct targ_softc *softc;
	struct ccb_accept_tio* atio;
	struct ccb_hdr *ccb_h;

	softc = (struct targ_softc *)periph->softc;
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
		return CAM_REQ_CMP;

	/* XXX Block for Continue I/O completion */

	/* Kill off all ACCEPT and IMMEDIATE CCBs */
	while ((atio = softc->accept_tio_list) != NULL) {

		softc->accept_tio_list =
		    ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)atio;
		xpt_action(&ccb);
	}

	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)ccb_h;
		xpt_action(&ccb);
	}

	/*
	 * Disable this lun.
	 */
	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.enable = 0;
	xpt_action(&ccb);

	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
		printf("targdislun - Disabling lun on controller failed "
		       "with status 0x%x\n", ccb.cel.ccb_h.status);
	else
		softc->flags &= ~TARG_FLAG_LUN_ENABLED;
	return (ccb.cel.ccb_h.status);
}

/* Peripheral constructor: allocate and initialize per-instance state. */
static cam_status
targctor(struct cam_periph *periph, void *arg)
{
	struct ccb_pathinq *cpi;
	struct targ_softc *softc;
	int i;

	cpi = (struct ccb_pathinq *)arg;

	/* Allocate our per-instance private storage */
	softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
	if (softc == NULL) {
		printf("targctor: unable to malloc softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	TAILQ_INIT(&softc->pending_queue);
	TAILQ_INIT(&softc->work_queue);
	TAILQ_INIT(&softc->snd_ccb_queue);
	TAILQ_INIT(&softc->rcv_ccb_queue);
	TAILQ_INIT(&softc->unknown_atio_queue);
	bufq_init(&softc->snd_buf_queue);
	bufq_init(&softc->rcv_buf_queue);
	softc->accept_tio_list = NULL;
	SLIST_INIT(&softc->immed_notify_slist);
	softc->state = TARG_STATE_NORMAL;
	periph->softc = softc;
	softc->init_level++;

	cam_extend_set(targperiphs, periph->unit_number, periph);

	/*
	 * We start out life with a UA to indicate power-on/reset.
	 */
	for (i = 0; i < MAX_INITIATORS; i++)
		softc->istate[i].pending_ua = UA_POWER_ON;

	/*
	 * Allocate an initial inquiry data buffer.  We might allow the
	 * user to override this later via an ioctl.
474 */ 475 softc->inq_data_len = sizeof(*softc->inq_data); 476 softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT); 477 if (softc->inq_data == NULL) { 478 printf("targctor - Unable to malloc inquiry data\n"); 479 targdtor(periph); 480 return (CAM_RESRC_UNAVAIL); 481 } 482 bzero(softc->inq_data, softc->inq_data_len); 483 softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5); 484 softc->inq_data->version = 2; 485 softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */ 486 softc->inq_data->flags = 487 cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32); 488 softc->inq_data->additional_length = softc->inq_data_len - 4; 489 strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE); 490 strncpy(softc->inq_data->product, "TM-PT ", SID_PRODUCT_SIZE); 491 strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE); 492 softc->init_level++; 493 return (CAM_REQ_CMP); 494 } 495 496 static void 497 targdtor(struct cam_periph *periph) 498 { 499 struct targ_softc *softc; 500 501 softc = (struct targ_softc *)periph->softc; 502 503 softc->state = TARG_STATE_TEARDOWN; 504 505 targdislun(periph); 506 507 cam_extend_release(targperiphs, periph->unit_number); 508 509 switch (softc->init_level) { 510 default: 511 /* FALLTHROUGH */ 512 case 2: 513 free(softc->inq_data, M_DEVBUF); 514 /* FALLTHROUGH */ 515 case 1: 516 free(softc, M_DEVBUF); 517 break; 518 case 0: 519 panic("targdtor - impossible init level");; 520 } 521 } 522 523 static int 524 targopen(dev_t dev, int flags, int fmt, struct proc *p) 525 { 526 struct cam_periph *periph; 527 struct targ_softc *softc; 528 u_int unit; 529 cam_status status; 530 int error; 531 int s; 532 533 unit = minor(dev); 534 535 /* An open of the control device always succeeds */ 536 if (TARG_IS_CONTROL_DEV(unit)) 537 return 0; 538 539 s = splsoftcam(); 540 periph = cam_extend_get(targperiphs, unit); 541 if (periph == NULL) { 542 return (ENXIO); 543 splx(s); 544 } 545 if ((error = cam_periph_lock(periph, 
PRIBIO | PCATCH)) != 0) { 546 splx(s); 547 return (error); 548 } 549 550 softc = (struct targ_softc *)periph->softc; 551 if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) { 552 if (cam_periph_acquire(periph) != CAM_REQ_CMP) { 553 splx(s); 554 cam_periph_unlock(periph); 555 return(ENXIO); 556 } 557 } 558 splx(s); 559 560 status = targenlun(periph); 561 switch (status) { 562 case CAM_REQ_CMP: 563 error = 0; 564 break; 565 case CAM_RESRC_UNAVAIL: 566 error = ENOMEM; 567 break; 568 case CAM_LUN_ALRDY_ENA: 569 error = EADDRINUSE; 570 break; 571 default: 572 error = ENXIO; 573 break; 574 } 575 cam_periph_unlock(periph); 576 return (error); 577 } 578 579 static int 580 targclose(dev_t dev, int flag, int fmt, struct proc *p) 581 { 582 struct cam_periph *periph; 583 struct targ_softc *softc; 584 u_int unit; 585 int s; 586 int error; 587 588 unit = minor(dev); 589 590 /* A close of the control device always succeeds */ 591 if (TARG_IS_CONTROL_DEV(unit)) 592 return 0; 593 594 s = splsoftcam(); 595 periph = cam_extend_get(targperiphs, unit); 596 if (periph == NULL) { 597 splx(s); 598 return (ENXIO); 599 } 600 if ((error = cam_periph_lock(periph, PRIBIO)) != 0) 601 return (error); 602 softc = (struct targ_softc *)periph->softc; 603 splx(s); 604 605 targdislun(periph); 606 607 cam_periph_unlock(periph); 608 cam_periph_release(periph); 609 610 return (0); 611 } 612 613 static int 614 targallocinstance(struct ioc_alloc_unit *alloc_unit) 615 { 616 struct ccb_pathinq cpi; 617 struct cam_path *path; 618 struct cam_periph *periph; 619 cam_status status; 620 int free_path_on_return; 621 int error; 622 623 free_path_on_return = 0; 624 status = xpt_create_path(&path, /*periph*/NULL, 625 alloc_unit->path_id, 626 alloc_unit->target_id, 627 alloc_unit->lun_id); 628 free_path_on_return++; 629 630 if (status != CAM_REQ_CMP) { 631 printf("Couldn't Allocate Path %x\n", status); 632 goto fail; 633 } 634 635 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 636 cpi.ccb_h.func_code = XPT_PATH_INQ; 
637 xpt_action((union ccb *)&cpi); 638 status = cpi.ccb_h.status; 639 640 if (status != CAM_REQ_CMP) { 641 printf("Couldn't CPI %x\n", status); 642 goto fail; 643 } 644 645 /* Can only alloc units on controllers that support target mode */ 646 if ((cpi.target_sprt & PIT_PROCESSOR) == 0) { 647 printf("Controller does not support target mode%x\n", status); 648 status = CAM_PATH_INVALID; 649 goto fail; 650 } 651 652 /* Ensure that we don't already have an instance for this unit. */ 653 if ((periph = cam_periph_find(path, "targ")) != NULL) { 654 status = CAM_LUN_ALRDY_ENA; 655 goto fail; 656 } 657 658 /* 659 * Allocate a peripheral instance for 660 * this target instance. 661 */ 662 status = cam_periph_alloc(targctor, NULL, targdtor, targstart, 663 "targ", CAM_PERIPH_BIO, path, targasync, 664 0, &cpi); 665 666 fail: 667 switch (status) { 668 case CAM_REQ_CMP: 669 { 670 struct cam_periph *periph; 671 672 if ((periph = cam_periph_find(path, "targ")) == NULL) 673 panic("targallocinstance: Succeeded but no periph?"); 674 error = 0; 675 alloc_unit->unit = periph->unit_number; 676 break; 677 } 678 case CAM_RESRC_UNAVAIL: 679 error = ENOMEM; 680 break; 681 case CAM_LUN_ALRDY_ENA: 682 error = EADDRINUSE; 683 break; 684 default: 685 printf("targallocinstance: Unexpected CAM status %x\n", status); 686 /* FALLTHROUGH */ 687 case CAM_PATH_INVALID: 688 error = ENXIO; 689 break; 690 case CAM_PROVIDE_FAIL: 691 error = ENODEV; 692 break; 693 } 694 695 if (free_path_on_return != 0) 696 xpt_free_path(path); 697 698 return (error); 699 } 700 701 static int 702 targfreeinstance(struct ioc_alloc_unit *alloc_unit) 703 { 704 struct cam_path *path; 705 struct cam_periph *periph; 706 struct targ_softc *softc; 707 cam_status status; 708 int free_path_on_return; 709 int error; 710 711 periph = NULL; 712 free_path_on_return = 0; 713 status = xpt_create_path(&path, /*periph*/NULL, 714 alloc_unit->path_id, 715 alloc_unit->target_id, 716 alloc_unit->lun_id); 717 free_path_on_return++; 718 719 if 
(status != CAM_REQ_CMP) 720 goto fail; 721 722 /* Find our instance. */ 723 if ((periph = cam_periph_find(path, "targ")) == NULL) { 724 xpt_print_path(path); 725 status = CAM_PATH_INVALID; 726 goto fail; 727 } 728 729 softc = (struct targ_softc *)periph->softc; 730 731 if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) { 732 status = CAM_BUSY; 733 goto fail; 734 } 735 736 fail: 737 if (free_path_on_return != 0) 738 xpt_free_path(path); 739 740 switch (status) { 741 case CAM_REQ_CMP: 742 if (periph != NULL) 743 cam_periph_invalidate(periph); 744 error = 0; 745 break; 746 case CAM_RESRC_UNAVAIL: 747 error = ENOMEM; 748 break; 749 case CAM_LUN_ALRDY_ENA: 750 error = EADDRINUSE; 751 break; 752 default: 753 printf("targfreeinstance: Unexpected CAM status %x\n", status); 754 /* FALLTHROUGH */ 755 case CAM_PATH_INVALID: 756 error = ENODEV; 757 break; 758 } 759 return (error); 760 } 761 762 static int 763 targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p) 764 { 765 struct cam_periph *periph; 766 struct targ_softc *softc; 767 u_int unit; 768 int error; 769 770 unit = minor(dev); 771 error = 0; 772 if (TARG_IS_CONTROL_DEV(unit)) { 773 switch (cmd) { 774 case TARGCTLIOALLOCUNIT: 775 error = targallocinstance((struct ioc_alloc_unit*)addr); 776 break; 777 case TARGCTLIOFREEUNIT: 778 error = targfreeinstance((struct ioc_alloc_unit*)addr); 779 break; 780 default: 781 error = EINVAL; 782 break; 783 } 784 return (error); 785 } 786 787 periph = cam_extend_get(targperiphs, unit); 788 if (periph == NULL) 789 return (ENXIO); 790 softc = (struct targ_softc *)periph->softc; 791 switch (cmd) { 792 case TARGIOCFETCHEXCEPTION: 793 *((targ_exception *)addr) = softc->exceptions; 794 break; 795 case TARGIOCCLEAREXCEPTION: 796 { 797 targ_exception clear_mask; 798 799 clear_mask = *((targ_exception *)addr); 800 if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) { 801 struct ccb_hdr *ccbh; 802 803 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue); 804 if (ccbh != NULL) { 805 
TAILQ_REMOVE(&softc->unknown_atio_queue, 806 ccbh, periph_links.tqe); 807 /* Requeue the ATIO back to the controller */ 808 xpt_action((union ccb *)ccbh); 809 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue); 810 } 811 if (ccbh != NULL) 812 clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO; 813 } 814 softc->exceptions &= ~clear_mask; 815 if (softc->exceptions == TARG_EXCEPT_NONE 816 && softc->state == TARG_STATE_EXCEPTION) { 817 softc->state = TARG_STATE_NORMAL; 818 targrunqueue(periph, softc); 819 } 820 break; 821 } 822 case TARGIOCFETCHATIO: 823 { 824 struct ccb_hdr *ccbh; 825 826 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue); 827 if (ccbh != NULL) { 828 bcopy(ccbh, addr, sizeof(struct ccb_accept_tio)); 829 } else { 830 error = ENOENT; 831 } 832 break; 833 } 834 case TARGIOCCOMMAND: 835 { 836 union ccb *inccb; 837 union ccb *ccb; 838 839 /* 840 * XXX JGibbs 841 * This code is lifted directly from the pass-thru driver. 842 * Perhaps this should be moved to a library???? 843 */ 844 inccb = (union ccb *)addr; 845 ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority); 846 847 error = targsendccb(periph, ccb, inccb); 848 849 xpt_release_ccb(ccb); 850 851 break; 852 } 853 case TARGIOCGETISTATE: 854 case TARGIOCSETISTATE: 855 { 856 struct ioc_initiator_state *ioc_istate; 857 858 ioc_istate = (struct ioc_initiator_state *)addr; 859 if (ioc_istate->initiator_id > MAX_INITIATORS) { 860 error = EINVAL; 861 break; 862 } 863 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 864 ("GET/SETISTATE for %d\n", ioc_istate->initiator_id)); 865 if (cmd == TARGIOCGETISTATE) { 866 bcopy(&softc->istate[ioc_istate->initiator_id], 867 &ioc_istate->istate, sizeof(ioc_istate->istate)); 868 } else { 869 bcopy(&ioc_istate->istate, 870 &softc->istate[ioc_istate->initiator_id], 871 sizeof(ioc_istate->istate)); 872 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 873 ("pending_ca now %x\n", 874 softc->istate[ioc_istate->initiator_id].pending_ca)); 875 } 876 break; 877 } 878 default: 879 error = ENOTTY; 880 break; 
	}
	return (error);
}

/*
 * XXX JGibbs lifted from pass-thru driver.
 * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
 * should be the CCB that is copied in from the user.
 */
static int
targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
{
	struct targ_softc *softc;
	struct cam_periph_map_info mapinfo;
	int error, need_unmap;
	int s;

	softc = (struct targ_softc *)periph->softc;

	need_unmap = 0;

	/*
	 * There are some fields in the CCB header that need to be
	 * preserved, the rest we get from the user.
	 */
	xpt_merge_ccb(ccb, inccb);

	/*
	 * There's no way for the user to have a completion
	 * function, so we put our own completion function in here.
	 */
	ccb->ccb_h.cbfcnp = targdone;

	/*
	 * We only attempt to map the user memory into kernel space
	 * if they haven't passed in a physical memory pointer,
	 * and if there is actually an I/O operation to perform.
	 * Right now cam_periph_mapmem() only supports SCSI and device
	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
	 * there's actually data to map.  cam_periph_mapmem() will do the
	 * right thing, even if there isn't data to map, but since CCBs
	 * without data are a reasonably common occurrence (e.g. test unit
	 * ready), it will save a few cycles if we check for it here.
	 */
	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
	   && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {

		bzero(&mapinfo, sizeof(mapinfo));

		error = cam_periph_mapmem(ccb, &mapinfo);

		/*
		 * cam_periph_mapmem returned an error, we can't continue.
		 * Return the error to the user.
937 */ 938 if (error) 939 return(error); 940 941 /* 942 * We successfully mapped the memory in, so we need to 943 * unmap it when the transaction is done. 944 */ 945 need_unmap = 1; 946 } 947 948 /* 949 * Once queued on the pending CCB list, this CCB will be protected 950 * by the error recovery handling used for 'buffer I/O' ccbs. Since 951 * we are in a process context here, however, the software interrupt 952 * for this driver may deliver an event invalidating this CCB just 953 * before we queue it. Close this race condition by blocking 954 * software interrupt delivery, checking for any pertinent queued 955 * events, and only then queuing this CCB. 956 */ 957 s = splsoftcam(); 958 if (softc->exceptions == 0) { 959 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) 960 TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h, 961 periph_links.tqe); 962 963 /* 964 * If the user wants us to perform any error recovery, 965 * then honor that request. Otherwise, it's up to the 966 * user to perform any error recovery. 
967 */ 968 error = cam_periph_runccb(ccb, 969 /* error handler */NULL, 970 /* cam_flags */ 0, 971 /* sense_flags */SF_RETRY_UA, 972 &softc->device_stats); 973 974 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) 975 TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h, 976 periph_links.tqe); 977 } else { 978 ccb->ccb_h.status = CAM_UNACKED_EVENT; 979 error = 0; 980 } 981 splx(s); 982 983 if (need_unmap != 0) 984 cam_periph_unmapmem(ccb, &mapinfo); 985 986 ccb->ccb_h.cbfcnp = NULL; 987 ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv; 988 bcopy(ccb, inccb, sizeof(union ccb)); 989 990 return(error); 991 } 992 993 994 static int 995 targpoll(dev_t dev, int poll_events, struct proc *p) 996 { 997 struct cam_periph *periph; 998 struct targ_softc *softc; 999 u_int unit; 1000 int revents; 1001 int s; 1002 1003 unit = minor(dev); 1004 1005 /* ioctl is the only supported operation of the control device */ 1006 if (TARG_IS_CONTROL_DEV(unit)) 1007 return EINVAL; 1008 1009 periph = cam_extend_get(targperiphs, unit); 1010 if (periph == NULL) 1011 return (ENXIO); 1012 softc = (struct targ_softc *)periph->softc; 1013 1014 revents = 0; 1015 s = splcam(); 1016 if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) { 1017 if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL 1018 && bufq_first(&softc->rcv_buf_queue) == NULL) 1019 revents |= poll_events & (POLLOUT | POLLWRNORM); 1020 } 1021 if ((poll_events & (POLLIN | POLLRDNORM)) != 0) { 1022 if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL 1023 && bufq_first(&softc->snd_buf_queue) == NULL) 1024 revents |= poll_events & (POLLIN | POLLRDNORM); 1025 } 1026 1027 if (softc->state != TARG_STATE_NORMAL) 1028 revents |= POLLERR; 1029 1030 if (revents == 0) { 1031 if (poll_events & (POLLOUT | POLLWRNORM)) 1032 selrecord(p, &softc->rcv_select); 1033 if (poll_events & (POLLIN | POLLRDNORM)) 1034 selrecord(p, &softc->snd_select); 1035 } 1036 splx(s); 1037 return (revents); 1038 } 1039 1040 static int 1041 targread(dev_t dev, struct uio *uio, int ioflag) 1042 
{ 1043 u_int unit; 1044 1045 unit = minor(dev); 1046 /* ioctl is the only supported operation of the control device */ 1047 if (TARG_IS_CONTROL_DEV(unit)) 1048 return EINVAL; 1049 1050 if (uio->uio_iovcnt == 0 1051 || uio->uio_iov->iov_len == 0) { 1052 /* EOF */ 1053 struct cam_periph *periph; 1054 struct targ_softc *softc; 1055 int s; 1056 1057 s = splcam(); 1058 periph = cam_extend_get(targperiphs, unit); 1059 if (periph == NULL) 1060 return (ENXIO); 1061 softc = (struct targ_softc *)periph->softc; 1062 softc->flags |= TARG_FLAG_SEND_EOF; 1063 splx(s); 1064 targrunqueue(periph, softc); 1065 return (0); 1066 } 1067 return(physread(dev, uio, ioflag)); 1068 } 1069 1070 static int 1071 targwrite(dev_t dev, struct uio *uio, int ioflag) 1072 { 1073 u_int unit; 1074 1075 unit = minor(dev); 1076 /* ioctl is the only supported operation of the control device */ 1077 if (TARG_IS_CONTROL_DEV(unit)) 1078 return EINVAL; 1079 1080 if (uio->uio_iovcnt == 0 1081 || uio->uio_iov->iov_len == 0) { 1082 /* EOF */ 1083 struct cam_periph *periph; 1084 struct targ_softc *softc; 1085 int s; 1086 1087 s = splcam(); 1088 periph = cam_extend_get(targperiphs, unit); 1089 if (periph == NULL) 1090 return (ENXIO); 1091 softc = (struct targ_softc *)periph->softc; 1092 softc->flags |= TARG_FLAG_RECEIVE_EOF; 1093 splx(s); 1094 targrunqueue(periph, softc); 1095 return (0); 1096 } 1097 return(physwrite(dev, uio, ioflag)); 1098 } 1099 1100 /* 1101 * Actually translate the requested transfer into one the physical driver 1102 * can understand. The transfer is described by a buf and will include 1103 * only one physical transfer. 
1104 */ 1105 static void 1106 targstrategy(struct buf *bp) 1107 { 1108 struct cam_periph *periph; 1109 struct targ_softc *softc; 1110 u_int unit; 1111 int s; 1112 1113 unit = minor(bp->b_dev); 1114 1115 /* ioctl is the only supported operation of the control device */ 1116 if (TARG_IS_CONTROL_DEV(unit)) { 1117 bp->b_error = EINVAL; 1118 goto bad; 1119 } 1120 1121 periph = cam_extend_get(targperiphs, unit); 1122 if (periph == NULL) { 1123 bp->b_error = ENXIO; 1124 goto bad; 1125 } 1126 softc = (struct targ_softc *)periph->softc; 1127 1128 /* 1129 * Mask interrupts so that the device cannot be invalidated until 1130 * after we are in the queue. Otherwise, we might not properly 1131 * clean up one of the buffers. 1132 */ 1133 s = splbio(); 1134 1135 /* 1136 * If there is an exception pending, error out 1137 */ 1138 if (softc->state != TARG_STATE_NORMAL) { 1139 splx(s); 1140 if (softc->state == TARG_STATE_EXCEPTION 1141 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0) 1142 bp->b_error = EBUSY; 1143 else 1144 bp->b_error = ENXIO; 1145 goto bad; 1146 } 1147 1148 /* 1149 * Place it in the queue of buffers available for either 1150 * SEND or RECEIVE commands. 1151 * 1152 */ 1153 bp->b_resid = bp->b_bcount; 1154 if ((bp->b_flags & B_READ) != 0) { 1155 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1156 ("Queued a SEND buffer\n")); 1157 bufq_insert_tail(&softc->snd_buf_queue, bp); 1158 } else { 1159 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1160 ("Queued a RECEIVE buffer\n")); 1161 bufq_insert_tail(&softc->rcv_buf_queue, bp); 1162 } 1163 1164 splx(s); 1165 1166 /* 1167 * Attempt to use the new buffer to service any pending 1168 * target commands. 
1169 */ 1170 targrunqueue(periph, softc); 1171 1172 return; 1173 bad: 1174 bp->b_flags |= B_ERROR; 1175 1176 /* 1177 * Correctly set the buf to indicate a completed xfer 1178 */ 1179 bp->b_resid = bp->b_bcount; 1180 biodone(bp); 1181 } 1182 1183 static void 1184 targrunqueue(struct cam_periph *periph, struct targ_softc *softc) 1185 { 1186 struct ccb_queue *pending_queue; 1187 struct ccb_accept_tio *atio; 1188 struct buf_queue_head *bufq; 1189 struct buf *bp; 1190 struct targ_cmd_desc *desc; 1191 struct ccb_hdr *ccbh; 1192 int s; 1193 1194 s = splbio(); 1195 pending_queue = NULL; 1196 bufq = NULL; 1197 ccbh = NULL; 1198 /* Only run one request at a time to maintain data ordering. */ 1199 if (softc->state != TARG_STATE_NORMAL 1200 || TAILQ_FIRST(&softc->work_queue) != NULL 1201 || TAILQ_FIRST(&softc->pending_queue) != NULL) { 1202 splx(s); 1203 return; 1204 } 1205 1206 if (((bp = bufq_first(&softc->snd_buf_queue)) != NULL 1207 || (softc->flags & TARG_FLAG_SEND_EOF) != 0) 1208 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) { 1209 1210 if (bp == NULL) 1211 softc->flags &= ~TARG_FLAG_SEND_EOF; 1212 else { 1213 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1214 ("De-Queued a SEND buffer %ld\n", 1215 bp->b_bcount)); 1216 } 1217 bufq = &softc->snd_buf_queue; 1218 pending_queue = &softc->snd_ccb_queue; 1219 } else if (((bp = bufq_first(&softc->rcv_buf_queue)) != NULL 1220 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0) 1221 && (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) { 1222 1223 if (bp == NULL) 1224 softc->flags &= ~TARG_FLAG_RECEIVE_EOF; 1225 else { 1226 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1227 ("De-Queued a RECEIVE buffer %ld\n", 1228 bp->b_bcount)); 1229 } 1230 bufq = &softc->rcv_buf_queue; 1231 pending_queue = &softc->rcv_ccb_queue; 1232 } 1233 1234 if (pending_queue != NULL) { 1235 /* Process a request */ 1236 atio = (struct ccb_accept_tio *)ccbh; 1237 TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe); 1238 desc = (struct targ_cmd_desc 
*)atio->ccb_h.ccb_descr; 1239 desc->bp = bp; 1240 if (bp == NULL) { 1241 /* EOF */ 1242 desc->data = NULL; 1243 desc->data_increment = 0; 1244 desc->data_resid = 0; 1245 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1246 atio->ccb_h.flags |= CAM_DIR_NONE; 1247 } else { 1248 bufq_remove(bufq, bp); 1249 desc->data = &bp->b_data[bp->b_bcount - bp->b_resid]; 1250 desc->data_increment = 1251 MIN(desc->data_resid, bp->b_resid); 1252 } 1253 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1254 ("Buffer command: data %x: datacnt %d\n", 1255 (intptr_t)desc->data, desc->data_increment)); 1256 TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h, 1257 periph_links.tqe); 1258 } 1259 if (TAILQ_FIRST(&softc->work_queue) != NULL) { 1260 splx(s); 1261 xpt_schedule(periph, /*XXX priority*/1); 1262 } else 1263 splx(s); 1264 } 1265 1266 static void 1267 targstart(struct cam_periph *periph, union ccb *start_ccb) 1268 { 1269 struct targ_softc *softc; 1270 struct ccb_hdr *ccbh; 1271 struct ccb_accept_tio *atio; 1272 struct targ_cmd_desc *desc; 1273 struct ccb_scsiio *csio; 1274 targ_ccb_flags flags; 1275 int s; 1276 1277 softc = (struct targ_softc *)periph->softc; 1278 1279 s = splbio(); 1280 ccbh = TAILQ_FIRST(&softc->work_queue); 1281 if (periph->immediate_priority <= periph->pinfo.priority) { 1282 start_ccb->ccb_h.ccb_flags = TARG_CCB_WAITING; 1283 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, 1284 periph_links.sle); 1285 periph->immediate_priority = CAM_PRIORITY_NONE; 1286 splx(s); 1287 wakeup(&periph->ccb_list); 1288 } else if (ccbh == NULL) { 1289 splx(s); 1290 xpt_release_ccb(start_ccb); 1291 } else { 1292 TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe); 1293 splx(s); 1294 atio = (struct ccb_accept_tio*)ccbh; 1295 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr; 1296 1297 /* Is this a tagged request? 
*/ 1298 flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK); 1299 1300 /* 1301 * If we are done with the transaction, tell the 1302 * controller to send status and perform a CMD_CMPLT. 1303 */ 1304 if (desc->data_resid == desc->data_increment) 1305 flags |= CAM_SEND_STATUS; 1306 1307 csio = &start_ccb->csio; 1308 cam_fill_ctio(csio, 1309 /*retries*/2, 1310 targdone, 1311 flags, 1312 /*tag_action*/MSG_SIMPLE_Q_TAG, 1313 atio->tag_id, 1314 atio->init_id, 1315 desc->status, 1316 /*data_ptr*/desc->data_increment == 0 1317 ? NULL : desc->data, 1318 /*dxfer_len*/desc->data_increment, 1319 /*timeout*/desc->timeout); 1320 1321 start_ccb->ccb_h.ccb_flags = TARG_CCB_NONE; 1322 start_ccb->ccb_h.ccb_atio = atio; 1323 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1324 ("Sending a CTIO\n")); 1325 TAILQ_INSERT_TAIL(&softc->pending_queue, &csio->ccb_h, 1326 periph_links.tqe); 1327 xpt_action(start_ccb); 1328 s = splbio(); 1329 ccbh = TAILQ_FIRST(&softc->work_queue); 1330 splx(s); 1331 } 1332 if (ccbh != NULL) 1333 targrunqueue(periph, softc); 1334 } 1335 1336 static void 1337 targdone(struct cam_periph *periph, union ccb *done_ccb) 1338 { 1339 struct targ_softc *softc; 1340 1341 softc = (struct targ_softc *)periph->softc; 1342 1343 if (done_ccb->ccb_h.ccb_flags == TARG_CCB_WAITING) { 1344 /* Caller will release the CCB */ 1345 wakeup(&done_ccb->ccb_h.cbfcnp); 1346 return; 1347 } 1348 1349 switch (done_ccb->ccb_h.func_code) { 1350 case XPT_ACCEPT_TARGET_IO: 1351 { 1352 struct ccb_accept_tio *atio; 1353 struct targ_cmd_desc *descr; 1354 struct initiator_state *istate; 1355 u_int8_t *cdb; 1356 1357 atio = &done_ccb->atio; 1358 descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr; 1359 istate = &softc->istate[atio->init_id]; 1360 cdb = atio->cdb_io.cdb_bytes; 1361 if (softc->state == TARG_STATE_TEARDOWN 1362 || atio->ccb_h.status == CAM_REQ_ABORTED) { 1363 freedescr(descr); 1364 free(done_ccb, M_DEVBUF); 1365 return; 1366 } 1367 1368 if (istate->pending_ca == 0 1369 && 
istate->pending_ua != 0 1370 && cdb[0] != INQUIRY) { 1371 /* Pending UA, tell initiator */ 1372 /* Direction is always relative to the initator */ 1373 istate->pending_ca = CA_UNIT_ATTN; 1374 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1375 atio->ccb_h.flags |= CAM_DIR_NONE; 1376 descr->data_resid = 0; 1377 descr->data_increment = 0; 1378 descr->timeout = 5 * 1000; 1379 descr->status = SCSI_STATUS_CHECK_COND; 1380 } else { 1381 /* 1382 * Save the current CA and UA status so 1383 * they can be used by this command. 1384 */ 1385 ua_types pending_ua; 1386 ca_types pending_ca; 1387 1388 pending_ua = istate->pending_ua; 1389 pending_ca = istate->pending_ca; 1390 1391 /* 1392 * As per the SCSI2 spec, any command that occurs 1393 * after a CA is reported, clears the CA. If the 1394 * command is not an inquiry, we are also supposed 1395 * to clear the UA condition, if any, that caused 1396 * the CA to occur assuming the UA is not a 1397 * persistant state. 1398 */ 1399 istate->pending_ca = CA_NONE; 1400 if ((pending_ca 1401 & (CA_CMD_SENSE|CA_UNIT_ATTN)) == CA_UNIT_ATTN 1402 && cdb[0] != INQUIRY) 1403 istate->pending_ua = UA_NONE; 1404 1405 /* 1406 * Determine the type of incoming command and 1407 * setup our buffer for a response. 1408 */ 1409 switch (cdb[0]) { 1410 case INQUIRY: 1411 { 1412 struct scsi_inquiry *inq; 1413 struct scsi_sense_data *sense; 1414 1415 inq = (struct scsi_inquiry *)cdb; 1416 sense = &istate->sense_data; 1417 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1418 ("Saw an inquiry!\n")); 1419 /* 1420 * Validate the command. We don't 1421 * support any VPD pages, so complain 1422 * if EVPD is set. 
1423 */ 1424 if ((inq->byte2 & SI_EVPD) != 0 1425 || inq->page_code != 0) { 1426 istate->pending_ca = CA_CMD_SENSE; 1427 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1428 atio->ccb_h.flags |= CAM_DIR_NONE; 1429 descr->data_resid = 0; 1430 descr->data_increment = 0; 1431 descr->status = SCSI_STATUS_CHECK_COND; 1432 fill_sense(softc, atio->init_id, 1433 SSD_CURRENT_ERROR, 1434 SSD_KEY_ILLEGAL_REQUEST, 1435 /*asc*/0x24, /*ascq*/0x00); 1436 sense->extra_len = 1437 offsetof(struct scsi_sense_data, 1438 extra_bytes) 1439 - offsetof(struct scsi_sense_data, 1440 extra_len); 1441 } 1442 1443 if ((inq->byte2 & SI_EVPD) != 0) { 1444 sense->sense_key_spec[0] = 1445 SSD_SCS_VALID|SSD_FIELDPTR_CMD 1446 |SSD_BITPTR_VALID| /*bit value*/1; 1447 sense->sense_key_spec[1] = 0; 1448 sense->sense_key_spec[2] = 1449 offsetof(struct scsi_inquiry, 1450 byte2); 1451 break; 1452 } else if (inq->page_code != 0) { 1453 sense->sense_key_spec[0] = 1454 SSD_SCS_VALID|SSD_FIELDPTR_CMD; 1455 sense->sense_key_spec[1] = 0; 1456 sense->sense_key_spec[2] = 1457 offsetof(struct scsi_inquiry, 1458 page_code); 1459 break; 1460 } 1461 /* 1462 * Direction is always relative 1463 * to the initator. 
1464 */ 1465 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1466 atio->ccb_h.flags |= CAM_DIR_IN; 1467 descr->data = softc->inq_data; 1468 descr->data_resid = MIN(softc->inq_data_len, 1469 inq->length); 1470 descr->data_increment = descr->data_resid; 1471 descr->timeout = 5 * 1000; 1472 descr->status = SCSI_STATUS_OK; 1473 break; 1474 } 1475 case TEST_UNIT_READY: 1476 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1477 atio->ccb_h.flags |= CAM_DIR_NONE; 1478 descr->data_resid = 0; 1479 descr->data_increment = 0; 1480 descr->timeout = 5 * 1000; 1481 descr->status = SCSI_STATUS_OK; 1482 break; 1483 case REQUEST_SENSE: 1484 { 1485 struct scsi_request_sense *rsense; 1486 struct scsi_sense_data *sense; 1487 1488 rsense = (struct scsi_request_sense *)cdb; 1489 sense = &istate->sense_data; 1490 if (pending_ca == 0) { 1491 fill_sense(softc, atio->init_id, 1492 SSD_CURRENT_ERROR, 1493 SSD_KEY_NO_SENSE, 0x00, 1494 0x00); 1495 CAM_DEBUG(periph->path, 1496 CAM_DEBUG_PERIPH, 1497 ("No pending CA!\n")); 1498 } else if (pending_ca == CA_UNIT_ATTN) { 1499 u_int ascq; 1500 1501 if (pending_ua == UA_POWER_ON) 1502 ascq = 0x1; 1503 else 1504 ascq = 0x2; 1505 fill_sense(softc, atio->init_id, 1506 SSD_CURRENT_ERROR, 1507 SSD_KEY_UNIT_ATTENTION, 1508 0x29, ascq); 1509 CAM_DEBUG(periph->path, 1510 CAM_DEBUG_PERIPH, 1511 ("Pending UA!\n")); 1512 } 1513 /* 1514 * Direction is always relative 1515 * to the initator. 
1516 */ 1517 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1518 atio->ccb_h.flags |= CAM_DIR_IN; 1519 descr->data = sense; 1520 descr->data_resid = 1521 offsetof(struct scsi_sense_data, 1522 extra_len) 1523 + sense->extra_len; 1524 descr->data_resid = MIN(descr->data_resid, 1525 rsense->length); 1526 descr->data_increment = descr->data_resid; 1527 descr->timeout = 5 * 1000; 1528 descr->status = SCSI_STATUS_OK; 1529 break; 1530 } 1531 case RECEIVE: 1532 case SEND: 1533 { 1534 struct scsi_send_receive *sr; 1535 1536 sr = (struct scsi_send_receive *)cdb; 1537 1538 /* 1539 * Direction is always relative 1540 * to the initator. 1541 */ 1542 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1543 descr->data_resid = scsi_3btoul(sr->xfer_len); 1544 descr->timeout = 5 * 1000; 1545 descr->status = SCSI_STATUS_OK; 1546 if (cdb[0] == SEND) { 1547 atio->ccb_h.flags |= CAM_DIR_OUT; 1548 CAM_DEBUG(periph->path, 1549 CAM_DEBUG_PERIPH, 1550 ("Saw a SEND!\n")); 1551 atio->ccb_h.flags |= CAM_DIR_OUT; 1552 TAILQ_INSERT_TAIL(&softc->snd_ccb_queue, 1553 &atio->ccb_h, 1554 periph_links.tqe); 1555 selwakeup(&softc->snd_select); 1556 } else { 1557 atio->ccb_h.flags |= CAM_DIR_IN; 1558 CAM_DEBUG(periph->path, 1559 CAM_DEBUG_PERIPH, 1560 ("Saw a RECEIVE!\n")); 1561 TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue, 1562 &atio->ccb_h, 1563 periph_links.tqe); 1564 selwakeup(&softc->rcv_select); 1565 } 1566 /* 1567 * Attempt to satisfy this request with 1568 * a user buffer. 1569 */ 1570 targrunqueue(periph, softc); 1571 return; 1572 } 1573 default: 1574 /* 1575 * Queue for consumption by our userland 1576 * counterpart and transition to the exception 1577 * state. 1578 */ 1579 TAILQ_INSERT_TAIL(&softc->unknown_atio_queue, 1580 &atio->ccb_h, 1581 periph_links.tqe); 1582 softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO; 1583 targfireexception(periph, softc); 1584 return; 1585 } 1586 } 1587 1588 /* Queue us up to receive a Continue Target I/O ccb. 
*/ 1589 TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h, 1590 periph_links.tqe); 1591 xpt_schedule(periph, /*priority*/1); 1592 break; 1593 } 1594 case XPT_CONT_TARGET_IO: 1595 { 1596 struct ccb_scsiio *csio; 1597 struct ccb_accept_tio *atio; 1598 struct targ_cmd_desc *desc; 1599 struct buf *bp; 1600 int error; 1601 1602 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1603 ("Received completed CTIO\n")); 1604 csio = &done_ccb->csio; 1605 atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio; 1606 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr; 1607 1608 TAILQ_REMOVE(&softc->pending_queue, &done_ccb->ccb_h, 1609 periph_links.tqe); 1610 1611 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1612 printf("CCB with error %x\n", done_ccb->ccb_h.status); 1613 error = targerror(done_ccb, 0, 0); 1614 if (error == ERESTART) 1615 break; 1616 /* 1617 * Right now we don't need to do anything 1618 * prior to unfreezing the queue... 1619 */ 1620 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1621 printf("Releasing Queue\n"); 1622 cam_release_devq(done_ccb->ccb_h.path, 1623 /*relsim_flags*/0, 1624 /*reduction*/0, 1625 /*timeout*/0, 1626 /*getcount_only*/0); 1627 } 1628 } else 1629 error = 0; 1630 desc->data_increment -= csio->resid; 1631 desc->data_resid -= desc->data_increment; 1632 if ((bp = desc->bp) != NULL) { 1633 1634 bp->b_resid -= desc->data_increment; 1635 bp->b_error = error; 1636 1637 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1638 ("Buffer I/O Completed - Resid %ld:%d\n", 1639 bp->b_resid, desc->data_resid)); 1640 /* 1641 * Send the buffer back to the client if 1642 * either the command has completed or all 1643 * buffer space has been consumed. 
1644 */ 1645 if (desc->data_resid == 0 1646 || bp->b_resid == 0 1647 || error != 0) { 1648 if (bp->b_resid != 0) 1649 /* Short transfer */ 1650 bp->b_flags |= B_ERROR; 1651 1652 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1653 ("Completing a buffer\n")); 1654 biodone(bp); 1655 desc->bp = NULL; 1656 } 1657 } 1658 1659 xpt_release_ccb(done_ccb); 1660 if (softc->state != TARG_STATE_TEARDOWN) { 1661 1662 if (desc->data_resid == 0) { 1663 /* 1664 * Send the original accept TIO back to the 1665 * controller to handle more work. 1666 */ 1667 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1668 ("Returning ATIO to target\n")); 1669 xpt_action((union ccb *)atio); 1670 break; 1671 } 1672 1673 /* Queue us up for another buffer */ 1674 if (atio->cdb_io.cdb_bytes[0] == SEND) { 1675 if (desc->bp != NULL) 1676 TAILQ_INSERT_HEAD( 1677 &softc->snd_buf_queue.queue, 1678 bp, b_act); 1679 TAILQ_INSERT_HEAD(&softc->snd_ccb_queue, 1680 &atio->ccb_h, 1681 periph_links.tqe); 1682 } else { 1683 if (desc->bp != NULL) 1684 TAILQ_INSERT_HEAD( 1685 &softc->rcv_buf_queue.queue, 1686 bp, b_act); 1687 TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue, 1688 &atio->ccb_h, 1689 periph_links.tqe); 1690 } 1691 desc->bp = NULL; 1692 targrunqueue(periph, softc); 1693 } else { 1694 if (desc->bp != NULL) { 1695 bp->b_flags |= B_ERROR; 1696 bp->b_error = ENXIO; 1697 biodone(bp); 1698 } 1699 freedescr(desc); 1700 free(atio, M_DEVBUF); 1701 } 1702 break; 1703 } 1704 case XPT_IMMED_NOTIFY: 1705 { 1706 int frozen; 1707 1708 frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 1709 if (softc->state == TARG_STATE_TEARDOWN) { 1710 SLIST_REMOVE(&softc->immed_notify_slist, 1711 &done_ccb->ccb_h, ccb_hdr, 1712 periph_links.sle); 1713 free(done_ccb, M_DEVBUF); 1714 } else if (done_ccb->ccb_h.status == CAM_REQ_ABORTED) { 1715 free(done_ccb, M_DEVBUF); 1716 } else { 1717 printf("Saw event %x:%x\n", done_ccb->ccb_h.status, 1718 done_ccb->cin.message_args[0]); 1719 /* Process error condition. 
*/ 1720 targinoterror(periph, softc, &done_ccb->cin); 1721 1722 /* Requeue for another immediate event */ 1723 xpt_action(done_ccb); 1724 } 1725 if (frozen != 0) 1726 cam_release_devq(periph->path, 1727 /*relsim_flags*/0, 1728 /*opening reduction*/0, 1729 /*timeout*/0, 1730 /*getcount_only*/0); 1731 break; 1732 } 1733 default: 1734 panic("targdone: Impossible xpt opcode %x encountered.", 1735 done_ccb->ccb_h.func_code); 1736 /* NOTREACHED */ 1737 break; 1738 } 1739 } 1740 1741 /* 1742 * Transition to the exception state and notify our symbiotic 1743 * userland process of the change. 1744 */ 1745 static void 1746 targfireexception(struct cam_periph *periph, struct targ_softc *softc) 1747 { 1748 /* 1749 * return all pending buffers with short read/write status so our 1750 * process unblocks, and do a selwakeup on any process queued 1751 * waiting for reads or writes. When the selwakeup is performed, 1752 * the waking process will wakeup, call our poll routine again, 1753 * and pick up the exception. 
1754 */ 1755 struct buf *bp; 1756 1757 if (softc->state != TARG_STATE_NORMAL) 1758 /* Already either tearing down or in exception state */ 1759 return; 1760 1761 softc->state = TARG_STATE_EXCEPTION; 1762 1763 while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) { 1764 bufq_remove(&softc->snd_buf_queue, bp); 1765 bp->b_flags |= B_ERROR; 1766 biodone(bp); 1767 } 1768 1769 while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) { 1770 bufq_remove(&softc->snd_buf_queue, bp); 1771 bp->b_flags |= B_ERROR; 1772 biodone(bp); 1773 } 1774 1775 selwakeup(&softc->snd_select); 1776 selwakeup(&softc->rcv_select); 1777 } 1778 1779 static void 1780 targinoterror(struct cam_periph *periph, struct targ_softc *softc, 1781 struct ccb_immed_notify *inot) 1782 { 1783 cam_status status; 1784 int sense; 1785 1786 status = inot->ccb_h.status; 1787 sense = (status & CAM_AUTOSNS_VALID) != 0; 1788 status &= CAM_STATUS_MASK; 1789 switch (status) { 1790 case CAM_SCSI_BUS_RESET: 1791 set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD, 1792 UA_BUS_RESET); 1793 abort_pending_transactions(periph, 1794 /*init_id*/CAM_TARGET_WILDCARD, 1795 TARG_TAG_WILDCARD, EINTR, 1796 /*to_held_queue*/FALSE); 1797 softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN; 1798 targfireexception(periph, softc); 1799 break; 1800 case CAM_BDR_SENT: 1801 set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD, 1802 UA_BDR); 1803 abort_pending_transactions(periph, CAM_TARGET_WILDCARD, 1804 TARG_TAG_WILDCARD, EINTR, 1805 /*to_held_queue*/FALSE); 1806 softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED; 1807 targfireexception(periph, softc); 1808 break; 1809 case CAM_MESSAGE_RECV: 1810 switch (inot->message_args[0]) { 1811 case MSG_INITIATOR_DET_ERR: 1812 break; 1813 case MSG_ABORT: 1814 break; 1815 case MSG_BUS_DEV_RESET: 1816 break; 1817 case MSG_ABORT_TAG: 1818 break; 1819 case MSG_CLEAR_QUEUE: 1820 break; 1821 case MSG_TERM_IO_PROC: 1822 break; 1823 default: 1824 break; 1825 } 1826 break; 1827 default: 1828 
break; 1829 } 1830 } 1831 1832 static int 1833 targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 1834 { 1835 struct cam_periph *periph; 1836 struct targ_softc *softc; 1837 struct ccb_scsiio *csio; 1838 cam_status status; 1839 int frozen; 1840 int sense; 1841 int error; 1842 int on_held_queue; 1843 1844 periph = xpt_path_periph(ccb->ccb_h.path); 1845 softc = (struct targ_softc *)periph->softc; 1846 status = ccb->ccb_h.status; 1847 sense = (status & CAM_AUTOSNS_VALID) != 0; 1848 frozen = (status & CAM_DEV_QFRZN) != 0; 1849 status &= CAM_STATUS_MASK; 1850 on_held_queue = FALSE; 1851 csio = &ccb->csio; 1852 switch (status) { 1853 case CAM_REQ_ABORTED: 1854 printf("Request Aborted!\n"); 1855 if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) { 1856 struct initiator_state *istate; 1857 1858 /* 1859 * Place this CCB into the initiators 1860 * 'held' queue until the pending CA is cleared. 1861 * If there is no CA pending, reissue immediately. 1862 */ 1863 istate = &softc->istate[ccb->csio.init_id]; 1864 if (istate->pending_ca == 0) { 1865 ccb->ccb_h.ccb_flags = TARG_CCB_NONE; 1866 xpt_action(ccb); 1867 } else { 1868 ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ; 1869 TAILQ_INSERT_TAIL(&softc->pending_queue, 1870 &ccb->ccb_h, 1871 periph_links.tqe); 1872 } 1873 /* The command will be retried at a later time. 
*/ 1874 on_held_queue = TRUE; 1875 error = ERESTART; 1876 break; 1877 } 1878 /* FALLTHROUGH */ 1879 case CAM_SCSI_BUS_RESET: 1880 case CAM_BDR_SENT: 1881 case CAM_REQ_TERMIO: 1882 case CAM_CMD_TIMEOUT: 1883 /* Assume we did not send any data */ 1884 csio->resid = csio->dxfer_len; 1885 error = EIO; 1886 break; 1887 case CAM_SEL_TIMEOUT: 1888 if (ccb->ccb_h.retry_count > 0) { 1889 ccb->ccb_h.retry_count--; 1890 error = ERESTART; 1891 } else { 1892 /* "Select or reselect failure" */ 1893 csio->resid = csio->dxfer_len; 1894 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR, 1895 SSD_KEY_HARDWARE_ERROR, 0x45, 0x00); 1896 set_contingent_allegiance_cond(periph, 1897 csio->init_id, 1898 CA_CMD_SENSE); 1899 error = EIO; 1900 } 1901 break; 1902 case CAM_UNCOR_PARITY: 1903 /* "SCSI parity error" */ 1904 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR, 1905 SSD_KEY_HARDWARE_ERROR, 0x47, 0x00); 1906 set_contingent_allegiance_cond(periph, csio->init_id, 1907 CA_CMD_SENSE); 1908 csio->resid = csio->dxfer_len; 1909 error = EIO; 1910 break; 1911 case CAM_NO_HBA: 1912 csio->resid = csio->dxfer_len; 1913 error = ENXIO; 1914 break; 1915 case CAM_SEQUENCE_FAIL: 1916 if (sense != 0) { 1917 copy_sense(softc, csio); 1918 set_contingent_allegiance_cond(periph, 1919 csio->init_id, 1920 CA_CMD_SENSE); 1921 } 1922 csio->resid = csio->dxfer_len; 1923 error = EIO; 1924 break; 1925 case CAM_IDE: 1926 /* "Initiator detected error message received" */ 1927 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR, 1928 SSD_KEY_HARDWARE_ERROR, 0x48, 0x00); 1929 set_contingent_allegiance_cond(periph, csio->init_id, 1930 CA_CMD_SENSE); 1931 csio->resid = csio->dxfer_len; 1932 error = EIO; 1933 break; 1934 case CAM_REQUEUE_REQ: 1935 printf("Requeue Request!\n"); 1936 error = ERESTART; 1937 break; 1938 default: 1939 csio->resid = csio->dxfer_len; 1940 error = EIO; 1941 panic("targerror: Unexpected status %x encounterd", status); 1942 /* NOTREACHED */ 1943 } 1944 1945 if (error == ERESTART || error == 0) { 
1946 /* Clear the QFRZN flag as we will release the queue */ 1947 if (frozen != 0) 1948 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1949 1950 if (error == ERESTART && !on_held_queue) 1951 xpt_action(ccb); 1952 1953 if (frozen != 0) 1954 cam_release_devq(ccb->ccb_h.path, 1955 /*relsim_flags*/0, 1956 /*opening reduction*/0, 1957 /*timeout*/0, 1958 /*getcount_only*/0); 1959 } 1960 return (error); 1961 } 1962 1963 static struct targ_cmd_desc* 1964 allocdescr() 1965 { 1966 struct targ_cmd_desc* descr; 1967 1968 /* Allocate the targ_descr structure */ 1969 descr = (struct targ_cmd_desc *)malloc(sizeof(*descr), 1970 M_DEVBUF, M_NOWAIT); 1971 if (descr == NULL) 1972 return (NULL); 1973 1974 bzero(descr, sizeof(*descr)); 1975 1976 /* Allocate buffer backing store */ 1977 descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT); 1978 if (descr->backing_store == NULL) { 1979 free(descr, M_DEVBUF); 1980 return (NULL); 1981 } 1982 descr->max_size = MAX_BUF_SIZE; 1983 return (descr); 1984 } 1985 1986 static void 1987 freedescr(struct targ_cmd_desc *descr) 1988 { 1989 free(descr->backing_store, M_DEVBUF); 1990 free(descr, M_DEVBUF); 1991 } 1992 1993 static void 1994 fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code, 1995 u_int sense_key, u_int asc, u_int ascq) 1996 { 1997 struct initiator_state *istate; 1998 struct scsi_sense_data *sense; 1999 2000 istate = &softc->istate[initiator_id]; 2001 sense = &istate->sense_data; 2002 bzero(sense, sizeof(*sense)); 2003 sense->error_code = error_code; 2004 sense->flags = sense_key; 2005 sense->add_sense_code = asc; 2006 sense->add_sense_code_qual = ascq; 2007 2008 sense->extra_len = offsetof(struct scsi_sense_data, fru) 2009 - offsetof(struct scsi_sense_data, extra_len); 2010 } 2011 2012 static void 2013 copy_sense(struct targ_softc *softc, struct ccb_scsiio *csio) 2014 { 2015 struct initiator_state *istate; 2016 struct scsi_sense_data *sense; 2017 size_t copylen; 2018 2019 istate = &softc->istate[csio->init_id]; 
2020 sense = &istate->sense_data; 2021 copylen = sizeof(*sense); 2022 if (copylen > csio->sense_len) 2023 copylen = csio->sense_len; 2024 bcopy(&csio->sense_data, sense, copylen); 2025 } 2026 2027 static void 2028 set_unit_attention_cond(struct cam_periph *periph, 2029 u_int initiator_id, ua_types ua) 2030 { 2031 int start; 2032 int end; 2033 struct targ_softc *softc; 2034 2035 softc = (struct targ_softc *)periph->softc; 2036 if (initiator_id == CAM_TARGET_WILDCARD) { 2037 start = 0; 2038 end = MAX_INITIATORS - 1; 2039 } else 2040 start = end = initiator_id; 2041 2042 while (start <= end) { 2043 softc->istate[start].pending_ua = ua; 2044 start++; 2045 } 2046 } 2047 2048 static void 2049 set_contingent_allegiance_cond(struct cam_periph *periph, 2050 u_int initiator_id, ca_types ca) 2051 { 2052 struct targ_softc *softc; 2053 2054 softc = (struct targ_softc *)periph->softc; 2055 softc->istate[initiator_id].pending_ca = ca; 2056 abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD, 2057 /* errno */0, /*to_held_queue*/TRUE); 2058 } 2059 2060 static void 2061 abort_pending_transactions(struct cam_periph *periph, u_int initiator_id, 2062 u_int tag_id, int errno, int to_held_queue) 2063 { 2064 struct ccb_abort cab; 2065 struct ccb_queue *atio_queues[3]; 2066 struct targ_softc *softc; 2067 struct ccb_hdr *ccbh; 2068 u_int i; 2069 2070 softc = (struct targ_softc *)periph->softc; 2071 2072 atio_queues[0] = &softc->work_queue; 2073 atio_queues[1] = &softc->snd_ccb_queue; 2074 atio_queues[2] = &softc->rcv_ccb_queue; 2075 2076 /* First address the ATIOs awaiting resources */ 2077 for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) { 2078 struct ccb_queue *atio_queue; 2079 2080 if (to_held_queue) { 2081 /* 2082 * The device queue is frozen anyway, so there 2083 * is nothing for us to do. 
2084 */ 2085 continue; 2086 } 2087 atio_queue = atio_queues[i]; 2088 ccbh = TAILQ_FIRST(atio_queue); 2089 while (ccbh != NULL) { 2090 struct ccb_accept_tio *atio; 2091 struct targ_cmd_desc *desc; 2092 2093 atio = (struct ccb_accept_tio *)ccbh; 2094 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr; 2095 ccbh = TAILQ_NEXT(ccbh, periph_links.tqe); 2096 2097 /* Only abort the CCBs that match */ 2098 if ((atio->init_id != initiator_id 2099 && initiator_id != CAM_TARGET_WILDCARD) 2100 || (tag_id != TARG_TAG_WILDCARD 2101 && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0 2102 || atio->tag_id != tag_id))) 2103 continue; 2104 2105 TAILQ_REMOVE(atio_queue, &atio->ccb_h, 2106 periph_links.tqe); 2107 2108 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 2109 ("Aborting ATIO\n")); 2110 if (desc->bp != NULL) { 2111 desc->bp->b_flags |= B_ERROR; 2112 if (softc->state != TARG_STATE_TEARDOWN) 2113 desc->bp->b_error = errno; 2114 else 2115 desc->bp->b_error = ENXIO; 2116 biodone(desc->bp); 2117 desc->bp = NULL; 2118 } 2119 if (softc->state == TARG_STATE_TEARDOWN) { 2120 freedescr(desc); 2121 free(atio, M_DEVBUF); 2122 } else { 2123 /* Return the ATIO back to the controller */ 2124 xpt_action((union ccb *)atio); 2125 } 2126 } 2127 } 2128 2129 ccbh = TAILQ_FIRST(&softc->pending_queue); 2130 while (ccbh != NULL) { 2131 struct ccb_scsiio *csio; 2132 2133 csio = (struct ccb_scsiio *)ccbh; 2134 ccbh = TAILQ_NEXT(ccbh, periph_links.tqe); 2135 2136 /* Only abort the CCBs that match */ 2137 if ((csio->init_id != initiator_id 2138 && initiator_id != CAM_TARGET_WILDCARD) 2139 || (tag_id != TARG_TAG_WILDCARD 2140 && ((csio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0 2141 || csio->tag_id != tag_id))) 2142 continue; 2143 2144 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 2145 ("Aborting CTIO\n")); 2146 2147 TAILQ_REMOVE(&softc->work_queue, &csio->ccb_h, 2148 periph_links.tqe); 2149 2150 if (to_held_queue != 0) 2151 csio->ccb_h.ccb_flags |= TARG_CCB_ABORT_TO_HELDQ; 2152 xpt_setup_ccb(&cab.ccb_h, 
csio->ccb_h.path, /*priority*/1); 2153 cab.abort_ccb = (union ccb *)csio; 2154 xpt_action((union ccb *)&cab); 2155 if (cab.ccb_h.status != CAM_REQ_CMP) { 2156 xpt_print_path(cab.ccb_h.path); 2157 printf("Unable to abort CCB. Status %x\n", 2158 cab.ccb_h.status); 2159 } 2160 } 2161 } 2162