/*
 * Implementation of a simple Target Mode SCSI Processor Target driver for CAM.
 *
 * Copyright (c) 1998, 1999 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <stddef.h>	/* For offsetof */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/select.h>	/* For struct selinfo.
*/ 43 #include <sys/uio.h> 44 45 #include <cam/cam.h> 46 #include <cam/cam_ccb.h> 47 #include <cam/cam_extend.h> 48 #include <cam/cam_periph.h> 49 #include <cam/cam_queue.h> 50 #include <cam/cam_xpt_periph.h> 51 #include <cam/cam_debug.h> 52 53 #include <cam/scsi/scsi_all.h> 54 #include <cam/scsi/scsi_pt.h> 55 #include <cam/scsi/scsi_targetio.h> 56 #include <cam/scsi/scsi_message.h> 57 58 typedef enum { 59 TARG_STATE_NORMAL, 60 TARG_STATE_EXCEPTION, 61 TARG_STATE_TEARDOWN 62 } targ_state; 63 64 typedef enum { 65 TARG_FLAG_NONE = 0x00, 66 TARG_FLAG_SEND_EOF = 0x01, 67 TARG_FLAG_RECEIVE_EOF = 0x02, 68 TARG_FLAG_LUN_ENABLED = 0x04 69 } targ_flags; 70 71 typedef enum { 72 TARG_CCB_NONE = 0x00, 73 TARG_CCB_WAITING = 0x01, 74 TARG_CCB_HELDQ = 0x02, 75 TARG_CCB_ABORT_TO_HELDQ = 0x04 76 } targ_ccb_flags; 77 78 #define MAX_ACCEPT 16 79 #define MAX_IMMEDIATE 16 80 #define MAX_BUF_SIZE 256 /* Max inquiry/sense/mode page transfer */ 81 #define MAX_INITIATORS 256 /* includes widest fibre channel for now */ 82 83 #define MIN(a, b) ((a > b) ? b : a) 84 85 #define TARG_CONTROL_UNIT 0xffff00ff 86 #define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT) 87 88 #define TARG_TAG_WILDCARD ((u_int)~0) 89 90 /* Offsets into our private CCB area for storing accept information */ 91 #define ccb_flags ppriv_field0 92 #define ccb_descr ppriv_ptr1 93 94 /* We stick a pointer to the originating accept TIO in each continue I/O CCB */ 95 #define ccb_atio ppriv_ptr1 96 97 struct targ_softc { 98 /* CTIOs pending on the controller */ 99 struct ccb_queue pending_queue; 100 101 /* ATIOs awaiting CTIO resources from the XPT */ 102 struct ccb_queue work_queue; 103 104 /* 105 * ATIOs for SEND operations waiting for 'write' 106 * buffer resources from our userland daemon. 107 */ 108 struct ccb_queue snd_ccb_queue; 109 110 /* 111 * ATIOs for RCV operations waiting for 'read' 112 * buffer resources from our userland daemon. 
113 */ 114 struct ccb_queue rcv_ccb_queue; 115 116 /* 117 * ATIOs for commands unknown to the kernel driver. 118 * These are queued for the userland daemon to 119 * consume. 120 */ 121 struct ccb_queue unknown_atio_queue; 122 123 /* 124 * Userland buffers for SEND commands waiting for 125 * SEND ATIOs to be queued by an initiator. 126 */ 127 struct bio_queue_head snd_bio_queue; 128 129 /* 130 * Userland buffers for RCV commands waiting for 131 * RCV ATIOs to be queued by an initiator. 132 */ 133 struct bio_queue_head rcv_bio_queue; 134 struct devstat device_stats; 135 dev_t targ_dev; 136 struct selinfo snd_select; 137 struct selinfo rcv_select; 138 targ_state state; 139 targ_flags flags; 140 targ_exception exceptions; 141 u_int init_level; 142 u_int inq_data_len; 143 struct scsi_inquiry_data *inq_data; 144 struct ccb_accept_tio *accept_tio_list; 145 struct ccb_hdr_slist immed_notify_slist; 146 struct initiator_state istate[MAX_INITIATORS]; 147 }; 148 149 struct targ_cmd_desc { 150 struct ccb_accept_tio* atio_link; 151 u_int data_resid; /* How much left to transfer */ 152 u_int data_increment;/* Amount to send before next disconnect */ 153 void* data; /* The data. 
Can be from backing_store or not */ 154 void* backing_store;/* Backing store allocated for this descriptor*/ 155 struct bio *bp; /* Buffer for this transfer */ 156 u_int max_size; /* Size of backing_store */ 157 u_int32_t timeout; 158 u_int8_t status; /* Status to return to initiator */ 159 }; 160 161 static d_open_t targopen; 162 static d_close_t targclose; 163 static d_read_t targread; 164 static d_write_t targwrite; 165 static d_ioctl_t targioctl; 166 static d_poll_t targpoll; 167 static d_strategy_t targstrategy; 168 169 #define TARG_CDEV_MAJOR 65 170 static struct cdevsw targ_cdevsw = { 171 /* open */ targopen, 172 /* close */ targclose, 173 /* read */ targread, 174 /* write */ targwrite, 175 /* ioctl */ targioctl, 176 /* poll */ targpoll, 177 /* mmap */ nommap, 178 /* strategy */ targstrategy, 179 /* name */ "targ", 180 /* maj */ TARG_CDEV_MAJOR, 181 /* dump */ nodump, 182 /* psize */ nopsize, 183 /* flags */ 0, 184 /* bmaj */ -1 185 }; 186 187 static int targsendccb(struct cam_periph *periph, union ccb *ccb, 188 union ccb *inccb); 189 static periph_init_t targinit; 190 static void targasync(void *callback_arg, u_int32_t code, 191 struct cam_path *path, void *arg); 192 static int targallocinstance(struct ioc_alloc_unit *alloc_unit); 193 static int targfreeinstance(struct ioc_alloc_unit *alloc_unit); 194 static cam_status targenlun(struct cam_periph *periph); 195 static cam_status targdislun(struct cam_periph *periph); 196 static periph_ctor_t targctor; 197 static periph_dtor_t targdtor; 198 static void targrunqueue(struct cam_periph *periph, 199 struct targ_softc *softc); 200 static periph_start_t targstart; 201 static void targdone(struct cam_periph *periph, 202 union ccb *done_ccb); 203 static void targfireexception(struct cam_periph *periph, 204 struct targ_softc *softc); 205 static void targinoterror(struct cam_periph *periph, 206 struct targ_softc *softc, 207 struct ccb_immed_notify *inot); 208 static int targerror(union ccb *ccb, u_int32_t cam_flags, 
209 u_int32_t sense_flags); 210 static struct targ_cmd_desc* allocdescr(void); 211 static void freedescr(struct targ_cmd_desc *buf); 212 static void fill_sense(struct targ_softc *softc, 213 u_int initiator_id, u_int error_code, 214 u_int sense_key, u_int asc, u_int ascq); 215 static void copy_sense(struct targ_softc *softc, 216 struct initiator_state *istate, 217 u_int8_t *sense_buffer, size_t sense_len); 218 static void set_unit_attention_cond(struct cam_periph *periph, 219 u_int initiator_id, ua_types ua); 220 static void set_ca_condition(struct cam_periph *periph, 221 u_int initiator_id, ca_types ca); 222 static void abort_pending_transactions(struct cam_periph *periph, 223 u_int initiator_id, u_int tag_id, 224 int errno, int to_held_queue); 225 226 static struct periph_driver targdriver = 227 { 228 targinit, "targ", 229 TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0 230 }; 231 232 DATA_SET(periphdriver_set, targdriver); 233 234 static struct extend_array *targperiphs; 235 static dev_t targ_ctl_dev; 236 237 static void 238 targinit(void) 239 { 240 /* 241 * Create our extend array for storing the devices we attach to. 
242 */ 243 targperiphs = cam_extend_new(); 244 if (targperiphs == NULL) { 245 printf("targ: Failed to alloc extend array!\n"); 246 return; 247 } 248 targ_ctl_dev = make_dev(&targ_cdevsw, TARG_CONTROL_UNIT, UID_ROOT, 249 GID_OPERATOR, 0600, "%s.ctl", "targ"); 250 if (targ_ctl_dev == (dev_t) 0) { 251 printf("targ: failed to create control dev\n"); 252 } 253 } 254 255 static void 256 targasync(void *callback_arg, u_int32_t code, 257 struct cam_path *path, void *arg) 258 { 259 struct cam_periph *periph; 260 struct targ_softc *softc; 261 262 periph = (struct cam_periph *)callback_arg; 263 softc = (struct targ_softc *)periph->softc; 264 switch (code) { 265 case AC_PATH_DEREGISTERED: 266 { 267 /* XXX Implement */ 268 break; 269 } 270 default: 271 break; 272 } 273 } 274 275 /* Attempt to enable our lun */ 276 static cam_status 277 targenlun(struct cam_periph *periph) 278 { 279 union ccb immed_ccb; 280 struct targ_softc *softc; 281 cam_status status; 282 int i; 283 284 softc = (struct targ_softc *)periph->softc; 285 286 if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) 287 return (CAM_REQ_CMP); 288 289 xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1); 290 immed_ccb.ccb_h.func_code = XPT_EN_LUN; 291 292 /* Don't need support for any vendor specific commands */ 293 immed_ccb.cel.grp6_len = 0; 294 immed_ccb.cel.grp7_len = 0; 295 immed_ccb.cel.enable = 1; 296 xpt_action(&immed_ccb); 297 status = immed_ccb.ccb_h.status; 298 if (status != CAM_REQ_CMP) { 299 xpt_print_path(periph->path); 300 printf("targenlun - Enable Lun Rejected with status 0x%x\n", 301 status); 302 return (status); 303 } 304 305 softc->flags |= TARG_FLAG_LUN_ENABLED; 306 307 /* 308 * Build up a buffer of accept target I/O 309 * operations for incoming selections. 
310 */ 311 for (i = 0; i < MAX_ACCEPT; i++) { 312 struct ccb_accept_tio *atio; 313 314 atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF, 315 M_NOWAIT); 316 if (atio == NULL) { 317 status = CAM_RESRC_UNAVAIL; 318 break; 319 } 320 321 atio->ccb_h.ccb_descr = allocdescr(); 322 323 if (atio->ccb_h.ccb_descr == NULL) { 324 free(atio, M_DEVBUF); 325 status = CAM_RESRC_UNAVAIL; 326 break; 327 } 328 329 xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1); 330 atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; 331 atio->ccb_h.cbfcnp = targdone; 332 atio->ccb_h.ccb_flags = TARG_CCB_NONE; 333 xpt_action((union ccb *)atio); 334 status = atio->ccb_h.status; 335 if (status != CAM_REQ_INPROG) { 336 xpt_print_path(periph->path); 337 printf("Queue of atio failed\n"); 338 freedescr(atio->ccb_h.ccb_descr); 339 free(atio, M_DEVBUF); 340 break; 341 } 342 ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link = 343 softc->accept_tio_list; 344 softc->accept_tio_list = atio; 345 } 346 347 if (i == 0) { 348 xpt_print_path(periph->path); 349 printf("targenlun - Could not allocate accept tio CCBs: " 350 "status = 0x%x\n", status); 351 targdislun(periph); 352 return (CAM_REQ_CMP_ERR); 353 } 354 355 /* 356 * Build up a buffer of immediate notify CCBs 357 * so the SIM can tell us of asynchronous target mode events. 
358 */ 359 for (i = 0; i < MAX_ACCEPT; i++) { 360 struct ccb_immed_notify *inot; 361 362 inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF, 363 M_NOWAIT); 364 365 if (inot == NULL) { 366 status = CAM_RESRC_UNAVAIL; 367 break; 368 } 369 370 xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1); 371 inot->ccb_h.func_code = XPT_IMMED_NOTIFY; 372 inot->ccb_h.cbfcnp = targdone; 373 SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h, 374 periph_links.sle); 375 xpt_action((union ccb *)inot); 376 } 377 378 if (i == 0) { 379 xpt_print_path(periph->path); 380 printf("targenlun - Could not allocate immediate notify CCBs: " 381 "status = 0x%x\n", status); 382 targdislun(periph); 383 return (CAM_REQ_CMP_ERR); 384 } 385 386 return (CAM_REQ_CMP); 387 } 388 389 static cam_status 390 targdislun(struct cam_periph *periph) 391 { 392 union ccb ccb; 393 struct targ_softc *softc; 394 struct ccb_accept_tio* atio; 395 struct ccb_hdr *ccb_h; 396 397 softc = (struct targ_softc *)periph->softc; 398 if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) 399 return CAM_REQ_CMP; 400 401 /* XXX Block for Continue I/O completion */ 402 403 /* Kill off all ACCECPT and IMMEDIATE CCBs */ 404 while ((atio = softc->accept_tio_list) != NULL) { 405 406 softc->accept_tio_list = 407 ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link; 408 xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1); 409 ccb.cab.ccb_h.func_code = XPT_ABORT; 410 ccb.cab.abort_ccb = (union ccb *)atio; 411 xpt_action(&ccb); 412 } 413 414 while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) { 415 SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle); 416 xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1); 417 ccb.cab.ccb_h.func_code = XPT_ABORT; 418 ccb.cab.abort_ccb = (union ccb *)ccb_h; 419 xpt_action(&ccb); 420 } 421 422 /* 423 * Dissable this lun. 
424 */ 425 xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1); 426 ccb.cel.ccb_h.func_code = XPT_EN_LUN; 427 ccb.cel.enable = 0; 428 xpt_action(&ccb); 429 430 if (ccb.cel.ccb_h.status != CAM_REQ_CMP) 431 printf("targdislun - Disabling lun on controller failed " 432 "with status 0x%x\n", ccb.cel.ccb_h.status); 433 else 434 softc->flags &= ~TARG_FLAG_LUN_ENABLED; 435 return (ccb.cel.ccb_h.status); 436 } 437 438 static cam_status 439 targctor(struct cam_periph *periph, void *arg) 440 { 441 struct ccb_pathinq *cpi; 442 struct targ_softc *softc; 443 int i; 444 445 cpi = (struct ccb_pathinq *)arg; 446 447 /* Allocate our per-instance private storage */ 448 softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT); 449 if (softc == NULL) { 450 printf("targctor: unable to malloc softc\n"); 451 return (CAM_REQ_CMP_ERR); 452 } 453 454 bzero(softc, sizeof(*softc)); 455 TAILQ_INIT(&softc->pending_queue); 456 TAILQ_INIT(&softc->work_queue); 457 TAILQ_INIT(&softc->snd_ccb_queue); 458 TAILQ_INIT(&softc->rcv_ccb_queue); 459 TAILQ_INIT(&softc->unknown_atio_queue); 460 bioq_init(&softc->snd_bio_queue); 461 bioq_init(&softc->rcv_bio_queue); 462 softc->accept_tio_list = NULL; 463 SLIST_INIT(&softc->immed_notify_slist); 464 softc->state = TARG_STATE_NORMAL; 465 periph->softc = softc; 466 softc->init_level++; 467 468 cam_extend_set(targperiphs, periph->unit_number, periph); 469 470 /* 471 * We start out life with a UA to indicate power-on/reset. 472 */ 473 for (i = 0; i < MAX_INITIATORS; i++) 474 softc->istate[i].pending_ua = UA_POWER_ON; 475 476 /* 477 * Allocate an initial inquiry data buffer. We might allow the 478 * user to override this later via an ioctl. 
479 */ 480 softc->inq_data_len = sizeof(*softc->inq_data); 481 softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT); 482 if (softc->inq_data == NULL) { 483 printf("targctor - Unable to malloc inquiry data\n"); 484 targdtor(periph); 485 return (CAM_RESRC_UNAVAIL); 486 } 487 bzero(softc->inq_data, softc->inq_data_len); 488 softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5); 489 softc->inq_data->version = 2; 490 softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */ 491 softc->inq_data->flags = 492 cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32|PI_TAG_ABLE); 493 softc->inq_data->additional_length = softc->inq_data_len - 4; 494 strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE); 495 strncpy(softc->inq_data->product, "TM-PT ", SID_PRODUCT_SIZE); 496 strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE); 497 softc->targ_dev = make_dev(&targ_cdevsw, periph->unit_number, UID_ROOT, 498 GID_OPERATOR, 0600, "%s%d", 499 periph->periph_name, periph->unit_number); 500 softc->init_level++; 501 return (CAM_REQ_CMP); 502 } 503 504 static void 505 targdtor(struct cam_periph *periph) 506 { 507 struct targ_softc *softc; 508 509 softc = (struct targ_softc *)periph->softc; 510 511 softc->state = TARG_STATE_TEARDOWN; 512 513 targdislun(periph); 514 515 cam_extend_release(targperiphs, periph->unit_number); 516 517 switch (softc->init_level) { 518 default: 519 /* FALLTHROUGH */ 520 case 2: 521 free(softc->inq_data, M_DEVBUF); 522 destroy_dev(softc->targ_dev); 523 /* FALLTHROUGH */ 524 case 1: 525 free(softc, M_DEVBUF); 526 break; 527 case 0: 528 panic("targdtor - impossible init level");; 529 } 530 } 531 532 static int 533 targopen(dev_t dev, int flags, int fmt, struct proc *p) 534 { 535 struct cam_periph *periph; 536 struct targ_softc *softc; 537 u_int unit; 538 cam_status status; 539 int error; 540 int s; 541 542 unit = minor(dev); 543 544 /* An open of the control device always succeeds */ 545 if 
(TARG_IS_CONTROL_DEV(unit)) 546 return 0; 547 548 s = splsoftcam(); 549 periph = cam_extend_get(targperiphs, unit); 550 if (periph == NULL) { 551 splx(s); 552 return (ENXIO); 553 } 554 if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) { 555 splx(s); 556 return (error); 557 } 558 559 softc = (struct targ_softc *)periph->softc; 560 if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) { 561 if (cam_periph_acquire(periph) != CAM_REQ_CMP) { 562 splx(s); 563 cam_periph_unlock(periph); 564 return(ENXIO); 565 } 566 } 567 splx(s); 568 569 status = targenlun(periph); 570 switch (status) { 571 case CAM_REQ_CMP: 572 error = 0; 573 break; 574 case CAM_RESRC_UNAVAIL: 575 error = ENOMEM; 576 break; 577 case CAM_LUN_ALRDY_ENA: 578 error = EADDRINUSE; 579 break; 580 default: 581 error = ENXIO; 582 break; 583 } 584 cam_periph_unlock(periph); 585 if (error) { 586 cam_periph_release(periph); 587 } 588 return (error); 589 } 590 591 static int 592 targclose(dev_t dev, int flag, int fmt, struct proc *p) 593 { 594 struct cam_periph *periph; 595 struct targ_softc *softc; 596 u_int unit; 597 int s; 598 int error; 599 600 unit = minor(dev); 601 602 /* A close of the control device always succeeds */ 603 if (TARG_IS_CONTROL_DEV(unit)) 604 return 0; 605 606 s = splsoftcam(); 607 periph = cam_extend_get(targperiphs, unit); 608 if (periph == NULL) { 609 splx(s); 610 return (ENXIO); 611 } 612 if ((error = cam_periph_lock(periph, PRIBIO)) != 0) 613 return (error); 614 softc = (struct targ_softc *)periph->softc; 615 splx(s); 616 617 targdislun(periph); 618 619 cam_periph_unlock(periph); 620 cam_periph_release(periph); 621 622 return (0); 623 } 624 625 static int 626 targallocinstance(struct ioc_alloc_unit *alloc_unit) 627 { 628 struct ccb_pathinq cpi; 629 struct cam_path *path; 630 struct cam_periph *periph; 631 cam_status status; 632 int free_path_on_return; 633 int error; 634 635 free_path_on_return = 0; 636 status = xpt_create_path(&path, /*periph*/NULL, 637 alloc_unit->path_id, 638 
alloc_unit->target_id, 639 alloc_unit->lun_id); 640 if (status != CAM_REQ_CMP) { 641 printf("Couldn't Allocate Path %x\n", status); 642 goto fail; 643 } 644 645 free_path_on_return++; 646 647 648 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 649 cpi.ccb_h.func_code = XPT_PATH_INQ; 650 xpt_action((union ccb *)&cpi); 651 status = cpi.ccb_h.status; 652 653 if (status != CAM_REQ_CMP) { 654 printf("Couldn't CPI %x\n", status); 655 goto fail; 656 } 657 658 /* Can only alloc units on controllers that support target mode */ 659 if ((cpi.target_sprt & PIT_PROCESSOR) == 0) { 660 printf("Controller does not support target mode%x\n", status); 661 status = CAM_PATH_INVALID; 662 goto fail; 663 } 664 665 /* Ensure that we don't already have an instance for this unit. */ 666 if ((periph = cam_periph_find(path, "targ")) != NULL) { 667 status = CAM_LUN_ALRDY_ENA; 668 goto fail; 669 } 670 671 /* 672 * Allocate a peripheral instance for 673 * this target instance. 674 */ 675 status = cam_periph_alloc(targctor, NULL, targdtor, targstart, 676 "targ", CAM_PERIPH_BIO, path, targasync, 677 0, &cpi); 678 679 fail: 680 switch (status) { 681 case CAM_REQ_CMP: 682 { 683 struct cam_periph *periph; 684 685 if ((periph = cam_periph_find(path, "targ")) == NULL) 686 panic("targallocinstance: Succeeded but no periph?"); 687 error = 0; 688 alloc_unit->unit = periph->unit_number; 689 break; 690 } 691 case CAM_RESRC_UNAVAIL: 692 error = ENOMEM; 693 break; 694 case CAM_LUN_ALRDY_ENA: 695 error = EADDRINUSE; 696 break; 697 default: 698 printf("targallocinstance: Unexpected CAM status %x\n", status); 699 /* FALLTHROUGH */ 700 case CAM_PATH_INVALID: 701 error = ENXIO; 702 break; 703 case CAM_PROVIDE_FAIL: 704 error = ENODEV; 705 break; 706 } 707 708 if (free_path_on_return != 0) 709 xpt_free_path(path); 710 711 return (error); 712 } 713 714 static int 715 targfreeinstance(struct ioc_alloc_unit *alloc_unit) 716 { 717 struct cam_path *path; 718 struct cam_periph *periph; 719 struct targ_softc *softc; 720 
cam_status status; 721 int free_path_on_return; 722 int error; 723 724 periph = NULL; 725 free_path_on_return = 0; 726 status = xpt_create_path(&path, /*periph*/NULL, 727 alloc_unit->path_id, 728 alloc_unit->target_id, 729 alloc_unit->lun_id); 730 free_path_on_return++; 731 732 if (status != CAM_REQ_CMP) 733 goto fail; 734 735 /* Find our instance. */ 736 if ((periph = cam_periph_find(path, "targ")) == NULL) { 737 xpt_print_path(path); 738 printf("Invalid path specified for freeing target instance\n"); 739 status = CAM_PATH_INVALID; 740 goto fail; 741 } 742 743 softc = (struct targ_softc *)periph->softc; 744 745 if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) { 746 status = CAM_BUSY; 747 goto fail; 748 } 749 750 fail: 751 if (free_path_on_return != 0) 752 xpt_free_path(path); 753 754 switch (status) { 755 case CAM_REQ_CMP: 756 if (periph != NULL) 757 cam_periph_invalidate(periph); 758 error = 0; 759 break; 760 case CAM_RESRC_UNAVAIL: 761 error = ENOMEM; 762 break; 763 case CAM_LUN_ALRDY_ENA: 764 error = EADDRINUSE; 765 break; 766 default: 767 printf("targfreeinstance: Unexpected CAM status %x\n", status); 768 /* FALLTHROUGH */ 769 case CAM_PATH_INVALID: 770 error = ENODEV; 771 break; 772 } 773 return (error); 774 } 775 776 static int 777 targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p) 778 { 779 struct cam_periph *periph; 780 struct targ_softc *softc; 781 u_int unit; 782 int error; 783 784 unit = minor(dev); 785 error = 0; 786 if (TARG_IS_CONTROL_DEV(unit)) { 787 switch (cmd) { 788 case TARGCTLIOALLOCUNIT: 789 error = targallocinstance((struct ioc_alloc_unit*)addr); 790 break; 791 case TARGCTLIOFREEUNIT: 792 error = targfreeinstance((struct ioc_alloc_unit*)addr); 793 break; 794 default: 795 error = EINVAL; 796 break; 797 } 798 return (error); 799 } 800 801 periph = cam_extend_get(targperiphs, unit); 802 if (periph == NULL) 803 return (ENXIO); 804 softc = (struct targ_softc *)periph->softc; 805 switch (cmd) { 806 case TARGIOCFETCHEXCEPTION: 
807 *((targ_exception *)addr) = softc->exceptions; 808 break; 809 case TARGIOCCLEAREXCEPTION: 810 { 811 targ_exception clear_mask; 812 813 clear_mask = *((targ_exception *)addr); 814 if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) { 815 struct ccb_hdr *ccbh; 816 817 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue); 818 if (ccbh != NULL) { 819 TAILQ_REMOVE(&softc->unknown_atio_queue, 820 ccbh, periph_links.tqe); 821 /* Requeue the ATIO back to the controller */ 822 ccbh->ccb_flags = TARG_CCB_NONE; 823 xpt_action((union ccb *)ccbh); 824 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue); 825 } 826 if (ccbh != NULL) 827 clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO; 828 } 829 softc->exceptions &= ~clear_mask; 830 if (softc->exceptions == TARG_EXCEPT_NONE 831 && softc->state == TARG_STATE_EXCEPTION) { 832 softc->state = TARG_STATE_NORMAL; 833 targrunqueue(periph, softc); 834 } 835 break; 836 } 837 case TARGIOCFETCHATIO: 838 { 839 struct ccb_hdr *ccbh; 840 841 ccbh = TAILQ_FIRST(&softc->unknown_atio_queue); 842 if (ccbh != NULL) { 843 bcopy(ccbh, addr, sizeof(struct ccb_accept_tio)); 844 } else { 845 error = ENOENT; 846 } 847 break; 848 } 849 case TARGIOCCOMMAND: 850 { 851 union ccb *inccb; 852 union ccb *ccb; 853 854 /* 855 * XXX JGibbs 856 * This code is lifted directly from the pass-thru driver. 857 * Perhaps this should be moved to a library???? 
858 */ 859 inccb = (union ccb *)addr; 860 ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority); 861 862 error = targsendccb(periph, ccb, inccb); 863 864 xpt_release_ccb(ccb); 865 866 break; 867 } 868 case TARGIOCGETISTATE: 869 case TARGIOCSETISTATE: 870 { 871 struct ioc_initiator_state *ioc_istate; 872 873 ioc_istate = (struct ioc_initiator_state *)addr; 874 if (ioc_istate->initiator_id > MAX_INITIATORS) { 875 error = EINVAL; 876 break; 877 } 878 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 879 ("GET/SETISTATE for %d\n", ioc_istate->initiator_id)); 880 if (cmd == TARGIOCGETISTATE) { 881 bcopy(&softc->istate[ioc_istate->initiator_id], 882 &ioc_istate->istate, sizeof(ioc_istate->istate)); 883 } else { 884 bcopy(&ioc_istate->istate, 885 &softc->istate[ioc_istate->initiator_id], 886 sizeof(ioc_istate->istate)); 887 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 888 ("pending_ca now %x\n", 889 softc->istate[ioc_istate->initiator_id].pending_ca)); 890 } 891 break; 892 } 893 #ifdef CAMDEBUG 894 case TARGIODEBUG: 895 { 896 union ccb ccb; 897 bzero (&ccb, sizeof ccb); 898 if (xpt_create_path(&ccb.ccb_h.path, periph, 899 xpt_path_path_id(periph->path), 900 xpt_path_target_id(periph->path), 901 xpt_path_lun_id(periph->path)) != CAM_REQ_CMP) { 902 error = EINVAL; 903 break; 904 } 905 if (*((int *)addr)) { 906 ccb.cdbg.flags = CAM_DEBUG_PERIPH; 907 } else { 908 ccb.cdbg.flags = CAM_DEBUG_NONE; 909 } 910 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 0); 911 ccb.ccb_h.func_code = XPT_DEBUG; 912 ccb.ccb_h.path_id = xpt_path_path_id(ccb.ccb_h.path); 913 ccb.ccb_h.target_id = xpt_path_target_id(ccb.ccb_h.path); 914 ccb.ccb_h.target_lun = xpt_path_lun_id(ccb.ccb_h.path); 915 ccb.ccb_h.cbfcnp = targdone; 916 xpt_action(&ccb); 917 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 918 error = EIO; 919 } else { 920 error = 0; 921 } 922 xpt_free_path(ccb.ccb_h.path); 923 break; 924 } 925 #endif 926 default: 927 error = ENOTTY; 928 break; 929 } 930 return (error); 931 } 932 933 /* 934 
* XXX JGibbs lifted from pass-thru driver. 935 * Generally, "ccb" should be the CCB supplied by the kernel. "inccb" 936 * should be the CCB that is copied in from the user. 937 */ 938 static int 939 targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb) 940 { 941 struct targ_softc *softc; 942 struct cam_periph_map_info mapinfo; 943 int error, need_unmap; 944 int s; 945 946 softc = (struct targ_softc *)periph->softc; 947 948 need_unmap = 0; 949 950 /* 951 * There are some fields in the CCB header that need to be 952 * preserved, the rest we get from the user. 953 */ 954 xpt_merge_ccb(ccb, inccb); 955 956 /* 957 * There's no way for the user to have a completion 958 * function, so we put our own completion function in here. 959 */ 960 ccb->ccb_h.cbfcnp = targdone; 961 962 /* 963 * We only attempt to map the user memory into kernel space 964 * if they haven't passed in a physical memory pointer, 965 * and if there is actually an I/O operation to perform. 966 * Right now cam_periph_mapmem() only supports SCSI and device 967 * match CCBs. For the SCSI CCBs, we only pass the CCB in if 968 * there's actually data to map. cam_periph_mapmem() will do the 969 * right thing, even if there isn't data to map, but since CCBs 970 * without data are a reasonably common occurance (e.g. test unit 971 * ready), it will save a few cycles if we check for it here. 972 */ 973 if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) 974 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) 975 && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)) 976 || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) { 977 978 bzero(&mapinfo, sizeof(mapinfo)); 979 980 error = cam_periph_mapmem(ccb, &mapinfo); 981 982 /* 983 * cam_periph_mapmem returned an error, we can't continue. 984 * Return the error to the user. 985 */ 986 if (error) 987 return(error); 988 989 /* 990 * We successfully mapped the memory in, so we need to 991 * unmap it when the transaction is done. 
992 */ 993 need_unmap = 1; 994 } 995 996 /* 997 * Once queued on the pending CCB list, this CCB will be protected 998 * by the error recovery handling used for 'buffer I/O' ccbs. Since 999 * we are in a process context here, however, the software interrupt 1000 * for this driver may deliver an event invalidating this CCB just 1001 * before we queue it. Close this race condition by blocking 1002 * software interrupt delivery, checking for any pertinent queued 1003 * events, and only then queuing this CCB. 1004 */ 1005 s = splsoftcam(); 1006 if (softc->exceptions == 0) { 1007 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) 1008 TAILQ_INSERT_TAIL(&softc->pending_queue, &ccb->ccb_h, 1009 periph_links.tqe); 1010 1011 /* 1012 * If the user wants us to perform any error recovery, 1013 * then honor that request. Otherwise, it's up to the 1014 * user to perform any error recovery. 1015 */ 1016 error = cam_periph_runccb(ccb, 1017 /* error handler */NULL, 1018 /* cam_flags */ 0, 1019 /* sense_flags */SF_RETRY_UA, 1020 &softc->device_stats); 1021 1022 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) 1023 TAILQ_REMOVE(&softc->pending_queue, &ccb->ccb_h, 1024 periph_links.tqe); 1025 } else { 1026 ccb->ccb_h.status = CAM_UNACKED_EVENT; 1027 error = 0; 1028 } 1029 splx(s); 1030 1031 if (need_unmap != 0) 1032 cam_periph_unmapmem(ccb, &mapinfo); 1033 1034 ccb->ccb_h.cbfcnp = NULL; 1035 ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv; 1036 bcopy(ccb, inccb, sizeof(union ccb)); 1037 1038 return(error); 1039 } 1040 1041 1042 static int 1043 targpoll(dev_t dev, int poll_events, struct proc *p) 1044 { 1045 struct cam_periph *periph; 1046 struct targ_softc *softc; 1047 u_int unit; 1048 int revents; 1049 int s; 1050 1051 unit = minor(dev); 1052 1053 /* ioctl is the only supported operation of the control device */ 1054 if (TARG_IS_CONTROL_DEV(unit)) 1055 return EINVAL; 1056 1057 periph = cam_extend_get(targperiphs, unit); 1058 if (periph == NULL) 1059 return (ENXIO); 1060 softc = (struct 
targ_softc *)periph->softc; 1061 1062 revents = 0; 1063 s = splcam(); 1064 if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) { 1065 if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL 1066 && bioq_first(&softc->rcv_bio_queue) == NULL) 1067 revents |= poll_events & (POLLOUT | POLLWRNORM); 1068 } 1069 if ((poll_events & (POLLIN | POLLRDNORM)) != 0) { 1070 if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL 1071 && bioq_first(&softc->snd_bio_queue) == NULL) 1072 revents |= poll_events & (POLLIN | POLLRDNORM); 1073 } 1074 1075 if (softc->state != TARG_STATE_NORMAL) 1076 revents |= POLLERR; 1077 1078 if (revents == 0) { 1079 if (poll_events & (POLLOUT | POLLWRNORM)) 1080 selrecord(p, &softc->rcv_select); 1081 if (poll_events & (POLLIN | POLLRDNORM)) 1082 selrecord(p, &softc->snd_select); 1083 } 1084 splx(s); 1085 return (revents); 1086 } 1087 1088 static int 1089 targread(dev_t dev, struct uio *uio, int ioflag) 1090 { 1091 u_int unit; 1092 1093 unit = minor(dev); 1094 /* ioctl is the only supported operation of the control device */ 1095 if (TARG_IS_CONTROL_DEV(unit)) 1096 return EINVAL; 1097 1098 if (uio->uio_iovcnt == 0 1099 || uio->uio_iov->iov_len == 0) { 1100 /* EOF */ 1101 struct cam_periph *periph; 1102 struct targ_softc *softc; 1103 int s; 1104 1105 s = splcam(); 1106 periph = cam_extend_get(targperiphs, unit); 1107 if (periph == NULL) 1108 return (ENXIO); 1109 softc = (struct targ_softc *)periph->softc; 1110 softc->flags |= TARG_FLAG_SEND_EOF; 1111 splx(s); 1112 targrunqueue(periph, softc); 1113 return (0); 1114 } 1115 return(physread(dev, uio, ioflag)); 1116 } 1117 1118 static int 1119 targwrite(dev_t dev, struct uio *uio, int ioflag) 1120 { 1121 u_int unit; 1122 1123 unit = minor(dev); 1124 /* ioctl is the only supported operation of the control device */ 1125 if (TARG_IS_CONTROL_DEV(unit)) 1126 return EINVAL; 1127 1128 if (uio->uio_iovcnt == 0 1129 || uio->uio_iov->iov_len == 0) { 1130 /* EOF */ 1131 struct cam_periph *periph; 1132 struct targ_softc *softc; 1133 int s; 
1134 1135 s = splcam(); 1136 periph = cam_extend_get(targperiphs, unit); 1137 if (periph == NULL) 1138 return (ENXIO); 1139 softc = (struct targ_softc *)periph->softc; 1140 softc->flags |= TARG_FLAG_RECEIVE_EOF; 1141 splx(s); 1142 targrunqueue(periph, softc); 1143 return (0); 1144 } 1145 return(physwrite(dev, uio, ioflag)); 1146 } 1147 1148 /* 1149 * Actually translate the requested transfer into one the physical driver 1150 * can understand. The transfer is described by a buf and will include 1151 * only one physical transfer. 1152 */ 1153 static void 1154 targstrategy(struct bio *bp) 1155 { 1156 struct cam_periph *periph; 1157 struct targ_softc *softc; 1158 u_int unit; 1159 int s; 1160 1161 unit = minor(bp->bio_dev); 1162 1163 /* ioctl is the only supported operation of the control device */ 1164 if (TARG_IS_CONTROL_DEV(unit)) { 1165 bp->bio_error = EINVAL; 1166 goto bad; 1167 } 1168 1169 periph = cam_extend_get(targperiphs, unit); 1170 if (periph == NULL) { 1171 bp->bio_error = ENXIO; 1172 goto bad; 1173 } 1174 softc = (struct targ_softc *)periph->softc; 1175 1176 /* 1177 * Mask interrupts so that the device cannot be invalidated until 1178 * after we are in the queue. Otherwise, we might not properly 1179 * clean up one of the buffers. 1180 */ 1181 s = splbio(); 1182 1183 /* 1184 * If there is an exception pending, error out 1185 */ 1186 if (softc->state != TARG_STATE_NORMAL) { 1187 splx(s); 1188 if (softc->state == TARG_STATE_EXCEPTION 1189 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0) 1190 bp->bio_error = EBUSY; 1191 else 1192 bp->bio_error = ENXIO; 1193 goto bad; 1194 } 1195 1196 /* 1197 * Place it in the queue of buffers available for either 1198 * SEND or RECEIVE commands. 
1199 * 1200 */ 1201 bp->bio_resid = bp->bio_bcount; 1202 if (bp->bio_cmd == BIO_READ) { 1203 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1204 ("Queued a SEND buffer\n")); 1205 bioq_insert_tail(&softc->snd_bio_queue, bp); 1206 } else { 1207 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1208 ("Queued a RECEIVE buffer\n")); 1209 bioq_insert_tail(&softc->rcv_bio_queue, bp); 1210 } 1211 1212 splx(s); 1213 1214 /* 1215 * Attempt to use the new buffer to service any pending 1216 * target commands. 1217 */ 1218 targrunqueue(periph, softc); 1219 1220 return; 1221 bad: 1222 bp->bio_flags |= BIO_ERROR; 1223 1224 /* 1225 * Correctly set the buf to indicate a completed xfer 1226 */ 1227 bp->bio_resid = bp->bio_bcount; 1228 biodone(bp); 1229 } 1230 1231 static void 1232 targrunqueue(struct cam_periph *periph, struct targ_softc *softc) 1233 { 1234 struct ccb_queue *pending_queue; 1235 struct ccb_accept_tio *atio; 1236 struct bio_queue_head *bioq; 1237 struct bio *bp; 1238 struct targ_cmd_desc *desc; 1239 struct ccb_hdr *ccbh; 1240 int s; 1241 1242 s = splbio(); 1243 pending_queue = NULL; 1244 bioq = NULL; 1245 ccbh = NULL; 1246 /* Only run one request at a time to maintain data ordering. 
*/ 1247 if (softc->state != TARG_STATE_NORMAL 1248 || TAILQ_FIRST(&softc->work_queue) != NULL 1249 || TAILQ_FIRST(&softc->pending_queue) != NULL) { 1250 splx(s); 1251 return; 1252 } 1253 1254 if (((bp = bioq_first(&softc->snd_bio_queue)) != NULL 1255 || (softc->flags & TARG_FLAG_SEND_EOF) != 0) 1256 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) { 1257 1258 if (bp == NULL) 1259 softc->flags &= ~TARG_FLAG_SEND_EOF; 1260 else { 1261 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1262 ("De-Queued a SEND buffer %ld\n", 1263 bp->bio_bcount)); 1264 } 1265 bioq = &softc->snd_bio_queue; 1266 pending_queue = &softc->snd_ccb_queue; 1267 } else if (((bp = bioq_first(&softc->rcv_bio_queue)) != NULL 1268 || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0) 1269 && (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) { 1270 1271 if (bp == NULL) 1272 softc->flags &= ~TARG_FLAG_RECEIVE_EOF; 1273 else { 1274 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1275 ("De-Queued a RECEIVE buffer %ld\n", 1276 bp->bio_bcount)); 1277 } 1278 bioq = &softc->rcv_bio_queue; 1279 pending_queue = &softc->rcv_ccb_queue; 1280 } 1281 1282 if (pending_queue != NULL) { 1283 /* Process a request */ 1284 atio = (struct ccb_accept_tio *)ccbh; 1285 TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe); 1286 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr; 1287 desc->bp = bp; 1288 if (bp == NULL) { 1289 /* EOF */ 1290 desc->data = NULL; 1291 desc->data_increment = 0; 1292 desc->data_resid = 0; 1293 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1294 atio->ccb_h.flags |= CAM_DIR_NONE; 1295 } else { 1296 bioq_remove(bioq, bp); 1297 desc->data = &bp->bio_data[bp->bio_bcount - bp->bio_resid]; 1298 desc->data_increment = 1299 MIN(desc->data_resid, bp->bio_resid); 1300 } 1301 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1302 ("Buffer command: data %p: datacnt %d\n", 1303 desc->data, desc->data_increment)); 1304 TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h, 1305 periph_links.tqe); 1306 } 1307 atio = (struct ccb_accept_tio 
*)TAILQ_FIRST(&softc->work_queue); 1308 if (atio != NULL) { 1309 int priority; 1310 1311 priority = (atio->ccb_h.flags & CAM_DIS_DISCONNECT) ? 0 : 1; 1312 splx(s); 1313 xpt_schedule(periph, priority); 1314 } else 1315 splx(s); 1316 } 1317 1318 static void 1319 targstart(struct cam_periph *periph, union ccb *start_ccb) 1320 { 1321 struct targ_softc *softc; 1322 struct ccb_hdr *ccbh; 1323 struct ccb_accept_tio *atio; 1324 struct targ_cmd_desc *desc; 1325 struct ccb_scsiio *csio; 1326 targ_ccb_flags flags; 1327 int s; 1328 1329 softc = (struct targ_softc *)periph->softc; 1330 1331 s = splbio(); 1332 ccbh = TAILQ_FIRST(&softc->work_queue); 1333 if (periph->immediate_priority <= periph->pinfo.priority) { 1334 start_ccb->ccb_h.ccb_flags = TARG_CCB_WAITING; 1335 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, 1336 periph_links.sle); 1337 periph->immediate_priority = CAM_PRIORITY_NONE; 1338 splx(s); 1339 wakeup(&periph->ccb_list); 1340 } else if (ccbh == NULL) { 1341 splx(s); 1342 xpt_release_ccb(start_ccb); 1343 } else { 1344 TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe); 1345 splx(s); 1346 atio = (struct ccb_accept_tio*)ccbh; 1347 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr; 1348 1349 /* Is this a tagged request? */ 1350 flags = atio->ccb_h.flags & 1351 (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); 1352 1353 /* 1354 * If we are done with the transaction, tell the 1355 * controller to send status and perform a CMD_CMPLT. 1356 */ 1357 if (desc->data_resid == desc->data_increment) 1358 flags |= CAM_SEND_STATUS; 1359 1360 csio = &start_ccb->csio; 1361 cam_fill_ctio(csio, 1362 /*retries*/2, 1363 targdone, 1364 flags, 1365 (flags & CAM_TAG_ACTION_VALID)? 1366 MSG_SIMPLE_Q_TAG : 0, 1367 atio->tag_id, 1368 atio->init_id, 1369 desc->status, 1370 /*data_ptr*/desc->data_increment == 0 1371 ? 
NULL : desc->data, 1372 /*dxfer_len*/desc->data_increment, 1373 /*timeout*/desc->timeout); 1374 1375 if ((flags & CAM_SEND_STATUS) != 0 1376 && (desc->status == SCSI_STATUS_CHECK_COND 1377 || desc->status == SCSI_STATUS_CMD_TERMINATED)) { 1378 struct initiator_state *istate; 1379 1380 istate = &softc->istate[atio->init_id]; 1381 csio->sense_len = istate->sense_data.extra_len 1382 + offsetof(struct scsi_sense_data, 1383 extra_len); 1384 bcopy(&istate->sense_data, &csio->sense_data, 1385 csio->sense_len); 1386 csio->ccb_h.flags |= CAM_SEND_SENSE; 1387 } else { 1388 csio->sense_len = 0; 1389 } 1390 1391 start_ccb->ccb_h.ccb_flags = TARG_CCB_NONE; 1392 start_ccb->ccb_h.ccb_atio = atio; 1393 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1394 ("Sending a CTIO (flags 0x%x)\n", csio->ccb_h.flags)); 1395 TAILQ_INSERT_TAIL(&softc->pending_queue, &csio->ccb_h, 1396 periph_links.tqe); 1397 xpt_action(start_ccb); 1398 /* 1399 * If the queue was frozen waiting for the response 1400 * to this ATIO (for instance disconnection was disallowed), 1401 * then release it now that our response has been queued. 
1402 */ 1403 if ((atio->ccb_h.flags & CAM_DEV_QFRZN) != 0) { 1404 cam_release_devq(periph->path, 1405 /*relsim_flags*/0, 1406 /*reduction*/0, 1407 /*timeout*/0, 1408 /*getcount_only*/0); 1409 atio->ccb_h.flags &= ~CAM_DEV_QFRZN; 1410 } 1411 s = splbio(); 1412 ccbh = TAILQ_FIRST(&softc->work_queue); 1413 splx(s); 1414 } 1415 if (ccbh != NULL) 1416 targrunqueue(periph, softc); 1417 } 1418 1419 static void 1420 targdone(struct cam_periph *periph, union ccb *done_ccb) 1421 { 1422 struct targ_softc *softc; 1423 1424 softc = (struct targ_softc *)periph->softc; 1425 1426 if (done_ccb->ccb_h.ccb_flags == TARG_CCB_WAITING) { 1427 /* Caller will release the CCB */ 1428 wakeup(&done_ccb->ccb_h.cbfcnp); 1429 return; 1430 } 1431 1432 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1433 ("targdone %x\n", done_ccb->ccb_h.func_code)); 1434 1435 switch (done_ccb->ccb_h.func_code) { 1436 case XPT_ACCEPT_TARGET_IO: 1437 { 1438 struct ccb_accept_tio *atio; 1439 struct targ_cmd_desc *descr; 1440 struct initiator_state *istate; 1441 u_int8_t *cdb; 1442 int priority; 1443 1444 atio = &done_ccb->atio; 1445 descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr; 1446 istate = &softc->istate[atio->init_id]; 1447 cdb = atio->cdb_io.cdb_bytes; 1448 if (softc->state == TARG_STATE_TEARDOWN 1449 || atio->ccb_h.status == CAM_REQ_ABORTED) { 1450 freedescr(descr); 1451 free(done_ccb, M_DEVBUF); 1452 return; 1453 } 1454 1455 #ifdef CAMDEBUG 1456 { 1457 int i; 1458 char dcb[128]; 1459 for (dcb[0] = 0, i = 0; i < atio->cdb_len; i++) { 1460 snprintf(dcb, sizeof dcb, 1461 "%s %02x", dcb, cdb[i] & 0xff); 1462 } 1463 CAM_DEBUG(periph->path, 1464 CAM_DEBUG_PERIPH, ("cdb:%s\n", dcb)); 1465 } 1466 #endif 1467 if (atio->sense_len != 0) { 1468 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1469 ("ATIO with sense_len\n")); 1470 1471 /* 1472 * We had an error in the reception of 1473 * this command. Immediately issue a CA. 
1474 */ 1475 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1476 atio->ccb_h.flags |= CAM_DIR_NONE; 1477 descr->data_resid = 0; 1478 descr->data_increment = 0; 1479 descr->timeout = 5 * 1000; 1480 descr->status = SCSI_STATUS_CHECK_COND; 1481 copy_sense(softc, istate, (u_int8_t *)&atio->sense_data, 1482 atio->sense_len); 1483 set_ca_condition(periph, atio->init_id, CA_CMD_SENSE); 1484 } else if (istate->pending_ca == 0 1485 && istate->pending_ua != 0 1486 && cdb[0] != INQUIRY) { 1487 1488 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1489 ("pending_ca %d pending_ua %d\n", 1490 istate->pending_ca, istate->pending_ua)); 1491 1492 /* Pending UA, tell initiator */ 1493 /* Direction is always relative to the initator */ 1494 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1495 atio->ccb_h.flags |= CAM_DIR_NONE; 1496 descr->data_resid = 0; 1497 descr->data_increment = 0; 1498 descr->timeout = 5 * 1000; 1499 descr->status = SCSI_STATUS_CHECK_COND; 1500 fill_sense(softc, atio->init_id, 1501 SSD_CURRENT_ERROR, SSD_KEY_UNIT_ATTENTION, 1502 0x29, 1503 istate->pending_ua == UA_POWER_ON ? 1 : 2); 1504 set_ca_condition(periph, atio->init_id, CA_UNIT_ATTN); 1505 } else { 1506 /* 1507 * Save the current CA and UA status so 1508 * they can be used by this command. 1509 */ 1510 ua_types pending_ua; 1511 ca_types pending_ca; 1512 1513 pending_ua = istate->pending_ua; 1514 pending_ca = istate->pending_ca; 1515 1516 /* 1517 * As per the SCSI2 spec, any command that occurs 1518 * after a CA is reported, clears the CA. We must 1519 * also clear the UA condition, if any, that caused 1520 * the CA to occur assuming the UA is not for a 1521 * persistant condition. 1522 */ 1523 istate->pending_ca = CA_NONE; 1524 if (pending_ca == CA_UNIT_ATTN) 1525 istate->pending_ua = UA_NONE; 1526 1527 /* 1528 * Determine the type of incoming command and 1529 * setup our buffer for a response. 
1530 */ 1531 switch (cdb[0]) { 1532 case INQUIRY: 1533 { 1534 struct scsi_inquiry *inq; 1535 struct scsi_sense_data *sense; 1536 1537 inq = (struct scsi_inquiry *)cdb; 1538 sense = &istate->sense_data; 1539 descr->status = SCSI_STATUS_OK; 1540 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1541 ("Saw an inquiry!\n")); 1542 /* 1543 * Validate the command. We don't 1544 * support any VPD pages, so complain 1545 * if EVPD is set. 1546 */ 1547 if ((inq->byte2 & SI_EVPD) != 0 1548 || inq->page_code != 0) { 1549 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1550 atio->ccb_h.flags |= CAM_DIR_NONE; 1551 descr->data_resid = 0; 1552 descr->data_increment = 0; 1553 descr->timeout = 5 * 1000; 1554 descr->status = SCSI_STATUS_CHECK_COND; 1555 fill_sense(softc, atio->init_id, 1556 SSD_CURRENT_ERROR, 1557 SSD_KEY_ILLEGAL_REQUEST, 1558 /*asc*/0x24, /*ascq*/0x00); 1559 sense->extra_len = 1560 offsetof(struct scsi_sense_data, 1561 extra_bytes) 1562 - offsetof(struct scsi_sense_data, 1563 extra_len); 1564 set_ca_condition(periph, atio->init_id, 1565 CA_CMD_SENSE); 1566 } 1567 1568 if ((inq->byte2 & SI_EVPD) != 0) { 1569 sense->sense_key_spec[0] = 1570 SSD_SCS_VALID|SSD_FIELDPTR_CMD 1571 |SSD_BITPTR_VALID| /*bit value*/1; 1572 sense->sense_key_spec[1] = 0; 1573 sense->sense_key_spec[2] = 1574 offsetof(struct scsi_inquiry, 1575 byte2); 1576 } else if (inq->page_code != 0) { 1577 sense->sense_key_spec[0] = 1578 SSD_SCS_VALID|SSD_FIELDPTR_CMD; 1579 sense->sense_key_spec[1] = 0; 1580 sense->sense_key_spec[2] = 1581 offsetof(struct scsi_inquiry, 1582 page_code); 1583 } 1584 if (descr->status == SCSI_STATUS_CHECK_COND) 1585 break; 1586 1587 /* 1588 * Direction is always relative 1589 * to the initator. 
1590 */ 1591 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1592 atio->ccb_h.flags |= CAM_DIR_IN; 1593 descr->data = softc->inq_data; 1594 descr->data_resid = 1595 MIN(softc->inq_data_len, 1596 SCSI_CDB6_LEN(inq->length)); 1597 descr->data_increment = descr->data_resid; 1598 descr->timeout = 5 * 1000; 1599 break; 1600 } 1601 case TEST_UNIT_READY: 1602 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1603 atio->ccb_h.flags |= CAM_DIR_NONE; 1604 descr->data_resid = 0; 1605 descr->data_increment = 0; 1606 descr->timeout = 5 * 1000; 1607 descr->status = SCSI_STATUS_OK; 1608 break; 1609 case REQUEST_SENSE: 1610 { 1611 struct scsi_request_sense *rsense; 1612 struct scsi_sense_data *sense; 1613 1614 rsense = (struct scsi_request_sense *)cdb; 1615 sense = &istate->sense_data; 1616 if (pending_ca == 0) { 1617 fill_sense(softc, atio->init_id, 1618 SSD_CURRENT_ERROR, 1619 SSD_KEY_NO_SENSE, 0x00, 1620 0x00); 1621 CAM_DEBUG(periph->path, 1622 CAM_DEBUG_PERIPH, 1623 ("No pending CA!\n")); 1624 } 1625 /* 1626 * Direction is always relative 1627 * to the initator. 1628 */ 1629 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1630 atio->ccb_h.flags |= CAM_DIR_IN; 1631 descr->data = sense; 1632 descr->data_resid = 1633 offsetof(struct scsi_sense_data, 1634 extra_len) 1635 + sense->extra_len; 1636 descr->data_resid = 1637 MIN(descr->data_resid, 1638 SCSI_CDB6_LEN(rsense->length)); 1639 descr->data_increment = descr->data_resid; 1640 descr->timeout = 5 * 1000; 1641 descr->status = SCSI_STATUS_OK; 1642 break; 1643 } 1644 case RECEIVE: 1645 case SEND: 1646 { 1647 struct scsi_send_receive *sr; 1648 1649 sr = (struct scsi_send_receive *)cdb; 1650 1651 /* 1652 * Direction is always relative 1653 * to the initator. 
1654 */ 1655 atio->ccb_h.flags &= ~CAM_DIR_MASK; 1656 descr->data_resid = scsi_3btoul(sr->xfer_len); 1657 descr->timeout = 5 * 1000; 1658 descr->status = SCSI_STATUS_OK; 1659 if (cdb[0] == SEND) { 1660 atio->ccb_h.flags |= CAM_DIR_OUT; 1661 CAM_DEBUG(periph->path, 1662 CAM_DEBUG_PERIPH, 1663 ("Saw a SEND!\n")); 1664 atio->ccb_h.flags |= CAM_DIR_OUT; 1665 TAILQ_INSERT_TAIL(&softc->snd_ccb_queue, 1666 &atio->ccb_h, 1667 periph_links.tqe); 1668 selwakeup(&softc->snd_select); 1669 } else { 1670 atio->ccb_h.flags |= CAM_DIR_IN; 1671 CAM_DEBUG(periph->path, 1672 CAM_DEBUG_PERIPH, 1673 ("Saw a RECEIVE!\n")); 1674 TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue, 1675 &atio->ccb_h, 1676 periph_links.tqe); 1677 selwakeup(&softc->rcv_select); 1678 } 1679 /* 1680 * Attempt to satisfy this request with 1681 * a user buffer. 1682 */ 1683 targrunqueue(periph, softc); 1684 return; 1685 } 1686 default: 1687 /* 1688 * Queue for consumption by our userland 1689 * counterpart and transition to the exception 1690 * state. 1691 */ 1692 TAILQ_INSERT_TAIL(&softc->unknown_atio_queue, 1693 &atio->ccb_h, 1694 periph_links.tqe); 1695 softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO; 1696 targfireexception(periph, softc); 1697 return; 1698 } 1699 } 1700 1701 /* Queue us up to receive a Continue Target I/O ccb. 
*/ 1702 if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) { 1703 TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, 1704 periph_links.tqe); 1705 priority = 0; 1706 } else { 1707 TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h, 1708 periph_links.tqe); 1709 priority = 1; 1710 } 1711 xpt_schedule(periph, priority); 1712 break; 1713 } 1714 case XPT_CONT_TARGET_IO: 1715 { 1716 struct ccb_scsiio *csio; 1717 struct ccb_accept_tio *atio; 1718 struct targ_cmd_desc *desc; 1719 struct bio *bp; 1720 int error; 1721 1722 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1723 ("Received completed CTIO\n")); 1724 csio = &done_ccb->csio; 1725 atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio; 1726 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr; 1727 1728 TAILQ_REMOVE(&softc->pending_queue, &done_ccb->ccb_h, 1729 periph_links.tqe); 1730 1731 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1732 printf("CCB with error %x\n", done_ccb->ccb_h.status); 1733 error = targerror(done_ccb, 0, 0); 1734 if (error == ERESTART) 1735 break; 1736 /* 1737 * Right now we don't need to do anything 1738 * prior to unfreezing the queue... 1739 */ 1740 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1741 printf("Releasing Queue\n"); 1742 cam_release_devq(done_ccb->ccb_h.path, 1743 /*relsim_flags*/0, 1744 /*reduction*/0, 1745 /*timeout*/0, 1746 /*getcount_only*/0); 1747 } 1748 } else 1749 error = 0; 1750 1751 /* 1752 * If we shipped back sense data when completing 1753 * this command, clear the pending CA for it. 
1754 */ 1755 if (done_ccb->ccb_h.status & CAM_SENT_SENSE) { 1756 struct initiator_state *istate; 1757 1758 istate = &softc->istate[csio->init_id]; 1759 if (istate->pending_ca == CA_UNIT_ATTN) 1760 istate->pending_ua = UA_NONE; 1761 istate->pending_ca = CA_NONE; 1762 softc->istate[csio->init_id].pending_ca = CA_NONE; 1763 done_ccb->ccb_h.status &= ~CAM_SENT_SENSE; 1764 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1765 ("Sent Sense\n")); 1766 } 1767 done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE; 1768 1769 desc->data_increment -= csio->resid; 1770 desc->data_resid -= desc->data_increment; 1771 if ((bp = desc->bp) != NULL) { 1772 1773 bp->bio_resid -= desc->data_increment; 1774 bp->bio_error = error; 1775 1776 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1777 ("Buffer I/O Completed - Resid %ld:%d\n", 1778 bp->bio_resid, desc->data_resid)); 1779 /* 1780 * Send the buffer back to the client if 1781 * either the command has completed or all 1782 * buffer space has been consumed. 1783 */ 1784 if (desc->data_resid == 0 1785 || bp->bio_resid == 0 1786 || error != 0) { 1787 if (bp->bio_resid != 0) 1788 /* Short transfer */ 1789 bp->bio_flags |= BIO_ERROR; 1790 1791 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1792 ("Completing a buffer\n")); 1793 biodone(bp); 1794 desc->bp = NULL; 1795 } 1796 } 1797 1798 xpt_release_ccb(done_ccb); 1799 if (softc->state != TARG_STATE_TEARDOWN) { 1800 1801 if (desc->data_resid == 0) { 1802 /* 1803 * Send the original accept TIO back to the 1804 * controller to handle more work. 
1805 */ 1806 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 1807 ("Returning ATIO to target SIM\n")); 1808 atio->ccb_h.ccb_flags = TARG_CCB_NONE; 1809 xpt_action((union ccb *)atio); 1810 break; 1811 } 1812 1813 /* Queue us up for another buffer */ 1814 if (atio->cdb_io.cdb_bytes[0] == SEND) { 1815 if (desc->bp != NULL) 1816 TAILQ_INSERT_HEAD( 1817 &softc->snd_bio_queue.queue, 1818 bp, bio_queue); 1819 TAILQ_INSERT_HEAD(&softc->snd_ccb_queue, 1820 &atio->ccb_h, 1821 periph_links.tqe); 1822 } else { 1823 if (desc->bp != NULL) 1824 TAILQ_INSERT_HEAD( 1825 &softc->rcv_bio_queue.queue, 1826 bp, bio_queue); 1827 TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue, 1828 &atio->ccb_h, 1829 periph_links.tqe); 1830 } 1831 desc->bp = NULL; 1832 targrunqueue(periph, softc); 1833 } else { 1834 if (desc->bp != NULL) { 1835 bp->bio_flags |= BIO_ERROR; 1836 bp->bio_error = ENXIO; 1837 biodone(bp); 1838 } 1839 freedescr(desc); 1840 free(atio, M_DEVBUF); 1841 } 1842 break; 1843 } 1844 case XPT_IMMED_NOTIFY: 1845 { 1846 int frozen; 1847 1848 frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; 1849 if (softc->state == TARG_STATE_TEARDOWN) { 1850 SLIST_REMOVE(&softc->immed_notify_slist, 1851 &done_ccb->ccb_h, ccb_hdr, 1852 periph_links.sle); 1853 free(done_ccb, M_DEVBUF); 1854 } else if (done_ccb->ccb_h.status == CAM_REQ_ABORTED) { 1855 free(done_ccb, M_DEVBUF); 1856 } else { 1857 printf("Saw event %x:%x\n", done_ccb->ccb_h.status, 1858 done_ccb->cin.message_args[0]); 1859 /* Process error condition. 
*/ 1860 targinoterror(periph, softc, &done_ccb->cin); 1861 1862 /* Requeue for another immediate event */ 1863 xpt_action(done_ccb); 1864 } 1865 if (frozen != 0) 1866 cam_release_devq(periph->path, 1867 /*relsim_flags*/0, 1868 /*opening reduction*/0, 1869 /*timeout*/0, 1870 /*getcount_only*/0); 1871 break; 1872 } 1873 case XPT_DEBUG: 1874 wakeup(&done_ccb->ccb_h.cbfcnp); 1875 break; 1876 default: 1877 panic("targdone: Impossible xpt opcode %x encountered.", 1878 done_ccb->ccb_h.func_code); 1879 /* NOTREACHED */ 1880 break; 1881 } 1882 } 1883 1884 /* 1885 * Transition to the exception state and notify our symbiotic 1886 * userland process of the change. 1887 */ 1888 static void 1889 targfireexception(struct cam_periph *periph, struct targ_softc *softc) 1890 { 1891 /* 1892 * return all pending buffers with short read/write status so our 1893 * process unblocks, and do a selwakeup on any process queued 1894 * waiting for reads or writes. When the selwakeup is performed, 1895 * the waking process will wakeup, call our poll routine again, 1896 * and pick up the exception. 
1897 */ 1898 struct bio *bp; 1899 1900 if (softc->state != TARG_STATE_NORMAL) 1901 /* Already either tearing down or in exception state */ 1902 return; 1903 1904 softc->state = TARG_STATE_EXCEPTION; 1905 1906 while ((bp = bioq_first(&softc->snd_bio_queue)) != NULL) { 1907 bioq_remove(&softc->snd_bio_queue, bp); 1908 bp->bio_flags |= BIO_ERROR; 1909 biodone(bp); 1910 } 1911 1912 while ((bp = bioq_first(&softc->rcv_bio_queue)) != NULL) { 1913 bioq_remove(&softc->snd_bio_queue, bp); 1914 bp->bio_flags |= BIO_ERROR; 1915 biodone(bp); 1916 } 1917 1918 selwakeup(&softc->snd_select); 1919 selwakeup(&softc->rcv_select); 1920 } 1921 1922 static void 1923 targinoterror(struct cam_periph *periph, struct targ_softc *softc, 1924 struct ccb_immed_notify *inot) 1925 { 1926 cam_status status; 1927 int sense; 1928 1929 status = inot->ccb_h.status; 1930 sense = (status & CAM_AUTOSNS_VALID) != 0; 1931 status &= CAM_STATUS_MASK; 1932 switch (status) { 1933 case CAM_SCSI_BUS_RESET: 1934 set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD, 1935 UA_BUS_RESET); 1936 abort_pending_transactions(periph, 1937 /*init_id*/CAM_TARGET_WILDCARD, 1938 TARG_TAG_WILDCARD, EINTR, 1939 /*to_held_queue*/FALSE); 1940 softc->exceptions |= TARG_EXCEPT_BUS_RESET_SEEN; 1941 targfireexception(periph, softc); 1942 break; 1943 case CAM_BDR_SENT: 1944 set_unit_attention_cond(periph, /*init_id*/CAM_TARGET_WILDCARD, 1945 UA_BDR); 1946 abort_pending_transactions(periph, CAM_TARGET_WILDCARD, 1947 TARG_TAG_WILDCARD, EINTR, 1948 /*to_held_queue*/FALSE); 1949 softc->exceptions |= TARG_EXCEPT_BDR_RECEIVED; 1950 targfireexception(periph, softc); 1951 break; 1952 case CAM_MESSAGE_RECV: 1953 switch (inot->message_args[0]) { 1954 case MSG_INITIATOR_DET_ERR: 1955 break; 1956 case MSG_ABORT: 1957 break; 1958 case MSG_BUS_DEV_RESET: 1959 break; 1960 case MSG_ABORT_TAG: 1961 break; 1962 case MSG_CLEAR_QUEUE: 1963 break; 1964 case MSG_TERM_IO_PROC: 1965 break; 1966 default: 1967 break; 1968 } 1969 break; 1970 default: 
1971 break; 1972 } 1973 } 1974 1975 static int 1976 targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 1977 { 1978 struct cam_periph *periph; 1979 struct targ_softc *softc; 1980 struct ccb_scsiio *csio; 1981 struct initiator_state *istate; 1982 cam_status status; 1983 int frozen; 1984 int sense; 1985 int error; 1986 int on_held_queue; 1987 1988 periph = xpt_path_periph(ccb->ccb_h.path); 1989 softc = (struct targ_softc *)periph->softc; 1990 status = ccb->ccb_h.status; 1991 sense = (status & CAM_AUTOSNS_VALID) != 0; 1992 frozen = (status & CAM_DEV_QFRZN) != 0; 1993 status &= CAM_STATUS_MASK; 1994 on_held_queue = FALSE; 1995 csio = &ccb->csio; 1996 istate = &softc->istate[csio->init_id]; 1997 switch (status) { 1998 case CAM_REQ_ABORTED: 1999 if ((ccb->ccb_h.ccb_flags & TARG_CCB_ABORT_TO_HELDQ) != 0) { 2000 2001 /* 2002 * Place this CCB into the initiators 2003 * 'held' queue until the pending CA is cleared. 2004 * If there is no CA pending, reissue immediately. 2005 */ 2006 if (istate->pending_ca == 0) { 2007 ccb->ccb_h.ccb_flags = TARG_CCB_NONE; 2008 xpt_action(ccb); 2009 } else { 2010 ccb->ccb_h.ccb_flags = TARG_CCB_HELDQ; 2011 TAILQ_INSERT_TAIL(&softc->pending_queue, 2012 &ccb->ccb_h, 2013 periph_links.tqe); 2014 } 2015 /* The command will be retried at a later time. 
*/ 2016 on_held_queue = TRUE; 2017 error = ERESTART; 2018 break; 2019 } 2020 /* FALLTHROUGH */ 2021 case CAM_SCSI_BUS_RESET: 2022 case CAM_BDR_SENT: 2023 case CAM_REQ_TERMIO: 2024 case CAM_CMD_TIMEOUT: 2025 /* Assume we did not send any data */ 2026 csio->resid = csio->dxfer_len; 2027 error = EIO; 2028 break; 2029 case CAM_SEL_TIMEOUT: 2030 if (ccb->ccb_h.retry_count > 0) { 2031 ccb->ccb_h.retry_count--; 2032 error = ERESTART; 2033 } else { 2034 /* "Select or reselect failure" */ 2035 csio->resid = csio->dxfer_len; 2036 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR, 2037 SSD_KEY_HARDWARE_ERROR, 0x45, 0x00); 2038 set_ca_condition(periph, csio->init_id, CA_CMD_SENSE); 2039 error = EIO; 2040 } 2041 break; 2042 case CAM_UNCOR_PARITY: 2043 /* "SCSI parity error" */ 2044 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR, 2045 SSD_KEY_HARDWARE_ERROR, 0x47, 0x00); 2046 set_ca_condition(periph, csio->init_id, 2047 CA_CMD_SENSE); 2048 csio->resid = csio->dxfer_len; 2049 error = EIO; 2050 break; 2051 case CAM_NO_HBA: 2052 csio->resid = csio->dxfer_len; 2053 error = ENXIO; 2054 break; 2055 case CAM_SEQUENCE_FAIL: 2056 if (sense != 0) { 2057 copy_sense(softc, istate, (u_int8_t *)&csio->sense_data, 2058 csio->sense_len); 2059 set_ca_condition(periph, 2060 csio->init_id, 2061 CA_CMD_SENSE); 2062 } 2063 csio->resid = csio->dxfer_len; 2064 error = EIO; 2065 break; 2066 case CAM_IDE: 2067 /* "Initiator detected error message received" */ 2068 fill_sense(softc, csio->init_id, SSD_CURRENT_ERROR, 2069 SSD_KEY_HARDWARE_ERROR, 0x48, 0x00); 2070 set_ca_condition(periph, csio->init_id, 2071 CA_CMD_SENSE); 2072 csio->resid = csio->dxfer_len; 2073 error = EIO; 2074 break; 2075 case CAM_REQUEUE_REQ: 2076 printf("Requeue Request!\n"); 2077 error = ERESTART; 2078 break; 2079 default: 2080 csio->resid = csio->dxfer_len; 2081 error = EIO; 2082 panic("targerror: Unexpected status %x encounterd", status); 2083 /* NOTREACHED */ 2084 } 2085 2086 if (error == ERESTART || error == 0) { 2087 /* 
Clear the QFRZN flag as we will release the queue */ 2088 if (frozen != 0) 2089 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 2090 2091 if (error == ERESTART && !on_held_queue) 2092 xpt_action(ccb); 2093 2094 if (frozen != 0) 2095 cam_release_devq(ccb->ccb_h.path, 2096 /*relsim_flags*/0, 2097 /*opening reduction*/0, 2098 /*timeout*/0, 2099 /*getcount_only*/0); 2100 } 2101 return (error); 2102 } 2103 2104 static struct targ_cmd_desc* 2105 allocdescr() 2106 { 2107 struct targ_cmd_desc* descr; 2108 2109 /* Allocate the targ_descr structure */ 2110 descr = (struct targ_cmd_desc *)malloc(sizeof(*descr), 2111 M_DEVBUF, M_NOWAIT); 2112 if (descr == NULL) 2113 return (NULL); 2114 2115 bzero(descr, sizeof(*descr)); 2116 2117 /* Allocate buffer backing store */ 2118 descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT); 2119 if (descr->backing_store == NULL) { 2120 free(descr, M_DEVBUF); 2121 return (NULL); 2122 } 2123 descr->max_size = MAX_BUF_SIZE; 2124 return (descr); 2125 } 2126 2127 static void 2128 freedescr(struct targ_cmd_desc *descr) 2129 { 2130 free(descr->backing_store, M_DEVBUF); 2131 free(descr, M_DEVBUF); 2132 } 2133 2134 static void 2135 fill_sense(struct targ_softc *softc, u_int initiator_id, u_int error_code, 2136 u_int sense_key, u_int asc, u_int ascq) 2137 { 2138 struct initiator_state *istate; 2139 struct scsi_sense_data *sense; 2140 2141 istate = &softc->istate[initiator_id]; 2142 sense = &istate->sense_data; 2143 bzero(sense, sizeof(*sense)); 2144 sense->error_code = error_code; 2145 sense->flags = sense_key; 2146 sense->add_sense_code = asc; 2147 sense->add_sense_code_qual = ascq; 2148 2149 sense->extra_len = offsetof(struct scsi_sense_data, fru) 2150 - offsetof(struct scsi_sense_data, extra_len); 2151 } 2152 2153 static void 2154 copy_sense(struct targ_softc *softc, struct initiator_state *istate, 2155 u_int8_t *sense_buffer, size_t sense_len) 2156 { 2157 struct scsi_sense_data *sense; 2158 size_t copylen; 2159 2160 sense = &istate->sense_data; 2161 
copylen = sizeof(*sense); 2162 if (copylen > sense_len) 2163 copylen = sense_len; 2164 bcopy(sense_buffer, sense, copylen); 2165 } 2166 2167 static void 2168 set_unit_attention_cond(struct cam_periph *periph, 2169 u_int initiator_id, ua_types ua) 2170 { 2171 int start; 2172 int end; 2173 struct targ_softc *softc; 2174 2175 softc = (struct targ_softc *)periph->softc; 2176 if (initiator_id == CAM_TARGET_WILDCARD) { 2177 start = 0; 2178 end = MAX_INITIATORS - 1; 2179 } else 2180 start = end = initiator_id; 2181 2182 while (start <= end) { 2183 softc->istate[start].pending_ua = ua; 2184 start++; 2185 } 2186 } 2187 2188 static void 2189 set_ca_condition(struct cam_periph *periph, u_int initiator_id, ca_types ca) 2190 { 2191 struct targ_softc *softc; 2192 2193 softc = (struct targ_softc *)periph->softc; 2194 softc->istate[initiator_id].pending_ca = ca; 2195 abort_pending_transactions(periph, initiator_id, TARG_TAG_WILDCARD, 2196 /*errno*/0, /*to_held_queue*/TRUE); 2197 } 2198 2199 static void 2200 abort_pending_transactions(struct cam_periph *periph, u_int initiator_id, 2201 u_int tag_id, int errno, int to_held_queue) 2202 { 2203 struct ccb_abort cab; 2204 struct ccb_queue *atio_queues[3]; 2205 struct targ_softc *softc; 2206 struct ccb_hdr *ccbh; 2207 u_int i; 2208 2209 softc = (struct targ_softc *)periph->softc; 2210 2211 atio_queues[0] = &softc->work_queue; 2212 atio_queues[1] = &softc->snd_ccb_queue; 2213 atio_queues[2] = &softc->rcv_ccb_queue; 2214 2215 /* First address the ATIOs awaiting resources */ 2216 for (i = 0; i < (sizeof(atio_queues) / sizeof(*atio_queues)); i++) { 2217 struct ccb_queue *atio_queue; 2218 2219 if (to_held_queue) { 2220 /* 2221 * The device queue is frozen anyway, so there 2222 * is nothing for us to do. 
2223 */ 2224 continue; 2225 } 2226 atio_queue = atio_queues[i]; 2227 ccbh = TAILQ_FIRST(atio_queue); 2228 while (ccbh != NULL) { 2229 struct ccb_accept_tio *atio; 2230 struct targ_cmd_desc *desc; 2231 2232 atio = (struct ccb_accept_tio *)ccbh; 2233 desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr; 2234 ccbh = TAILQ_NEXT(ccbh, periph_links.tqe); 2235 2236 /* Only abort the CCBs that match */ 2237 if ((atio->init_id != initiator_id 2238 && initiator_id != CAM_TARGET_WILDCARD) 2239 || (tag_id != TARG_TAG_WILDCARD 2240 && ((atio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0 2241 || atio->tag_id != tag_id))) 2242 continue; 2243 2244 TAILQ_REMOVE(atio_queue, &atio->ccb_h, 2245 periph_links.tqe); 2246 2247 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 2248 ("Aborting ATIO\n")); 2249 if (desc->bp != NULL) { 2250 desc->bp->bio_flags |= BIO_ERROR; 2251 if (softc->state != TARG_STATE_TEARDOWN) 2252 desc->bp->bio_error = errno; 2253 else 2254 desc->bp->bio_error = ENXIO; 2255 biodone(desc->bp); 2256 desc->bp = NULL; 2257 } 2258 if (softc->state == TARG_STATE_TEARDOWN) { 2259 freedescr(desc); 2260 free(atio, M_DEVBUF); 2261 } else { 2262 /* Return the ATIO back to the controller */ 2263 atio->ccb_h.ccb_flags = TARG_CCB_NONE; 2264 xpt_action((union ccb *)atio); 2265 } 2266 } 2267 } 2268 2269 ccbh = TAILQ_FIRST(&softc->pending_queue); 2270 while (ccbh != NULL) { 2271 struct ccb_scsiio *csio; 2272 2273 csio = (struct ccb_scsiio *)ccbh; 2274 ccbh = TAILQ_NEXT(ccbh, periph_links.tqe); 2275 2276 /* Only abort the CCBs that match */ 2277 if ((csio->init_id != initiator_id 2278 && initiator_id != CAM_TARGET_WILDCARD) 2279 || (tag_id != TARG_TAG_WILDCARD 2280 && ((csio->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0 2281 || csio->tag_id != tag_id))) 2282 continue; 2283 2284 CAM_DEBUG(periph->path, CAM_DEBUG_PERIPH, 2285 ("Aborting CTIO\n")); 2286 2287 TAILQ_REMOVE(&softc->pending_queue, &csio->ccb_h, 2288 periph_links.tqe); 2289 2290 if (to_held_queue != 0) 2291 csio->ccb_h.ccb_flags |= 
TARG_CCB_ABORT_TO_HELDQ; 2292 xpt_setup_ccb(&cab.ccb_h, csio->ccb_h.path, /*priority*/1); 2293 cab.abort_ccb = (union ccb *)csio; 2294 xpt_action((union ccb *)&cab); 2295 if (cab.ccb_h.status != CAM_REQ_CMP) { 2296 xpt_print_path(cab.ccb_h.path); 2297 printf("Unable to abort CCB. Status %x\n", 2298 cab.ccb_h.status); 2299 } 2300 } 2301 } 2302