/*
 * Implementation of a simple Target Mode SCSI Processor Target driver for CAM.
 *
 * Copyright (c) 1998, 1999 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: scsi_target.c,v 1.13 1999/05/30 16:51:07 phk Exp $
 */
#include <stddef.h>	/* For offsetof */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/select.h>	/* For struct selinfo. */
#include <sys/uio.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_extend.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_pt.h>
#include <cam/scsi/scsi_targetio.h>
#include <cam/scsi/scsi_message.h>

typedef enum {
	TARG_STATE_NORMAL,
	TARG_STATE_EXCEPTION,
	TARG_STATE_TEARDOWN
} targ_state;

typedef enum {
	TARG_FLAG_NONE		= 0x00,
	TARG_FLAG_SEND_EOF	= 0x01,
	TARG_FLAG_RECEIVE_EOF	= 0x02,
	TARG_FLAG_LUN_ENABLED	= 0x04
} targ_flags;

typedef enum {
	TARG_CCB_WORKQ,
	TARG_CCB_WAITING
} targ_ccb_types;

#define MAX_ACCEPT	16
#define MAX_IMMEDIATE	16
#define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */
#define MAX_INITIATORS	16	/* XXX More for Fibre-Channel */

#define MIN(a, b) (((a) > (b)) ? (b) : (a))
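/*
 * Opens of the magic "control" minor number below bypass the per-instance
 * periph lookup; the control device exists only to service the instance
 * allocation ioctls (TARGCTLIOALLOCUNIT/TARGCTLIOFREEUNIT) in targioctl().
 * All other minors are mapped to a cam_periph through the targperiphs
 * extend array.
 */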
#define TARG_CONTROL_UNIT 0xffff00ff
#define TARG_IS_CONTROL_DEV(unit) ((unit) == TARG_CONTROL_UNIT)

/* Offsets into our private CCB area for storing accept information */
#define ccb_type	ppriv_field0
#define ccb_descr	ppriv_ptr1

/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
#define ccb_atio	ppriv_ptr1

TAILQ_HEAD(ccb_queue, ccb_hdr);

struct targ_softc {
	struct ccb_queue	 pending_queue;
	struct ccb_queue	 work_queue;
	struct ccb_queue	 snd_ccb_queue;
	struct ccb_queue	 rcv_ccb_queue;
	struct ccb_queue	 unknown_atio_queue;
	struct buf_queue_head	 snd_buf_queue;
	struct buf_queue_head	 rcv_buf_queue;
	struct devstat		 device_stats;
	struct selinfo		 snd_select;
	struct selinfo		 rcv_select;
	targ_state		 state;
	targ_flags		 flags;
	targ_exception		 exceptions;
	u_int			 init_level;
	u_int			 inq_data_len;
	struct scsi_inquiry_data *inq_data;
	struct ccb_accept_tio	*accept_tio_list;
	struct ccb_hdr_slist	 immed_notify_slist;
	struct initiator_state	 istate[MAX_INITIATORS];
};

struct targ_cmd_desc {
	struct ccb_accept_tio	*atio_link;
	u_int	  data_resid;	/* How much left to transfer */
	u_int	  data_increment;/* Amount to send before next disconnect */
	void	 *data;		/* The data.  Can be from backing_store or not */
	void	 *backing_store;/* Backing store allocated for this descriptor */
	struct buf *bp;		/* Buffer for this transfer */
	u_int	  max_size;	/* Size of backing_store */
	u_int32_t timeout;
	u_int8_t  status;	/* Status to return to initiator */
};

static	d_open_t	targopen;
static	d_close_t	targclose;
static	d_read_t	targread;
static	d_write_t	targwrite;
static	d_ioctl_t	targioctl;
static	d_poll_t	targpoll;
static	d_strategy_t	targstrategy;

#define TARG_CDEV_MAJOR	65
static struct cdevsw targ_cdevsw = {
	/* open */	targopen,
	/* close */	targclose,
	/* read */	targread,
	/* write */	targwrite,
	/* ioctl */	targioctl,
	/* stop */	nostop,
	/* reset */	noreset,
	/* devtotty */	nodevtotty,
	/* poll */	targpoll,
	/* mmap */	nommap,
	/* strategy */	targstrategy,
	/* name */	"targ",
	/* parms */	noparms,
	/* maj */	TARG_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* maxio */	0,
	/* bmaj */	-1
};

static int		targsendccb(struct cam_periph *periph, union ccb *ccb,
				    union ccb *inccb);
static periph_init_t	targinit;
static void		targasync(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static int		targallocinstance(struct ioc_alloc_unit *alloc_unit);
static int		targfreeinstance(struct ioc_alloc_unit *alloc_unit);
static cam_status	targenlun(struct cam_periph *periph);
static cam_status	targdislun(struct cam_periph *periph);
static periph_ctor_t	targctor;
static periph_dtor_t	targdtor;
static void		targrunqueue(struct cam_periph *periph,
				     struct targ_softc *softc);
static periph_start_t	targstart;
static void		targdone(struct cam_periph *periph,
				 union ccb *done_ccb);
static void		targfireexception(struct cam_periph *periph,
					  struct targ_softc *softc);
static int		targerror(union ccb *ccb, u_int32_t cam_flags,
				  u_int32_t sense_flags);
static struct targ_cmd_desc *allocdescr(void);
static void		freedescr(struct targ_cmd_desc *buf);
static void		fill_sense(struct scsi_sense_data *sense,
				   u_int error_code, u_int sense_key,
				   u_int asc, u_int ascq);
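/*
 * Rough data flow, as implemented below: an initiator SEND command arrives
 * as an ATIO and is parked on snd_ccb_queue (its data is consumed by user
 * read(2) calls, whose bufs sit on snd_buf_queue); a RECEIVE command is
 * parked on rcv_ccb_queue and is satisfied by user write(2) bufs on
 * rcv_buf_queue.  targrunqueue() pairs one ATIO with one buf (or with a
 * pending EOF flag), moves it to work_queue, and schedules targstart() to
 * issue the continue target I/O.  Completions return through targdone(),
 * which either queues the command for another buf or hands the ATIO back
 * to the controller.
 */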
static struct periph_driver targdriver =
{
	targinit, "targ",
	TAILQ_HEAD_INITIALIZER(targdriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, targdriver);

static struct extend_array *targperiphs;

static void
targinit(void)
{

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	targperiphs = cam_extend_new();
	if (targperiphs == NULL) {
		printf("targ: Failed to alloc extend array!\n");
		return;
	}

	/* If we were successful, register our devsw */
	cdevsw_add(&targ_cdevsw);
}

static void
targasync(void *callback_arg, u_int32_t code,
	  struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_PATH_DEREGISTERED:
	{
		/* XXX Implement */
		break;
	}
	case AC_BUS_RESET:
	{
		/* Flush transaction queue */
	}
	default:
		break;
	}
}

/* Attempt to enable our lun */
static cam_status
targenlun(struct cam_periph *periph)
{
	union ccb immed_ccb;
	struct targ_softc *softc;
	cam_status status;
	int i;

	softc = (struct targ_softc *)periph->softc;

	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0)
		return (CAM_REQ_CMP);

	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
	immed_ccb.ccb_h.func_code = XPT_EN_LUN;

	/* Don't need support for any vendor specific commands */
	immed_ccb.cel.grp6_len = 0;
	immed_ccb.cel.grp7_len = 0;
	immed_ccb.cel.enable = 1;
	xpt_action(&immed_ccb);
	status = immed_ccb.ccb_h.status;
	if (status != CAM_REQ_CMP) {
		xpt_print_path(periph->path);
		printf("targenlun - Enable Lun rejected with status 0x%x\n",
		       status);
		return (status);
	}

	softc->flags |= TARG_FLAG_LUN_ENABLED;

	/*
	 * Build up a buffer of accept target I/O
	 * operations for incoming selections.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_accept_tio *atio;

		atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF,
						      M_NOWAIT);
		if (atio == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		atio->ccb_h.ccb_descr = allocdescr();

		if (atio->ccb_h.ccb_descr == NULL) {
			free(atio, M_DEVBUF);
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.cbfcnp = targdone;
		xpt_action((union ccb *)atio);
		status = atio->ccb_h.status;
		if (status != CAM_REQ_INPROG) {
			xpt_print_path(periph->path);
			printf("Queue of atio failed\n");
			freedescr(atio->ccb_h.ccb_descr);
			free(atio, M_DEVBUF);
			break;
		}
		((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
		    softc->accept_tio_list;
		softc->accept_tio_list = atio;
	}

	if (i == 0) {
		xpt_print_path(periph->path);
		printf("targenlun - Could not allocate accept tio CCBs: "
		       "status = 0x%x\n", status);
		targdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Build up a buffer of immediate notify CCBs
	 * so the SIM can tell us of asynchronous target mode events.
	 */
	for (i = 0; i < MAX_IMMEDIATE; i++) {
		struct ccb_immed_notify *inot;

		inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF,
							M_NOWAIT);

		if (inot == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}

		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		inot->ccb_h.cbfcnp = targdone;
		xpt_action((union ccb *)inot);
		status = inot->ccb_h.status;
		if (status != CAM_REQ_INPROG) {
			printf("Queue of inot failed\n");
			free(inot, M_DEVBUF);
			break;
		}
		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
				  periph_links.sle);
	}

	if (i == 0) {
		xpt_print_path(periph->path);
		printf("targenlun - Could not allocate immediate notify CCBs: "
		       "status = 0x%x\n", status);
		targdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	return (CAM_REQ_CMP);
}

static cam_status
targdislun(struct cam_periph *periph)
{
	union ccb ccb;
	struct targ_softc *softc;
	struct ccb_accept_tio* atio;
	struct ccb_hdr *ccb_h;

	softc = (struct targ_softc *)periph->softc;
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0)
		return CAM_REQ_CMP;

	/* XXX Block for Continue I/O completion */

	/* Kill off all ACCEPT and IMMEDIATE CCBs */
	while ((atio = softc->accept_tio_list) != NULL) {

		softc->accept_tio_list =
		    ((struct targ_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)atio;
		xpt_action(&ccb);
	}

	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)ccb_h;
		xpt_action(&ccb);
	}

	/*
	 * Disable this lun.
	 */
	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.enable = 0;
	xpt_action(&ccb);

	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
		printf("targdislun - Disabling lun on controller failed "
		       "with status 0x%x\n", ccb.cel.ccb_h.status);
	else
		softc->flags &= ~TARG_FLAG_LUN_ENABLED;
	return (ccb.cel.ccb_h.status);
}

static cam_status
targctor(struct cam_periph *periph, void *arg)
{
	struct ccb_pathinq *cpi;
	struct targ_softc *softc;
	int i;

	cpi = (struct ccb_pathinq *)arg;

	/* Allocate our per-instance private storage */
	softc = (struct targ_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
	if (softc == NULL) {
		printf("targctor: unable to malloc softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	TAILQ_INIT(&softc->pending_queue);
	TAILQ_INIT(&softc->work_queue);
	TAILQ_INIT(&softc->snd_ccb_queue);
	TAILQ_INIT(&softc->rcv_ccb_queue);
	TAILQ_INIT(&softc->unknown_atio_queue);
	bufq_init(&softc->snd_buf_queue);
	bufq_init(&softc->rcv_buf_queue);
	softc->accept_tio_list = NULL;
	SLIST_INIT(&softc->immed_notify_slist);
	softc->state = TARG_STATE_NORMAL;
	periph->softc = softc;
	softc->init_level++;

	cam_extend_set(targperiphs, periph->unit_number, periph);

	/*
	 * We start out life with a UA to indicate power-on/reset.
	 */
	for (i = 0; i < MAX_INITIATORS; i++)
		softc->istate[i].pending_ua = UA_POWER_ON;

	/*
	 * Allocate an initial inquiry data buffer.  We might allow the
	 * user to override this later via an ioctl.
	 */
	softc->inq_data_len = sizeof(*softc->inq_data);
	softc->inq_data = malloc(softc->inq_data_len, M_DEVBUF, M_NOWAIT);
	if (softc->inq_data == NULL) {
		printf("targctor - Unable to malloc inquiry data\n");
		targdtor(periph);
		return (CAM_RESRC_UNAVAIL);
	}
	bzero(softc->inq_data, softc->inq_data_len);
	softc->inq_data->device = T_PROCESSOR | (SID_QUAL_LU_CONNECTED << 5);
	softc->inq_data->version = 2;
	softc->inq_data->response_format = 2; /* SCSI2 Inquiry Format */
	softc->inq_data->flags =
	    cpi->hba_inquiry & (PI_SDTR_ABLE|PI_WIDE_16|PI_WIDE_32);
	softc->inq_data->additional_length = softc->inq_data_len - 4;
	strncpy(softc->inq_data->vendor, "FreeBSD ", SID_VENDOR_SIZE);
	strncpy(softc->inq_data->product, "TM-PT ", SID_PRODUCT_SIZE);
	strncpy(softc->inq_data->revision, "0.0 ", SID_REVISION_SIZE);
	softc->init_level++;

	return (CAM_REQ_CMP);
}

static void
targdtor(struct cam_periph *periph)
{
	struct targ_softc *softc;

	softc = (struct targ_softc *)periph->softc;

	softc->state = TARG_STATE_TEARDOWN;

	targdislun(periph);

	cam_extend_release(targperiphs, periph->unit_number);

	switch (softc->init_level) {
	default:
		/* FALLTHROUGH */
	case 2:
		free(softc->inq_data, M_DEVBUF);
		/* FALLTHROUGH */
	case 1:
		free(softc, M_DEVBUF);
		break;
	case 0:
		panic("targdtor - impossible init level");
	}
}

static int
targopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct targ_softc *softc;
	u_int unit;
	cam_status status;
	int error;
	int s;

	unit = minor(dev);

	/* An open of the control device always succeeds */
	if (TARG_IS_CONTROL_DEV(unit))
		return 0;

	s = splsoftcam();
	periph = cam_extend_get(targperiphs, unit);
	if (periph == NULL) {
		splx(s);
		return (ENXIO);
	}
	if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) {
		splx(s);
		return (error);
	}

	softc = (struct targ_softc *)periph->softc;
	if ((softc->flags & TARG_FLAG_LUN_ENABLED) == 0) {
		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
			splx(s);
			cam_periph_unlock(periph);
			return(ENXIO);
		}
	}
	splx(s);

	status = targenlun(periph);
	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_RESRC_UNAVAIL:
		error = ENOMEM;
		break;
	case CAM_LUN_ALRDY_ENA:
		error = EADDRINUSE;
		break;
	default:
		error = ENXIO;
		break;
	}
	cam_periph_unlock(periph);
	return (error);
}

static int
targclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct targ_softc *softc;
	u_int unit;
	int s;
	int error;

	unit = minor(dev);

	/* A close of the control device always succeeds */
	if (TARG_IS_CONTROL_DEV(unit))
		return 0;

	s = splsoftcam();
	periph = cam_extend_get(targperiphs, unit);
	if (periph == NULL) {
		splx(s);
		return (ENXIO);
	}
	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
		splx(s);
		return (error);
	}
	softc = (struct targ_softc *)periph->softc;
	splx(s);

	targdislun(periph);

	cam_periph_unlock(periph);
	cam_periph_release(periph);

	return (0);
}

static int
targallocinstance(struct ioc_alloc_unit *alloc_unit)
{
	struct ccb_pathinq cpi;
	struct cam_path *path;
	struct cam_periph *periph;
	cam_status status;
	int free_path_on_return;
	int error;

	free_path_on_return = 0;
	status = xpt_create_path(&path, /*periph*/NULL,
				 alloc_unit->path_id,
				 alloc_unit->target_id,
				 alloc_unit->lun_id);
	if (status != CAM_REQ_CMP) {
		printf("Couldn't Allocate Path %x\n", status);
		goto fail;
	}
	free_path_on_return++;

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	status = cpi.ccb_h.status;

	if (status != CAM_REQ_CMP) {
		printf("Couldn't CPI %x\n", status);
		goto fail;
	}

	/* Can only alloc units on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		printf("Controller does not support target mode\n");
		status = CAM_PATH_INVALID;
		goto fail;
	}

	/* Ensure that we don't already have an instance for this unit. */
	if ((periph = cam_periph_find(path, "targ")) != NULL) {
		printf("LUN already enabled\n");
		status = CAM_LUN_ALRDY_ENA;
		goto fail;
	}

	/*
	 * Allocate a peripheral instance for
	 * this target instance.
	 */
	status = cam_periph_alloc(targctor, NULL, targdtor, targstart,
				  "targ", CAM_PERIPH_BIO, path, targasync,
				  0, &cpi);

fail:
	switch (status) {
	case CAM_REQ_CMP:
	{
		struct cam_periph *periph;

		if ((periph = cam_periph_find(path, "targ")) == NULL)
			panic("targallocinstance: Succeeded but no periph?");
		error = 0;
		alloc_unit->unit = periph->unit_number;
		break;
	}
	case CAM_RESRC_UNAVAIL:
		error = ENOMEM;
		break;
	case CAM_LUN_ALRDY_ENA:
		error = EADDRINUSE;
		break;
	default:
		printf("targallocinstance: Unexpected CAM status %x\n", status);
		/* FALLTHROUGH */
	case CAM_PATH_INVALID:
		error = ENXIO;
		break;
	case CAM_PROVIDE_FAIL:
		error = ENODEV;
		break;
	}

	if (free_path_on_return != 0)
		xpt_free_path(path);

	return (error);
}

static int
targfreeinstance(struct ioc_alloc_unit *alloc_unit)
{
	struct cam_path *path;
	struct cam_periph *periph;
	struct targ_softc *softc;
	cam_status status;
	int free_path_on_return;
	int error;

	periph = NULL;
	free_path_on_return = 0;
	status = xpt_create_path(&path, /*periph*/NULL,
				 alloc_unit->path_id,
				 alloc_unit->target_id,
				 alloc_unit->lun_id);
	if (status != CAM_REQ_CMP)
		goto fail;
	free_path_on_return++;

	/* Find our instance. */
	if ((periph = cam_periph_find(path, "targ")) == NULL) {
		xpt_print_path(path);
		status = CAM_PATH_INVALID;
		goto fail;
	}

	softc = (struct targ_softc *)periph->softc;

	if ((softc->flags & TARG_FLAG_LUN_ENABLED) != 0) {
		status = CAM_BUSY;
		goto fail;
	}

fail:
	if (free_path_on_return != 0)
		xpt_free_path(path);

	switch (status) {
	case CAM_REQ_CMP:
		if (periph != NULL)
			cam_periph_invalidate(periph);
		error = 0;
		break;
	case CAM_RESRC_UNAVAIL:
		error = ENOMEM;
		break;
	case CAM_LUN_ALRDY_ENA:
		error = EADDRINUSE;
		break;
	default:
		printf("targfreeinstance: Unexpected CAM status %x\n", status);
		/* FALLTHROUGH */
	case CAM_PATH_INVALID:
		error = ENODEV;
		break;
	}
	return (error);
}

static int
targioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct cam_periph *periph;
	struct targ_softc *softc;
	u_int unit;
	int error;

	unit = minor(dev);
	error = 0;
	if (TARG_IS_CONTROL_DEV(unit)) {
		switch (cmd) {
		case TARGCTLIOALLOCUNIT:
			error = targallocinstance((struct ioc_alloc_unit*)addr);
			break;
		case TARGCTLIOFREEUNIT:
			error = targfreeinstance((struct ioc_alloc_unit*)addr);
			break;
		default:
			error = EINVAL;
			break;
		}
		return (error);
	}

	periph = cam_extend_get(targperiphs, unit);
	if (periph == NULL)
		return (ENXIO);
	softc = (struct targ_softc *)periph->softc;
	switch (cmd) {
	case TARGIOCFETCHEXCEPTION:
		*((targ_exception *)addr) = softc->exceptions;
		break;
	case TARGIOCCLEAREXCEPTION:
	{
		targ_exception clear_mask;

		clear_mask = *((targ_exception *)addr);
		if ((clear_mask & TARG_EXCEPT_UNKNOWN_ATIO) != 0) {
			struct ccb_hdr *ccbh;

			ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
			if (ccbh != NULL) {
				TAILQ_REMOVE(&softc->unknown_atio_queue,
					     ccbh, periph_links.tqe);
				/* Requeue the ATIO back to the controller */
				xpt_action((union ccb *)ccbh);
				ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
			}
			if (ccbh != NULL)
				clear_mask &= ~TARG_EXCEPT_UNKNOWN_ATIO;
		}
		softc->exceptions &= ~clear_mask;
		if (softc->exceptions == TARG_EXCEPT_NONE
		 && softc->state == TARG_STATE_EXCEPTION) {
			softc->state = TARG_STATE_NORMAL;
			targrunqueue(periph, softc);
		}
		break;
	}
	case TARGIOCFETCHATIO:
	{
		struct ccb_hdr *ccbh;

		ccbh = TAILQ_FIRST(&softc->unknown_atio_queue);
		if (ccbh != NULL) {
			bcopy(ccbh, addr, sizeof(struct ccb_accept_tio));
		} else {
			error = ENOENT;
		}
		break;
	}
	case TARGIOCCOMMAND:
	{
		union ccb *inccb;
		union ccb *ccb;

		/*
		 * XXX JGibbs
		 * This code is lifted directly from the pass-thru driver.
		 * Perhaps this should be moved to a library????
		 */
		inccb = (union ccb *)addr;
		ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority);

		error = targsendccb(periph, ccb, inccb);

		xpt_release_ccb(ccb);

		break;
	}
	case TARGIOCGETISTATE:
	case TARGIOCSETISTATE:
	{
		struct ioc_initiator_state *ioc_istate;

		ioc_istate = (struct ioc_initiator_state *)addr;
		if (ioc_istate->initiator_id >= MAX_INITIATORS) {
			error = EINVAL;
			break;
		}
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("GET/SETISTATE for %d\n", ioc_istate->initiator_id));
		if (cmd == TARGIOCGETISTATE) {
			bcopy(&softc->istate[ioc_istate->initiator_id],
			      &ioc_istate->istate, sizeof(ioc_istate->istate));
		} else {
			bcopy(&ioc_istate->istate,
			      &softc->istate[ioc_istate->initiator_id],
			      sizeof(ioc_istate->istate));
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("pending_ca now %x\n",
				   softc->istate[ioc_istate->initiator_id].pending_ca));
		}
		break;
	}
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

/*
 * XXX JGibbs lifted from pass-thru driver.
 * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
 * should be the CCB that is copied in from the user.
 */
static int
targsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
{
	struct targ_softc *softc;
	struct cam_periph_map_info mapinfo;
	int error, need_unmap;

	softc = (struct targ_softc *)periph->softc;

	need_unmap = 0;

	/*
	 * There are some fields in the CCB header that need to be
	 * preserved, the rest we get from the user.
	 */
	xpt_merge_ccb(ccb, inccb);

	/*
	 * There's no way for the user to have a completion
	 * function, so we put our own completion function in here.
	 */
	ccb->ccb_h.cbfcnp = targdone;

	/*
	 * We only attempt to map the user memory into kernel space
	 * if they haven't passed in a physical memory pointer,
	 * and if there is actually an I/O operation to perform.
	 * Right now cam_periph_mapmem() only supports SCSI and device
	 * match CCBs.  For the SCSI CCBs, we only pass the CCB in if
	 * there's actually data to map.  cam_periph_mapmem() will do the
	 * right thing, even if there isn't data to map, but since CCBs
	 * without data are a reasonably common occurrence (e.g. test unit
	 * ready), it will save a few cycles if we check for it here.
	 */
	if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
	 && (((ccb->ccb_h.func_code == XPT_CONT_TARGET_IO)
	    && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
	  || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {

		bzero(&mapinfo, sizeof(mapinfo));

		error = cam_periph_mapmem(ccb, &mapinfo);

		/*
		 * cam_periph_mapmem returned an error, we can't continue.
		 * Return the error to the user.
		 */
		if (error)
			return(error);

		/*
		 * We successfully mapped the memory in, so we need to
		 * unmap it when the transaction is done.
		 */
		need_unmap = 1;
	}

	/*
	 * If the user wants us to perform any error recovery, then honor
	 * that request.  Otherwise, it's up to the user to perform any
	 * error recovery.
	 */
	error = cam_periph_runccb(ccb,
				  (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ?
				  targerror : NULL,
				  /* cam_flags */ 0,
				  /* sense_flags */SF_RETRY_UA,
				  &softc->device_stats);
	if (need_unmap != 0)
		cam_periph_unmapmem(ccb, &mapinfo);

	ccb->ccb_h.cbfcnp = NULL;
	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
	bcopy(ccb, inccb, sizeof(union ccb));

	return(error);
}

static int
targpoll(dev_t dev, int poll_events, struct proc *p)
{
	struct cam_periph *periph;
	struct targ_softc *softc;
	u_int unit;
	int revents;
	int s;

	unit = minor(dev);

	/* ioctl is the only supported operation of the control device */
	if (TARG_IS_CONTROL_DEV(unit))
		return EINVAL;

	periph = cam_extend_get(targperiphs, unit);
	if (periph == NULL)
		return (ENXIO);
	softc = (struct targ_softc *)periph->softc;

	revents = 0;
	s = splcam();
	if ((poll_events & (POLLOUT | POLLWRNORM)) != 0) {
		if (TAILQ_FIRST(&softc->rcv_ccb_queue) != NULL
		 && bufq_first(&softc->rcv_buf_queue) == NULL)
			revents |= poll_events & (POLLOUT | POLLWRNORM);
	}
	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
		if (TAILQ_FIRST(&softc->snd_ccb_queue) != NULL
		 && bufq_first(&softc->snd_buf_queue) == NULL)
			revents |= poll_events & (POLLIN | POLLRDNORM);
	}

	if (softc->state != TARG_STATE_NORMAL)
		revents |= POLLERR;

	if (revents == 0) {
		if (poll_events & (POLLOUT | POLLWRNORM))
			selrecord(p, &softc->rcv_select);
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(p, &softc->snd_select);
	}
	splx(s);
	return (revents);
}

static int
targread(dev_t dev, struct uio *uio, int ioflag)
{
	u_int unit;

	unit = minor(dev);
	/* ioctl is the only supported operation of the control device */
	if (TARG_IS_CONTROL_DEV(unit))
		return EINVAL;

	if (uio->uio_iovcnt == 0
	 || uio->uio_iov->iov_len == 0) {
		/* EOF */
		struct cam_periph *periph;
		struct targ_softc *softc;
		int s;

		s = splcam();
		periph = cam_extend_get(targperiphs, unit);
		if (periph == NULL) {
			splx(s);
			return (ENXIO);
		}
		softc = (struct targ_softc *)periph->softc;
		softc->flags |= TARG_FLAG_SEND_EOF;
		splx(s);
		targrunqueue(periph, softc);
		return (0);
	}
	return(physread(dev, uio, ioflag));
}

static int
targwrite(dev_t dev, struct uio *uio, int ioflag)
{
	u_int unit;

	unit = minor(dev);
	/* ioctl is the only supported operation of the control device */
	if (TARG_IS_CONTROL_DEV(unit))
		return EINVAL;

	if (uio->uio_iovcnt == 0
	 || uio->uio_iov->iov_len == 0) {
		/* EOF */
		struct cam_periph *periph;
		struct targ_softc *softc;
		int s;

		s = splcam();
		periph = cam_extend_get(targperiphs, unit);
		if (periph == NULL) {
			splx(s);
			return (ENXIO);
		}
		softc = (struct targ_softc *)periph->softc;
		softc->flags |= TARG_FLAG_RECEIVE_EOF;
		splx(s);
		targrunqueue(periph, softc);
		return (0);
	}
	return(physwrite(dev, uio, ioflag));
}
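/*
 * Note on the EOF convention used above: a read(2) or write(2) that supplies
 * no data (zero iov count or a zero-length first iov) never reaches physio().
 * Instead it latches TARG_FLAG_SEND_EOF or TARG_FLAG_RECEIVE_EOF so that
 * targrunqueue() can complete the currently queued SEND or RECEIVE command
 * with a zero-length, CAM_DIR_NONE transfer.
 */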
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
targstrategy(struct buf *bp)
{
	struct cam_periph *periph;
	struct targ_softc *softc;
	u_int unit;
	int s;

	unit = minor(bp->b_dev);

	/* ioctl is the only supported operation of the control device */
	if (TARG_IS_CONTROL_DEV(unit)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	periph = cam_extend_get(targperiphs, unit);
	if (periph == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	softc = (struct targ_softc *)periph->softc;

	/*
	 * Mask interrupts so that the device cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If there is an exception pending, error out
	 */
	if (softc->state != TARG_STATE_NORMAL) {
		splx(s);
		if (softc->state == TARG_STATE_EXCEPTION
		 && (softc->exceptions & TARG_EXCEPT_DEVICE_INVALID) == 0)
			bp->b_error = EBUSY;
		else
			bp->b_error = ENXIO;
		goto bad;
	}

	/*
	 * Place it in the queue of buffers available for either
	 * SEND or RECEIVE commands.
	 */
	bp->b_resid = bp->b_bcount;
	if ((bp->b_flags & B_READ) != 0) {
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Queued a SEND buffer\n"));
		bufq_insert_tail(&softc->snd_buf_queue, bp);
	} else {
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Queued a RECEIVE buffer\n"));
		bufq_insert_tail(&softc->rcv_buf_queue, bp);
	}

	splx(s);

	/*
	 * Attempt to use the new buffer to service any pending
	 * target commands.
	 */
	targrunqueue(periph, softc);

	return;
bad:
	bp->b_flags |= B_ERROR;

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

static void
targrunqueue(struct cam_periph *periph, struct targ_softc *softc)
{
	struct ccb_queue *pending_queue;
	struct ccb_accept_tio *atio;
	struct buf_queue_head *bufq;
	struct buf *bp;
	struct targ_cmd_desc *desc;
	struct ccb_hdr *ccbh;
	int s;

	s = splbio();
	pending_queue = NULL;
	bufq = NULL;
	ccbh = NULL;
	/* Only run one request at a time to maintain data ordering. */
	if (softc->state != TARG_STATE_NORMAL
	 || TAILQ_FIRST(&softc->work_queue) != NULL
	 || TAILQ_FIRST(&softc->pending_queue) != NULL) {
		splx(s);
		return;
	}

	if (((bp = bufq_first(&softc->snd_buf_queue)) != NULL
	   || (softc->flags & TARG_FLAG_SEND_EOF) != 0)
	 && (ccbh = TAILQ_FIRST(&softc->snd_ccb_queue)) != NULL) {

		if (bp == NULL)
			softc->flags &= ~TARG_FLAG_SEND_EOF;
		else {
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("De-Queued a SEND buffer %ld\n",
				   bp->b_bcount));
		}
		bufq = &softc->snd_buf_queue;
		pending_queue = &softc->snd_ccb_queue;
	} else if (((bp = bufq_first(&softc->rcv_buf_queue)) != NULL
		  || (softc->flags & TARG_FLAG_RECEIVE_EOF) != 0)
		&& (ccbh = TAILQ_FIRST(&softc->rcv_ccb_queue)) != NULL) {

		if (bp == NULL)
			softc->flags &= ~TARG_FLAG_RECEIVE_EOF;
		else {
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("De-Queued a RECEIVE buffer %ld\n",
				   bp->b_bcount));
		}
		bufq = &softc->rcv_buf_queue;
		pending_queue = &softc->rcv_ccb_queue;
	}

	if (pending_queue != NULL) {
		/* Process a request */
		atio = (struct ccb_accept_tio *)ccbh;
		TAILQ_REMOVE(pending_queue, ccbh, periph_links.tqe);
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;
		desc->bp = bp;
		if (bp == NULL) {
			/* EOF */
			desc->data = NULL;
			desc->data_increment = 0;
			desc->data_resid = 0;
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
		} else {
			bufq_remove(bufq, bp);
			desc->data = &bp->b_data[bp->b_bcount - bp->b_resid];
			desc->data_increment =
			    MIN(desc->data_resid, bp->b_resid);
		}
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Buffer command: data %p: datacnt %d\n",
			   desc->data, desc->data_increment));
		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
				  periph_links.tqe);
	}
	if (TAILQ_FIRST(&softc->work_queue) != NULL) {
		splx(s);
		xpt_schedule(periph, /*XXX priority*/1);
	} else
		splx(s);
}

static void
targstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct targ_softc *softc;
	struct ccb_hdr *ccbh;
	struct ccb_accept_tio *atio;
	struct targ_cmd_desc *desc;
	struct ccb_scsiio *csio;
	ccb_flags flags;
	int s;

	softc = (struct targ_softc *)periph->softc;

	s = splbio();
	ccbh = TAILQ_FIRST(&softc->work_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		start_ccb->ccb_h.ccb_type = TARG_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		splx(s);
		wakeup(&periph->ccb_list);
	} else if (ccbh == NULL) {
		splx(s);
		xpt_release_ccb(start_ccb);
	} else {
		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
		TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh,
				  periph_links.tqe);
		splx(s);
		atio = (struct ccb_accept_tio*)ccbh;
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;

		/* Is this a tagged request? */
		flags = atio->ccb_h.flags & (CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		/*
		 * If we are done with the transaction, tell the
		 * controller to send status and perform a CMD_CMPLT.
		 */
		if (desc->data_resid == desc->data_increment)
			flags |= CAM_SEND_STATUS;

		csio = &start_ccb->csio;
		cam_fill_ctio(csio,
			      /*retries*/2,
			      targdone,
			      flags,
			      /*tag_action*/MSG_SIMPLE_Q_TAG,
			      atio->tag_id,
			      atio->init_id,
			      desc->status,
			      /*data_ptr*/desc->data_increment == 0
					  ? NULL : desc->data,
			      /*dxfer_len*/desc->data_increment,
			      /*timeout*/desc->timeout);

		start_ccb->ccb_h.ccb_type = TARG_CCB_WORKQ;
		start_ccb->ccb_h.ccb_atio = atio;
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Sending a CTIO\n"));
		xpt_action(start_ccb);
		s = splbio();
		ccbh = TAILQ_FIRST(&softc->work_queue);
		splx(s);
	}
	if (ccbh != NULL)
		targrunqueue(periph, softc);
}

static void
targdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct targ_softc *softc;

	softc = (struct targ_softc *)periph->softc;

	if (done_ccb->ccb_h.ccb_type == TARG_CCB_WAITING) {
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}

	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targ_cmd_desc *descr;
		struct initiator_state *istate;
		u_int8_t *cdb;

		atio = &done_ccb->atio;
		descr = (struct targ_cmd_desc*)atio->ccb_h.ccb_descr;
		istate = &softc->istate[atio->init_id];
		cdb = atio->cdb_io.cdb_bytes;
		if (softc->state == TARG_STATE_TEARDOWN
		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
			freedescr(descr);
			free(done_ccb, M_DEVBUF);
			return;
		}

		if (istate->pending_ca == 0
		 && istate->pending_ua != 0
		 && cdb[0] != INQUIRY) {
			/* Pending UA, tell initiator */
			/* Direction is always relative to the initiator */
			istate->pending_ca = CA_UNIT_ATTN;
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
			descr->data_resid = 0;
			descr->data_increment = 0;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_CHECK_COND;
		} else {
			/*
			 * Save the current CA and UA status so
			 * they can be used by this command.
			 */
			ua_types pending_ua;
			ca_types pending_ca;

			pending_ua = istate->pending_ua;
			pending_ca = istate->pending_ca;

			/*
			 * As per the SCSI2 spec, any command that occurs
			 * after a CA is reported clears the CA.  If the
			 * command is not an inquiry, we are also supposed
			 * to clear the UA condition, if any, that caused
			 * the CA to occur, assuming the UA is not a
			 * persistent state.
			 */
			istate->pending_ca = CA_NONE;
			if ((pending_ca
			   & (CA_CMD_SENSE|CA_UNIT_ATTN)) == CA_UNIT_ATTN
			 && cdb[0] != INQUIRY)
				istate->pending_ua = UA_NONE;

			/*
			 * Determine the type of incoming command and
			 * setup our buffer for a response.
			 */
			switch (cdb[0]) {
			case INQUIRY:
			{
				struct scsi_inquiry *inq;
				struct scsi_sense_data *sense;

				inq = (struct scsi_inquiry *)cdb;
				sense = &istate->sense_data;
				CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
					  ("Saw an inquiry!\n"));
				/*
				 * Validate the command.  We don't
				 * support any VPD pages, so complain
				 * if EVPD is set.
				 */
				if ((inq->byte2 & SI_EVPD) != 0
				 || inq->page_code != 0) {
					istate->pending_ca = CA_CMD_SENSE;
					atio->ccb_h.flags &= ~CAM_DIR_MASK;
					atio->ccb_h.flags |= CAM_DIR_NONE;
					descr->data_resid = 0;
					descr->data_increment = 0;
					descr->status = SCSI_STATUS_CHECK_COND;
					fill_sense(sense,
						   SSD_CURRENT_ERROR,
						   SSD_KEY_ILLEGAL_REQUEST,
						   /*asc*/0x24, /*ascq*/0x00);
					sense->extra_len =
						offsetof(struct scsi_sense_data,
							 extra_bytes)
					      - offsetof(struct scsi_sense_data,
							 extra_len);
				}

				if ((inq->byte2 & SI_EVPD) != 0) {
					sense->sense_key_spec[0] =
					    SSD_SCS_VALID|SSD_FIELDPTR_CMD
					   |SSD_BITPTR_VALID| /*bit value*/1;
					sense->sense_key_spec[1] = 0;
					sense->sense_key_spec[2] =
					    offsetof(struct scsi_inquiry,
						     byte2);
					break;
				} else if (inq->page_code != 0) {
					sense->sense_key_spec[0] =
					    SSD_SCS_VALID|SSD_FIELDPTR_CMD;
					sense->sense_key_spec[1] = 0;
					sense->sense_key_spec[2] =
					    offsetof(struct scsi_inquiry,
						     page_code);
					break;
				}
				/*
				 * Direction is always relative
				 * to the initiator.
				 */
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				atio->ccb_h.flags |= CAM_DIR_IN;
				descr->data = softc->inq_data;
				descr->data_resid = MIN(softc->inq_data_len,
							inq->length);
				descr->data_increment = descr->data_resid;
				descr->timeout = 5 * 1000;
				descr->status = SCSI_STATUS_OK;
				break;
			}
			case TEST_UNIT_READY:
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				atio->ccb_h.flags |= CAM_DIR_NONE;
				descr->data_resid = 0;
				descr->data_increment = 0;
				descr->timeout = 5 * 1000;
				descr->status = SCSI_STATUS_OK;
				break;
			case REQUEST_SENSE:
			{
				struct scsi_request_sense *rsense;
				struct scsi_sense_data *sense;

				rsense = (struct scsi_request_sense *)cdb;
				sense = &istate->sense_data;
				if (pending_ca == 0) {
					fill_sense(sense, SSD_CURRENT_ERROR,
						   SSD_KEY_NO_SENSE, 0x00,
						   0x00);
					CAM_DEBUG(periph->path,
						  CAM_DEBUG_SUBTRACE,
						  ("No pending CA!\n"));
				} else if (pending_ca == CA_UNIT_ATTN) {
					u_int ascq;

					if (pending_ua == UA_POWER_ON)
						ascq = 0x1;
					else
						ascq = 0x2;
					fill_sense(sense, SSD_CURRENT_ERROR,
						   SSD_KEY_UNIT_ATTENTION,
						   0x29, ascq);
					CAM_DEBUG(periph->path,
						  CAM_DEBUG_SUBTRACE,
						  ("Pending UA!\n"));
				}
				/*
				 * Direction is always relative
				 * to the initiator.
				 */
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				atio->ccb_h.flags |= CAM_DIR_IN;
				descr->data = sense;
				descr->data_resid =
					offsetof(struct scsi_sense_data,
						 extra_len)
				      + sense->extra_len;
				descr->data_resid = MIN(descr->data_resid,
							rsense->length);
				descr->data_increment = descr->data_resid;
				descr->timeout = 5 * 1000;
				descr->status = SCSI_STATUS_OK;
				break;
			}
			case RECEIVE:
			case SEND:
			{
				struct scsi_send_receive *sr;

				sr = (struct scsi_send_receive *)cdb;

				/*
				 * Direction is always relative
				 * to the initiator.
				 */
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				descr->data_resid = scsi_3btoul(sr->xfer_len);
				descr->timeout = 5 * 1000;
				descr->status = SCSI_STATUS_OK;
				if (cdb[0] == SEND) {
					atio->ccb_h.flags |= CAM_DIR_OUT;
					CAM_DEBUG(periph->path,
						  CAM_DEBUG_SUBTRACE,
						  ("Saw a SEND!\n"));
					TAILQ_INSERT_TAIL(&softc->snd_ccb_queue,
							  &atio->ccb_h,
							  periph_links.tqe);
					selwakeup(&softc->snd_select);
				} else {
					atio->ccb_h.flags |= CAM_DIR_IN;
					CAM_DEBUG(periph->path,
						  CAM_DEBUG_SUBTRACE,
						  ("Saw a RECEIVE!\n"));
					TAILQ_INSERT_TAIL(&softc->rcv_ccb_queue,
							  &atio->ccb_h,
							  periph_links.tqe);
					selwakeup(&softc->rcv_select);
				}
				/*
				 * Attempt to satisfy this request with
				 * a user buffer.
				 */
				targrunqueue(periph, softc);
				return;
			}
			default:
				/*
				 * Queue for consumption by our userland
				 * counterpart and transition to the exception
				 * state.
				 */
				TAILQ_INSERT_TAIL(&softc->unknown_atio_queue,
						  &atio->ccb_h,
						  periph_links.tqe);
				softc->exceptions |= TARG_EXCEPT_UNKNOWN_ATIO;
				targfireexception(periph, softc);
				return;
			}
		}

		/* Queue us up to receive a Continue Target I/O ccb. */
		TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
				  periph_links.tqe);
		xpt_schedule(periph, /*priority*/1);
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targ_cmd_desc *desc;
		struct buf *bp;

		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Received completed CTIO\n"));
		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
		desc = (struct targ_cmd_desc *)atio->ccb_h.ccb_descr;

		TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h,
			     periph_links.tqe);

		/* XXX Check for errors */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		}
		desc->data_resid -= desc->data_increment;
		if ((bp = desc->bp) != NULL) {

			bp->b_resid -= desc->data_increment;
			bp->b_error = 0;

			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("Buffer I/O Completed - Resid %ld:%d\n",
				   bp->b_resid, desc->data_resid));
			/*
			 * Send the buffer back to the client if
			 * either the command has completed or all
			 * buffer space has been consumed.
			 */
			if (desc->data_resid == 0
			 || bp->b_resid == 0) {
				if (bp->b_resid != 0)
					/* Short transfer */
					bp->b_flags |= B_ERROR;

				CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
					  ("Completing a buffer\n"));
				biodone(bp);
				desc->bp = NULL;
			}
		}

		xpt_release_ccb(done_ccb);
		if (softc->state != TARG_STATE_TEARDOWN) {

			if (desc->data_resid == 0) {
				/*
				 * Send the original accept TIO back to the
				 * controller to handle more work.
				 */
				CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
					  ("Returning ATIO to target\n"));
				xpt_action((union ccb *)atio);
				break;
			}

			/* Queue us up for another buffer */
			if (atio->cdb_io.cdb_bytes[0] == SEND) {
				if (desc->bp != NULL)
					TAILQ_INSERT_HEAD(
						&softc->snd_buf_queue.queue,
						bp, b_act);
				TAILQ_INSERT_HEAD(&softc->snd_ccb_queue,
						  &atio->ccb_h,
						  periph_links.tqe);
			} else {
				if (desc->bp != NULL)
					TAILQ_INSERT_HEAD(
						&softc->rcv_buf_queue.queue,
						bp, b_act);
				TAILQ_INSERT_HEAD(&softc->rcv_ccb_queue,
						  &atio->ccb_h,
						  periph_links.tqe);
			}
			desc->bp = NULL;
			targrunqueue(periph, softc);
		} else {
			if (desc->bp != NULL) {
				bp->b_flags |= B_ERROR;
				bp->b_error = ENXIO;
				biodone(bp);
			}
			freedescr(desc);
			free(atio, M_DEVBUF);
		}
		break;
	}
	case XPT_IMMED_NOTIFY:
	{
		if (softc->state == TARG_STATE_TEARDOWN
		 || done_ccb->ccb_h.status == CAM_REQ_ABORTED)
			free(done_ccb, M_DEVBUF);
		break;
	}
	default:
		panic("targdone: Impossible xpt opcode %x encountered.",
		      done_ccb->ccb_h.func_code);
		/* NOTREACHED */
		break;
	}
}

/*
 * Transition to the exception state and notify our symbiotic
 * userland process of the change.
 */
static void
targfireexception(struct cam_periph *periph, struct targ_softc *softc)
{
	/*
	 * Return all pending buffers with short read/write status so our
	 * process unblocks, and do a selwakeup on any process queued
	 * waiting for reads or writes.  When the selwakeup is performed,
	 * the waking process will wakeup, call our poll routine again,
	 * and pick up the exception.
	 */
	struct buf *bp;

	if (softc->state != TARG_STATE_NORMAL)
		/* Already either tearing down or in exception state */
		return;

	softc->state = TARG_STATE_EXCEPTION;

	while ((bp = bufq_first(&softc->snd_buf_queue)) != NULL) {
		bufq_remove(&softc->snd_buf_queue, bp);
		bp->b_flags |= B_ERROR;
		biodone(bp);
	}

	while ((bp = bufq_first(&softc->rcv_buf_queue)) != NULL) {
		bufq_remove(&softc->rcv_buf_queue, bp);
		bp->b_flags |= B_ERROR;
		biodone(bp);
	}

	selwakeup(&softc->snd_select);
	selwakeup(&softc->rcv_select);
}

static int
targerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	return 0;
}

static struct targ_cmd_desc*
allocdescr(void)
{
	struct targ_cmd_desc* descr;

	/* Allocate the targ_descr structure */
	descr = (struct targ_cmd_desc *)malloc(sizeof(*descr),
					       M_DEVBUF, M_NOWAIT);
	if (descr == NULL)
		return (NULL);

	bzero(descr, sizeof(*descr));

	/* Allocate buffer backing store */
	descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT);
	if (descr->backing_store == NULL) {
		free(descr, M_DEVBUF);
		return (NULL);
	}
	descr->max_size = MAX_BUF_SIZE;
	return (descr);
}

static void
freedescr(struct targ_cmd_desc *descr)
{
	free(descr->backing_store, M_DEVBUF);
	free(descr, M_DEVBUF);
}

static void
fill_sense(struct scsi_sense_data *sense, u_int error_code, u_int sense_key,
	   u_int asc, u_int ascq)
{
	bzero(sense, sizeof(*sense));
	sense->error_code = error_code;
	sense->flags = sense_key;
	sense->add_sense_code = asc;
	sense->add_sense_code_qual = ascq;

	sense->extra_len = offsetof(struct scsi_sense_data, fru)
			 - offsetof(struct scsi_sense_data, extra_len);
}
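/*
 * Illustrative userland sketch (not compiled into the driver): one possible
 * way to exercise the interface above.  The device node names and the chosen
 * path/target/lun values are assumptions for the example; only the ioctl
 * names and structures come from <cam/scsi/scsi_targetio.h> as used in this
 * file.  Error handling is omitted for brevity.
 *
 *	struct ioc_alloc_unit alloc;
 *	targ_exception excepts;
 *	char buf[512];
 *	int ctl_fd, targ_fd;
 *	ssize_t n;
 *
 *	ctl_fd = open("/dev/targ.ctl", O_RDWR);     // assumed control node name
 *	alloc.path_id = 0;                          // assumed bus/target/lun
 *	alloc.target_id = 1;
 *	alloc.lun_id = 0;
 *	ioctl(ctl_fd, TARGCTLIOALLOCUNIT, &alloc);  // alloc.unit is filled in
 *
 *	targ_fd = open("/dev/targ0", O_RDWR);       // assumed instance node;
 *	                                            // open enables the LUN
 *	for (;;) {
 *		n = read(targ_fd, buf, sizeof(buf)); // data from initiator SENDs
 *		if (n <= 0) {
 *			ioctl(targ_fd, TARGIOCFETCHEXCEPTION, &excepts);
 *			break;
 *		}
 *	}
 *
 *	close(targ_fd);                             // close disables the LUN
 *	ioctl(ctl_fd, TARGCTLIOFREEUNIT, &alloc);
 */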