/*-
 * Copyright (C) 2003
 * 	Hidetoshi Shimokawa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Hidetoshi Shimokawa.
 *
 * 4. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#if __FreeBSD_version < 500000
#include <sys/devicestat.h>
#endif

#include <sys/bus.h>
#include <machine/bus.h>

#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/iec13213.h>
#include <dev/firewire/sbp.h>
#include <dev/firewire/fwmem.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#define SBP_TARG_RECV_LEN	8
#define MAX_INITIATORS		8
#define MAX_LUN			63
#define MAX_LOGINS		63
#define MAX_NODES		63
/*
 * management/command block agent registers
 *
 * BASE 0xffff f001 0000 management port
 * BASE 0xffff f001 0020 command port for login id 0
 * BASE 0xffff f001 0040 command port for login id 1
 *
 */
#define SBP_TARG_MGM		0x10000	/* offset from 0xffff f000 0000 */
#define SBP_TARG_BIND_HI	0xffff
#define SBP_TARG_BIND_LO(l)	(0xf0000000 + SBP_TARG_MGM + 0x20 * ((l) + 1))
#define SBP_TARG_BIND_START	(((u_int64_t)SBP_TARG_BIND_HI << 32) | \
    SBP_TARG_BIND_LO(-1))
#define SBP_TARG_BIND_END	(((u_int64_t)SBP_TARG_BIND_HI << 32) | \
    SBP_TARG_BIND_LO(MAX_LOGINS))
#define SBP_TARG_LOGIN_ID(lo)	(((lo) - SBP_TARG_BIND_LO(0)) / 0x20)

#define FETCH_MGM	0
#define FETCH_CMD	1
#define FETCH_POINTER	2

#define F_LINK_ACTIVE	(1 << 0)
#define F_ATIO_STARVED	(1 << 1)
#define F_LOGIN		(1 << 2)
#define F_HOLD		(1 << 3)
#define F_FREEZED	(1 << 4)

static MALLOC_DEFINE(M_SBP_TARG, "sbp_targ", "SBP-II/FireWire target mode");

static int debug = 0;

SYSCTL_INT(_debug, OID_AUTO, sbp_targ_debug, CTLFLAG_RW, &debug, 0,
	"SBP target mode debug flag");

struct sbp_targ_login {
	struct sbp_targ_lstate *lstate;
	struct fw_device *fwdev;
	struct sbp_login_res loginres;
	uint16_t fifo_hi;
	uint16_t last_hi;
	uint32_t fifo_lo;
	uint32_t last_lo;
	STAILQ_HEAD(, orb_info) orbs;
	STAILQ_ENTRY(sbp_targ_login) link;
	uint16_t hold_sec;
	uint16_t id;
	uint8_t flags;
	uint8_t spd;
	struct callout hold_callout;
};

struct sbp_targ_lstate {
	uint16_t lun;
	struct sbp_targ_softc *sc;
	struct cam_path *path;
	struct ccb_hdr_slist accept_tios;
	struct ccb_hdr_slist immed_notifies;
	struct crom_chunk model;
	uint32_t flags;
	STAILQ_HEAD(, sbp_targ_login) logins;
};

struct sbp_targ_softc {
	struct firewire_dev_comm fd;
	struct cam_sim *sim;
	struct cam_path *path;
	struct fw_bind fwb;
	int ndevs;
	int flags;
	struct crom_chunk unit;
	struct sbp_targ_lstate *lstate[MAX_LUN];
	struct sbp_targ_lstate *black_hole;
	struct sbp_targ_login *logins[MAX_LOGINS];
	struct mtx mtx;
};
#define SBP_LOCK(sc)	mtx_lock(&(sc)->mtx)
#define SBP_UNLOCK(sc)	mtx_unlock(&(sc)->mtx)

struct corb4 {
#if BYTE_ORDER == BIG_ENDIAN
	uint32_t n:1,
		  rq_fmt:2,
		  :1,
		  dir:1,
		  spd:3,
		  max_payload:4,
		  page_table_present:1,
		  page_size:3,
		  data_size:16;
#else
	uint32_t data_size:16,
		  page_size:3,
		  page_table_present:1,
		  max_payload:4,
		  spd:3,
		  dir:1,
		  :1,
		  rq_fmt:2,
		  n:1;
#endif
};

struct morb4 {
#if BYTE_ORDER == BIG_ENDIAN
	uint32_t n:1,
		  rq_fmt:2,
		  :9,
		  fun:4,
		  id:16;
#else
	uint32_t id:16,
		  fun:4,
		  :9,
		  rq_fmt:2,
		  n:1;
#endif
};


/*
 * Unrestricted page table format:
 * the segment length and the high part of the base address
 * are in the first 32 bits and the low part of the base
 * address is in the second.
 */
struct unrestricted_page_table_fmt {
	uint16_t segment_len;
	uint16_t segment_base_high;
	uint32_t segment_base_low;
};


struct orb_info {
	struct sbp_targ_softc *sc;
	struct fw_device *fwdev;
	struct sbp_targ_login *login;
	union ccb *ccb;
	struct ccb_accept_tio *atio;
	uint8_t state;
#define ORBI_STATUS_NONE	0
#define ORBI_STATUS_FETCH	1
#define ORBI_STATUS_ATIO	2
#define ORBI_STATUS_CTIO	3
#define ORBI_STATUS_STATUS	4
#define ORBI_STATUS_POINTER	5
#define ORBI_STATUS_ABORTED	7
	uint8_t refcount;
	uint16_t orb_hi;
	uint32_t orb_lo;
	uint32_t data_hi;
	uint32_t data_lo;
	struct corb4 orb4;
	STAILQ_ENTRY(orb_info) link;
	uint32_t orb[8];
	struct unrestricted_page_table_fmt *page_table;
	struct unrestricted_page_table_fmt *cur_pte;
	struct unrestricted_page_table_fmt *last_pte;
	uint32_t last_block_read;
	struct sbp_status status;
};

static char *orb_fun_name[] = {
	ORB_FUN_NAMES
};

static void sbp_targ_recv(struct fw_xfer *);
static void sbp_targ_fetch_orb(struct sbp_targ_softc *, struct fw_device *,
    uint16_t, uint32_t, struct sbp_targ_login *, int);
static void sbp_targ_xfer_pt(struct orb_info *);
static void sbp_targ_abort(struct sbp_targ_softc *, struct orb_info *);

static void
sbp_targ_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "sbp_targ", device_get_unit(parent));
}

static int
sbp_targ_probe(device_t dev)
{
	device_t pa;

	pa = device_get_parent(dev);
	if (device_get_unit(dev) != device_get_unit(pa)) {
		return (ENXIO);
	}

	device_set_desc(dev, "SBP-2/SCSI over FireWire target mode");
	return (0);
}

static void
sbp_targ_dealloc_login(struct sbp_targ_login *login)
{
	struct orb_info *orbi, *next;

	if (login == NULL) {
		printf("%s: login = NULL\n", __func__);
		return;
	}
	for (orbi = STAILQ_FIRST(&login->orbs); orbi != NULL; orbi = next) {
		next = STAILQ_NEXT(orbi, link);
		if (debug)
			printf("%s: free orbi %p\n", __func__, orbi);
		free(orbi, M_SBP_TARG);
		orbi = NULL;
	}
	callout_stop(&login->hold_callout);

	STAILQ_REMOVE(&login->lstate->logins, login, sbp_targ_login, link);
	login->lstate->sc->logins[login->id] = NULL;
	if (debug)
		printf("%s: free login %p\n", __func__, login);
	free((void *)login, M_SBP_TARG);
	login = NULL;
}

static void
sbp_targ_hold_expire(void *arg)
{
	struct sbp_targ_login *login;

	login = (struct sbp_targ_login *)arg;

	if (login->flags & F_HOLD) {
		printf("%s: login_id=%d expired\n", __func__, login->id);
		sbp_targ_dealloc_login(login);
	} else {
		printf("%s: login_id=%d not hold\n", __func__, login->id);
	}
}

static void
sbp_targ_post_busreset(void *arg)
{
	struct sbp_targ_softc *sc;
	struct crom_src *src;
	struct crom_chunk *root;
	struct crom_chunk *unit;
	struct sbp_targ_lstate *lstate;
	struct sbp_targ_login *login;
	int i;

	sc = (struct sbp_targ_softc *)arg;
	src = sc->fd.fc->crom_src;
	root = sc->fd.fc->crom_root;

	unit = &sc->unit;

	if ((sc->flags & F_FREEZED) == 0) {
		SBP_LOCK(sc);
		sc->flags |= F_FREEZED;
		xpt_freeze_simq(sc->sim, /*count*/1);
		SBP_UNLOCK(sc);
	} else {
		printf("%s: already freezed\n", __func__);
	}

	bzero(unit, sizeof(struct crom_chunk));

	crom_add_chunk(src, root, unit, CROM_UDIR);
	crom_add_entry(unit, CSRKEY_SPEC, CSRVAL_ANSIT10);
	crom_add_entry(unit, CSRKEY_VER, CSRVAL_T10SBP2);
	crom_add_entry(unit, CSRKEY_COM_SPEC, CSRVAL_ANSIT10);
	crom_add_entry(unit, CSRKEY_COM_SET, CSRVAL_SCSI);

	crom_add_entry(unit, CROM_MGM, SBP_TARG_MGM >> 2);
	crom_add_entry(unit, CSRKEY_UNIT_CH, (10<<8) | 8);

	for (i = 0; i < MAX_LUN; i ++) {
		lstate = sc->lstate[i];
		if (lstate == NULL)
			continue;
		crom_add_entry(unit, CSRKEY_FIRM_VER, 1);
		crom_add_entry(unit, CROM_LUN, i);
		crom_add_entry(unit, CSRKEY_MODEL, 1);
		crom_add_simple_text(src, unit, &lstate->model, "TargetMode");
	}

	/* Process for reconnection hold time */
	for (i = 0; i < MAX_LOGINS; i ++) {
		login = sc->logins[i];
		if (login == NULL)
			continue;
		sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs));
		if (login->flags & F_LOGIN) {
			login->flags |= F_HOLD;
			callout_reset(&login->hold_callout,
			    hz * login->hold_sec,
			    sbp_targ_hold_expire, (void *)login);
		}
	}
}

static void
sbp_targ_post_explore(void *arg)
{
	struct sbp_targ_softc *sc;

	sc = (struct sbp_targ_softc *)arg;
	SBP_LOCK(sc);
	sc->flags &= ~F_FREEZED;
	xpt_release_simq(sc->sim, /*run queue*/TRUE);
	SBP_UNLOCK(sc);
	return;
}

static cam_status
sbp_targ_find_devs(struct sbp_targ_softc *sc, union ccb *ccb,
    struct sbp_targ_lstate **lstate, int notfound_failure)
{
	u_int lun;

	/* XXX 0 is the only valid target_id */
	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		*lstate = sc->black_hole;
		if (debug)
			printf("setting black hole for this target id(%d)\n",
			    ccb->ccb_h.target_id);
		return (CAM_REQ_CMP);
	}

	lun = ccb->ccb_h.target_lun;
	if (lun >= MAX_LUN)
		return (CAM_LUN_INVALID);

	*lstate = sc->lstate[lun];

	if (notfound_failure != 0 && *lstate == NULL) {
		if (debug)
			printf("%s: lstate for lun is invalid, target(%d), lun(%d)\n",
			    __func__, ccb->ccb_h.target_id, lun);
		return (CAM_PATH_INVALID);
	} else
		if (debug)
			printf("%s: setting lstate for tgt(%d) lun(%d)\n",
			    __func__, ccb->ccb_h.target_id, lun);

	return (CAM_REQ_CMP);
}

static void
sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb)
{
	struct ccb_en_lun *cel = &ccb->cel;
	struct sbp_targ_lstate *lstate;
	cam_status status;

	status = sbp_targ_find_devs(sc, ccb, &lstate, 0);
	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	if (cel->enable != 0) {
		if (lstate != NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Lun already enabled\n");
			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
			return;
		}
		if (cel->grp6_len != 0 || cel->grp7_len != 0) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printf("Non-zero Group Codes\n");
			return;
		}
		lstate = (struct sbp_targ_lstate *)
		    malloc(sizeof(*lstate), M_SBP_TARG, M_NOWAIT | M_ZERO);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		} else {
			if (debug)
				printf("%s: malloc'd lstate %p\n", __func__, lstate);
		}
		if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD) {
			sc->black_hole = lstate;
			if (debug)
				printf("Blackhole set due to target id == %d\n",
				    ccb->ccb_h.target_id);
		} else
			sc->lstate[ccb->ccb_h.target_lun] = lstate;

		memset(lstate, 0, sizeof(*lstate));
		lstate->sc = sc;
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
		    xpt_path_path_id(ccb->ccb_h.path),
		    xpt_path_target_id(ccb->ccb_h.path),
		    xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			free(lstate, M_SBP_TARG);
			lstate = NULL;
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		STAILQ_INIT(&lstate->logins);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printf("Lun now enabled for target mode\n");
		/* bus reset */
		sc->fd.fc->ibr(sc->fd.fc);
	} else {
		struct sbp_targ_login *login, *next;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			printf("Invalid lstate for this target\n");
			return;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printf("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printf("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			printf("status != CAM_REQ_CMP\n");
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printf("Target mode disabled\n");
		xpt_free_path(lstate->path);

		for (login = STAILQ_FIRST(&lstate->logins); login != NULL;
		    login = next) {
			next = STAILQ_NEXT(login, link);
			sbp_targ_dealloc_login(login);
		}

		if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD)
			sc->black_hole = NULL;
		else
			sc->lstate[ccb->ccb_h.target_lun] = NULL;
		if (debug)
			printf("%s: free lstate %p\n", __func__, lstate);
		free(lstate, M_SBP_TARG);
		lstate = NULL;

		/* bus reset */
		sc->fd.fc->ibr(sc->fd.fc);
	}
}

static void
sbp_targ_send_lstate_events(struct sbp_targ_softc *sc,
    struct sbp_targ_lstate *lstate)
{
#if 0
	struct ccb_hdr *ccbh;
	struct ccb_immediate_notify *inot;

	printf("%s: not implemented yet\n", __func__);
#endif
}


static __inline void
sbp_targ_remove_orb_info_locked(struct sbp_targ_login *login,
    struct orb_info *orbi)
{
	STAILQ_REMOVE(&login->orbs, orbi, orb_info, link);
}

static __inline void
sbp_targ_remove_orb_info(struct sbp_targ_login *login, struct orb_info *orbi)
{
	SBP_LOCK(orbi->sc);
	STAILQ_REMOVE(&login->orbs, orbi, orb_info, link);
	SBP_UNLOCK(orbi->sc);
}

/*
 * tag_id/init_id encoding
 *
 * tag_id and init_id are only 32 bits each, and scsi_target can handle
 * only a very limited number (up to 15) of init_ids, so we cannot encode
 * the 48-bit ORB address and the 64-bit EUI-64 directly into these
 * variables.
 *
 * Instead, tag_id carries the lower 32 bits of the ORB address and
 * init_id carries the login_id.
 */

static struct orb_info *
sbp_targ_get_orb_info(struct sbp_targ_lstate *lstate,
    u_int tag_id, u_int init_id)
{
	struct sbp_targ_login *login;
	struct orb_info *orbi;

	login = lstate->sc->logins[init_id];
	if (login == NULL) {
		printf("%s: no such login\n", __func__);
		return (NULL);
	}
	STAILQ_FOREACH(orbi, &login->orbs, link)
		if (orbi->orb_lo == tag_id)
			goto found;
	printf("%s: orb not found tag_id=0x%08x init_id=%d\n",
	    __func__, tag_id, init_id);
	return (NULL);
found:
	return (orbi);
}

static void
sbp_targ_abort(struct sbp_targ_softc *sc, struct orb_info *orbi)
{
	struct orb_info *norbi;

	SBP_LOCK(sc);
	for (; orbi != NULL; orbi = norbi) {
		printf("%s: status=%d ccb=%p\n", __func__, orbi->state, orbi->ccb);
		norbi = STAILQ_NEXT(orbi, link);
		if (orbi->state != ORBI_STATUS_ABORTED) {
			if (orbi->ccb != NULL) {
				orbi->ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(orbi->ccb);
				orbi->ccb = NULL;
			}
			if (orbi->state <= ORBI_STATUS_ATIO) {
				sbp_targ_remove_orb_info_locked(orbi->login, orbi);
				if (debug)
					printf("%s: free orbi %p\n", __func__, orbi);
				free(orbi, M_SBP_TARG);
				orbi = NULL;
			} else
				orbi->state = ORBI_STATUS_ABORTED;
		}
	}
	SBP_UNLOCK(sc);
}

static void
sbp_targ_free_orbi(struct fw_xfer *xfer)
{
	struct orb_info *orbi;

	if (xfer->resp != 0) {
		/* XXX */
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
	}
	orbi = (struct orb_info *)xfer->sc;
	if (orbi->page_table != NULL) {
		if (debug)
			printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
		free(orbi->page_table, M_SBP_TARG);
		orbi->page_table = NULL;
	}
	if (debug)
		printf("%s: free orbi %p\n", __func__, orbi);
	free(orbi, M_SBP_TARG);
	orbi = NULL;
	fw_xfer_free(xfer);
}

static void
sbp_targ_status_FIFO(struct orb_info *orbi,
    uint32_t fifo_hi, uint32_t fifo_lo, int dequeue)
{
	struct fw_xfer *xfer;

	if (dequeue)
		sbp_targ_remove_orb_info(orbi->login, orbi);

	xfer = fwmem_write_block(orbi->fwdev, (void *)orbi,
	    /*spd*/FWSPD_S400, fifo_hi, fifo_lo,
	    sizeof(uint32_t) * (orbi->status.len + 1), (char *)&orbi->status,
	    sbp_targ_free_orbi);

	if (xfer == NULL) {
		/* XXX */
		printf("%s: xfer == NULL\n", __func__);
	}
}

/*
 * Build the SBP-2 status block for this ORB from the CCB's SCSI status
 * and sense data, then post it to the initiator's status FIFO.
 */
static void
sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
{
	struct sbp_status *sbp_status;
#if 0
	struct orb_info *norbi;
#endif

	sbp_status = &orbi->status;

	orbi->state = ORBI_STATUS_STATUS;

	sbp_status->resp = 0;	/* XXX */
	sbp_status->status = 0;	/* XXX */
	sbp_status->dead = 0;	/* XXX */

	ccb->ccb_h.status = CAM_REQ_CMP;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
		if (debug)
			printf("%s: STATUS_OK\n", __func__);
		sbp_status->len = 1;
		break;
	case SCSI_STATUS_CHECK_COND:
		if (debug)
			printf("%s: STATUS SCSI_STATUS_CHECK_COND\n", __func__);
		goto process_scsi_status;
	case SCSI_STATUS_BUSY:
		if (debug)
			printf("%s: STATUS SCSI_STATUS_BUSY\n", __func__);
		goto process_scsi_status;
	case SCSI_STATUS_CMD_TERMINATED:
process_scsi_status:
	{
		struct sbp_cmd_status *sbp_cmd_status;
		struct scsi_sense_data *sense;
		int error_code, sense_key, asc, ascq;
		uint8_t stream_bits;
		uint8_t sks[3];
		uint64_t info;
		int64_t sinfo;
		int sense_len;

		sbp_cmd_status = (struct sbp_cmd_status *)&sbp_status->data[0];
		sbp_cmd_status->status = ccb->csio.scsi_status;
		sense = &ccb->csio.sense_data;

#if 0	/* XXX What should we do? */
#if 0
		sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
#else
		norbi = STAILQ_NEXT(orbi, link);
		while (norbi) {
			printf("%s: status=%d\n", __func__, norbi->state);
			if (norbi->ccb != NULL) {
				norbi->ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(norbi->ccb);
				norbi->ccb = NULL;
			}
			sbp_targ_remove_orb_info_locked(orbi->login, norbi);
			norbi = STAILQ_NEXT(norbi, link);
			free(norbi, M_SBP_TARG);
		}
#endif
#endif

		sense_len = ccb->csio.sense_len - ccb->csio.sense_resid;
		scsi_extract_sense_len(sense, sense_len, &error_code,
		    &sense_key, &asc, &ascq, /*show_errors*/ 0);

		switch (error_code) {
		case SSD_CURRENT_ERROR:
		case SSD_DESC_CURRENT_ERROR:
			sbp_cmd_status->sfmt = SBP_SFMT_CURR;
			break;
		default:
			sbp_cmd_status->sfmt = SBP_SFMT_DEFER;
			break;
		}

		if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info,
		    &sinfo) == 0) {
			uint32_t info_trunc;
			sbp_cmd_status->valid = 1;
			info_trunc = info;

			sbp_cmd_status->info = htobe32(info_trunc);
		} else {
			sbp_cmd_status->valid = 0;
		}

		sbp_cmd_status->s_key = sense_key;

		if (scsi_get_stream_info(sense, sense_len, NULL,
		    &stream_bits) == 0) {
			sbp_cmd_status->mark =
			    (stream_bits & SSD_FILEMARK) ? 1 : 0;
			sbp_cmd_status->eom =
			    (stream_bits & SSD_EOM) ? 1 : 0;
			sbp_cmd_status->ill_len =
			    (stream_bits & SSD_ILI) ? 1 : 0;
		} else {
			sbp_cmd_status->mark = 0;
			sbp_cmd_status->eom = 0;
			sbp_cmd_status->ill_len = 0;
		}


		/* add_sense_code(_qual), info, cmd_spec_info */
		sbp_status->len = 4;

		if (scsi_get_sense_info(sense, sense_len, SSD_DESC_COMMAND,
		    &info, &sinfo) == 0) {
			uint32_t cmdspec_trunc;

			cmdspec_trunc = info;

			sbp_cmd_status->cdb = htobe32(cmdspec_trunc);
		}

		sbp_cmd_status->s_code = asc;
		sbp_cmd_status->s_qlfr = ascq;

		if (scsi_get_sense_info(sense, sense_len, SSD_DESC_FRU, &info,
		    &sinfo) == 0) {
			sbp_cmd_status->fru = (uint8_t)info;
			sbp_status->len = 5;
		} else {
			sbp_cmd_status->fru = 0;
		}

		if (scsi_get_sks(sense, sense_len, sks) == 0) {
			bcopy(sks, &sbp_cmd_status->s_keydep[0], sizeof(sks));
			sbp_status->len = 5;
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}

		break;
	}
	default:
		printf("%s: unknown scsi status 0x%x\n", __func__,
		    sbp_status->status);
	}


	sbp_targ_status_FIFO(orbi,
	    orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
}

/*
 * Invoked as a callback handler from fwmem_read/write_block.
 *
 * Process the completion of a read/write of the initiator's address
 * space and pass the status on to the backend target.  Each completed
 * (possibly partial) read/write for a CCB decrements the orbi's
 * refcount; when it reaches zero the whole transfer is complete.
 */
static void
sbp_targ_cam_done(struct fw_xfer *xfer)
{
	struct orb_info *orbi;
	union ccb *ccb;

	orbi = (struct orb_info *)xfer->sc;

	if (debug)
		printf("%s: resp=%d refcount=%d\n", __func__,
		    xfer->resp, orbi->refcount);

	if (xfer->resp != 0) {
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
		orbi->status.resp = SBP_TRANS_FAIL;
		orbi->status.status = OBJ_DATA | SBE_TIMEOUT/*XXX*/;
		orbi->status.dead = 1;
		sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
	}

	orbi->refcount --;

	ccb = orbi->ccb;
	if (orbi->refcount == 0) {
		orbi->ccb = NULL;
		if (orbi->state == ORBI_STATUS_ABORTED) {
			if (debug)
				printf("%s: orbi aborted\n", __func__);
			sbp_targ_remove_orb_info(orbi->login, orbi);
			if (orbi->page_table != NULL) {
				if (debug)
					printf("%s: free orbi->page_table %p\n",
					    __func__, orbi->page_table);
				free(orbi->page_table, M_SBP_TARG);
			}
			if (debug)
				printf("%s: free orbi %p\n", __func__, orbi);
			free(orbi, M_SBP_TARG);
			orbi = NULL;
		} else if (orbi->status.resp == ORBI_STATUS_NONE) {
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				if (debug)
					printf("%s: CAM_SEND_STATUS set %0x\n", __func__, ccb->ccb_h.flags);
				sbp_targ_send_status(orbi, ccb);
			} else {
				if (debug)
					printf("%s: CAM_SEND_STATUS not set %0x\n", __func__, ccb->ccb_h.flags);
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			SBP_LOCK(orbi->sc);
			xpt_done(ccb);
			SBP_UNLOCK(orbi->sc);
		} else {
			orbi->status.len = 1;
			sbp_targ_status_FIFO(orbi,
			    orbi->login->fifo_hi, orbi->login->fifo_lo,
			    /*dequeue*/1);
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			SBP_LOCK(orbi->sc);
			xpt_done(ccb);
			SBP_UNLOCK(orbi->sc);
		}
	}

	fw_xfer_free(xfer);
}

static cam_status
sbp_targ_abort_ccb(struct sbp_targ_softc *sc, union ccb *ccb)
{
	union ccb *accb;
	struct sbp_targ_lstate *lstate;
	struct ccb_hdr_slist *list;
	struct ccb_hdr *curelm;
	int found;
	cam_status status;

	status = sbp_targ_find_devs(sc, ccb, &lstate, 0);
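	/*
	 * Only CCBs still queued on the lstate's accept_tios or
	 * immed_notifies lists can be aborted here.
	 */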
	if (status != CAM_REQ_CMP)
		return (status);

	accb = ccb->cab.abort_ccb;

	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
		list = &lstate->accept_tios;
	else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
		list = &lstate->immed_notifies;
	else
		return (CAM_UA_ABORT);

	curelm = SLIST_FIRST(list);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(list, sim_links.sle);
	} else {
		while (curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
				    SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	printf("%s: not found\n", __func__);
	return (CAM_PATH_INVALID);
}

/*
 * Directly execute a read or write to the initiator
 * address space and set hand (sbp_targ_cam_done) to
 * process the completion from the SIM to the target.
 * Bump orbi->refcount to indicate that a read/write
 * is in flight to/from the initiator.
 */
static void
sbp_targ_xfer_buf(struct orb_info *orbi, u_int offset,
    uint16_t dst_hi, uint32_t dst_lo, u_int size,
    void (*hand)(struct fw_xfer *))
{
	struct fw_xfer *xfer;
	u_int len, ccb_dir, off = 0;
	char *ptr;

	if (debug > 1)
		printf("%s: offset=%d size=%d\n", __func__, offset, size);
	ccb_dir = orbi->ccb->ccb_h.flags & CAM_DIR_MASK;
	ptr = (char *)orbi->ccb->csio.data_ptr + offset;

	while (size > 0) {
		/* XXX assume dst_lo + off doesn't overflow */
		len = MIN(size, 2048 /* XXX */);
		size -= len;
		orbi->refcount ++;
		if (ccb_dir == CAM_DIR_OUT) {
			if (debug)
				printf("%s: CAM_DIR_OUT --> read block in?\n", __func__);
			xfer = fwmem_read_block(orbi->fwdev,
			    (void *)orbi, /*spd*/FWSPD_S400,
			    dst_hi, dst_lo + off, len,
			    ptr + off, hand);
		} else {
			if (debug)
				printf("%s: CAM_DIR_IN --> write block out?\n", __func__);
			xfer = fwmem_write_block(orbi->fwdev,
			    (void *)orbi, /*spd*/FWSPD_S400,
			    dst_hi, dst_lo + off, len,
			    ptr + off, hand);
		}
		if (xfer == NULL) {
			printf("%s: xfer == NULL", __func__);
			/* XXX what should we do??
			 */
			orbi->refcount --;
		}
		off += len;
	}
}

static void
sbp_targ_pt_done(struct fw_xfer *xfer)
{
	struct orb_info *orbi;
	struct unrestricted_page_table_fmt *pt;
	uint32_t i;

	orbi = (struct orb_info *)xfer->sc;

	if (orbi->state == ORBI_STATUS_ABORTED) {
		if (debug)
			printf("%s: orbi aborted\n", __func__);
		sbp_targ_remove_orb_info(orbi->login, orbi);
		if (debug) {
			printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
			printf("%s: free orbi %p\n", __func__, orbi);
		}
		free(orbi->page_table, M_SBP_TARG);
		free(orbi, M_SBP_TARG);
		orbi = NULL;
		fw_xfer_free(xfer);
		return;
	}
	if (xfer->resp != 0) {
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
		orbi->status.resp = SBP_TRANS_FAIL;
		orbi->status.status = OBJ_PT | SBE_TIMEOUT/*XXX*/;
		orbi->status.dead = 1;
		orbi->status.len = 1;
		sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));

		if (debug)
			printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);

		sbp_targ_status_FIFO(orbi,
		    orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
		free(orbi->page_table, M_SBP_TARG);
		orbi->page_table = NULL;
		fw_xfer_free(xfer);
		return;
	}
	orbi->refcount++;
	/*
	 * Set endianness here so we don't have
	 * to deal with it later.
	 */
	for (i = 0, pt = orbi->page_table; i < orbi->orb4.data_size; i++, pt++) {
		pt->segment_len = ntohs(pt->segment_len);
		if (debug)
			printf("%s: segment_len = %u\n", __func__, pt->segment_len);
		pt->segment_base_high = ntohs(pt->segment_base_high);
		pt->segment_base_low = ntohl(pt->segment_base_low);
	}

	sbp_targ_xfer_pt(orbi);

	orbi->refcount--;
	if (orbi->refcount == 0)
		printf("%s: refcount == 0\n", __func__);

	fw_xfer_free(xfer);
	return;
}

static void
sbp_targ_xfer_pt(struct orb_info *orbi)
{
	union ccb *ccb;
	uint32_t res, offset, len;

	ccb = orbi->ccb;
	if (debug)
		printf("%s: dxfer_len=%d\n", __func__, ccb->csio.dxfer_len);
	res = ccb->csio.dxfer_len;
	/*
	 * If the page table required multiple CTIOs to
	 * complete, then cur_pte is non-NULL
	 * and we need to start from the last position.
	 * If this is the first pass over the page table
	 * we just start at its beginning.
	 *
	 * Parse the unrestricted page table and figure out where we need
	 * to shove the data from this read request.
	 */
	for (offset = 0, len = 0; (res != 0) && (orbi->cur_pte < orbi->last_pte); offset += len) {
		len = MIN(orbi->cur_pte->segment_len, res);
		res -= len;
		if (debug)
			printf("%s: page_table: %04x:%08x segment_len(%u) res(%u) len(%u)\n",
			    __func__, orbi->cur_pte->segment_base_high,
			    orbi->cur_pte->segment_base_low,
			    orbi->cur_pte->segment_len,
			    res, len);
		sbp_targ_xfer_buf(orbi, offset,
		    orbi->cur_pte->segment_base_high,
		    orbi->cur_pte->segment_base_low,
		    len, sbp_targ_cam_done);
		/*
		 * If we have only written partially to
		 * this page table, then we need to save
		 * our position for the next CTIO. If we
		 * have completed the page table, then we
		 * are safe to move on to the next entry.
		 */
		if (len == orbi->cur_pte->segment_len) {
			orbi->cur_pte++;
		} else {
			uint32_t saved_base_low;

			/* Handle transfers that cross a 4GB boundary. */
			saved_base_low = orbi->cur_pte->segment_base_low;
			orbi->cur_pte->segment_base_low += len;
			if (orbi->cur_pte->segment_base_low < saved_base_low)
				orbi->cur_pte->segment_base_high++;

			orbi->cur_pte->segment_len -= len;
		}
	}
	if (debug) {
		printf("%s: base_low(%08x) page_table_off(%p) last_block(%u)\n",
		    __func__, orbi->cur_pte->segment_base_low,
		    orbi->cur_pte, orbi->last_block_read);
	}
	if (res != 0)
		printf("Warning - short pt encountered. "
		    "Could not transfer all data.\n");
	return;
}

/*
 * Create a page table in local memory
 * and transfer it from the initiator
 * in order to know where we are supposed
 * to put the data.
 */

static void
sbp_targ_fetch_pt(struct orb_info *orbi)
{
	struct fw_xfer *xfer;

	/*
	 * Pull in the page table from the initiator
	 * and set up for data from our
	 * backend device.
	 */
	if (orbi->page_table == NULL) {
		orbi->page_table = malloc(orbi->orb4.data_size *
		    sizeof(struct unrestricted_page_table_fmt),
		    M_SBP_TARG, M_NOWAIT | M_ZERO);
		if (orbi->page_table == NULL)
			goto error;
		orbi->cur_pte = orbi->page_table;
		orbi->last_pte = orbi->page_table + orbi->orb4.data_size;
		orbi->last_block_read = orbi->orb4.data_size;
		if (debug && orbi->page_table != NULL)
			printf("%s: malloc'd orbi->page_table(%p), orb4.data_size(%u)\n",
			    __func__, orbi->page_table, orbi->orb4.data_size);

		xfer = fwmem_read_block(orbi->fwdev, (void *)orbi, /*spd*/FWSPD_S400,
		    orbi->data_hi, orbi->data_lo, orbi->orb4.data_size *
		    sizeof(struct unrestricted_page_table_fmt),
		    (void *)orbi->page_table, sbp_targ_pt_done);

		if (xfer != NULL)
			return;
	} else {
		/*
		 * This is a CTIO for a page table we have
		 * already malloc'd, so just directly invoke
		 * the xfer function on the orbi.
		 */
		sbp_targ_xfer_pt(orbi);
		return;
	}
error:
	orbi->ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	if (debug)
		printf("%s: free orbi->page_table %p due to xfer == NULL\n",
		    __func__, orbi->page_table);
	if (orbi->page_table != NULL) {
		free(orbi->page_table, M_SBP_TARG);
		orbi->page_table = NULL;
	}
	xpt_done(orbi->ccb);
	return;
}

static void
sbp_targ_action1(struct cam_sim *sim, union ccb *ccb)
{
	struct sbp_targ_softc *sc;
	struct sbp_targ_lstate *lstate;
	cam_status status;
	u_int ccb_dir;

	sc = (struct sbp_targ_softc *)cam_sim_softc(sim);

	status = sbp_targ_find_devs(sc, ccb, &lstate, TRUE);

	switch (ccb->ccb_h.func_code) {
	case XPT_CONT_TARGET_IO:
	{
		struct orb_info *orbi;

		if (debug)
			printf("%s: XPT_CONT_TARGET_IO (0x%08x)\n",
			    __func__, ccb->csio.tag_id);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		/* XXX transfer from/to initiator */
		orbi = sbp_targ_get_orb_info(lstate,
		    ccb->csio.tag_id, ccb->csio.init_id);
		if (orbi == NULL) {
			ccb->ccb_h.status = CAM_REQ_ABORTED; /* XXX */
			xpt_done(ccb);
			break;
		}
		if (orbi->state == ORBI_STATUS_ABORTED) {
			if (debug)
				printf("%s: ctio aborted\n", __func__);
			sbp_targ_remove_orb_info_locked(orbi->login, orbi);
			if (debug)
				printf("%s: free orbi %p\n", __func__, orbi);
			free(orbi, M_SBP_TARG);
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			xpt_done(ccb);
			break;
		}
		orbi->state = ORBI_STATUS_CTIO;

		orbi->ccb = ccb;
		ccb_dir = ccb->ccb_h.flags & CAM_DIR_MASK;

		/* XXX */
		if (ccb->csio.dxfer_len == 0)
			ccb_dir = CAM_DIR_NONE;

		/* Sanity check */
		if (ccb_dir == CAM_DIR_IN && orbi->orb4.dir == 0)
			printf("%s: direction mismatch\n", __func__);

		/* check page table */
		if (ccb_dir != CAM_DIR_NONE && orbi->orb4.page_table_present) {
			if (debug)
				printf("%s: page_table_present\n",
				    __func__);
			if (orbi->orb4.page_size != 0) {
				printf("%s: unsupported pagesize %d != 0\n",
				    __func__, orbi->orb4.page_size);
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
			sbp_targ_fetch_pt(orbi);
			break;
		}

		/* Sanity check */
		if (ccb_dir != CAM_DIR_NONE) {
			sbp_targ_xfer_buf(orbi, 0, orbi->data_hi,
			    orbi->data_lo,
			    MIN(orbi->orb4.data_size, ccb->csio.dxfer_len),
			    sbp_targ_cam_done);
			if (orbi->orb4.data_size > ccb->csio.dxfer_len) {
				orbi->data_lo += ccb->csio.dxfer_len;
				orbi->orb4.data_size -= ccb->csio.dxfer_len;
			}
		}

		if (ccb_dir == CAM_DIR_NONE) {
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				/* XXX */
				SBP_UNLOCK(sc);
				sbp_targ_send_status(orbi, ccb);
				SBP_LOCK(sc);
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
		}
		break;
	}
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
		    sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		if ((lstate->flags & F_ATIO_STARVED) != 0) {
			struct sbp_targ_login *login;

			if (debug)
				printf("%s: new atio arrived\n", __func__);
			lstate->flags &= ~F_ATIO_STARVED;
			STAILQ_FOREACH(login, &lstate->logins, link)
				if ((login->flags & F_ATIO_STARVED) != 0) {
					login->flags &= ~F_ATIO_STARVED;
					sbp_targ_fetch_orb(lstate->sc,
					    login->fwdev,
					    login->last_hi, login->last_lo,
					    login, FETCH_CMD);
				}
		}
		break;
	case XPT_NOTIFY_ACKNOWLEDGE:	/* recycle notify ack */
	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
		    sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		sbp_targ_send_lstate_events(sc, lstate);
		break;
	case XPT_EN_LUN:
		sbp_targ_en_lun(sc, ccb);
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_TAG_ABLE;
		cpi->target_sprt = PIT_PROCESSOR
				 | PIT_DISCONNECT
				 | PIT_TERM_IO;
		cpi->transport = XPORT_SPI;	/* FIXME add XPORT_FW type to cam */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7; /* XXX */
		cpi->max_lun = MAX_LUN - 1;
		cpi->initiator_id = 7; /* XXX */
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 400 * 1000 / 8;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "SBP_TARG", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;

		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:
	{
		union ccb *accb = ccb->cab.abort_ccb;

		switch (accb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMEDIATE_NOTIFY:
			ccb->ccb_h.status = sbp_targ_abort_ccb(sc, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			/* XXX */
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		default:
			printf("%s: aborting unknown function %d\n",
			    __func__, accb->ccb_h.func_code);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		xpt_done(ccb);
		break;
	}
#ifdef CAM_NEW_TRAN_CODE
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_FW;	/* should have a FireWire */
		cts->transport_version = 2;
		spi->valid = CTS_SPI_VALID_DISC;
		spi->flags = CTS_SPI_FLAGS_DISC_ENB;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
#if 0
		printf("%s:%d:%d XPT_GET_TRAN_SETTINGS:\n",
		    device_get_nameunit(sc->fd.dev),
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
#endif
		cts->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
#endif

	default:
		printf("%s: unknown function 0x%x\n",
		    __func__, ccb->ccb_h.func_code);
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
	return;
}

static void
sbp_targ_action(struct cam_sim *sim, union ccb *ccb)
{
	int s;

	s = splfw();
	sbp_targ_action1(sim, ccb);
	splx(s);
}

static void
sbp_targ_poll(struct cam_sim *sim)
{
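	/*
	 * Nothing to poll: completions are delivered by the FireWire
	 * async transaction callbacks (sbp_targ_recv and friends), not
	 * by polling the hardware from CAM.
	 */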
	/* XXX */
	return;
}

static void
sbp_targ_cmd_handler(struct fw_xfer *xfer)
{
	struct fw_pkt *fp;
	uint32_t *orb;
	struct corb4 *orb4;
	struct orb_info *orbi;
	struct ccb_accept_tio *atio;
	u_char *bytes;
	int i;

	orbi = (struct orb_info *)xfer->sc;
	if (xfer->resp != 0) {
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
		orbi->status.resp = SBP_TRANS_FAIL;
		orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/;
		orbi->status.dead = 1;
		orbi->status.len = 1;
		sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));

		sbp_targ_status_FIFO(orbi,
		    orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
		fw_xfer_free(xfer);
		return;
	}
	fp = &xfer->recv.hdr;

	atio = orbi->atio;

	if (orbi->state == ORBI_STATUS_ABORTED) {
		printf("%s: aborted\n", __func__);
		sbp_targ_remove_orb_info(orbi->login, orbi);
		atio->ccb_h.status = CAM_REQ_ABORTED;
		SBP_LOCK(orbi->sc);
		xpt_done((union ccb *)atio);
		SBP_UNLOCK(orbi->sc);
		/* Don't touch orbi after it has been freed. */
		free(orbi, M_SBP_TARG);
		goto done0;
	}
	orbi->state = ORBI_STATUS_ATIO;

	orb = orbi->orb;
	/* swap payload except SCSI command */
	for (i = 0; i < 5; i ++)
		orb[i] = ntohl(orb[i]);

	orb4 = (struct corb4 *)&orb[4];
	if (orb4->rq_fmt != 0) {
		/* XXX */
		printf("%s: rq_fmt(%d) != 0\n", __func__, orb4->rq_fmt);
	}

	atio->ccb_h.target_id = 0; /* XXX */
	atio->ccb_h.target_lun = orbi->login->lstate->lun;
	atio->sense_len = 0;
	atio->tag_action = MSG_SIMPLE_TASK;
	atio->tag_id = orbi->orb_lo;
	atio->init_id = orbi->login->id;

	atio->ccb_h.flags |= CAM_TAG_ACTION_VALID;
	bytes = (u_char *)&orb[5];
	if (debug)
		printf("%s: %p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
		    __func__, (void *)atio,
		    bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
		    bytes[5], bytes[6], bytes[7], bytes[8], bytes[9]);
	switch (bytes[0] >> 5) {
	case 0:
		atio->cdb_len = 6;
		break;
	case 1:
	case 2:
		atio->cdb_len = 10;
		break;
	case 4:
		atio->cdb_len = 16;
		break;
	case 5:
		atio->cdb_len = 12;
		break;
	case 3:
	default:
		/* Only copy the opcode. */
		atio->cdb_len = 1;
		printf("Reserved or VU command code type encountered\n");
		break;
	}

	memcpy(atio->cdb_io.cdb_bytes, bytes, atio->cdb_len);

	atio->ccb_h.status |= CAM_CDB_RECVD;

	/* next ORB */
	if ((orb[0] & (1 << 31)) == 0) {
		if (debug)
			printf("%s: fetch next orb\n", __func__);
		orbi->status.src = SRC_NEXT_EXISTS;
		sbp_targ_fetch_orb(orbi->sc, orbi->fwdev,
		    orb[0], orb[1], orbi->login, FETCH_CMD);
	} else {
		orbi->status.src = SRC_NO_NEXT;
		orbi->login->flags &= ~F_LINK_ACTIVE;
	}

	orbi->data_hi = orb[2];
	orbi->data_lo = orb[3];
	orbi->orb4 = *orb4;

	SBP_LOCK(orbi->sc);
	xpt_done((union ccb *)atio);
	SBP_UNLOCK(orbi->sc);
done0:
	fw_xfer_free(xfer);
	return;
}

static struct sbp_targ_login *
sbp_targ_get_login(struct sbp_targ_softc *sc, struct fw_device *fwdev, int lun)
{
	struct sbp_targ_lstate *lstate;
	struct sbp_targ_login *login;
	int i;

	lstate = sc->lstate[lun];

	STAILQ_FOREACH(login, &lstate->logins, link)
		if (login->fwdev == fwdev)
			return (login);

	for (i = 0; i < MAX_LOGINS; i ++)
		if (sc->logins[i] == NULL)
			goto found;

	printf("%s: increase MAX_LOGINS\n", __func__);
	return (NULL);

found:
	login = (struct sbp_targ_login *)malloc(
	    sizeof(struct sbp_targ_login), M_SBP_TARG, M_NOWAIT | M_ZERO);

	if (login == NULL) {
		printf("%s: malloc failed\n", __func__);
		return (NULL);
	}

	login->id = i;
	login->fwdev = fwdev;
	login->lstate = lstate;
	login->last_hi = 0xffff;
	login->last_lo = 0xffffffff;
	login->hold_sec = 1;
	STAILQ_INIT(&login->orbs);
	CALLOUT_INIT(&login->hold_callout);
	sc->logins[i] = login;
	return (login);
}

static void
sbp_targ_mgm_handler(struct fw_xfer *xfer)
{
	struct sbp_targ_lstate *lstate;
	struct sbp_targ_login *login;
	struct fw_pkt *fp;
	uint32_t *orb;
	struct morb4 *orb4;
	struct orb_info *orbi;
	int i;

	orbi = (struct orb_info *)xfer->sc;
	if (xfer->resp != 0) {
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
		orbi->status.resp = SBP_TRANS_FAIL;
		orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/;
		orbi->status.dead = 1;
		orbi->status.len = 1;
		sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));

		sbp_targ_status_FIFO(orbi,
		    orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/0);
		fw_xfer_free(xfer);
		return;
	}
	fp = &xfer->recv.hdr;

	orb = orbi->orb;
	/* swap payload */
	for (i = 0; i < 8; i ++) {
		orb[i] = ntohl(orb[i]);
	}
	orb4 = (struct morb4 *)&orb[4];
	if (debug)
		printf("%s: %s\n", __func__, orb_fun_name[orb4->fun]);

	orbi->status.src = SRC_NO_NEXT;

	switch (orb4->fun << 16) {
	case ORB_FUN_LGI:
	{
		int exclusive = 0, lun;

		if (orb[4] & ORB_EXV)
			exclusive = 1;

		lun = orb4->id;
		/* Don't index lstate[] before the LUN has been validated. */
		lstate = (lun < MAX_LUN) ? orbi->sc->lstate[lun] : NULL;

		if (lun >= MAX_LUN || lstate == NULL ||
		    (exclusive &&
		    STAILQ_FIRST(&lstate->logins) != NULL &&
		    STAILQ_FIRST(&lstate->logins)->fwdev != orbi->fwdev)) {
			/* error */
			orbi->status.dead = 1;
			orbi->status.status = STATUS_ACCESS_DENY;
			orbi->status.len = 1;
			break;
		}

		/* allocate login */
		login = sbp_targ_get_login(orbi->sc, orbi->fwdev, lun);
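		/*
		 * NULL here means every login slot (MAX_LOGINS) is in use
		 * or the allocation failed; report resource unavailability
		 * to the initiator below.
		 */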
		if (login == NULL) {
			printf("%s: sbp_targ_get_login failed\n",
			    __func__);
			orbi->status.dead = 1;
			orbi->status.status = STATUS_RES_UNAVAIL;
			orbi->status.len = 1;
			break;
		}
		printf("%s: login id=%d\n", __func__, login->id);

		login->fifo_hi = orb[6];
		login->fifo_lo = orb[7];
		login->loginres.len = htons(sizeof(uint32_t) * 4);
		login->loginres.id = htons(login->id);
		login->loginres.cmd_hi = htons(SBP_TARG_BIND_HI);
		login->loginres.cmd_lo = htonl(SBP_TARG_BIND_LO(login->id));
		login->loginres.recon_hold = htons(login->hold_sec);

		STAILQ_INSERT_TAIL(&lstate->logins, login, link);
		fwmem_write_block(orbi->fwdev, NULL, /*spd*/FWSPD_S400, orb[2], orb[3],
		    sizeof(struct sbp_login_res), (void *)&login->loginres,
		    fw_asy_callback_free);
		/* XXX return status after loginres is successfully written */
		break;
	}
	case ORB_FUN_RCN:
		login = orbi->sc->logins[orb4->id];
		if (login != NULL && login->fwdev == orbi->fwdev) {
			login->flags &= ~F_HOLD;
			callout_stop(&login->hold_callout);
			printf("%s: reconnected id=%d\n",
			    __func__, login->id);
		} else {
			orbi->status.dead = 1;
			orbi->status.status = STATUS_ACCESS_DENY;
			printf("%s: reconnection failed id=%d\n",
			    __func__, orb4->id);
		}
		break;
	case ORB_FUN_LGO:
		login = orbi->sc->logins[orb4->id];
		if (login->fwdev != orbi->fwdev) {
			printf("%s: wrong initiator\n", __func__);
			break;
		}
		sbp_targ_dealloc_login(login);
		break;
	default:
		printf("%s: %s not implemented yet\n",
		    __func__, orb_fun_name[orb4->fun]);
		break;
	}
	orbi->status.len = 1;
	sbp_targ_status_FIFO(orbi, orb[6], orb[7], /*dequeue*/0);
	fw_xfer_free(xfer);
	return;
}

static void
sbp_targ_pointer_handler(struct fw_xfer *xfer)
{
	struct orb_info *orbi;
	uint32_t orb0, orb1;

	orbi = (struct orb_info *)xfer->sc;
	if (xfer->resp != 0) {
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
		goto done;
	}

	orb0 = ntohl(orbi->orb[0]);
	orb1 = ntohl(orbi->orb[1]);
	if ((orb0 & (1U << 31)) != 0) {
		printf("%s: invalid pointer\n", __func__);
		goto done;
	}
	sbp_targ_fetch_orb(orbi->login->lstate->sc, orbi->fwdev,
	    (uint16_t)orb0, orb1, orbi->login, FETCH_CMD);
done:
	free(orbi, M_SBP_TARG);
	fw_xfer_free(xfer);
	return;
}

static void
sbp_targ_fetch_orb(struct sbp_targ_softc *sc, struct fw_device *fwdev,
    uint16_t orb_hi, uint32_t orb_lo, struct sbp_targ_login *login,
    int mode)
{
	struct orb_info *orbi;

	if (debug)
		printf("%s: fetch orb %04x:%08x\n", __func__, orb_hi, orb_lo);
	orbi = malloc(sizeof(struct orb_info), M_SBP_TARG, M_NOWAIT | M_ZERO);
	if (orbi == NULL) {
		printf("%s: malloc failed\n", __func__);
		return;
	}
	orbi->sc = sc;
	orbi->fwdev = fwdev;
	orbi->login = login;
	orbi->orb_hi = orb_hi;
	orbi->orb_lo = orb_lo;
	orbi->status.orb_hi = htons(orb_hi);
	orbi->status.orb_lo = htonl(orb_lo);
	orbi->page_table = NULL;

	switch (mode) {
	case FETCH_MGM:
		fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
		    sizeof(uint32_t) * 8, &orbi->orb[0],
		    sbp_targ_mgm_handler);
		break;
	case FETCH_CMD:
		orbi->state = ORBI_STATUS_FETCH;
		login->last_hi = orb_hi;
		login->last_lo = orb_lo;
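		/*
		 * Remember this ORB address so a later DOORBELL write can
		 * resume fetching from the same spot (see sbp_targ_cmd()).
		 */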
		login->flags |= F_LINK_ACTIVE;
		/* dequeue */
		SBP_LOCK(sc);
		orbi->atio = (struct ccb_accept_tio *)
		    SLIST_FIRST(&login->lstate->accept_tios);
		if (orbi->atio == NULL) {
			SBP_UNLOCK(sc);
			printf("%s: no free atio\n", __func__);
			login->lstate->flags |= F_ATIO_STARVED;
			login->flags |= F_ATIO_STARVED;
#if 0
			/* XXX ?? */
			login->fwdev = fwdev;
#endif
			break;
		}
		SLIST_REMOVE_HEAD(&login->lstate->accept_tios, sim_links.sle);
		STAILQ_INSERT_TAIL(&login->orbs, orbi, link);
		SBP_UNLOCK(sc);
		fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
		    sizeof(uint32_t) * 8, &orbi->orb[0],
		    sbp_targ_cmd_handler);
		break;
	case FETCH_POINTER:
		orbi->state = ORBI_STATUS_POINTER;
		login->flags |= F_LINK_ACTIVE;
		fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
		    sizeof(uint32_t) * 2, &orbi->orb[0],
		    sbp_targ_pointer_handler);
		break;
	default:
		printf("%s: invalid mode %d\n", __func__, mode);
	}
}

static void
sbp_targ_resp_callback(struct fw_xfer *xfer)
{
	struct sbp_targ_softc *sc;
	int s;

	if (debug)
		printf("%s: xfer=%p\n", __func__, xfer);
	sc = (struct sbp_targ_softc *)xfer->sc;
	fw_xfer_unload(xfer);
	xfer->recv.pay_len = SBP_TARG_RECV_LEN;
	xfer->hand = sbp_targ_recv;
	s = splfw();
	STAILQ_INSERT_TAIL(&sc->fwb.xferlist, xfer, link);
	splx(s);
}

static int
sbp_targ_cmd(struct fw_xfer *xfer, struct fw_device *fwdev, int login_id,
    int reg)
{
	struct sbp_targ_login *login;
	struct sbp_targ_softc *sc;
	int rtcode = 0;

	if (login_id < 0 || login_id >= MAX_LOGINS)
		return (RESP_ADDRESS_ERROR);

	sc = (struct sbp_targ_softc *)xfer->sc;
	login = sc->logins[login_id];
	if (login == NULL)
		return (RESP_ADDRESS_ERROR);

	if (login->fwdev != fwdev) {
		/* XXX */
		return (RESP_ADDRESS_ERROR);
	}

	switch (reg) {
	case 0x08:	/* ORB_POINTER */
		if (debug)
			printf("%s: ORB_POINTER(%d)\n", __func__, login_id);
		if ((login->flags & F_LINK_ACTIVE) != 0) {
			if (debug)
				printf("link active (ORB_POINTER)\n");
			break;
		}
		sbp_targ_fetch_orb(sc, fwdev,
		    ntohl(xfer->recv.payload[0]),
		    ntohl(xfer->recv.payload[1]),
		    login, FETCH_CMD);
		break;
	case 0x04:	/* AGENT_RESET */
		if (debug)
			printf("%s: AGENT RESET(%d)\n", __func__, login_id);
		login->last_hi = 0xffff;
		login->last_lo = 0xffffffff;
		sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs));
		break;
	case 0x10:	/* DOORBELL */
		if (debug)
			printf("%s: DOORBELL(%d)\n", __func__, login_id);
		if (login->last_hi == 0xffff &&
		    login->last_lo == 0xffffffff) {
			printf("%s: no previous pointer(DOORBELL)\n",
			    __func__);
			break;
		}
		if ((login->flags & F_LINK_ACTIVE) != 0) {
			if (debug)
				printf("link active (DOORBELL)\n");
			break;
		}
		sbp_targ_fetch_orb(sc, fwdev,
		    login->last_hi, login->last_lo,
		    login, FETCH_POINTER);
		break;
	case 0x00:	/* AGENT_STATE */
		printf("%s: AGENT_STATE (%d:ignore)\n", __func__, login_id);
		break;
	case 0x14:	/* UNSOLICITED_STATE_ENABLE */
		printf("%s: UNSOLICITED_STATE_ENABLE (%d:ignore)\n",
		    __func__, login_id);
		break;
	default:
		printf("%s: invalid register %d(%d)\n",
		    __func__, reg, login_id);
		rtcode = RESP_ADDRESS_ERROR;
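		/*
		 * Each login owns a 0x20-byte command block agent window;
		 * writes to any offset other than the registers handled
		 * above are rejected with an address error.
		 */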
1887 } 1888 1889 return (rtcode); 1890 } 1891 1892 static int 1893 sbp_targ_mgm(struct fw_xfer *xfer, struct fw_device *fwdev) 1894 { 1895 struct sbp_targ_softc *sc; 1896 struct fw_pkt *fp; 1897 1898 sc = (struct sbp_targ_softc *)xfer->sc; 1899 1900 fp = &xfer->recv.hdr; 1901 if (fp->mode.wreqb.tcode != FWTCODE_WREQB){ 1902 printf("%s: tcode = %d\n", __func__, fp->mode.wreqb.tcode); 1903 return(RESP_TYPE_ERROR); 1904 } 1905 1906 sbp_targ_fetch_orb(sc, fwdev, 1907 ntohl(xfer->recv.payload[0]), 1908 ntohl(xfer->recv.payload[1]), 1909 NULL, FETCH_MGM); 1910 1911 return(0); 1912 } 1913 1914 static void 1915 sbp_targ_recv(struct fw_xfer *xfer) 1916 { 1917 struct fw_pkt *fp, *sfp; 1918 struct fw_device *fwdev; 1919 uint32_t lo; 1920 int s, rtcode; 1921 struct sbp_targ_softc *sc; 1922 1923 s = splfw(); 1924 sc = (struct sbp_targ_softc *)xfer->sc; 1925 fp = &xfer->recv.hdr; 1926 fwdev = fw_noderesolve_nodeid(sc->fd.fc, fp->mode.wreqb.src & 0x3f); 1927 if (fwdev == NULL) { 1928 printf("%s: cannot resolve nodeid=%d\n", 1929 __func__, fp->mode.wreqb.src & 0x3f); 1930 rtcode = RESP_TYPE_ERROR; /* XXX */ 1931 goto done; 1932 } 1933 lo = fp->mode.wreqb.dest_lo; 1934 1935 if (lo == SBP_TARG_BIND_LO(-1)) 1936 rtcode = sbp_targ_mgm(xfer, fwdev); 1937 else if (lo >= SBP_TARG_BIND_LO(0)) 1938 rtcode = sbp_targ_cmd(xfer, fwdev, SBP_TARG_LOGIN_ID(lo), 1939 lo % 0x20); 1940 else 1941 rtcode = RESP_ADDRESS_ERROR; 1942 1943 done: 1944 if (rtcode != 0) 1945 printf("%s: rtcode = %d\n", __func__, rtcode); 1946 sfp = &xfer->send.hdr; 1947 xfer->send.spd = FWSPD_S400; 1948 xfer->hand = sbp_targ_resp_callback; 1949 sfp->mode.wres.dst = fp->mode.wreqb.src; 1950 sfp->mode.wres.tlrt = fp->mode.wreqb.tlrt; 1951 sfp->mode.wres.tcode = FWTCODE_WRES; 1952 sfp->mode.wres.rtcode = rtcode; 1953 sfp->mode.wres.pri = 0; 1954 1955 fw_asyreq(xfer->fc, -1, xfer); 1956 splx(s); 1957 } 1958 1959 static int 1960 sbp_targ_attach(device_t dev) 1961 { 1962 struct sbp_targ_softc *sc; 1963 struct cam_devq *devq; 1964 struct firewire_comm *fc; 1965 1966 sc = (struct sbp_targ_softc *) device_get_softc(dev); 1967 bzero((void *)sc, sizeof(struct sbp_targ_softc)); 1968 1969 mtx_init(&sc->mtx, "sbp_targ", NULL, MTX_DEF); 1970 sc->fd.fc = fc = device_get_ivars(dev); 1971 sc->fd.dev = dev; 1972 sc->fd.post_explore = (void *) sbp_targ_post_explore; 1973 sc->fd.post_busreset = (void *) sbp_targ_post_busreset; 1974 1975 devq = cam_simq_alloc(/*maxopenings*/MAX_LUN*MAX_INITIATORS); 1976 if (devq == NULL) 1977 return (ENXIO); 1978 1979 sc->sim = cam_sim_alloc(sbp_targ_action, sbp_targ_poll, 1980 "sbp_targ", sc, device_get_unit(dev), &sc->mtx, 1981 /*untagged*/ 1, /*tagged*/ 1, devq); 1982 if (sc->sim == NULL) { 1983 cam_simq_free(devq); 1984 return (ENXIO); 1985 } 1986 1987 SBP_LOCK(sc); 1988 if (xpt_bus_register(sc->sim, dev, /*bus*/0) != CAM_SUCCESS) 1989 goto fail; 1990 1991 if (xpt_create_path(&sc->path, /*periph*/ NULL, cam_sim_path(sc->sim), 1992 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1993 xpt_bus_deregister(cam_sim_path(sc->sim)); 1994 goto fail; 1995 } 1996 SBP_UNLOCK(sc); 1997 1998 sc->fwb.start = SBP_TARG_BIND_START; 1999 sc->fwb.end = SBP_TARG_BIND_END; 2000 2001 /* pre-allocate xfer */ 2002 STAILQ_INIT(&sc->fwb.xferlist); 2003 fw_xferlist_add(&sc->fwb.xferlist, M_SBP_TARG, 2004 /*send*/ 0, /*recv*/ SBP_TARG_RECV_LEN, MAX_LUN /* XXX */, 2005 fc, (void *)sc, sbp_targ_recv); 2006 fw_bindadd(fc, &sc->fwb); 2007 return 0; 2008 2009 fail: 2010 SBP_UNLOCK(sc); 2011 cam_sim_free(sc->sim, /*free_devq*/TRUE); 2012 return (ENXIO); 2013 
}

static int
sbp_targ_detach(device_t dev)
{
	struct sbp_targ_softc *sc;
	struct sbp_targ_lstate *lstate;
	int i;

	sc = (struct sbp_targ_softc *)device_get_softc(dev);
	sc->fd.post_busreset = NULL;

	SBP_LOCK(sc);
	xpt_free_path(sc->path);
	xpt_bus_deregister(cam_sim_path(sc->sim));
	SBP_UNLOCK(sc);
	cam_sim_free(sc->sim, /*free_devq*/TRUE);

	for (i = 0; i < MAX_LUN; i ++) {
		lstate = sc->lstate[i];
		if (lstate != NULL) {
			xpt_free_path(lstate->path);
			free(lstate, M_SBP_TARG);
		}
	}
	if (sc->black_hole != NULL) {
		xpt_free_path(sc->black_hole->path);
		free(sc->black_hole, M_SBP_TARG);
	}

	fw_bindremove(sc->fd.fc, &sc->fwb);
	fw_xferlist_remove(&sc->fwb.xferlist);

	mtx_destroy(&sc->mtx);

	return 0;
}

static devclass_t sbp_targ_devclass;

static device_method_t sbp_targ_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify,	sbp_targ_identify),
	DEVMETHOD(device_probe,		sbp_targ_probe),
	DEVMETHOD(device_attach,	sbp_targ_attach),
	DEVMETHOD(device_detach,	sbp_targ_detach),
	{ 0, 0 }
};

static driver_t sbp_targ_driver = {
	"sbp_targ",
	sbp_targ_methods,
	sizeof(struct sbp_targ_softc),
};

DRIVER_MODULE(sbp_targ, firewire, sbp_targ_driver, sbp_targ_devclass, 0, 0);
MODULE_VERSION(sbp_targ, 1);
MODULE_DEPEND(sbp_targ, firewire, 1, 1, 1);
MODULE_DEPEND(sbp_targ, cam, 1, 1, 1);