/*-
 * Copyright (C) 2003
 * 	Hidetoshi Shimokawa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Hidetoshi Shimokawa.
 *
 * 4. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/endian.h>

#include <sys/bus.h>
#include <machine/bus.h>

#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/iec13213.h>
#include <dev/firewire/sbp.h>
#include <dev/firewire/fwmem.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#define SBP_TARG_RECV_LEN	8
#define MAX_INITIATORS		8
#define MAX_LUN			63
#define MAX_LOGINS		63
#define MAX_NODES		63
/*
 * management/command block agent registers
 *
 * BASE 0xffff f001 0000 management port
 * BASE 0xffff f001 0020 command port for login id 0
 * BASE 0xffff f001 0040 command port for login id 1
 *
 */
#define SBP_TARG_MGM		0x10000	/* offset from 0xffff f000 0000 */
#define SBP_TARG_BIND_HI	0xffff
#define SBP_TARG_BIND_LO(l)	(0xf0000000 + SBP_TARG_MGM + 0x20 * ((l) + 1))
#define SBP_TARG_BIND_START	(((u_int64_t)SBP_TARG_BIND_HI << 32) | \
    SBP_TARG_BIND_LO(-1))
#define SBP_TARG_BIND_END	(((u_int64_t)SBP_TARG_BIND_HI << 32) | \
    SBP_TARG_BIND_LO(MAX_LOGINS))
#define SBP_TARG_LOGIN_ID(lo)	(((lo) - SBP_TARG_BIND_LO(0))/0x20)

#define FETCH_MGM	0
#define FETCH_CMD	1
#define FETCH_POINTER	2

#define F_LINK_ACTIVE	(1 << 0)
#define F_ATIO_STARVED	(1 << 1)
#define F_LOGIN		(1 << 2)
#define F_HOLD		(1 << 3)
#define F_FREEZED	(1 << 4)

static MALLOC_DEFINE(M_SBP_TARG, "sbp_targ", "SBP-II/FireWire target mode");

static int debug = 0;

SYSCTL_INT(_debug, OID_AUTO, sbp_targ_debug, CTLFLAG_RW, &debug, 0,
	"SBP target mode debug flag");
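/*
 * Editor's illustrative sketch (not part of the original driver, kept
 * inside #if 0): how the macros above lay out the agent registers in the
 * initiator-visible 48-bit address space.  Only SBP_TARG_BIND_HI,
 * SBP_TARG_BIND_LO() and SBP_TARG_LOGIN_ID() from above are used; the
 * helper name itself is hypothetical.
 */
#if 0
static void
sbp_targ_addr_map_example(void)
{
	/* Management agent: 0xffff:0xf0010000 (0xffff f001 0000). */
	printf("mgm  %04x:%08x\n", SBP_TARG_BIND_HI, SBP_TARG_BIND_LO(-1));
	/* Command agent for login id 0: 0xffff:0xf0010020. */
	printf("cmd0 %04x:%08x\n", SBP_TARG_BIND_HI, SBP_TARG_BIND_LO(0));
	/* And back again: SBP_TARG_LOGIN_ID(0xf0010040) == 1. */
	printf("login id %d\n", SBP_TARG_LOGIN_ID(SBP_TARG_BIND_LO(1)));
}
#endif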
struct sbp_targ_login {
	struct sbp_targ_lstate *lstate;
	struct fw_device *fwdev;
	struct sbp_login_res loginres;
	uint16_t fifo_hi;
	uint16_t last_hi;
	uint32_t fifo_lo;
	uint32_t last_lo;
	STAILQ_HEAD(, orb_info) orbs;
	STAILQ_ENTRY(sbp_targ_login) link;
	uint16_t hold_sec;
	uint16_t id;
	uint8_t flags;
	uint8_t spd;
	struct callout hold_callout;
};

struct sbp_targ_lstate {
	uint16_t lun;
	struct sbp_targ_softc *sc;
	struct cam_path *path;
	struct ccb_hdr_slist accept_tios;
	struct ccb_hdr_slist immed_notifies;
	struct crom_chunk model;
	uint32_t flags;
	STAILQ_HEAD(, sbp_targ_login) logins;
};

struct sbp_targ_softc {
	struct firewire_dev_comm fd;
	struct cam_sim *sim;
	struct cam_path *path;
	struct fw_bind fwb;
	int ndevs;
	int flags;
	struct crom_chunk unit;
	struct sbp_targ_lstate *lstate[MAX_LUN];
	struct sbp_targ_lstate *black_hole;
	struct sbp_targ_login *logins[MAX_LOGINS];
	struct mtx mtx;
};
#define SBP_LOCK(sc)	mtx_lock(&(sc)->mtx)
#define SBP_UNLOCK(sc)	mtx_unlock(&(sc)->mtx)

struct corb4 {
#if BYTE_ORDER == BIG_ENDIAN
	uint32_t n:1,
		  rq_fmt:2,
		  :1,
		  dir:1,
		  spd:3,
		  max_payload:4,
		  page_table_present:1,
		  page_size:3,
		  data_size:16;
#else
	uint32_t data_size:16,
		  page_size:3,
		  page_table_present:1,
		  max_payload:4,
		  spd:3,
		  dir:1,
		  :1,
		  rq_fmt:2,
		  n:1;
#endif
};

struct morb4 {
#if BYTE_ORDER == BIG_ENDIAN
	uint32_t n:1,
		  rq_fmt:2,
		  :9,
		  fun:4,
		  id:16;
#else
	uint32_t id:16,
		  fun:4,
		  :9,
		  rq_fmt:2,
		  n:1;
#endif
};


/*
 * Unrestricted page table format:
 * the segment length and the high 16 bits of the base address are in
 * the first 32 bits, and the low 32 bits of the base address are in
 * the second.
 */
struct unrestricted_page_table_fmt {
	uint16_t segment_len;
	uint16_t segment_base_high;
	uint32_t segment_base_low;
};


struct orb_info {
	struct sbp_targ_softc *sc;
	struct fw_device *fwdev;
	struct sbp_targ_login *login;
	union ccb *ccb;
	struct ccb_accept_tio *atio;
	uint8_t state;
#define ORBI_STATUS_NONE	0
#define ORBI_STATUS_FETCH	1
#define ORBI_STATUS_ATIO	2
#define ORBI_STATUS_CTIO	3
#define ORBI_STATUS_STATUS	4
#define ORBI_STATUS_POINTER	5
#define ORBI_STATUS_ABORTED	7
	uint8_t refcount;
	uint16_t orb_hi;
	uint32_t orb_lo;
	uint32_t data_hi;
	uint32_t data_lo;
	struct corb4 orb4;
	STAILQ_ENTRY(orb_info) link;
	uint32_t orb[8];
	struct unrestricted_page_table_fmt *page_table;
	struct unrestricted_page_table_fmt *cur_pte;
	struct unrestricted_page_table_fmt *last_pte;
	uint32_t last_block_read;
	struct sbp_status status;
};

static char *orb_fun_name[] = {
	ORB_FUN_NAMES
};

static void sbp_targ_recv(struct fw_xfer *);
static void sbp_targ_fetch_orb(struct sbp_targ_softc *, struct fw_device *,
    uint16_t, uint32_t, struct sbp_targ_login *, int);
static void sbp_targ_xfer_pt(struct orb_info *);
static void sbp_targ_abort(struct sbp_targ_softc *, struct orb_info *);

static void
sbp_targ_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "sbp_targ", device_get_unit(parent));
}

static int
sbp_targ_probe(device_t dev)
{
	device_t pa;

	pa = device_get_parent(dev);
	if (device_get_unit(dev) != device_get_unit(pa)) {
		return (ENXIO);
	}

	device_set_desc(dev, "SBP-2/SCSI over FireWire target mode");
	return (0);
}

static void
sbp_targ_dealloc_login(struct sbp_targ_login *login)
{
	struct orb_info *orbi, *next;

	if (login == NULL) {
		printf("%s: login = NULL\n", __func__);
		return;
	}
	for (orbi = STAILQ_FIRST(&login->orbs); orbi != NULL; orbi = next) {
		next = STAILQ_NEXT(orbi, link);
		if (debug)
			printf("%s: free orbi %p\n", __func__, orbi);
		free(orbi, M_SBP_TARG);
		orbi = NULL;
	}
	callout_stop(&login->hold_callout);

	STAILQ_REMOVE(&login->lstate->logins, login, sbp_targ_login, link);
	login->lstate->sc->logins[login->id] = NULL;
	if (debug)
		printf("%s: free login %p\n", __func__, login);
	free((void *)login, M_SBP_TARG);
	login = NULL;
}

static void
sbp_targ_hold_expire(void *arg)
{
	struct sbp_targ_login *login;

	login = (struct sbp_targ_login *)arg;

	if (login->flags & F_HOLD) {
		printf("%s: login_id=%d expired\n", __func__, login->id);
		sbp_targ_dealloc_login(login);
	} else {
		printf("%s: login_id=%d not hold\n", __func__, login->id);
	}
}
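/*
 * Editor's note (sketch, not compiled): the reconnection-hold lifecycle
 * implied by sbp_targ_hold_expire() above and by sbp_targ_post_busreset()
 * and the ORB_FUN_RCN handling below.  On a bus reset every logged-in
 * initiator gets hold_sec seconds to reconnect before its login is torn
 * down.
 */
#if 0
	/* bus reset: start the hold timer (see sbp_targ_post_busreset()) */
	login->flags |= F_HOLD;
	callout_reset(&login->hold_callout, hz * login->hold_sec,
	    sbp_targ_hold_expire, (void *)login);

	/* initiator reconnects in time (see ORB_FUN_RCN) */
	login->flags &= ~F_HOLD;
	callout_stop(&login->hold_callout);

	/*
	 * Otherwise sbp_targ_hold_expire() fires and calls
	 * sbp_targ_dealloc_login(login).
	 */
#endif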
static void
sbp_targ_post_busreset(void *arg)
{
	struct sbp_targ_softc *sc;
	struct crom_src *src;
	struct crom_chunk *root;
	struct crom_chunk *unit;
	struct sbp_targ_lstate *lstate;
	struct sbp_targ_login *login;
	int i;

	sc = (struct sbp_targ_softc *)arg;
	src = sc->fd.fc->crom_src;
	root = sc->fd.fc->crom_root;

	unit = &sc->unit;

	if ((sc->flags & F_FREEZED) == 0) {
		SBP_LOCK(sc);
		sc->flags |= F_FREEZED;
		xpt_freeze_simq(sc->sim, /*count*/1);
		SBP_UNLOCK(sc);
	} else {
		printf("%s: already frozen\n", __func__);
	}

	bzero(unit, sizeof(struct crom_chunk));

	crom_add_chunk(src, root, unit, CROM_UDIR);
	crom_add_entry(unit, CSRKEY_SPEC, CSRVAL_ANSIT10);
	crom_add_entry(unit, CSRKEY_VER, CSRVAL_T10SBP2);
	crom_add_entry(unit, CSRKEY_COM_SPEC, CSRVAL_ANSIT10);
	crom_add_entry(unit, CSRKEY_COM_SET, CSRVAL_SCSI);

	crom_add_entry(unit, CROM_MGM, SBP_TARG_MGM >> 2);
	crom_add_entry(unit, CSRKEY_UNIT_CH, (10<<8) | 8);

	for (i = 0; i < MAX_LUN; i++) {
		lstate = sc->lstate[i];
		if (lstate == NULL)
			continue;
		crom_add_entry(unit, CSRKEY_FIRM_VER, 1);
		crom_add_entry(unit, CROM_LUN, i);
		crom_add_entry(unit, CSRKEY_MODEL, 1);
		crom_add_simple_text(src, unit, &lstate->model, "TargetMode");
	}

	/* Process for reconnection hold time */
	for (i = 0; i < MAX_LOGINS; i++) {
		login = sc->logins[i];
		if (login == NULL)
			continue;
		sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs));
		if (login->flags & F_LOGIN) {
			login->flags |= F_HOLD;
			callout_reset(&login->hold_callout,
			    hz * login->hold_sec,
			    sbp_targ_hold_expire, (void *)login);
		}
	}
}

static void
sbp_targ_post_explore(void *arg)
{
	struct sbp_targ_softc *sc;

	sc = (struct sbp_targ_softc *)arg;
	SBP_LOCK(sc);
	sc->flags &= ~F_FREEZED;
	xpt_release_simq(sc->sim, /*run queue*/TRUE);
	SBP_UNLOCK(sc);
	return;
}

static cam_status
sbp_targ_find_devs(struct sbp_targ_softc *sc, union ccb *ccb,
    struct sbp_targ_lstate **lstate, int notfound_failure)
{
	u_int lun;

	/* XXX 0 is the only valid target_id */
	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		*lstate = sc->black_hole;
		if (debug)
			printf("setting black hole for this target id(%d)\n",
			    ccb->ccb_h.target_id);
		return (CAM_REQ_CMP);
	}

	lun = ccb->ccb_h.target_lun;
	if (lun >= MAX_LUN)
		return (CAM_LUN_INVALID);

	*lstate = sc->lstate[lun];

	if (notfound_failure != 0 && *lstate == NULL) {
		if (debug)
			printf("%s: lstate for lun is invalid, target(%d), lun(%d)\n",
			    __func__, ccb->ccb_h.target_id, lun);
		return (CAM_PATH_INVALID);
	} else
		if (debug)
			printf("%s: setting lstate for tgt(%d) lun(%d)\n",
			    __func__, ccb->ccb_h.target_id, lun);

	return (CAM_REQ_CMP);
}

static void
sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb)
{
	struct ccb_en_lun *cel = &ccb->cel;
	struct sbp_targ_lstate *lstate;
	cam_status status;

	status = sbp_targ_find_devs(sc, ccb, &lstate, 0);
	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	if (cel->enable != 0) {
		if (lstate != NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Lun already enabled\n");
			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
			return;
		}
		if (cel->grp6_len != 0 || cel->grp7_len != 0) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printf("Non-zero Group Codes\n");
			return;
		}
		lstate = (struct sbp_targ_lstate *)
		    malloc(sizeof(*lstate), M_SBP_TARG, M_NOWAIT | M_ZERO);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		} else {
			if (debug)
				printf("%s: malloc'd lstate %p\n", __func__, lstate);
		}
		if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD) {
			sc->black_hole = lstate;
			if (debug)
				printf("Blackhole set due to target id == %d\n",
				    ccb->ccb_h.target_id);
		} else
			sc->lstate[ccb->ccb_h.target_lun] = lstate;

		memset(lstate, 0, sizeof(*lstate));
		lstate->sc = sc;
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
		    xpt_path_path_id(ccb->ccb_h.path),
		    xpt_path_target_id(ccb->ccb_h.path),
		    xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			free(lstate, M_SBP_TARG);
			lstate = NULL;
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		STAILQ_INIT(&lstate->logins);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printf("Lun now enabled for target mode\n");
		/* bus reset */
		sc->fd.fc->ibr(sc->fd.fc);
	} else {
		struct sbp_targ_login *login, *next;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			printf("Invalid lstate for this target\n");
			return;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printf("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printf("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			printf("status != CAM_REQ_CMP\n");
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printf("Target mode disabled\n");
		xpt_free_path(lstate->path);

		for (login = STAILQ_FIRST(&lstate->logins); login != NULL;
		    login = next) {
			next = STAILQ_NEXT(login, link);
			sbp_targ_dealloc_login(login);
		}

		if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD)
			sc->black_hole = NULL;
		else
			sc->lstate[ccb->ccb_h.target_lun] = NULL;
		if (debug)
			printf("%s: free lstate %p\n", __func__, lstate);
		free(lstate, M_SBP_TARG);
		lstate = NULL;

		/* bus reset */
		sc->fd.fc->ibr(sc->fd.fc);
	}
}

static void
sbp_targ_send_lstate_events(struct sbp_targ_softc *sc,
    struct sbp_targ_lstate *lstate)
{
#if 0
	struct ccb_hdr *ccbh;
	struct ccb_immediate_notify *inot;

	printf("%s: not implemented yet\n", __func__);
#endif
}


static __inline void
sbp_targ_remove_orb_info_locked(struct sbp_targ_login *login,
    struct orb_info *orbi)
{
	STAILQ_REMOVE(&login->orbs, orbi, orb_info, link);
}

static __inline void
sbp_targ_remove_orb_info(struct sbp_targ_login *login, struct orb_info *orbi)
{
	SBP_LOCK(orbi->sc);
	STAILQ_REMOVE(&login->orbs, orbi, orb_info, link);
	SBP_UNLOCK(orbi->sc);
}

/*
 * tag_id/init_id encoding
 *
 * tag_id and init_id have only 32 bits each.
 * scsi_target can handle only a very limited number (up to 15) of init_ids.
 * We have to encode the 48-bit ORB address and the 64-bit EUI64 into these
 * variables.
 *
 * tag_id represents the lower 32 bits of the ORB address.
 * init_id represents the login_id.
 */
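/*
 * Editor's illustrative sketch (not compiled): how the encoding described
 * above is consumed.  When a CTIO comes back from CAM, csio.init_id selects
 * the login and csio.tag_id (the low 32 bits of the ORB address) selects
 * the outstanding ORB, exactly as sbp_targ_get_orb_info() below does.
 */
#if 0
	login = lstate->sc->logins[ccb->csio.init_id];	/* login_id */
	STAILQ_FOREACH(orbi, &login->orbs, link)
		if (orbi->orb_lo == ccb->csio.tag_id)	/* ORB address low */
			break;
#endif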
static struct orb_info *
sbp_targ_get_orb_info(struct sbp_targ_lstate *lstate,
    u_int tag_id, u_int init_id)
{
	struct sbp_targ_login *login;
	struct orb_info *orbi;

	login = lstate->sc->logins[init_id];
	if (login == NULL) {
		printf("%s: no such login\n", __func__);
		return (NULL);
	}
	STAILQ_FOREACH(orbi, &login->orbs, link)
		if (orbi->orb_lo == tag_id)
			goto found;
	printf("%s: orb not found tag_id=0x%08x init_id=%d\n",
	    __func__, tag_id, init_id);
	return (NULL);
found:
	return (orbi);
}

static void
sbp_targ_abort(struct sbp_targ_softc *sc, struct orb_info *orbi)
{
	struct orb_info *norbi;

	SBP_LOCK(sc);
	for (; orbi != NULL; orbi = norbi) {
		printf("%s: status=%d ccb=%p\n", __func__, orbi->state, orbi->ccb);
		norbi = STAILQ_NEXT(orbi, link);
		if (orbi->state != ORBI_STATUS_ABORTED) {
			if (orbi->ccb != NULL) {
				orbi->ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(orbi->ccb);
				orbi->ccb = NULL;
			}
			if (orbi->state <= ORBI_STATUS_ATIO) {
				sbp_targ_remove_orb_info_locked(orbi->login, orbi);
				if (debug)
					printf("%s: free orbi %p\n", __func__, orbi);
				free(orbi, M_SBP_TARG);
				orbi = NULL;
			} else
				orbi->state = ORBI_STATUS_ABORTED;
		}
	}
	SBP_UNLOCK(sc);
}

static void
sbp_targ_free_orbi(struct fw_xfer *xfer)
{
	struct orb_info *orbi;

	if (xfer->resp != 0) {
		/* XXX */
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
	}
	orbi = (struct orb_info *)xfer->sc;
	if (orbi->page_table != NULL) {
		if (debug)
			printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
		free(orbi->page_table, M_SBP_TARG);
		orbi->page_table = NULL;
	}
	if (debug)
		printf("%s: free orbi %p\n", __func__, orbi);
	free(orbi, M_SBP_TARG);
	orbi = NULL;
	fw_xfer_free(xfer);
}

static void
sbp_targ_status_FIFO(struct orb_info *orbi,
    uint32_t fifo_hi, uint32_t fifo_lo, int dequeue)
{
	struct fw_xfer *xfer;

	if (dequeue)
		sbp_targ_remove_orb_info(orbi->login, orbi);

	xfer = fwmem_write_block(orbi->fwdev, (void *)orbi,
	    /*spd*/FWSPD_S400, fifo_hi, fifo_lo,
	    sizeof(uint32_t) * (orbi->status.len + 1), (char *)&orbi->status,
	    sbp_targ_free_orbi);

	if (xfer == NULL) {
		/* XXX */
		printf("%s: xfer == NULL\n", __func__);
	}
}
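/*
 * Editor's note (illustrative, not compiled): the write into the
 * initiator's status FIFO above is sized as (status.len + 1) quadlets,
 * i.e. status.len appears to count the quadlets that follow the first
 * quadlet of the status block.  Two worked sizes under that assumption:
 */
#if 0
	orbi->status.len = 1;	/* header only: 2 quadlets = 8 bytes   */
	orbi->status.len = 5;	/* header + sense data: 24 bytes total */
	/* both end up as sizeof(uint32_t) * (orbi->status.len + 1) */
#endif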
/*
 * Generate the appropriate CAM status for the
 * target.
 */
static void
sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
{
	struct sbp_status *sbp_status;
#if 0
	struct orb_info *norbi;
#endif

	sbp_status = &orbi->status;

	orbi->state = ORBI_STATUS_STATUS;

	sbp_status->resp = 0;	/* XXX */
	sbp_status->status = 0;	/* XXX */
	sbp_status->dead = 0;	/* XXX */

	ccb->ccb_h.status = CAM_REQ_CMP;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
		if (debug)
			printf("%s: STATUS_OK\n", __func__);
		sbp_status->len = 1;
		break;
	case SCSI_STATUS_CHECK_COND:
		if (debug)
			printf("%s: STATUS SCSI_STATUS_CHECK_COND\n", __func__);
		goto process_scsi_status;
	case SCSI_STATUS_BUSY:
		if (debug)
			printf("%s: STATUS SCSI_STATUS_BUSY\n", __func__);
		goto process_scsi_status;
	case SCSI_STATUS_CMD_TERMINATED:
process_scsi_status:
	{
		struct sbp_cmd_status *sbp_cmd_status;
		struct scsi_sense_data *sense;
		int error_code, sense_key, asc, ascq;
		uint8_t stream_bits;
		uint8_t sks[3];
		uint64_t info;
		int64_t sinfo;
		int sense_len;

		sbp_cmd_status = (struct sbp_cmd_status *)&sbp_status->data[0];
		sbp_cmd_status->status = ccb->csio.scsi_status;
		sense = &ccb->csio.sense_data;

#if 0	/* XXX What should we do? */
#if 0
		sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
#else
		norbi = STAILQ_NEXT(orbi, link);
		while (norbi) {
			printf("%s: status=%d\n", __func__, norbi->state);
			if (norbi->ccb != NULL) {
				norbi->ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(norbi->ccb);
				norbi->ccb = NULL;
			}
			sbp_targ_remove_orb_info_locked(orbi->login, norbi);
			norbi = STAILQ_NEXT(norbi, link);
			free(norbi, M_SBP_TARG);
		}
#endif
#endif

		sense_len = ccb->csio.sense_len - ccb->csio.sense_resid;
		scsi_extract_sense_len(sense, sense_len, &error_code,
		    &sense_key, &asc, &ascq, /*show_errors*/ 0);

		switch (error_code) {
		case SSD_CURRENT_ERROR:
		case SSD_DESC_CURRENT_ERROR:
			sbp_cmd_status->sfmt = SBP_SFMT_CURR;
			break;
		default:
			sbp_cmd_status->sfmt = SBP_SFMT_DEFER;
			break;
		}

		if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info,
		    &sinfo) == 0) {
			uint32_t info_trunc;
			sbp_cmd_status->valid = 1;
			info_trunc = info;

			sbp_cmd_status->info = htobe32(info_trunc);
		} else {
			sbp_cmd_status->valid = 0;
		}

		sbp_cmd_status->s_key = sense_key;

		if (scsi_get_stream_info(sense, sense_len, NULL,
		    &stream_bits) == 0) {
			sbp_cmd_status->mark =
			    (stream_bits & SSD_FILEMARK) ? 1 : 0;
			sbp_cmd_status->eom =
			    (stream_bits & SSD_EOM) ? 1 : 0;
			sbp_cmd_status->ill_len =
			    (stream_bits & SSD_ILI) ? 1 : 0;
		} else {
			sbp_cmd_status->mark = 0;
			sbp_cmd_status->eom = 0;
			sbp_cmd_status->ill_len = 0;
		}


		/* add_sense_code(_qual), info, cmd_spec_info */
		sbp_status->len = 4;

		if (scsi_get_sense_info(sense, sense_len, SSD_DESC_COMMAND,
		    &info, &sinfo) == 0) {
			uint32_t cmdspec_trunc;

			cmdspec_trunc = info;

			sbp_cmd_status->cdb = htobe32(cmdspec_trunc);
		}

		sbp_cmd_status->s_code = asc;
		sbp_cmd_status->s_qlfr = ascq;

		if (scsi_get_sense_info(sense, sense_len, SSD_DESC_FRU, &info,
		    &sinfo) == 0) {
			sbp_cmd_status->fru = (uint8_t)info;
			sbp_status->len = 5;
		} else {
			sbp_cmd_status->fru = 0;
		}

		if (scsi_get_sks(sense, sense_len, sks) == 0) {
			bcopy(sks, &sbp_cmd_status->s_keydep[0], sizeof(sks));
			sbp_status->len = 5;
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}

		break;
	}
	default:
		printf("%s: unknown scsi status 0x%x\n", __func__,
		    sbp_status->status);
	}


	sbp_targ_status_FIFO(orbi,
	    orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
}

/*
 * Invoked as a callback handler from fwmem_read/write_block
 *
 * Process the completion of a read/write of the initiator's address
 * space and pass the status on to the backend target.
 * If this is a partial read/write for a CCB, decrementing the orbi's
 * refcount records that this piece of the read/write is complete.
 */
static void
sbp_targ_cam_done(struct fw_xfer *xfer)
{
	struct orb_info *orbi;
	union ccb *ccb;

	orbi = (struct orb_info *)xfer->sc;

	if (debug)
		printf("%s: resp=%d refcount=%d\n", __func__,
		    xfer->resp, orbi->refcount);

	if (xfer->resp != 0) {
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
		orbi->status.resp = SBP_TRANS_FAIL;
		orbi->status.status = OBJ_DATA | SBE_TIMEOUT/*XXX*/;
		orbi->status.dead = 1;
		sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
	}

	orbi->refcount--;

	ccb = orbi->ccb;
	if (orbi->refcount == 0) {
		orbi->ccb = NULL;
		if (orbi->state == ORBI_STATUS_ABORTED) {
			if (debug)
				printf("%s: orbi aborted\n", __func__);
			sbp_targ_remove_orb_info(orbi->login, orbi);
			if (orbi->page_table != NULL) {
				if (debug)
					printf("%s: free orbi->page_table %p\n",
					    __func__, orbi->page_table);
				free(orbi->page_table, M_SBP_TARG);
			}
			if (debug)
				printf("%s: free orbi %p\n", __func__, orbi);
			free(orbi, M_SBP_TARG);
			orbi = NULL;
		} else if (orbi->status.resp == ORBI_STATUS_NONE) {
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				if (debug)
					printf("%s: CAM_SEND_STATUS set %0x\n", __func__, ccb->ccb_h.flags);
				sbp_targ_send_status(orbi, ccb);
			} else {
				if (debug)
					printf("%s: CAM_SEND_STATUS not set %0x\n", __func__, ccb->ccb_h.flags);
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			SBP_LOCK(orbi->sc);
			xpt_done(ccb);
			SBP_UNLOCK(orbi->sc);
		} else {
			orbi->status.len = 1;
			sbp_targ_status_FIFO(orbi,
			    orbi->login->fifo_hi, orbi->login->fifo_lo,
			    /*dequeue*/1);
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			SBP_LOCK(orbi->sc);
			xpt_done(ccb);
			SBP_UNLOCK(orbi->sc);
		}
	}

	fw_xfer_free(xfer);
}
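/*
 * Editor's illustrative sketch (not compiled): the completion pattern
 * shared by sbp_targ_xfer_buf()/sbp_targ_pt_done() and the handler above.
 * refcount counts outstanding fwmem block transfers for one ORB; only
 * when it drops to zero is the CCB completed and/or the SBP status block
 * sent.  A hypothetical two-chunk transfer:
 */
#if 0
	orbi->refcount++;	/* first fwmem_read/write_block issued  */
	orbi->refcount++;	/* second fwmem_read/write_block issued */
	/* sbp_targ_cam_done() then runs once per completion: */
	orbi->refcount--;	/* first completion: 1 left, do nothing */
	orbi->refcount--;	/* second completion: now 0, so either
				 * sbp_targ_send_status() or xpt_done() */
#endif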
static cam_status
sbp_targ_abort_ccb(struct sbp_targ_softc *sc, union ccb *ccb)
{
	union ccb *accb;
	struct sbp_targ_lstate *lstate;
	struct ccb_hdr_slist *list;
	struct ccb_hdr *curelm;
	int found;
	cam_status status;

	status = sbp_targ_find_devs(sc, ccb, &lstate, 0);
	if (status != CAM_REQ_CMP)
		return (status);

	accb = ccb->cab.abort_ccb;

	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
		list = &lstate->accept_tios;
	else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
		list = &lstate->immed_notifies;
	else
		return (CAM_UA_ABORT);

	curelm = SLIST_FIRST(list);
	found = 0;
	if (curelm == &accb->ccb_h) {
		found = 1;
		SLIST_REMOVE_HEAD(list, sim_links.sle);
	} else {
		while (curelm != NULL) {
			struct ccb_hdr *nextelm;

			nextelm = SLIST_NEXT(curelm, sim_links.sle);
			if (nextelm == &accb->ccb_h) {
				found = 1;
				SLIST_NEXT(curelm, sim_links.sle) =
				    SLIST_NEXT(nextelm, sim_links.sle);
				break;
			}
			curelm = nextelm;
		}
	}
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	printf("%s: not found\n", __func__);
	return (CAM_PATH_INVALID);
}

/*
 * Directly execute a read or write to the initiator
 * address space and set the handler (sbp_targ_cam_done) to
 * process the completion from the SIM to the target.
 * Bump orbi->refcount to indicate that a read/write
 * is in flight to/from the initiator.
 */
static void
sbp_targ_xfer_buf(struct orb_info *orbi, u_int offset,
    uint16_t dst_hi, uint32_t dst_lo, u_int size,
    void (*hand)(struct fw_xfer *))
{
	struct fw_xfer *xfer;
	u_int len, ccb_dir, off = 0;
	char *ptr;

	if (debug > 1)
		printf("%s: offset=%d size=%d\n", __func__, offset, size);
	ccb_dir = orbi->ccb->ccb_h.flags & CAM_DIR_MASK;
	ptr = (char *)orbi->ccb->csio.data_ptr + offset;

	while (size > 0) {
		/* XXX assume dst_lo + off doesn't overflow */
		len = MIN(size, 2048 /* XXX */);
		size -= len;
		orbi->refcount++;
		if (ccb_dir == CAM_DIR_OUT) {
			if (debug)
				printf("%s: CAM_DIR_OUT --> read block in?\n", __func__);
			xfer = fwmem_read_block(orbi->fwdev,
			    (void *)orbi, /*spd*/FWSPD_S400,
			    dst_hi, dst_lo + off, len,
			    ptr + off, hand);
		} else {
			if (debug)
				printf("%s: CAM_DIR_IN --> write block out?\n", __func__);
			xfer = fwmem_write_block(orbi->fwdev,
			    (void *)orbi, /*spd*/FWSPD_S400,
			    dst_hi, dst_lo + off, len,
			    ptr + off, hand);
		}
		if (xfer == NULL) {
			printf("%s: xfer == NULL", __func__);
			/* XXX what should we do?? */
			orbi->refcount--;
		}
		off += len;
	}
}
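/*
 * Editor's note (illustrative): sbp_targ_xfer_buf() above splits a CTIO
 * into fwmem block transfers of at most 2048 bytes.  For example, a
 * hypothetical 5000-byte transfer to dst_lo would be issued as three
 * blocks, each bumping orbi->refcount:
 *
 *	offset 0	dst_lo + 0	2048 bytes
 *	offset 2048	dst_lo + 2048	2048 bytes
 *	offset 4096	dst_lo + 4096	 904 bytes
 */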
static void
sbp_targ_pt_done(struct fw_xfer *xfer)
{
	struct orb_info *orbi;
	struct unrestricted_page_table_fmt *pt;
	uint32_t i;

	orbi = (struct orb_info *)xfer->sc;

	if (orbi->state == ORBI_STATUS_ABORTED) {
		if (debug)
			printf("%s: orbi aborted\n", __func__);
		sbp_targ_remove_orb_info(orbi->login, orbi);
		if (debug) {
			printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
			printf("%s: free orbi %p\n", __func__, orbi);
		}
		free(orbi->page_table, M_SBP_TARG);
		free(orbi, M_SBP_TARG);
		orbi = NULL;
		fw_xfer_free(xfer);
		return;
	}
	if (xfer->resp != 0) {
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
		orbi->status.resp = SBP_TRANS_FAIL;
		orbi->status.status = OBJ_PT | SBE_TIMEOUT/*XXX*/;
		orbi->status.dead = 1;
		orbi->status.len = 1;
		sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));

		if (debug)
			printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);

		sbp_targ_status_FIFO(orbi,
		    orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
		free(orbi->page_table, M_SBP_TARG);
		orbi->page_table = NULL;
		fw_xfer_free(xfer);
		return;
	}
	orbi->refcount++;
	/*
	 * Set endianness here so we don't have
	 * to deal with it later
	 */
	for (i = 0, pt = orbi->page_table; i < orbi->orb4.data_size; i++, pt++) {
		pt->segment_len = ntohs(pt->segment_len);
		if (debug)
			printf("%s: segment_len = %u\n", __func__, pt->segment_len);
		pt->segment_base_high = ntohs(pt->segment_base_high);
		pt->segment_base_low = ntohl(pt->segment_base_low);
	}

	sbp_targ_xfer_pt(orbi);

	orbi->refcount--;
	if (orbi->refcount == 0)
		printf("%s: refcount == 0\n", __func__);

	fw_xfer_free(xfer);
	return;
}
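/*
 * Editor's illustrative sketch (not compiled): one unrestricted page
 * table entry as it looks after the byte-swapping loop above.  The
 * values are hypothetical; each entry describes one segment of the
 * initiator's buffer as a 16-bit length plus a 48-bit base address.
 */
#if 0
	struct unrestricted_page_table_fmt pte = {
		.segment_len = 2048,		/* bytes in this segment */
		.segment_base_high = 0x0001,	/* address bits 47:32 */
		.segment_base_low = 0x20000000,	/* address bits 31:0 */
	};
	/* sbp_targ_xfer_buf(orbi, offset, pte.segment_base_high,
	 *     pte.segment_base_low, len, sbp_targ_cam_done); */
#endif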
static void
sbp_targ_xfer_pt(struct orb_info *orbi)
{
	union ccb *ccb;
	uint32_t res, offset, len;

	ccb = orbi->ccb;
	if (debug)
		printf("%s: dxfer_len=%d\n", __func__, ccb->csio.dxfer_len);
	res = ccb->csio.dxfer_len;
	/*
	 * If the page table required multiple CTIOs to
	 * complete, then cur_pte is non-NULL
	 * and we need to start from the last position.
	 * If this is the first pass over a page table
	 * then we just start at the beginning of the page
	 * table.
	 *
	 * Parse the unrestricted page table and figure out where we need
	 * to shove the data from this read request.
	 */
	for (offset = 0, len = 0; (res != 0) && (orbi->cur_pte < orbi->last_pte); offset += len) {
		len = MIN(orbi->cur_pte->segment_len, res);
		res -= len;
		if (debug)
			printf("%s: page_table: %04x:%08x segment_len(%u) res(%u) len(%u)\n",
			    __func__, orbi->cur_pte->segment_base_high,
			    orbi->cur_pte->segment_base_low,
			    orbi->cur_pte->segment_len,
			    res, len);
		sbp_targ_xfer_buf(orbi, offset,
		    orbi->cur_pte->segment_base_high,
		    orbi->cur_pte->segment_base_low,
		    len, sbp_targ_cam_done);
		/*
		 * If we have only written partially to
		 * this page table, then we need to save
		 * our position for the next CTIO.  If we
		 * have completed the page table, then we
		 * are safe to move on to the next entry.
		 */
		if (len == orbi->cur_pte->segment_len) {
			orbi->cur_pte++;
		} else {
			uint32_t saved_base_low;

			/* Handle transfers that cross a 4GB boundary. */
			saved_base_low = orbi->cur_pte->segment_base_low;
			orbi->cur_pte->segment_base_low += len;
			if (orbi->cur_pte->segment_base_low < saved_base_low)
				orbi->cur_pte->segment_base_high++;

			orbi->cur_pte->segment_len -= len;
		}
	}
	if (debug) {
		printf("%s: base_low(%08x) page_table_off(%p) last_block(%u)\n",
		    __func__, orbi->cur_pte->segment_base_low,
		    orbi->cur_pte, orbi->last_block_read);
	}
	if (res != 0)
		printf("Warning - short pt encountered. "
		    "Could not transfer all data.\n");
	return;
}

/*
 * Create the page table in local memory
 * and transfer it from the initiator
 * in order to know where we are supposed
 * to put the data.
 */

static void
sbp_targ_fetch_pt(struct orb_info *orbi)
{
	struct fw_xfer *xfer;

	/*
	 * Pull in the page table from the initiator
	 * and set up for data from our
	 * backend device.
	 */
	if (orbi->page_table == NULL) {
		orbi->page_table = malloc(orbi->orb4.data_size *
		    sizeof(struct unrestricted_page_table_fmt),
		    M_SBP_TARG, M_NOWAIT | M_ZERO);
		if (orbi->page_table == NULL)
			goto error;
		orbi->cur_pte = orbi->page_table;
		orbi->last_pte = orbi->page_table + orbi->orb4.data_size;
		orbi->last_block_read = orbi->orb4.data_size;
		if (debug && orbi->page_table != NULL)
			printf("%s: malloc'd orbi->page_table(%p), orb4.data_size(%u)\n",
			    __func__, orbi->page_table, orbi->orb4.data_size);

		xfer = fwmem_read_block(orbi->fwdev, (void *)orbi, /*spd*/FWSPD_S400,
		    orbi->data_hi, orbi->data_lo, orbi->orb4.data_size *
		    sizeof(struct unrestricted_page_table_fmt),
		    (void *)orbi->page_table, sbp_targ_pt_done);

		if (xfer != NULL)
			return;
	} else {
		/*
		 * This is a CTIO for a page table we have
		 * already malloc'd, so just directly invoke
		 * the xfer function on the orbi.
		 */
		sbp_targ_xfer_pt(orbi);
		return;
	}
error:
	orbi->ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	if (debug)
		printf("%s: free orbi->page_table %p due to xfer == NULL\n", __func__, orbi->page_table);
	if (orbi->page_table != NULL) {
		free(orbi->page_table, M_SBP_TARG);
		orbi->page_table = NULL;
	}
	xpt_done(orbi->ccb);
	return;
}

static void
sbp_targ_action1(struct cam_sim *sim, union ccb *ccb)
{
	struct sbp_targ_softc *sc;
	struct sbp_targ_lstate *lstate;
	cam_status status;
	u_int ccb_dir;

	sc = (struct sbp_targ_softc *)cam_sim_softc(sim);

	status = sbp_targ_find_devs(sc, ccb, &lstate, TRUE);

	switch (ccb->ccb_h.func_code) {
	case XPT_CONT_TARGET_IO:
	{
		struct orb_info *orbi;

		if (debug)
			printf("%s: XPT_CONT_TARGET_IO (0x%08x)\n",
			    __func__, ccb->csio.tag_id);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		/* XXX transfer from/to initiator */
		orbi = sbp_targ_get_orb_info(lstate,
		    ccb->csio.tag_id, ccb->csio.init_id);
		if (orbi == NULL) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;	/* XXX */
			xpt_done(ccb);
			break;
		}
		if (orbi->state == ORBI_STATUS_ABORTED) {
			if (debug)
				printf("%s: ctio aborted\n", __func__);
			sbp_targ_remove_orb_info_locked(orbi->login, orbi);
			if (debug)
				printf("%s: free orbi %p\n", __func__, orbi);
			free(orbi, M_SBP_TARG);
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			xpt_done(ccb);
			break;
		}
		orbi->state = ORBI_STATUS_CTIO;

		orbi->ccb = ccb;
		ccb_dir = ccb->ccb_h.flags & CAM_DIR_MASK;

		/* XXX */
		if (ccb->csio.dxfer_len == 0)
			ccb_dir = CAM_DIR_NONE;

		/* Sanity check */
		if (ccb_dir == CAM_DIR_IN && orbi->orb4.dir == 0)
			printf("%s: direction mismatch\n", __func__);

		/* check page table */
		if (ccb_dir != CAM_DIR_NONE && orbi->orb4.page_table_present) {
			if (debug)
				printf("%s: page_table_present\n",
				    __func__);
			if (orbi->orb4.page_size != 0) {
				printf("%s: unsupported pagesize %d != 0\n",
				    __func__, orbi->orb4.page_size);
				ccb->ccb_h.status = CAM_REQ_INVALID;
				xpt_done(ccb);
				break;
			}
			sbp_targ_fetch_pt(orbi);
			break;
		}

		/* Sanity check */
		if (ccb_dir != CAM_DIR_NONE) {
			sbp_targ_xfer_buf(orbi, 0, orbi->data_hi,
			    orbi->data_lo,
			    MIN(orbi->orb4.data_size, ccb->csio.dxfer_len),
			    sbp_targ_cam_done);
			if (orbi->orb4.data_size > ccb->csio.dxfer_len) {
				orbi->data_lo += ccb->csio.dxfer_len;
				orbi->orb4.data_size -= ccb->csio.dxfer_len;
			}
		}

		if (ccb_dir == CAM_DIR_NONE) {
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				/* XXX */
				SBP_UNLOCK(sc);
				sbp_targ_send_status(orbi, ccb);
				SBP_LOCK(sc);
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
		}
		break;
	}
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
		    sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		if ((lstate->flags & F_ATIO_STARVED) != 0) {
			struct sbp_targ_login *login;

			if (debug)
				printf("%s: new atio arrived\n", __func__);
			lstate->flags &= ~F_ATIO_STARVED;
			STAILQ_FOREACH(login, &lstate->logins, link)
				if ((login->flags & F_ATIO_STARVED) != 0) {
					login->flags &= ~F_ATIO_STARVED;
					sbp_targ_fetch_orb(lstate->sc,
					    login->fwdev,
					    login->last_hi, login->last_lo,
					    login, FETCH_CMD);
				}
		}
		break;
	case XPT_NOTIFY_ACKNOWLEDGE:	/* recycle notify ack */
	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
		    sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		sbp_targ_send_lstate_events(sc, lstate);
		break;
	case XPT_EN_LUN:
		sbp_targ_en_lun(sc, ccb);
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;	/* XXX??? */
		cpi->hba_inquiry = PI_TAG_ABLE;
		cpi->target_sprt = PIT_PROCESSOR
				 | PIT_DISCONNECT
				 | PIT_TERM_IO;
		cpi->transport = XPORT_SPI;	/* FIXME add XPORT_FW type to cam */
		cpi->hba_misc = PIM_NOBUSRESET | PIM_NO_6_BYTE;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;	/* XXX */
		cpi->max_lun = MAX_LUN - 1;
		cpi->initiator_id = 7;	/* XXX */
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 400 * 1000 / 8;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "SBP_TARG", HBA_IDLEN);
		strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;

		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:
	{
		union ccb *accb = ccb->cab.abort_ccb;

		switch (accb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMEDIATE_NOTIFY:
			ccb->ccb_h.status = sbp_targ_abort_ccb(sc, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			/* XXX */
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		default:
			printf("%s: aborting unknown function %d\n",
			    __func__, accb->ccb_h.func_code);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		xpt_done(ccb);
		break;
	}
#ifdef CAM_NEW_TRAN_CODE
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_FW;	/* should have a FireWire transport type */
		cts->transport_version = 2;
		spi->valid = CTS_SPI_VALID_DISC;
		spi->flags = CTS_SPI_FLAGS_DISC_ENB;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
#if 0
		printf("%s:%d:%d XPT_GET_TRAN_SETTINGS:\n",
		    device_get_nameunit(sc->fd.dev),
		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
#endif
		cts->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
#endif

	default:
		printf("%s: unknown function 0x%x\n",
		    __func__, ccb->ccb_h.func_code);
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
	return;
}

static void
sbp_targ_action(struct cam_sim *sim, union ccb *ccb)
{
	int s;

	s = splfw();
	sbp_targ_action1(sim, ccb);
	splx(s);
}

static void
sbp_targ_poll(struct cam_sim *sim)
{
	/* XXX */
	return;
}

static void
sbp_targ_cmd_handler(struct fw_xfer *xfer)
{
	struct fw_pkt *fp;
	uint32_t *orb;
	struct corb4 *orb4;
	struct orb_info *orbi;
	struct ccb_accept_tio *atio;
	u_char *bytes;
	int i;

	orbi = (struct orb_info *)xfer->sc;
	if (xfer->resp != 0) {
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
		orbi->status.resp = SBP_TRANS_FAIL;
		orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/;
		orbi->status.dead = 1;
		orbi->status.len = 1;
		sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));

		sbp_targ_status_FIFO(orbi,
		    orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
		fw_xfer_free(xfer);
		return;
	}
	fp = &xfer->recv.hdr;

	atio = orbi->atio;

	if (orbi->state == ORBI_STATUS_ABORTED) {
		printf("%s: aborted\n", __func__);
		sbp_targ_remove_orb_info(orbi->login, orbi);
		atio->ccb_h.status = CAM_REQ_ABORTED;
		SBP_LOCK(orbi->sc);
		xpt_done((union ccb*)atio);
		SBP_UNLOCK(orbi->sc);
		/* free orbi only after the last orbi->sc dereference */
		free(orbi, M_SBP_TARG);
		goto done0;
	}
	orbi->state = ORBI_STATUS_ATIO;

	orb = orbi->orb;
	/* swap payload except SCSI command */
	for (i = 0; i < 5; i++)
		orb[i] = ntohl(orb[i]);

	orb4 = (struct corb4 *)&orb[4];
	if (orb4->rq_fmt != 0) {
		/* XXX */
		printf("%s: rq_fmt(%d) != 0\n", __func__, orb4->rq_fmt);
	}

	atio->ccb_h.target_id = 0;	/* XXX */
	atio->ccb_h.target_lun = orbi->login->lstate->lun;
	atio->sense_len = 0;
	atio->tag_action = MSG_SIMPLE_TASK;
	atio->tag_id = orbi->orb_lo;
	atio->init_id = orbi->login->id;

	atio->ccb_h.flags |= CAM_TAG_ACTION_VALID;
	bytes = (u_char *)&orb[5];
	if (debug)
		printf("%s: %p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
		    __func__, (void *)atio,
		    bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
		    bytes[5], bytes[6], bytes[7], bytes[8], bytes[9]);
	switch (bytes[0] >> 5) {
	case 0:
		atio->cdb_len = 6;
		break;
	case 1:
	case 2:
		atio->cdb_len = 10;
		break;
	case 4:
		atio->cdb_len = 16;
		break;
	case 5:
		atio->cdb_len = 12;
		break;
	case 3:
	default:
		/* Only copy the opcode. */
		atio->cdb_len = 1;
		printf("Reserved or VU command code type encountered\n");
		break;
	}

	memcpy(atio->cdb_io.cdb_bytes, bytes, atio->cdb_len);

	atio->ccb_h.status |= CAM_CDB_RECVD;

	/* next ORB */
	if ((orb[0] & (1<<31)) == 0) {
		if (debug)
			printf("%s: fetch next orb\n", __func__);
		orbi->status.src = SRC_NEXT_EXISTS;
		sbp_targ_fetch_orb(orbi->sc, orbi->fwdev,
		    orb[0], orb[1], orbi->login, FETCH_CMD);
	} else {
		orbi->status.src = SRC_NO_NEXT;
		orbi->login->flags &= ~F_LINK_ACTIVE;
	}

	orbi->data_hi = orb[2];
	orbi->data_lo = orb[3];
	orbi->orb4 = *orb4;

	SBP_LOCK(orbi->sc);
	xpt_done((union ccb*)atio);
	SBP_UNLOCK(orbi->sc);
done0:
	fw_xfer_free(xfer);
	return;
}

static struct sbp_targ_login *
sbp_targ_get_login(struct sbp_targ_softc *sc, struct fw_device *fwdev, int lun)
{
	struct sbp_targ_lstate *lstate;
	struct sbp_targ_login *login;
	int i;

	lstate = sc->lstate[lun];

	STAILQ_FOREACH(login, &lstate->logins, link)
		if (login->fwdev == fwdev)
			return (login);

	for (i = 0; i < MAX_LOGINS; i++)
		if (sc->logins[i] == NULL)
			goto found;

	printf("%s: increase MAX_LOGINS\n", __func__);
	return (NULL);

found:
	login = (struct sbp_targ_login *)malloc(
	    sizeof(struct sbp_targ_login), M_SBP_TARG, M_NOWAIT | M_ZERO);

	if (login == NULL) {
		printf("%s: malloc failed\n", __func__);
		return (NULL);
	}

	login->id = i;
	login->fwdev = fwdev;
	login->lstate = lstate;
	login->last_hi = 0xffff;
	login->last_lo = 0xffffffff;
	login->hold_sec = 1;
	STAILQ_INIT(&login->orbs);
	CALLOUT_INIT(&login->hold_callout);
	sc->logins[i] = login;
	return (login);
}

static void
sbp_targ_mgm_handler(struct fw_xfer *xfer)
{
	struct sbp_targ_lstate *lstate;
	struct sbp_targ_login *login;
	struct fw_pkt *fp;
	uint32_t *orb;
	struct morb4 *orb4;
	struct orb_info *orbi;
	int i;

	orbi = (struct orb_info *)xfer->sc;
	if (xfer->resp != 0) {
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
		orbi->status.resp = SBP_TRANS_FAIL;
		orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/;
		orbi->status.dead = 1;
		orbi->status.len = 1;
		sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));

		sbp_targ_status_FIFO(orbi,
		    orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/0);
		fw_xfer_free(xfer);
		return;
	}
	fp = &xfer->recv.hdr;

	orb = orbi->orb;
	/* swap payload */
	for (i = 0; i < 8; i++) {
		orb[i] = ntohl(orb[i]);
	}
	orb4 = (struct morb4 *)&orb[4];
	if (debug)
		printf("%s: %s\n", __func__, orb_fun_name[orb4->fun]);

	orbi->status.src = SRC_NO_NEXT;

	switch (orb4->fun << 16) {
	case ORB_FUN_LGI:
	{
		int exclusive = 0, lun;

		if (orb[4] & ORB_EXV)
			exclusive = 1;

		lun = orb4->id;
		/* validate lun before indexing lstate[] */
		if (lun >= MAX_LUN ||
		    (lstate = orbi->sc->lstate[lun]) == NULL ||
		    (exclusive &&
		    STAILQ_FIRST(&lstate->logins) != NULL &&
		    STAILQ_FIRST(&lstate->logins)->fwdev != orbi->fwdev)
		    ) {
			/* error */
			orbi->status.dead = 1;
			orbi->status.status = STATUS_ACCESS_DENY;
			orbi->status.len = 1;
			break;
		}

		/* allocate login */
		login = sbp_targ_get_login(orbi->sc, orbi->fwdev, lun);
		if (login == NULL) {
			printf("%s: sbp_targ_get_login failed\n",
			    __func__);
			orbi->status.dead = 1;
			orbi->status.status = STATUS_RES_UNAVAIL;
			orbi->status.len = 1;
			break;
		}
		printf("%s: login id=%d\n", __func__, login->id);

		login->fifo_hi = orb[6];
		login->fifo_lo = orb[7];
		login->loginres.len = htons(sizeof(uint32_t) * 4);
		login->loginres.id = htons(login->id);
		login->loginres.cmd_hi = htons(SBP_TARG_BIND_HI);
		login->loginres.cmd_lo = htonl(SBP_TARG_BIND_LO(login->id));
		login->loginres.recon_hold = htons(login->hold_sec);

		STAILQ_INSERT_TAIL(&lstate->logins, login, link);
		fwmem_write_block(orbi->fwdev, NULL, /*spd*/FWSPD_S400, orb[2], orb[3],
		    sizeof(struct sbp_login_res), (void *)&login->loginres,
		    fw_asy_callback_free);
		/* XXX return status after loginres is successfully written */
		break;
	}
	case ORB_FUN_RCN:
		login = orbi->sc->logins[orb4->id];
		if (login != NULL && login->fwdev == orbi->fwdev) {
			login->flags &= ~F_HOLD;
			callout_stop(&login->hold_callout);
			printf("%s: reconnected id=%d\n",
			    __func__, login->id);
		} else {
			orbi->status.dead = 1;
			orbi->status.status = STATUS_ACCESS_DENY;
			printf("%s: reconnection failed id=%d\n",
			    __func__, orb4->id);
		}
		break;
	case ORB_FUN_LGO:
		login = orbi->sc->logins[orb4->id];
		if (login->fwdev != orbi->fwdev) {
			printf("%s: wrong initiator\n", __func__);
			break;
		}
		sbp_targ_dealloc_login(login);
		break;
	default:
		printf("%s: %s not implemented yet\n",
		    __func__, orb_fun_name[orb4->fun]);
		break;
	}
	orbi->status.len = 1;
	sbp_targ_status_FIFO(orbi, orb[6], orb[7], /*dequeue*/0);
	fw_xfer_free(xfer);
	return;
}

static void
sbp_targ_pointer_handler(struct fw_xfer *xfer)
{
	struct orb_info *orbi;
	uint32_t orb0, orb1;

	orbi = (struct orb_info *)xfer->sc;
	if (xfer->resp != 0) {
		printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
		goto done;
	}

	orb0 = ntohl(orbi->orb[0]);
	orb1 = ntohl(orbi->orb[1]);
	if ((orb0 & (1U << 31)) != 0) {
		printf("%s: invalid pointer\n", __func__);
		goto done;
	}
	sbp_targ_fetch_orb(orbi->login->lstate->sc, orbi->fwdev,
	    (uint16_t)orb0, orb1, orbi->login, FETCH_CMD);
done:
	free(orbi, M_SBP_TARG);
	fw_xfer_free(xfer);
	return;
}
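/*
 * Editor's note: sbp_targ_fetch_orb() below is the single entry point
 * for pulling ORBs out of initiator memory.  As used in this file:
 * FETCH_MGM reads an 8-quadlet management ORB and hands it to
 * sbp_targ_mgm_handler(), FETCH_CMD reads an 8-quadlet command ORB for
 * sbp_targ_cmd_handler() (consuming one queued ATIO), and FETCH_POINTER
 * reads a 2-quadlet ORB pointer for sbp_targ_pointer_handler().
 */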
static void
sbp_targ_fetch_orb(struct sbp_targ_softc *sc, struct fw_device *fwdev,
    uint16_t orb_hi, uint32_t orb_lo, struct sbp_targ_login *login,
    int mode)
{
	struct orb_info *orbi;

	if (debug)
		printf("%s: fetch orb %04x:%08x\n", __func__, orb_hi, orb_lo);
	orbi = malloc(sizeof(struct orb_info), M_SBP_TARG, M_NOWAIT | M_ZERO);
	if (orbi == NULL) {
		printf("%s: malloc failed\n", __func__);
		return;
	}
	orbi->sc = sc;
	orbi->fwdev = fwdev;
	orbi->login = login;
	orbi->orb_hi = orb_hi;
	orbi->orb_lo = orb_lo;
	orbi->status.orb_hi = htons(orb_hi);
	orbi->status.orb_lo = htonl(orb_lo);
	orbi->page_table = NULL;

	switch (mode) {
	case FETCH_MGM:
		fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
		    sizeof(uint32_t) * 8, &orbi->orb[0],
		    sbp_targ_mgm_handler);
		break;
	case FETCH_CMD:
		orbi->state = ORBI_STATUS_FETCH;
		login->last_hi = orb_hi;
		login->last_lo = orb_lo;
		login->flags |= F_LINK_ACTIVE;
		/* dequeue */
		SBP_LOCK(sc);
		orbi->atio = (struct ccb_accept_tio *)
		    SLIST_FIRST(&login->lstate->accept_tios);
		if (orbi->atio == NULL) {
			SBP_UNLOCK(sc);
			printf("%s: no free atio\n", __func__);
			login->lstate->flags |= F_ATIO_STARVED;
			login->flags |= F_ATIO_STARVED;
#if 0
			/* XXX ?? */
			login->fwdev = fwdev;
#endif
			break;
		}
		SLIST_REMOVE_HEAD(&login->lstate->accept_tios, sim_links.sle);
		STAILQ_INSERT_TAIL(&login->orbs, orbi, link);
		SBP_UNLOCK(sc);
		fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
		    sizeof(uint32_t) * 8, &orbi->orb[0],
		    sbp_targ_cmd_handler);
		break;
	case FETCH_POINTER:
		orbi->state = ORBI_STATUS_POINTER;
		login->flags |= F_LINK_ACTIVE;
		fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
		    sizeof(uint32_t) * 2, &orbi->orb[0],
		    sbp_targ_pointer_handler);
		break;
	default:
		printf("%s: invalid mode %d\n", __func__, mode);
	}
}

static void
sbp_targ_resp_callback(struct fw_xfer *xfer)
{
	struct sbp_targ_softc *sc;
	int s;

	if (debug)
		printf("%s: xfer=%p\n", __func__, xfer);
	sc = (struct sbp_targ_softc *)xfer->sc;
	fw_xfer_unload(xfer);
	xfer->recv.pay_len = SBP_TARG_RECV_LEN;
	xfer->hand = sbp_targ_recv;
	s = splfw();
	STAILQ_INSERT_TAIL(&sc->fwb.xferlist, xfer, link);
	splx(s);
}
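/*
 * Editor's note: sbp_targ_cmd() below dispatches on the write offset
 * within a login's 0x20-byte command agent window (the low address
 * modulo 0x20 computed in sbp_targ_recv()):
 *
 *	0x00	AGENT_STATE			(ignored)
 *	0x04	AGENT_RESET
 *	0x08	ORB_POINTER
 *	0x10	DOORBELL
 *	0x14	UNSOLICITED_STATE_ENABLE	(ignored)
 */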
static int
sbp_targ_cmd(struct fw_xfer *xfer, struct fw_device *fwdev, int login_id,
    int reg)
{
	struct sbp_targ_login *login;
	struct sbp_targ_softc *sc;
	int rtcode = 0;

	if (login_id < 0 || login_id >= MAX_LOGINS)
		return (RESP_ADDRESS_ERROR);

	sc = (struct sbp_targ_softc *)xfer->sc;
	login = sc->logins[login_id];
	if (login == NULL)
		return (RESP_ADDRESS_ERROR);

	if (login->fwdev != fwdev) {
		/* XXX */
		return (RESP_ADDRESS_ERROR);
	}

	switch (reg) {
	case 0x08:	/* ORB_POINTER */
		if (debug)
			printf("%s: ORB_POINTER(%d)\n", __func__, login_id);
		if ((login->flags & F_LINK_ACTIVE) != 0) {
			if (debug)
				printf("link active (ORB_POINTER)\n");
			break;
		}
		sbp_targ_fetch_orb(sc, fwdev,
		    ntohl(xfer->recv.payload[0]),
		    ntohl(xfer->recv.payload[1]),
		    login, FETCH_CMD);
		break;
	case 0x04:	/* AGENT_RESET */
		if (debug)
			printf("%s: AGENT RESET(%d)\n", __func__, login_id);
		login->last_hi = 0xffff;
		login->last_lo = 0xffffffff;
		sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs));
		break;
	case 0x10:	/* DOORBELL */
		if (debug)
			printf("%s: DOORBELL(%d)\n", __func__, login_id);
		if (login->last_hi == 0xffff &&
		    login->last_lo == 0xffffffff) {
			printf("%s: no previous pointer(DOORBELL)\n",
			    __func__);
			break;
		}
		if ((login->flags & F_LINK_ACTIVE) != 0) {
			if (debug)
				printf("link active (DOORBELL)\n");
			break;
		}
		sbp_targ_fetch_orb(sc, fwdev,
		    login->last_hi, login->last_lo,
		    login, FETCH_POINTER);
		break;
	case 0x00:	/* AGENT_STATE */
		printf("%s: AGENT_STATE (%d:ignore)\n", __func__, login_id);
		break;
	case 0x14:	/* UNSOLICITED_STATE_ENABLE */
		printf("%s: UNSOLICITED_STATE_ENABLE (%d:ignore)\n",
		    __func__, login_id);
		break;
	default:
		printf("%s: invalid register %d(%d)\n",
		    __func__, reg, login_id);
		rtcode = RESP_ADDRESS_ERROR;
	}

	return (rtcode);
}

static int
sbp_targ_mgm(struct fw_xfer *xfer, struct fw_device *fwdev)
{
	struct sbp_targ_softc *sc;
	struct fw_pkt *fp;

	sc = (struct sbp_targ_softc *)xfer->sc;

	fp = &xfer->recv.hdr;
	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
		printf("%s: tcode = %d\n", __func__, fp->mode.wreqb.tcode);
		return (RESP_TYPE_ERROR);
	}

	sbp_targ_fetch_orb(sc, fwdev,
	    ntohl(xfer->recv.payload[0]),
	    ntohl(xfer->recv.payload[1]),
	    NULL, FETCH_MGM);

	return (0);
}

static void
sbp_targ_recv(struct fw_xfer *xfer)
{
	struct fw_pkt *fp, *sfp;
	struct fw_device *fwdev;
	uint32_t lo;
	int s, rtcode;
	struct sbp_targ_softc *sc;

	s = splfw();
	sc = (struct sbp_targ_softc *)xfer->sc;
	fp = &xfer->recv.hdr;
	fwdev = fw_noderesolve_nodeid(sc->fd.fc, fp->mode.wreqb.src & 0x3f);
	if (fwdev == NULL) {
		printf("%s: cannot resolve nodeid=%d\n",
		    __func__, fp->mode.wreqb.src & 0x3f);
		rtcode = RESP_TYPE_ERROR;	/* XXX */
		goto done;
	}
	lo = fp->mode.wreqb.dest_lo;

	if (lo == SBP_TARG_BIND_LO(-1))
		rtcode = sbp_targ_mgm(xfer, fwdev);
	else if (lo >= SBP_TARG_BIND_LO(0))
		rtcode = sbp_targ_cmd(xfer, fwdev, SBP_TARG_LOGIN_ID(lo),
		    lo % 0x20);
	else
		rtcode = RESP_ADDRESS_ERROR;

done:
	if (rtcode != 0)
		printf("%s: rtcode = %d\n", __func__, rtcode);
	sfp = &xfer->send.hdr;
	xfer->send.spd = FWSPD_S400;
	xfer->hand = sbp_targ_resp_callback;
	sfp->mode.wres.dst = fp->mode.wreqb.src;
	sfp->mode.wres.tlrt = fp->mode.wreqb.tlrt;
	sfp->mode.wres.tcode = FWTCODE_WRES;
	sfp->mode.wres.rtcode = rtcode;
	sfp->mode.wres.pri = 0;

	fw_asyreq(xfer->fc, -1, xfer);
	splx(s);
}

static int
sbp_targ_attach(device_t dev)
{
	struct sbp_targ_softc *sc;
	struct cam_devq *devq;
	struct firewire_comm *fc;

	sc = (struct sbp_targ_softc *)device_get_softc(dev);
	bzero((void *)sc, sizeof(struct sbp_targ_softc));

	mtx_init(&sc->mtx, "sbp_targ", NULL, MTX_DEF);
	sc->fd.fc = fc = device_get_ivars(dev);
	sc->fd.dev = dev;
	sc->fd.post_explore = (void *) sbp_targ_post_explore;
	sc->fd.post_busreset = (void *) sbp_targ_post_busreset;

	devq = cam_simq_alloc(/*maxopenings*/MAX_LUN*MAX_INITIATORS);
	if (devq == NULL)
		return (ENXIO);

	sc->sim = cam_sim_alloc(sbp_targ_action, sbp_targ_poll,
	    "sbp_targ", sc, device_get_unit(dev), &sc->mtx,
	    /*untagged*/ 1, /*tagged*/ 1, devq);
	if (sc->sim == NULL) {
		cam_simq_free(devq);
		return (ENXIO);
	}

	SBP_LOCK(sc);
	if (xpt_bus_register(sc->sim, dev, /*bus*/0) != CAM_SUCCESS)
		goto fail;

	if (xpt_create_path(&sc->path, /*periph*/ NULL, cam_sim_path(sc->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim));
		goto fail;
	}
	SBP_UNLOCK(sc);

	sc->fwb.start = SBP_TARG_BIND_START;
	sc->fwb.end = SBP_TARG_BIND_END;

	/* pre-allocate xfer */
	STAILQ_INIT(&sc->fwb.xferlist);
	fw_xferlist_add(&sc->fwb.xferlist, M_SBP_TARG,
	    /*send*/ 0, /*recv*/ SBP_TARG_RECV_LEN, MAX_LUN /* XXX */,
	    fc, (void *)sc, sbp_targ_recv);
	fw_bindadd(fc, &sc->fwb);
	return 0;

fail:
	SBP_UNLOCK(sc);
	cam_sim_free(sc->sim, /*free_devq*/TRUE);
	return (ENXIO);
}

static int
sbp_targ_detach(device_t dev)
{
	struct sbp_targ_softc *sc;
	struct sbp_targ_lstate *lstate;
	int i;

	sc = (struct sbp_targ_softc *)device_get_softc(dev);
	sc->fd.post_busreset = NULL;

	SBP_LOCK(sc);
	xpt_free_path(sc->path);
	xpt_bus_deregister(cam_sim_path(sc->sim));
	SBP_UNLOCK(sc);
	cam_sim_free(sc->sim, /*free_devq*/TRUE);

	for (i = 0; i < MAX_LUN; i++) {
		lstate = sc->lstate[i];
		if (lstate != NULL) {
			xpt_free_path(lstate->path);
			free(lstate, M_SBP_TARG);
		}
	}
	if (sc->black_hole != NULL) {
		xpt_free_path(sc->black_hole->path);
		free(sc->black_hole, M_SBP_TARG);
	}

	fw_bindremove(sc->fd.fc, &sc->fwb);
	fw_xferlist_remove(&sc->fwb.xferlist);

	mtx_destroy(&sc->mtx);

	return 0;
}

static devclass_t sbp_targ_devclass;

static device_method_t sbp_targ_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify,	sbp_targ_identify),
	DEVMETHOD(device_probe,		sbp_targ_probe),
	DEVMETHOD(device_attach,	sbp_targ_attach),
	DEVMETHOD(device_detach,	sbp_targ_detach),
	{ 0, 0 }
};

static driver_t sbp_targ_driver = {
	"sbp_targ",
	sbp_targ_methods,
	sizeof(struct sbp_targ_softc),
};

DRIVER_MODULE(sbp_targ, firewire, sbp_targ_driver, sbp_targ_devclass, 0, 0);
MODULE_VERSION(sbp_targ, 1);
MODULE_DEPEND(sbp_targ, firewire, 1, 1, 1);
MODULE_DEPEND(sbp_targ, cam, 1, 1, 1);