/*
 * Copyright (c) 2004
 *	Doug Rabson
 * Copyright (c) 2002-2003
 *	Hidetoshi Shimokawa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Hidetoshi Shimokawa.
 *
 * 4. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/firewire.h>
#include <net/if_arp.h>
#ifdef __DragonFly__
#include <bus/firewire/firewire.h>
#include <bus/firewire/firewirereg.h>
#include "if_fwipvar.h"
#else
#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/iec13213.h>
#include <dev/firewire/if_fwipvar.h>
#endif

/*
 * We really need a mechanism for allocating regions in the FIFO
 * address space. We pick an address in the OHCI controller's 'middle'
 * address space. This means that the controller will automatically
 * send responses for us, which is fine since we don't have any
 * important information to put in the response anyway.
 */
#define INET_FIFO	0xfffe00000000LL
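/*
 * INET_FIFO is a 48-bit CSR offset.  fwip_attach() advertises it to ARP
 * split into a 16-bit high half and a 32-bit low half (the
 * sender_unicast_FIFO_hi/lo fields of the hardware address), and
 * fwip_unicast_input() reassembles dest_hi/dest_lo the same way when it
 * checks the target address of incoming block write requests.
 */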
#define FWIPDEBUG	if (fwipdebug) if_printf
#define TX_MAX_QUEUE	(FWMAXQUEUE - 1)

/* network interface */
static void fwip_start (struct ifnet *);
static int fwip_ioctl (struct ifnet *, u_long, caddr_t);
static void fwip_init (void *);

static void fwip_post_busreset (void *);
static void fwip_output_callback (struct fw_xfer *);
static void fwip_async_output (struct fwip_softc *, struct ifnet *);
static void fwip_start_send (void *, int);
static void fwip_stream_input (struct fw_xferq *);
static void fwip_unicast_input(struct fw_xfer *);

static int fwipdebug = 0;
static int broadcast_channel = 0xc0 | 0x1f;	/* tag | channel(XXX) */
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;

MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
SYSCTL_DECL(_hw_firewire);
SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0,
	"Firewire ip subsystem");
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len,
	0, "Length of the receive queue");

TUNABLE_INT("hw.firewire.fwip.rx_queue_len", &rx_queue_len);

#ifdef DEVICE_POLLING
#define FWIP_POLL_REGISTER(func, fwip, ifp)			\
	if (ether_poll_register(func, ifp)) {			\
		struct firewire_comm *fc = (fwip)->fd.fc;	\
		fc->set_intr(fc, 0);				\
	}

#define FWIP_POLL_DEREGISTER(fwip, ifp)				\
	do {							\
		struct firewire_comm *fc = (fwip)->fd.fc;	\
		ether_poll_deregister(ifp);			\
		fc->set_intr(fc, 1);				\
	} while(0)						\

static poll_handler_t fwip_poll;

static void
fwip_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fwip_softc *fwip;
	struct firewire_comm *fc;

	fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	fc = fwip->fd.fc;
	if (cmd == POLL_DEREGISTER) {
		/* enable interrupts */
		fc->set_intr(fc, 1);
		return;
	}
	fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count);
}
#else
#define FWIP_POLL_REGISTER(func, fwip, ifp)
#define FWIP_POLL_DEREGISTER(fwip, ifp)
#endif
static void
fwip_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "fwip", device_get_unit(parent));
}

static int
fwip_probe(device_t dev)
{
	device_t pa;

	pa = device_get_parent(dev);
	if(device_get_unit(dev) != device_get_unit(pa)){
		return(ENXIO);
	}

	device_set_desc(dev, "IP over FireWire");
	return (0);
}

static int
fwip_attach(device_t dev)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int unit, s;
	struct fw_hwaddr *hwaddr;

	fwip = ((struct fwip_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);

	bzero(fwip, sizeof(struct fwip_softc));
	/* XXX */
	fwip->dma_ch = -1;

	fwip->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwip->fd.fc->speed;

	fwip->fd.dev = dev;
	fwip->fd.post_explore = NULL;
	fwip->fd.post_busreset = fwip_post_busreset;
	fwip->fw_softc.fwip = fwip;
	TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip);

	/*
	 * Encode our hardware address the way that arp likes it.
	 */
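	/*
	 * The fields filled in below follow the RFC 2734 hardware address
	 * format: the node's EUI-64, max_rec, transmit speed (sspd) and
	 * the 48-bit unicast FIFO address, stored in network byte order.
	 */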
	hwaddr = &fwip->fw_softc.fwcom.fc_hwaddr;
	hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi);
	hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo);
	hwaddr->sender_max_rec = fwip->fd.fc->maxrec;
	hwaddr->sspd = fwip->fd.fc->speed;
	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);

	/* fill the rest and attach interface */
	ifp = &fwip->fwip_if;
	ifp->if_softc = &fwip->fw_softc;

#if __FreeBSD_version >= 501113 || defined(__DragonFly__)
	if_initname(ifp, device_get_name(dev), unit);
#else
	ifp->if_unit = unit;
	ifp->if_name = "fwip";
#endif
	ifp->if_init = fwip_init;
	ifp->if_start = fwip_start;
	ifp->if_ioctl = fwip_ioctl;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;

	s = splimp();
	firewire_ifattach(ifp, hwaddr);
	splx(s);

	FWIPDEBUG(ifp, "interface created\n");
	return 0;
}

static void
fwip_stop(struct fwip_softc *fwip)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	struct ifnet *ifp = &fwip->fwip_if;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwip->fd.fc;

	FWIP_POLL_DEREGISTER(fwip, ifp);

	if (fwip->dma_ch >= 0) {
		xferq = fc->ir[fwip->dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwip->dma_ch);
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand = NULL;

		for (i = 0; i < xferq->bnchunk; i ++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWIP);

		fw_bindremove(fc, &fwip->fwb);
		for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}

		for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwip->xferlist);

		xferq->bulkxfer = NULL;
		fwip->dma_ch = -1;
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

static int
fwip_detach(device_t dev)
{
	struct fwip_softc *fwip;
	int s;

	fwip = (struct fwip_softc *)device_get_softc(dev);
	s = splimp();

	fwip_stop(fwip);
	firewire_ifdetach(&fwip->fwip_if);

	splx(s);
	return 0;
}

static void
fwip_init(void *arg)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)arg)->fwip;
	struct firewire_comm *fc;
	struct ifnet *ifp = &fwip->fwip_if;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWIPDEBUG(ifp, "initializing\n");

	fc = fwip->fd.fc;
#define START 0
	if (fwip->dma_ch < 0) {
		for (i = START; i < fc->nisodma; i ++) {
			xferq = fc->ir[i];
			if ((xferq->flag & FWXFERQ_OPEN) == 0)
				goto found;
		}
		printf("no free dma channel\n");
		return;
found:
		fwip->dma_ch = i;
		/* allocate DMA channel and init packet mode */
		xferq->flag |= FWXFERQ_OPEN | FWXFERQ_EXTBUF |
				FWXFERQ_HANDLER | FWXFERQ_STREAM;
		xferq->flag &= ~0xff;
		xferq->flag |= broadcast_channel & 0xff;
		/* register fwip_input handler */
		xferq->sc = (caddr_t) fwip;
		xferq->hand = fwip_stream_input;
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
			sizeof(struct fw_bulkxfer) * xferq->bnchunk,
			M_FWIP, M_WAITOK);
		if (xferq->bulkxfer == NULL) {
			printf("if_fwip: malloc failed\n");
			return;
		}
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		for (i = 0; i < xferq->bnchunk; i ++) {
			m =
#if defined(__DragonFly__) || __FreeBSD_version < 500000
				m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
#else
				m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
#endif
			xferq->bulkxfer[i].mbuf = m;
			if (m != NULL) {
				m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
				STAILQ_INSERT_TAIL(&xferq->stfree,
				    &xferq->bulkxfer[i], link);
			} else
				printf("fwip_as_input: m_getcl failed\n");
		}

		fwip->fwb.start = INET_FIFO;
		fwip->fwb.end = INET_FIFO + 16384; /* S3200 packet size */
		fwip->fwb.act_type = FWACT_XFER;

		/* pre-allocate xfer */
		STAILQ_INIT(&fwip->fwb.xferlist);
		for (i = 0; i < rx_queue_len; i ++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
			xfer->recv.payload = mtod(m, uint32_t *);
			xfer->recv.pay_len = MCLBYTES;
			xfer->act.hand = fwip_unicast_input;
			xfer->fc = fc;
			xfer->sc = (caddr_t)fwip;
			xfer->mbuf = m;
			STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
		}
		fw_bindadd(fc, &fwip->fwb);

		STAILQ_INIT(&fwip->xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = fwip->fd.fc;
			xfer->retry_req = fw_asybusy;
			xfer->sc = (caddr_t)fwip;
			xfer->act.hand = fwip_output_callback;
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[fwip->dma_ch];

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, fwip->dma_ch);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	FWIP_POLL_REGISTER(fwip_poll, fwip, ifp);
#if 0
	/* attempt to start output */
	fwip_start(ifp);
#endif
}

static int
fwip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	int s, error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		s = splimp();
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING))
				fwip_init(&fwip->fw_softc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fwip_stop(fwip);
		}
		splx(s);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	default:
#else
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
#endif
		s = splimp();
		error = firewire_ioctl(ifp, cmd, data);
		splx(s);
		return (error);
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	default:
		return (EINVAL);
#endif
	}

	return (0);
}

static void
fwip_post_busreset(void *arg)
{
	struct fwip_softc *fwip = arg;
	struct crom_src *src;
	struct crom_chunk *root;

	src = fwip->fd.fc->crom_src;
	root = fwip->fd.fc->crom_root;

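	/*
	 * Advertise IP capability in the configuration ROM: each protocol
	 * gets its own unit directory carrying the IANA specifier ID
	 * (0x00005e) and a version entry, 1 for RFC 2734 IPv4 and 2 for
	 * RFC 3146 IPv6, so that other nodes can discover that we accept
	 * IP over 1394 traffic.
	 */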
	/* RFC2734 IPv4 over IEEE1394 */
	bzero(&fwip->unit4, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit4, CROM_UDIR);
	crom_add_entry(&fwip->unit4, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit4, &fwip->spec4, "IANA");
	crom_add_entry(&fwip->unit4, CSRKEY_VER, 1);
	crom_add_simple_text(src, &fwip->unit4, &fwip->ver4, "IPv4");

	/* RFC3146 IPv6 over IEEE1394 */
	bzero(&fwip->unit6, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit6, CROM_UDIR);
	crom_add_entry(&fwip->unit6, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit6, &fwip->spec6, "IANA");
	crom_add_entry(&fwip->unit6, CSRKEY_VER, 2);
	crom_add_simple_text(src, &fwip->unit6, &fwip->ver6, "IPv6");

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;
	firewire_busreset(&fwip->fwip_if);
}

static void
fwip_output_callback(struct fw_xfer *xfer)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int s;

	GIANT_REQUIRED;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = &fwip->fwip_if;
	/* XXX error check */
	FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		ifp->if_oerrors ++;

	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	s = splimp();
	STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
	splx(s);

	/* for queue full */
	if (ifp->if_snd.ifq_head != NULL)
		fwip_start(ifp);
}

static void
fwip_start(struct ifnet *ifp)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	int s;

	GIANT_REQUIRED;

	FWIPDEBUG(ifp, "starting\n");

	if (fwip->dma_ch < 0) {
		struct mbuf *m = NULL;

		FWIPDEBUG(ifp, "not ready\n");

		s = splimp();
		do {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL)
				m_freem(m);
			ifp->if_oerrors ++;
		} while (m != NULL);
		splx(s);

		return;
	}

	s = splimp();
	ifp->if_flags |= IFF_OACTIVE;

	if (ifp->if_snd.ifq_len != 0)
		fwip_async_output(fwip, ifp);

	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}

/* Async. stream output */
static void
fwip_async_output(struct fwip_softc *fwip, struct ifnet *ifp)
{
	struct firewire_comm *fc = fwip->fd.fc;
	struct mbuf *m;
	struct m_tag *mtag;
	struct fw_hwaddr *destfw;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	uint16_t nodeid;
	int error;
	int i = 0;

	GIANT_REQUIRED;

	xfer = NULL;
	xferq = fwip->fd.fc->atq;
	while (xferq->queued < xferq->maxq - 1) {
		xfer = STAILQ_FIRST(&fwip->xferlist);
		if (xfer == NULL) {
			printf("if_fwip: lack of xfer\n");
			return;
		}
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Dig out the link-level address which
		 * firewire_output got via arp or neighbour
		 * discovery. If we don't have a link-level address,
		 * just stick the thing on the broadcast channel.
		 */
		mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, 0);
		if (mtag == NULL)
			destfw = 0;
		else
			destfw = (struct fw_hwaddr *) (mtag + 1);

		STAILQ_REMOVE_HEAD(&fwip->xferlist, link);

		/*
		 * We don't do any bpf stuff here - the generic code
		 * in firewire_output gives the packet to bpf before
		 * it adds the link-level encapsulation.
		 */

		/*
		 * Put the mbuf in the xfer early in case we hit an
		 * error case below - fwip_output_callback will free
		 * the mbuf.
		 */
		xfer->mbuf = m;

		/*
		 * We use the arp result (if any) to add a suitable firewire
		 * packet header before handing off to the bus.
		 */
		fp = &xfer->send.hdr;
		nodeid = FWLOCALBUS | fc->nodeid;
		if ((m->m_flags & M_BCAST) || !destfw) {
			/*
			 * Broadcast packets are sent as GASP packets with
			 * specifier ID 0x00005e, version 1 on the broadcast
			 * channel. To be conservative, we send at the
			 * slowest possible speed.
			 */
			uint32_t *p;

			M_PREPEND(m, 2*sizeof(uint32_t), M_DONTWAIT);
			p = mtod(m, uint32_t *);
			fp->mode.stream.len = m->m_pkthdr.len;
			fp->mode.stream.chtag = broadcast_channel;
			fp->mode.stream.tcode = FWTCODE_STREAM;
			fp->mode.stream.sy = 0;
			xfer->send.spd = 0;
			p[0] = htonl(nodeid << 16);
			p[1] = htonl((0x5e << 24) | 1);
		} else {
			/*
			 * Unicast packets are sent as block writes to the
			 * target's unicast fifo address. If we can't
			 * find the node address, we just give up. We
			 * could broadcast it but that might overflow
			 * the packet size limitations due to the
			 * extra GASP header. Note: the hardware
			 * address is stored in network byte order to
			 * make life easier for ARP.
			 */
			struct fw_device *fd;
			struct fw_eui64 eui;

			eui.hi = ntohl(destfw->sender_unique_ID_hi);
			eui.lo = ntohl(destfw->sender_unique_ID_lo);
			if (fwip->last_dest.hi != eui.hi ||
			    fwip->last_dest.lo != eui.lo) {
				fd = fw_noderesolve_eui64(fc, &eui);
				if (!fd) {
					/* error */
					ifp->if_oerrors ++;
					/* XXX set error code */
					fwip_output_callback(xfer);
					continue;

				}
				fwip->last_hdr.mode.wreqb.dst = FWLOCALBUS | fd->dst;
				fwip->last_hdr.mode.wreqb.tlrt = 0;
				fwip->last_hdr.mode.wreqb.tcode = FWTCODE_WREQB;
				fwip->last_hdr.mode.wreqb.pri = 0;
				fwip->last_hdr.mode.wreqb.src = nodeid;
				fwip->last_hdr.mode.wreqb.dest_hi =
					ntohs(destfw->sender_unicast_FIFO_hi);
				fwip->last_hdr.mode.wreqb.dest_lo =
					ntohl(destfw->sender_unicast_FIFO_lo);
				fwip->last_hdr.mode.wreqb.extcode = 0;
				fwip->last_dest = eui;
			}

			fp->mode.wreqb = fwip->last_hdr.mode.wreqb;
			fp->mode.wreqb.len = m->m_pkthdr.len;
			xfer->send.spd = min(destfw->sspd, fc->speed);
		}

		xfer->send.pay_len = m->m_pkthdr.len;

		error = fw_asyreq(fc, -1, xfer);
		if (error == EAGAIN) {
			/*
			 * We ran out of tlabels - requeue the packet
			 * for later transmission.
			 */
			xfer->mbuf = 0;
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
			IF_PREPEND(&ifp->if_snd, m);
			break;
		}
		if (error) {
			/* error */
			ifp->if_oerrors ++;
			/* XXX set error code */
			fwip_output_callback(xfer);
			continue;
		} else {
			ifp->if_opackets ++;
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0) {
#if 1
		xferq->start(fc);
#else
		taskqueue_enqueue(taskqueue_swi_giant, &fwip->start_send);
#endif
	}
}

static void
fwip_start_send (void *arg, int count)
{
	struct fwip_softc *fwip = arg;

	GIANT_REQUIRED;
	fwip->fd.fc->atq->start(fwip->fd.fc);
}

/* Async. stream input */
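/*
 * Each received stream packet starts with the one-quadlet 1394 async
 * stream header, followed by the two-quadlet GASP header (source node
 * ID plus the 0x00005e specifier and version 1), followed by the
 * encapsulated datagram.  The three leading quadlets are checked and
 * then trimmed with m_adj() before the packet is handed to
 * firewire_input() for the generic encapsulation handling.
 */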
static void
fwip_stream_input(struct fw_xferq *xferq)
{
	struct mbuf *m, *m0;
	struct m_tag *mtag;
	struct ifnet *ifp;
	struct fwip_softc *fwip;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	uint16_t src;
	uint32_t *p;

	GIANT_REQUIRED;

	fwip = (struct fwip_softc *)xferq->sc;
	ifp = &fwip->fwip_if;
#if 0
	FWIP_POLL_REGISTER(fwip_poll, fwip, ifp);
#endif
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwip->fd.fc->irx_post != NULL)
			fwip->fd.fc->irx_post(fwip->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			printf("fwip_as_input: m_getcl failed\n");

		/*
		 * We must have a GASP header - leave the
		 * encapsulation sanity checks to the generic
		 * code. Remember that we also have the firewire async
		 * stream header even though that isn't accounted for
		 * in mode.stream.len.
		 */
		if (sxfer->resp != 0 || fp->mode.stream.len <
		    2*sizeof(uint32_t)) {
			m_freem(m);
			ifp->if_ierrors ++;
			continue;
		}
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len
			+ sizeof(fp->mode.stream);

		/*
		 * If we received the packet on the broadcast channel,
		 * mark it as broadcast, otherwise we assume it must
		 * be multicast.
		 */
		if (fp->mode.stream.chtag == broadcast_channel)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;

		/*
		 * Make sure we recognise the GASP specifier and
		 * version.
		 */
		p = mtod(m, uint32_t *);
		if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 0x00005e
		    || (ntohl(p[2]) & 0xffffff) != 1) {
			FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
			    ntohl(p[1]), ntohl(p[2]));
			m_freem(m);
			ifp->if_ierrors ++;
			continue;
		}

		/*
		 * Record the sender ID for possible BPF usage.
		 */
		src = ntohl(p[1]) >> 16;
		if (ifp->if_bpf) {
			mtag = m_tag_alloc(MTAG_FIREWIRE,
			    MTAG_FIREWIRE_SENDER_EUID,
			    2*sizeof(uint32_t), M_NOWAIT);
			if (mtag) {
				/* bpf wants it in network byte order */
				struct fw_device *fd;
				uint32_t *p = (uint32_t *) (mtag + 1);
				fd = fw_noderesolve_nodeid(fwip->fd.fc,
				    src & 0x3f);
				if (fd) {
					p[0] = htonl(fd->eui.hi);
					p[1] = htonl(fd->eui.lo);
				} else {
					p[0] = 0;
					p[1] = 0;
				}
				m_tag_prepend(m, mtag);
			}
		}

		/*
		 * Trim off the GASP header
		 */
		m_adj(m, 3*sizeof(uint32_t));
		m->m_pkthdr.rcvif = ifp;
		firewire_input(ifp, m, src);
		ifp->if_ipackets ++;
	}
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwip->fd.fc->irx_enable(fwip->fd.fc, fwip->dma_ch);
}

static __inline void
fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer)
{
	struct mbuf *m;

	GIANT_REQUIRED;

	/*
	 * We have finished with a unicast xfer. Allocate a new
	 * cluster and stick it on the back of the input queue.
	 */
	m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
	xfer->mbuf = m;
	xfer->recv.payload = mtod(m, uint32_t *);
	xfer->recv.pay_len = MCLBYTES;
	STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
}

static void
fwip_unicast_input(struct fw_xfer *xfer)
{
	uint64_t address;
	struct mbuf *m;
	struct m_tag *mtag;
	struct ifnet *ifp;
	struct fwip_softc *fwip;
	struct fw_pkt *fp;
	//struct fw_pkt *sfp;
	int rtcode;

	GIANT_REQUIRED;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = &fwip->fwip_if;
	m = xfer->mbuf;
	xfer->mbuf = 0;
	fp = &xfer->recv.hdr;

	/*
	 * Check the fifo address - we only accept addresses of
	 * exactly INET_FIFO.
	 */
	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
		| fp->mode.wreqb.dest_lo;
	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
		rtcode = FWRCODE_ER_TYPE;
	} else if (address != INET_FIFO) {
		rtcode = FWRCODE_ER_ADDR;
	} else {
		rtcode = FWRCODE_COMPLETE;
	}

	/*
	 * Pick up a new mbuf and stick it on the back of the receive
	 * queue.
	 */
	fwip_unicast_input_recycle(fwip, xfer);

	/*
	 * If we've already rejected the packet, give up now.
	 */
	if (rtcode != FWRCODE_COMPLETE) {
		m_freem(m);
		ifp->if_ierrors ++;
		return;
	}

	if (ifp->if_bpf) {
		/*
		 * Record the sender ID for possible BPF usage.
		 */
		mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID,
		    2*sizeof(uint32_t), M_NOWAIT);
		if (mtag) {
			/* bpf wants it in network byte order */
			struct fw_device *fd;
			uint32_t *p = (uint32_t *) (mtag + 1);
			fd = fw_noderesolve_nodeid(fwip->fd.fc,
			    fp->mode.wreqb.src & 0x3f);
			if (fd) {
				p[0] = htonl(fd->eui.hi);
				p[1] = htonl(fd->eui.lo);
			} else {
				p[0] = 0;
				p[1] = 0;
			}
			m_tag_prepend(m, mtag);
		}
	}

	/*
	 * Hand off to the generic encapsulation code. We don't use
	 * ifp->if_input so that we can pass the source nodeid as an
	 * argument to facilitate link-level fragment reassembly.
	 */
	m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
	m->m_pkthdr.rcvif = ifp;
	firewire_input(ifp, m, fp->mode.wreqb.src);
	ifp->if_ipackets ++;
}

static devclass_t fwip_devclass;

static device_method_t fwip_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify,	fwip_identify),
	DEVMETHOD(device_probe,		fwip_probe),
	DEVMETHOD(device_attach,	fwip_attach),
	DEVMETHOD(device_detach,	fwip_detach),
	{ 0, 0 }
};

static driver_t fwip_driver = {
	"fwip",
	fwip_methods,
	sizeof(struct fwip_softc),
};

#ifdef __DragonFly__
DECLARE_DUMMY_MODULE(fwip);
#endif
DRIVER_MODULE(fwip, firewire, fwip_driver, fwip_devclass, 0, 0);
MODULE_VERSION(fwip, 1);
MODULE_DEPEND(fwip, firewire, 1, 1, 1);