/*-
 * Copyright (c) 2004
 *	Doug Rabson
 * Copyright (c) 2002-2003
 *	Hidetoshi Shimokawa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Hidetoshi Shimokawa.
 *
 * 4. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/firewire.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#ifdef __DragonFly__
#include <bus/firewire/firewire.h>
#include <bus/firewire/firewirereg.h>
#include "if_fwipvar.h"
#else
#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/iec13213.h>
#include <dev/firewire/if_fwipvar.h>
#endif

/*
 * We really need a mechanism for allocating regions in the FIFO
 * address space. We pick an address in the OHCI controller's 'middle'
 * address space. This means that the controller will automatically
 * send responses for us, which is fine since we don't have any
 * important information to put in the response anyway.
 */
#define INET_FIFO	0xfffe00000000LL
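/*
 * The 48-bit INET_FIFO base is advertised to our peers: fwip_attach()
 * places its upper 16 bits and lower 32 bits in the link-level address
 * (sender_unicast_FIFO_hi/lo), and fwip_unicast_input() only accepts
 * block write requests addressed to exactly this range.
 */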

#define FWIPDEBUG	if (fwipdebug) if_printf
#define TX_MAX_QUEUE	(FWMAXQUEUE - 1)

/* network interface */
static void fwip_start (struct ifnet *);
static int fwip_ioctl (struct ifnet *, u_long, caddr_t);
static void fwip_init (void *);

static void fwip_post_busreset (void *);
static void fwip_output_callback (struct fw_xfer *);
static void fwip_async_output (struct fwip_softc *, struct ifnet *);
static void fwip_start_send (void *, int);
static void fwip_stream_input (struct fw_xferq *);
static void fwip_unicast_input(struct fw_xfer *);

static int fwipdebug = 0;
static int broadcast_channel = 0xc0 | 0x1f; /* tag | channel(XXX) */
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;

static MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0,
	"Firewire ip subsystem");
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len,
	0, "Length of the receive queue");

TUNABLE_INT("hw.firewire.fwip.rx_queue_len", &rx_queue_len);

#ifdef DEVICE_POLLING
static poll_handler_t fwip_poll;

static int
fwip_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fwip_softc *fwip;
	struct firewire_comm *fc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (0);

	fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	fc = fwip->fd.fc;
	fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS) ? 0 : 1, count);
	return (0);
}
#endif /* DEVICE_POLLING */

static void
fwip_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "fwip", device_get_unit(parent));
}

static int
fwip_probe(device_t dev)
{
	device_t pa;

	pa = device_get_parent(dev);
	if (device_get_unit(dev) != device_get_unit(pa)) {
		return (ENXIO);
	}

	device_set_desc(dev, "IP over FireWire");
	return (0);
}
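/*
 * Attach: allocate an IEEE 1394 ifnet, register the post-bus-reset
 * hook, encode our EUI-64 and unicast FIFO address as the link-level
 * address and hand the interface to the generic FireWire network code.
 */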
static int
fwip_attach(device_t dev)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int unit, s;
	struct fw_hwaddr *hwaddr;

	fwip = ((struct fwip_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);
	ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394);
	if (ifp == NULL)
		return (ENOSPC);

	mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF);
	/* XXX */
	fwip->dma_ch = -1;

	fwip->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwip->fd.fc->speed;

	fwip->fd.dev = dev;
	fwip->fd.post_explore = NULL;
	fwip->fd.post_busreset = fwip_post_busreset;
	fwip->fw_softc.fwip = fwip;
	TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip);

	/*
	 * Encode our hardware address the way that arp likes it.
	 */
	hwaddr = &IFP2FWC(fwip->fw_softc.fwip_ifp)->fc_hwaddr;
	hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi);
	hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo);
	hwaddr->sender_max_rec = fwip->fd.fc->maxrec;
	hwaddr->sspd = fwip->fd.fc->speed;
	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);

	/* fill the rest and attach interface */
	ifp->if_softc = &fwip->fw_softc;

#if __FreeBSD_version >= 501113 || defined(__DragonFly__)
	if_initname(ifp, device_get_name(dev), unit);
#else
	ifp->if_unit = unit;
	ifp->if_name = "fwip";
#endif
	ifp->if_init = fwip_init;
	ifp->if_start = fwip_start;
	ifp->if_ioctl = fwip_ioctl;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	s = splimp();
	firewire_ifattach(ifp, hwaddr);
	splx(s);

	FWIPDEBUG(ifp, "interface created\n");
	return 0;
}

static void
fwip_stop(struct fwip_softc *fwip)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	struct ifnet *ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwip->fd.fc;

	if (fwip->dma_ch >= 0) {
		xferq = fc->ir[fwip->dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwip->dma_ch);
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand = NULL;

		for (i = 0; i < xferq->bnchunk; i++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWIP);

		fw_bindremove(fc, &fwip->fwb);
		for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}

		for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwip->xferlist);

		xferq->bulkxfer = NULL;
		fwip->dma_ch = -1;
	}

#if defined(__FreeBSD__)
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#else
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#endif
}

static int
fwip_detach(device_t dev)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int s;

	fwip = (struct fwip_softc *)device_get_softc(dev);
	ifp = fwip->fw_softc.fwip_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	s = splimp();

	fwip_stop(fwip);
	firewire_ifdetach(ifp);
	if_free(ifp);
	mtx_destroy(&fwip->mtx);

	splx(s);
	return 0;
}
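/*
 * Bring the interface up: open an isochronous receive DMA channel for
 * the broadcast channel, pre-allocate receive buffers and transmit
 * xfers, bind the INET_FIFO address range for incoming unicast block
 * writes and start reception.
 */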
static void
fwip_init(void *arg)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)arg)->fwip;
	struct firewire_comm *fc;
	struct ifnet *ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWIPDEBUG(ifp, "initializing\n");

	fc = fwip->fd.fc;
#define START 0
	if (fwip->dma_ch < 0) {
		fwip->dma_ch = fw_open_isodma(fc, /* tx */0);
		if (fwip->dma_ch < 0)
			return;
		xferq = fc->ir[fwip->dma_ch];
		xferq->flag |= FWXFERQ_EXTBUF |
		    FWXFERQ_HANDLER | FWXFERQ_STREAM;
		xferq->flag &= ~0xff;
		xferq->flag |= broadcast_channel & 0xff;
		/* register fwip_input handler */
		xferq->sc = (caddr_t) fwip;
		xferq->hand = fwip_stream_input;
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
		    sizeof(struct fw_bulkxfer) * xferq->bnchunk,
		    M_FWIP, M_WAITOK);
		if (xferq->bulkxfer == NULL) {
			printf("if_fwip: malloc failed\n");
			return;
		}
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		for (i = 0; i < xferq->bnchunk; i++) {
			m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
			xferq->bulkxfer[i].mbuf = m;
			m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree,
			    &xferq->bulkxfer[i], link);
		}

		fwip->fwb.start = INET_FIFO;
		fwip->fwb.end = INET_FIFO + 16384; /* S3200 packet size */

		/* pre-allocate xfer */
		STAILQ_INIT(&fwip->fwb.xferlist);
		for (i = 0; i < rx_queue_len; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
			xfer->recv.payload = mtod(m, uint32_t *);
			xfer->recv.pay_len = MCLBYTES;
			xfer->hand = fwip_unicast_input;
			xfer->fc = fc;
			xfer->sc = (caddr_t)fwip;
			xfer->mbuf = m;
			STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
		}
		fw_bindadd(fc, &fwip->fwb);

		STAILQ_INIT(&fwip->xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = fwip->fd.fc;
			xfer->sc = (caddr_t)fwip;
			xfer->hand = fwip_output_callback;
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[fwip->dma_ch];

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, fwip->dma_ch);

#if defined(__FreeBSD__)
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
#else
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
#endif

#if 0
	/* attempt to start output */
	fwip_start(ifp);
#endif
}

static int
fwip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	int s, error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		s = splimp();
		if (ifp->if_flags & IFF_UP) {
#if defined(__FreeBSD__)
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
#else
			if (!(ifp->if_flags & IFF_RUNNING))
#endif
				fwip_init(&fwip->fw_softc);
		} else {
#if defined(__FreeBSD__)
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
#else
			if (ifp->if_flags & IFF_RUNNING)
#endif
				fwip_stop(fwip);
		}
		splx(s);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
	    {
		struct ifreq *ifr = (struct ifreq *) data;
		struct firewire_comm *fc = fwip->fd.fc;

		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(fwip_poll, ifp);
			if (error)
				return (error);
			/* Disable interrupts */
			fc->set_intr(fc, 0);
			ifp->if_capenable |= IFCAP_POLLING |
			    IFCAP_POLLING_NOCOUNT;
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			fc->set_intr(fc, 1);
			ifp->if_capenable &= ~IFCAP_POLLING;
			ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT;
			return (error);
		}
	    }
#endif /* DEVICE_POLLING */
		break;
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	default:
#else
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
#endif
		s = splimp();
		error = firewire_ioctl(ifp, cmd, data);
		splx(s);
		return (error);
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	default:
		return (EINVAL);
#endif
	}

	return (0);
}
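/*
 * Bus reset hook: rebuild the RFC 2734 (IPv4) and RFC 3146 (IPv6) unit
 * directories in the configuration ROM and forget the cached unicast
 * destination, since node IDs may have changed across the reset.
 */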
static void
fwip_post_busreset(void *arg)
{
	struct fwip_softc *fwip = arg;
	struct crom_src *src;
	struct crom_chunk *root;

	src = fwip->fd.fc->crom_src;
	root = fwip->fd.fc->crom_root;

	/* RFC2734 IPv4 over IEEE1394 */
	bzero(&fwip->unit4, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit4, CROM_UDIR);
	crom_add_entry(&fwip->unit4, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit4, &fwip->spec4, "IANA");
	crom_add_entry(&fwip->unit4, CSRKEY_VER, 1);
	crom_add_simple_text(src, &fwip->unit4, &fwip->ver4, "IPv4");

	/* RFC3146 IPv6 over IEEE1394 */
	bzero(&fwip->unit6, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit6, CROM_UDIR);
	crom_add_entry(&fwip->unit6, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit6, &fwip->spec6, "IANA");
	crom_add_entry(&fwip->unit6, CSRKEY_VER, 2);
	crom_add_simple_text(src, &fwip->unit6, &fwip->ver6, "IPv6");

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;
	firewire_busreset(fwip->fw_softc.fwip_ifp);
}
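/*
 * Transmit completion handler: count errors, free the mbuf, return the
 * xfer to the free list and restart output if packets are still queued.
 */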
static void
fwip_output_callback(struct fw_xfer *xfer)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int s;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	/* XXX error check */
	FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		ifp->if_oerrors++;

	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	s = splimp();
	FWIP_LOCK(fwip);
	STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
	FWIP_UNLOCK(fwip);
	splx(s);

	/* for queue full */
	if (ifp->if_snd.ifq_head != NULL) {
		fwip_start(ifp);
	}
}

static void
fwip_start(struct ifnet *ifp)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	int s;

	FWIPDEBUG(ifp, "starting\n");

	if (fwip->dma_ch < 0) {
		struct mbuf *m = NULL;

		FWIPDEBUG(ifp, "not ready\n");

		s = splimp();
		do {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL)
				m_freem(m);
			ifp->if_oerrors++;
		} while (m != NULL);
		splx(s);

		return;
	}

	s = splimp();
#if defined(__FreeBSD__)
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#else
	ifp->if_flags |= IFF_OACTIVE;
#endif

	if (ifp->if_snd.ifq_len != 0)
		fwip_async_output(fwip, ifp);

#if defined(__FreeBSD__)
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
#else
	ifp->if_flags &= ~IFF_OACTIVE;
#endif
	splx(s);
}

/* Async. stream output */
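/*
 * Broadcast and multicast packets are sent as GASP packets on the
 * broadcast channel: two quadlets (our node ID, then specifier ID
 * 0x00005e with version 1) are prepended to the payload. Unicast
 * packets are sent as block write requests to the destination's
 * unicast FIFO address; the request header is cached in last_hdr and
 * reused until the destination changes.
 */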
static void
fwip_async_output(struct fwip_softc *fwip, struct ifnet *ifp)
{
	struct firewire_comm *fc = fwip->fd.fc;
	struct mbuf *m;
	struct m_tag *mtag;
	struct fw_hwaddr *destfw;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	uint16_t nodeid;
	int error;
	int i = 0;

	xfer = NULL;
	xferq = fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
	    (ifp->if_snd.ifq_head != NULL)) {
		FWIP_LOCK(fwip);
		xfer = STAILQ_FIRST(&fwip->xferlist);
		if (xfer == NULL) {
			FWIP_UNLOCK(fwip);
#if 0
			printf("if_fwip: lack of xfer\n");
#endif
			break;
		}
		STAILQ_REMOVE_HEAD(&fwip->xferlist, link);
		FWIP_UNLOCK(fwip);

		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			FWIP_LOCK(fwip);
			STAILQ_INSERT_HEAD(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			break;
		}

		/*
		 * Dig out the link-level address which
		 * firewire_output got via arp or neighbour
		 * discovery. If we don't have a link-level address,
		 * just stick the thing on the broadcast channel.
		 */
		mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, 0);
		if (mtag == NULL)
			destfw = 0;
		else
			destfw = (struct fw_hwaddr *) (mtag + 1);

		/*
		 * We don't do any bpf stuff here - the generic code
		 * in firewire_output gives the packet to bpf before
		 * it adds the link-level encapsulation.
		 */

		/*
		 * Put the mbuf in the xfer early in case we hit an
		 * error case below - fwip_output_callback will free
		 * the mbuf.
		 */
		xfer->mbuf = m;

		/*
		 * We use the arp result (if any) to add a suitable firewire
		 * packet header before handing off to the bus.
		 */
		fp = &xfer->send.hdr;
		nodeid = FWLOCALBUS | fc->nodeid;
		if ((m->m_flags & M_BCAST) || !destfw) {
			/*
			 * Broadcast packets are sent as GASP packets with
			 * specifier ID 0x00005e, version 1 on the broadcast
			 * channel. To be conservative, we send at the
			 * slowest possible speed.
			 */
			uint32_t *p;

			M_PREPEND(m, 2*sizeof(uint32_t), M_DONTWAIT);
			p = mtod(m, uint32_t *);
			fp->mode.stream.len = m->m_pkthdr.len;
			fp->mode.stream.chtag = broadcast_channel;
			fp->mode.stream.tcode = FWTCODE_STREAM;
			fp->mode.stream.sy = 0;
			xfer->send.spd = 0;
			p[0] = htonl(nodeid << 16);
			p[1] = htonl((0x5e << 24) | 1);
		} else {
			/*
			 * Unicast packets are sent as block writes to the
			 * target's unicast fifo address. If we can't
			 * find the node address, we just give up. We
			 * could broadcast it but that might overflow
			 * the packet size limitations due to the
			 * extra GASP header. Note: the hardware
			 * address is stored in network byte order to
			 * make life easier for ARP.
			 */
			struct fw_device *fd;
			struct fw_eui64 eui;

			eui.hi = ntohl(destfw->sender_unique_ID_hi);
			eui.lo = ntohl(destfw->sender_unique_ID_lo);
			if (fwip->last_dest.hi != eui.hi ||
			    fwip->last_dest.lo != eui.lo) {
				fd = fw_noderesolve_eui64(fc, &eui);
				if (!fd) {
					/* error */
					ifp->if_oerrors++;
					/* XXX set error code */
					fwip_output_callback(xfer);
					continue;
				}
				fwip->last_hdr.mode.wreqb.dst = FWLOCALBUS | fd->dst;
				fwip->last_hdr.mode.wreqb.tlrt = 0;
				fwip->last_hdr.mode.wreqb.tcode = FWTCODE_WREQB;
				fwip->last_hdr.mode.wreqb.pri = 0;
				fwip->last_hdr.mode.wreqb.src = nodeid;
				fwip->last_hdr.mode.wreqb.dest_hi =
				    ntohs(destfw->sender_unicast_FIFO_hi);
				fwip->last_hdr.mode.wreqb.dest_lo =
				    ntohl(destfw->sender_unicast_FIFO_lo);
				fwip->last_hdr.mode.wreqb.extcode = 0;
				fwip->last_dest = eui;
			}

			fp->mode.wreqb = fwip->last_hdr.mode.wreqb;
			fp->mode.wreqb.len = m->m_pkthdr.len;
			xfer->send.spd = min(destfw->sspd, fc->speed);
		}

		xfer->send.pay_len = m->m_pkthdr.len;

		error = fw_asyreq(fc, -1, xfer);
		if (error == EAGAIN) {
			/*
			 * We ran out of tlabels - requeue the packet
			 * for later transmission.
			 */
			xfer->mbuf = 0;
			FWIP_LOCK(fwip);
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			IF_PREPEND(&ifp->if_snd, m);
			break;
		}
		if (error) {
			/* error */
			ifp->if_oerrors++;
			/* XXX set error code */
			fwip_output_callback(xfer);
			continue;
		} else {
			ifp->if_opackets++;
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fc);
}

static void
fwip_start_send(void *arg, int count)
{
	struct fwip_softc *fwip = arg;

	fwip->fd.fc->atq->start(fwip->fd.fc);
}

/* Async. stream input */
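/*
 * Each receive buffer holds the 1394 async stream header quadlet
 * followed by the two-quadlet GASP header and then the payload; we
 * verify the GASP specifier ID and version and trim all three quadlets
 * before handing the packet to firewire_input().
 */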
static void
fwip_stream_input(struct fw_xferq *xferq)
{
	struct mbuf *m, *m0;
	struct m_tag *mtag;
	struct ifnet *ifp;
	struct fwip_softc *fwip;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	uint16_t src;
	uint32_t *p;

	fwip = (struct fwip_softc *)xferq->sc;
	ifp = fwip->fw_softc.fwip_ifp;

	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwip->fd.fc->irx_post != NULL)
			fwip->fd.fc->irx_post(fwip->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			printf("fwip_as_input: m_getcl failed\n");

		/*
		 * We must have a GASP header - leave the
		 * encapsulation sanity checks to the generic
		 * code. Remember that we also have the firewire async
		 * stream header even though that isn't accounted for
		 * in mode.stream.len.
		 */
		if (sxfer->resp != 0 || fp->mode.stream.len <
		    2*sizeof(uint32_t)) {
			m_freem(m);
			ifp->if_ierrors++;
			continue;
		}
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len
		    + sizeof(fp->mode.stream);

		/*
		 * If we received the packet on the broadcast channel,
		 * mark it as broadcast, otherwise we assume it must
		 * be multicast.
		 */
		if (fp->mode.stream.chtag == broadcast_channel)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;

		/*
		 * Make sure we recognise the GASP specifier and
		 * version.
		 */
		p = mtod(m, uint32_t *);
		if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 0x00005e
		    || (ntohl(p[2]) & 0xffffff) != 1) {
			FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
			    ntohl(p[1]), ntohl(p[2]));
			m_freem(m);
			ifp->if_ierrors++;
			continue;
		}

		/*
		 * Record the sender ID for possible BPF usage.
		 */
		src = ntohl(p[1]) >> 16;
		if (bpf_peers_present(ifp->if_bpf)) {
			mtag = m_tag_alloc(MTAG_FIREWIRE,
			    MTAG_FIREWIRE_SENDER_EUID,
			    2*sizeof(uint32_t), M_NOWAIT);
			if (mtag) {
				/* bpf wants it in network byte order */
				struct fw_device *fd;
				uint32_t *p = (uint32_t *) (mtag + 1);
				fd = fw_noderesolve_nodeid(fwip->fd.fc,
				    src & 0x3f);
				if (fd) {
					p[0] = htonl(fd->eui.hi);
					p[1] = htonl(fd->eui.lo);
				} else {
					p[0] = 0;
					p[1] = 0;
				}
				m_tag_prepend(m, mtag);
			}
		}

		/*
		 * Trim off the GASP header
		 */
		m_adj(m, 3*sizeof(uint32_t));
		m->m_pkthdr.rcvif = ifp;
		firewire_input(ifp, m, src);
		ifp->if_ipackets++;
	}
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwip->fd.fc->irx_enable(fwip->fd.fc, fwip->dma_ch);
}

static __inline void
fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer)
{
	struct mbuf *m;

	/*
	 * We have finished with a unicast xfer. Allocate a new
	 * cluster and stick it on the back of the input queue.
	 */
	m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
	xfer->mbuf = m;
	xfer->recv.payload = mtod(m, uint32_t *);
	xfer->recv.pay_len = MCLBYTES;
	STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
}
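/*
 * Handler for block write requests arriving at INET_FIFO. The write
 * response itself is generated by the OHCI controller (see the
 * INET_FIFO comment above); here we just validate the tcode and
 * address, recycle the receive buffer and pass the payload on to
 * firewire_input().
 */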
static void
fwip_unicast_input(struct fw_xfer *xfer)
{
	uint64_t address;
	struct mbuf *m;
	struct m_tag *mtag;
	struct ifnet *ifp;
	struct fwip_softc *fwip;
	struct fw_pkt *fp;
	//struct fw_pkt *sfp;
	int rtcode;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	m = xfer->mbuf;
	xfer->mbuf = 0;
	fp = &xfer->recv.hdr;

	/*
	 * Check the fifo address - we only accept addresses of
	 * exactly INET_FIFO.
	 */
	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
	    | fp->mode.wreqb.dest_lo;
	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
		rtcode = FWRCODE_ER_TYPE;
	} else if (address != INET_FIFO) {
		rtcode = FWRCODE_ER_ADDR;
	} else {
		rtcode = FWRCODE_COMPLETE;
	}

	/*
	 * Pick up a new mbuf and stick it on the back of the receive
	 * queue.
	 */
	fwip_unicast_input_recycle(fwip, xfer);

	/*
	 * If we've already rejected the packet, give up now.
	 */
	if (rtcode != FWRCODE_COMPLETE) {
		m_freem(m);
		ifp->if_ierrors++;
		return;
	}

	if (bpf_peers_present(ifp->if_bpf)) {
		/*
		 * Record the sender ID for possible BPF usage.
		 */
		mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID,
		    2*sizeof(uint32_t), M_NOWAIT);
		if (mtag) {
			/* bpf wants it in network byte order */
			struct fw_device *fd;
			uint32_t *p = (uint32_t *) (mtag + 1);
			fd = fw_noderesolve_nodeid(fwip->fd.fc,
			    fp->mode.wreqb.src & 0x3f);
			if (fd) {
				p[0] = htonl(fd->eui.hi);
				p[1] = htonl(fd->eui.lo);
			} else {
				p[0] = 0;
				p[1] = 0;
			}
			m_tag_prepend(m, mtag);
		}
	}

	/*
	 * Hand off to the generic encapsulation code. We don't use
	 * ifp->if_input so that we can pass the source nodeid as an
	 * argument to facilitate link-level fragment reassembly.
	 */
	m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
	m->m_pkthdr.rcvif = ifp;
	firewire_input(ifp, m, fp->mode.wreqb.src);
	ifp->if_ipackets++;
}

static devclass_t fwip_devclass;

static device_method_t fwip_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify,	fwip_identify),
	DEVMETHOD(device_probe,		fwip_probe),
	DEVMETHOD(device_attach,	fwip_attach),
	DEVMETHOD(device_detach,	fwip_detach),
	{ 0, 0 }
};

static driver_t fwip_driver = {
	"fwip",
	fwip_methods,
	sizeof(struct fwip_softc),
};

#ifdef __DragonFly__
DECLARE_DUMMY_MODULE(fwip);
#endif
DRIVER_MODULE(fwip, firewire, fwip_driver, fwip_devclass, 0, 0);
MODULE_VERSION(fwip, 1);
MODULE_DEPEND(fwip, firewire, 1, 1, 1);