/*-
 * Copyright (c) 2004
 *	Doug Rabson
 * Copyright (c) 2002-2003
 *	Hidetoshi Shimokawa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Hidetoshi Shimokawa.
 *
 * 4. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/firewire.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#ifdef __DragonFly__
#include <bus/firewire/firewire.h>
#include <bus/firewire/firewirereg.h>
#include "if_fwipvar.h"
#else
#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/iec13213.h>
#include <dev/firewire/if_fwipvar.h>
#endif

/*
 * We really need a mechanism for allocating regions in the FIFO
 * address space.  We pick an address in the OHCI controller's 'middle'
 * address space.  This means that the controller will automatically
 * send responses for us, which is fine since we don't have any
 * important information to put in the response anyway.
 */
#define INET_FIFO	0xfffe00000000LL

#define FWIPDEBUG	if (fwipdebug) if_printf
#define TX_MAX_QUEUE	(FWMAXQUEUE - 1)

/* network interface */
static void fwip_start (struct ifnet *);
static int fwip_ioctl (struct ifnet *, u_long, caddr_t);
static void fwip_init (void *);

static void fwip_post_busreset (void *);
static void fwip_output_callback (struct fw_xfer *);
static void fwip_async_output (struct fwip_softc *, struct ifnet *);
static void fwip_start_send (void *, int);
static void fwip_stream_input (struct fw_xferq *);
static void fwip_unicast_input (struct fw_xfer *);

static int fwipdebug = 0;
static int broadcast_channel = 0xc0 | 0x1f; /* tag | channel(XXX) */
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;

static MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0,
	"FireWire IP subsystem");
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len,
	0, "Length of the receive queue");

TUNABLE_INT("hw.firewire.fwip.rx_queue_len", &rx_queue_len);

#ifdef DEVICE_POLLING
static poll_handler_t fwip_poll;

static int
fwip_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fwip_softc *fwip;
	struct firewire_comm *fc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (0);

	fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	fc = fwip->fd.fc;
	fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS) ? 0 : 1, count);
	return (0);
}
#endif /* DEVICE_POLLING */

static void
fwip_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "fwip", device_get_unit(parent));
}

static int
fwip_probe(device_t dev)
{
	device_t pa;

	pa = device_get_parent(dev);
	if (device_get_unit(dev) != device_get_unit(pa)) {
		return (ENXIO);
	}

	device_set_desc(dev, "IP over FireWire");
	return (0);
}

static int
fwip_attach(device_t dev)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int unit, s;
	struct fw_hwaddr *hwaddr;

	fwip = ((struct fwip_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);
	ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394);
	if (ifp == NULL)
		return (ENOSPC);

	mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF);
	/* XXX */
	fwip->dma_ch = -1;

	fwip->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwip->fd.fc->speed;

	fwip->fd.dev = dev;
	fwip->fd.post_explore = NULL;
	fwip->fd.post_busreset = fwip_post_busreset;
	fwip->fw_softc.fwip = fwip;
	TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip);

	/*
	 * Encode our hardware address the way that arp likes it.
	 */
	hwaddr = &IFP2FWC(fwip->fw_softc.fwip_ifp)->fc_hwaddr;
	hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi);
	hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo);
	hwaddr->sender_max_rec = fwip->fd.fc->maxrec;
	hwaddr->sspd = fwip->fd.fc->speed;
	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);

	/* fill the rest and attach interface */
	ifp->if_softc = &fwip->fw_softc;

#if __FreeBSD_version >= 501113 || defined(__DragonFly__)
	if_initname(ifp, device_get_name(dev), unit);
#else
	ifp->if_unit = unit;
	ifp->if_name = "fwip";
#endif
	ifp->if_init = fwip_init;
	ifp->if_start = fwip_start;
	ifp->if_ioctl = fwip_ioctl;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	s = splimp();
	firewire_ifattach(ifp, hwaddr);
	splx(s);

	FWIPDEBUG(ifp, "interface created\n");
	return 0;
}

static void
fwip_stop(struct fwip_softc *fwip)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	struct ifnet *ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwip->fd.fc;

	if (fwip->dma_ch >= 0) {
		xferq = fc->ir[fwip->dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwip->dma_ch);
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand = NULL;

		for (i = 0; i < xferq->bnchunk; i++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWIP);

		fw_bindremove(fc, &fwip->fwb);
		for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}

		for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwip->xferlist);

		xferq->bulkxfer = NULL;
		fwip->dma_ch = -1;
	}

#if defined(__FreeBSD__)
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#else
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#endif
}

static int
fwip_detach(device_t dev)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int s;

	fwip = (struct fwip_softc *)device_get_softc(dev);
	ifp = fwip->fw_softc.fwip_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	s = splimp();

	fwip_stop(fwip);
	firewire_ifdetach(ifp);
	if_free(ifp);
	mtx_destroy(&fwip->mtx);

	splx(s);
	return 0;
}

static void
fwip_init(void *arg)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)arg)->fwip;
	struct firewire_comm *fc;
	struct ifnet *ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWIPDEBUG(ifp, "initializing\n");

	fc = fwip->fd.fc;
#define START 0
	if (fwip->dma_ch < 0) {
		fwip->dma_ch = fw_open_isodma(fc, /* tx */0);
		if (fwip->dma_ch < 0)
			return;
		xferq = fc->ir[fwip->dma_ch];
		xferq->flag |= FWXFERQ_EXTBUF |
			FWXFERQ_HANDLER | FWXFERQ_STREAM;
		xferq->flag &= ~0xff;
		xferq->flag |= broadcast_channel & 0xff;
		/* register fwip_input handler */
		xferq->sc = (caddr_t) fwip;
		xferq->hand = fwip_stream_input;
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
			sizeof(struct fw_bulkxfer) * xferq->bnchunk,
			M_FWIP, M_WAITOK);
		if (xferq->bulkxfer == NULL) {
			printf("if_fwip: malloc failed\n");
			return;
		}
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		for (i = 0; i < xferq->bnchunk; i++) {
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xferq->bulkxfer[i].mbuf = m;
			m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree,
			    &xferq->bulkxfer[i], link);
		}

		fwip->fwb.start = INET_FIFO;
		fwip->fwb.end = INET_FIFO + 16384; /* S3200 packet size */

		/* pre-allocate xfer */
		STAILQ_INIT(&fwip->fwb.xferlist);
		for (i = 0; i < rx_queue_len; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xfer->recv.payload = mtod(m, uint32_t *);
			xfer->recv.pay_len = MCLBYTES;
			xfer->hand = fwip_unicast_input;
			xfer->fc = fc;
			xfer->sc = (caddr_t)fwip;
			xfer->mbuf = m;
			STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
		}
		fw_bindadd(fc, &fwip->fwb);

		STAILQ_INIT(&fwip->xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = fwip->fd.fc;
			xfer->sc = (caddr_t)fwip;
			xfer->hand = fwip_output_callback;
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[fwip->dma_ch];

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, fwip->dma_ch);

#if defined(__FreeBSD__)
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
#else
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
#endif

#if 0
	/* attempt to start output */
	fwip_start(ifp);
#endif
}

static int
fwip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	int s, error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		s = splimp();
		if (ifp->if_flags & IFF_UP) {
#if defined(__FreeBSD__)
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
#else
			if (!(ifp->if_flags & IFF_RUNNING))
#endif
				fwip_init(&fwip->fw_softc);
		} else {
#if defined(__FreeBSD__)
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
#else
			if (ifp->if_flags & IFF_RUNNING)
#endif
				fwip_stop(fwip);
		}
		splx(s);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
	    {
		struct ifreq *ifr = (struct ifreq *) data;
		struct firewire_comm *fc = fwip->fd.fc;

		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(fwip_poll, ifp);
			if (error)
				return (error);
			/* Disable interrupts */
			fc->set_intr(fc, 0);
			ifp->if_capenable |= IFCAP_POLLING |
			    IFCAP_POLLING_NOCOUNT;
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			fc->set_intr(fc, 1);
			ifp->if_capenable &= ~IFCAP_POLLING;
			ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT;
			return (error);
		}
	    }
#endif /* DEVICE_POLLING */
		break;
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	default:
#else
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
#endif
		s = splimp();
		error = firewire_ioctl(ifp, cmd, data);
		splx(s);
		return (error);
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	default:
		return (EINVAL);
#endif
	}

	return (0);
}

static void
fwip_post_busreset(void *arg)
{
	struct fwip_softc *fwip = arg;
	struct crom_src *src;
	struct crom_chunk *root;

	src = fwip->fd.fc->crom_src;
	root = fwip->fd.fc->crom_root;

	/* RFC2734 IPv4 over IEEE1394 */
	bzero(&fwip->unit4, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit4, CROM_UDIR);
	crom_add_entry(&fwip->unit4, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit4, &fwip->spec4, "IANA");
	crom_add_entry(&fwip->unit4, CSRKEY_VER, 1);
	crom_add_simple_text(src, &fwip->unit4, &fwip->ver4, "IPv4");

	/* RFC3146 IPv6 over IEEE1394 */
	bzero(&fwip->unit6, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit6, CROM_UDIR);
	crom_add_entry(&fwip->unit6, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit6, &fwip->spec6, "IANA");
	crom_add_entry(&fwip->unit6, CSRKEY_VER, 2);
	crom_add_simple_text(src, &fwip->unit6, &fwip->ver6, "IPv6");

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;
	firewire_busreset(fwip->fw_softc.fwip_ifp);
}

static void
fwip_output_callback(struct fw_xfer *xfer)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int s;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	/* XXX error check */
	FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		ifp->if_oerrors++;

	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	s = splimp();
	FWIP_LOCK(fwip);
	STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
	FWIP_UNLOCK(fwip);
	splx(s);

	/* for queue full */
	if (ifp->if_snd.ifq_head != NULL) {
		fwip_start(ifp);
	}
}

static void
fwip_start(struct ifnet *ifp)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	int s;

	FWIPDEBUG(ifp, "starting\n");

	if (fwip->dma_ch < 0) {
		struct mbuf *m = NULL;

		FWIPDEBUG(ifp, "not ready\n");

		s = splimp();
		do {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL) {
				m_freem(m);
				ifp->if_oerrors++;
			}
		} while (m != NULL);
		splx(s);

		return;
	}

	s = splimp();
#if defined(__FreeBSD__)
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#else
	ifp->if_flags |= IFF_OACTIVE;
#endif

	if (ifp->if_snd.ifq_len != 0)
		fwip_async_output(fwip, ifp);

#if defined(__FreeBSD__)
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
#else
	ifp->if_flags &= ~IFF_OACTIVE;
#endif
	splx(s);
}
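
/*
 * Transmit-path summary (editor's note, reflecting the code below):
 * broadcast and multicast frames are prepended with an RFC 2734 GASP
 * header (specifier ID 0x00005e, version 1) and sent as asynchronous
 * stream packets on the broadcast channel, while unicast frames are
 * sent as block write requests to the peer's unicast FIFO address
 * taken from the ARP/neighbour discovery result.
 */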

/* Async. stream output */
static void
fwip_async_output(struct fwip_softc *fwip, struct ifnet *ifp)
{
	struct firewire_comm *fc = fwip->fd.fc;
	struct mbuf *m;
	struct m_tag *mtag;
	struct fw_hwaddr *destfw;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	uint16_t nodeid;
	int error;
	int i = 0;

	xfer = NULL;
	xferq = fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
	    (ifp->if_snd.ifq_head != NULL)) {
		FWIP_LOCK(fwip);
		xfer = STAILQ_FIRST(&fwip->xferlist);
		if (xfer == NULL) {
			FWIP_UNLOCK(fwip);
#if 0
			printf("if_fwip: lack of xfer\n");
#endif
			break;
		}
		STAILQ_REMOVE_HEAD(&fwip->xferlist, link);
		FWIP_UNLOCK(fwip);

		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			FWIP_LOCK(fwip);
			STAILQ_INSERT_HEAD(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			break;
		}

		/*
		 * Dig out the link-level address which
		 * firewire_output got via arp or neighbour
		 * discovery.  If we don't have a link-level address,
		 * just stick the thing on the broadcast channel.
		 */
		mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, 0);
		if (mtag == NULL)
			destfw = NULL;
		else
			destfw = (struct fw_hwaddr *) (mtag + 1);

		/*
		 * We don't do any bpf stuff here - the generic code
		 * in firewire_output gives the packet to bpf before
		 * it adds the link-level encapsulation.
		 */

		/*
		 * Put the mbuf in the xfer early in case we hit an
		 * error case below - fwip_output_callback will free
		 * the mbuf.
		 */
		xfer->mbuf = m;

		/*
		 * We use the arp result (if any) to add a suitable firewire
		 * packet header before handing off to the bus.
		 */
		fp = &xfer->send.hdr;
		nodeid = FWLOCALBUS | fc->nodeid;
		if ((m->m_flags & M_BCAST) || !destfw) {
			/*
			 * Broadcast packets are sent as GASP packets with
			 * specifier ID 0x00005e, version 1 on the broadcast
			 * channel.  To be conservative, we send at the
			 * slowest possible speed.
			 */
			uint32_t *p;

			M_PREPEND(m, 2*sizeof(uint32_t), M_NOWAIT);
			if (m == NULL) {
				/*
				 * M_PREPEND frees the chain on failure;
				 * drop the packet and recycle the xfer.
				 */
				xfer->mbuf = NULL;
				ifp->if_oerrors++;
				FWIP_LOCK(fwip);
				STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
				FWIP_UNLOCK(fwip);
				continue;
			}
			p = mtod(m, uint32_t *);
			fp->mode.stream.len = m->m_pkthdr.len;
			fp->mode.stream.chtag = broadcast_channel;
			fp->mode.stream.tcode = FWTCODE_STREAM;
			fp->mode.stream.sy = 0;
			xfer->send.spd = 0;
			/* GASP header: source_ID | specifier ID (hi 16),
			   then specifier ID (lo 8) | version. */
			p[0] = htonl(nodeid << 16);
			p[1] = htonl((0x5e << 24) | 1);
		} else {
			/*
			 * Unicast packets are sent as block writes to the
			 * target's unicast fifo address.  If we can't
			 * find the node address, we just give up.  We
			 * could broadcast it but that might overflow
			 * the packet size limitations due to the
			 * extra GASP header.  Note: the hardware
			 * address is stored in network byte order to
			 * make life easier for ARP.
			 */
			struct fw_device *fd;
			struct fw_eui64 eui;

			eui.hi = ntohl(destfw->sender_unique_ID_hi);
			eui.lo = ntohl(destfw->sender_unique_ID_lo);
			if (fwip->last_dest.hi != eui.hi ||
			    fwip->last_dest.lo != eui.lo) {
				fd = fw_noderesolve_eui64(fc, &eui);
				if (!fd) {
					/* error */
					ifp->if_oerrors++;
					/* XXX set error code */
					fwip_output_callback(xfer);
					continue;
				}
				fwip->last_hdr.mode.wreqb.dst = FWLOCALBUS | fd->dst;
				fwip->last_hdr.mode.wreqb.tlrt = 0;
				fwip->last_hdr.mode.wreqb.tcode = FWTCODE_WREQB;
				fwip->last_hdr.mode.wreqb.pri = 0;
				fwip->last_hdr.mode.wreqb.src = nodeid;
				fwip->last_hdr.mode.wreqb.dest_hi =
					ntohs(destfw->sender_unicast_FIFO_hi);
				fwip->last_hdr.mode.wreqb.dest_lo =
					ntohl(destfw->sender_unicast_FIFO_lo);
				fwip->last_hdr.mode.wreqb.extcode = 0;
				fwip->last_dest = eui;
			}

			fp->mode.wreqb = fwip->last_hdr.mode.wreqb;
			fp->mode.wreqb.len = m->m_pkthdr.len;
			xfer->send.spd = min(destfw->sspd, fc->speed);
		}

		xfer->send.pay_len = m->m_pkthdr.len;

		error = fw_asyreq(fc, -1, xfer);
		if (error == EAGAIN) {
			/*
			 * We ran out of tlabels - requeue the packet
			 * for later transmission.
			 */
			xfer->mbuf = NULL;
			FWIP_LOCK(fwip);
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			IF_PREPEND(&ifp->if_snd, m);
			break;
		}
		if (error) {
			/* error */
			ifp->if_oerrors++;
			/* XXX set error code */
			fwip_output_callback(xfer);
			continue;
		} else {
			ifp->if_opackets++;
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fc);
}

static void
fwip_start_send (void *arg, int count)
{
	struct fwip_softc *fwip = arg;

	fwip->fd.fc->atq->start(fwip->fd.fc);
}

/* Async. stream input */
static void
fwip_stream_input(struct fw_xferq *xferq)
{
	struct mbuf *m, *m0;
	struct m_tag *mtag;
	struct ifnet *ifp;
	struct fwip_softc *fwip;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	uint16_t src;
	uint32_t *p;

	fwip = (struct fwip_softc *)xferq->sc;
	ifp = fwip->fw_softc.fwip_ifp;

	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwip->fd.fc->irx_post != NULL)
			fwip->fd.fc->irx_post(fwip->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			printf("fwip_stream_input: m_getcl failed\n");

		/*
		 * We must have a GASP header - leave the
		 * encapsulation sanity checks to the generic
		 * code.  Remember that we also have the firewire async
		 * stream header even though that isn't accounted for
		 * in mode.stream.len.
		 */
		if (sxfer->resp != 0 || fp->mode.stream.len <
		    2*sizeof(uint32_t)) {
			m_freem(m);
			ifp->if_ierrors++;
			continue;
		}
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len
			+ sizeof(fp->mode.stream);

		/*
		 * If we received the packet on the broadcast channel,
		 * mark it as broadcast, otherwise we assume it must
		 * be multicast.
		 */
		if (fp->mode.stream.chtag == broadcast_channel)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;

		/*
		 * Make sure we recognise the GASP specifier and
		 * version.  p[0] is the async stream header; p[1] and
		 * p[2] are the two GASP quadlets (source_ID | specifier
		 * ID hi, then specifier ID lo | version).
		 */
		p = mtod(m, uint32_t *);
		if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 0x00005e
		    || (ntohl(p[2]) & 0xffffff) != 1) {
			FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
			    ntohl(p[1]), ntohl(p[2]));
			m_freem(m);
			ifp->if_ierrors++;
			continue;
		}

		/*
		 * Record the sender ID for possible BPF usage.
		 */
		src = ntohl(p[1]) >> 16;
		if (bpf_peers_present(ifp->if_bpf)) {
			mtag = m_tag_alloc(MTAG_FIREWIRE,
			    MTAG_FIREWIRE_SENDER_EUID,
			    2*sizeof(uint32_t), M_NOWAIT);
			if (mtag) {
				/* bpf wants it in network byte order */
				struct fw_device *fd;
				uint32_t *p = (uint32_t *) (mtag + 1);

				fd = fw_noderesolve_nodeid(fwip->fd.fc,
				    src & 0x3f);
				if (fd) {
					p[0] = htonl(fd->eui.hi);
					p[1] = htonl(fd->eui.lo);
				} else {
					p[0] = 0;
					p[1] = 0;
				}
				m_tag_prepend(m, mtag);
			}
		}

		/*
		 * Trim off the GASP header
		 */
		m_adj(m, 3*sizeof(uint32_t));
		m->m_pkthdr.rcvif = ifp;
		firewire_input(ifp, m, src);
		ifp->if_ipackets++;
	}
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwip->fd.fc->irx_enable(fwip->fd.fc, fwip->dma_ch);
}

static __inline void
fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer)
{
	struct mbuf *m;

	/*
	 * We have finished with a unicast xfer.  Allocate a new
	 * cluster and stick it on the back of the input queue.
	 */
	m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
	xfer->mbuf = m;
	xfer->recv.payload = mtod(m, uint32_t *);
	xfer->recv.pay_len = MCLBYTES;
	STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
}

static void
fwip_unicast_input(struct fw_xfer *xfer)
{
	uint64_t address;
	struct mbuf *m;
	struct m_tag *mtag;
	struct ifnet *ifp;
	struct fwip_softc *fwip;
	struct fw_pkt *fp;
	int rtcode;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	m = xfer->mbuf;
	xfer->mbuf = NULL;
	fp = &xfer->recv.hdr;

	/*
	 * Check the fifo address - we only accept addresses of
	 * exactly INET_FIFO.
	 */
	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
		| fp->mode.wreqb.dest_lo;
	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
		rtcode = FWRCODE_ER_TYPE;
	} else if (address != INET_FIFO) {
		rtcode = FWRCODE_ER_ADDR;
	} else {
		rtcode = FWRCODE_COMPLETE;
	}

	/*
	 * Pick up a new mbuf and stick it on the back of the receive
	 * queue.
	 */
	fwip_unicast_input_recycle(fwip, xfer);

	/*
	 * If we've already rejected the packet, give up now.
	 */
	if (rtcode != FWRCODE_COMPLETE) {
		m_freem(m);
		ifp->if_ierrors++;
		return;
	}

	if (bpf_peers_present(ifp->if_bpf)) {
		/*
		 * Record the sender ID for possible BPF usage.
		 */
		mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID,
		    2*sizeof(uint32_t), M_NOWAIT);
		if (mtag) {
			/* bpf wants it in network byte order */
			struct fw_device *fd;
			uint32_t *p = (uint32_t *) (mtag + 1);

			fd = fw_noderesolve_nodeid(fwip->fd.fc,
			    fp->mode.wreqb.src & 0x3f);
			if (fd) {
				p[0] = htonl(fd->eui.hi);
				p[1] = htonl(fd->eui.lo);
			} else {
				p[0] = 0;
				p[1] = 0;
			}
			m_tag_prepend(m, mtag);
		}
	}

	/*
	 * Hand off to the generic encapsulation code.  We don't use
	 * ifp->if_input so that we can pass the source nodeid as an
	 * argument to facilitate link-level fragment reassembly.
	 */
	m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
	m->m_pkthdr.rcvif = ifp;
	firewire_input(ifp, m, fp->mode.wreqb.src);
	ifp->if_ipackets++;
}

static devclass_t fwip_devclass;

static device_method_t fwip_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify,	fwip_identify),
	DEVMETHOD(device_probe,		fwip_probe),
	DEVMETHOD(device_attach,	fwip_attach),
	DEVMETHOD(device_detach,	fwip_detach),
	{ 0, 0 }
};

static driver_t fwip_driver = {
	"fwip",
	fwip_methods,
	sizeof(struct fwip_softc),
};

#ifdef __DragonFly__
DECLARE_DUMMY_MODULE(fwip);
#endif
DRIVER_MODULE(fwip, firewire, fwip_driver, fwip_devclass, 0, 0);
MODULE_VERSION(fwip, 1);
MODULE_DEPEND(fwip, firewire, 1, 1, 1);