/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2002-2003
 *	Hidetoshi Shimokawa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Hidetoshi Shimokawa.
 *
 * 4. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/if_fwevar.h>

#define FWEDEBUG	if (fwedebug) if_printf
#define TX_MAX_QUEUE	(FWMAXQUEUE - 1)

/* network interface */
static void fwe_start (if_t);
static int fwe_ioctl (if_t, u_long, caddr_t);
static void fwe_init (void *);

static void fwe_output_callback (struct fw_xfer *);
static void fwe_as_output (struct fwe_softc *, if_t);
static void fwe_as_input (struct fw_xferq *);

static int fwedebug = 0;
static int stream_ch = 1;
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;

static MALLOC_DEFINE(M_FWE, "if_fwe", "Ethernet over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwe_debug, CTLFLAG_RWTUN, &fwedebug, 0, "");
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Ethernet emulation subsystem");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, stream_ch, CTLFLAG_RWTUN, &stream_ch, 0,
    "Stream channel to use");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, tx_speed, CTLFLAG_RWTUN, &tx_speed, 0,
    "Transmission speed");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len,
    0, "Length of the receive queue");

#ifdef DEVICE_POLLING
static poll_handler_t fwe_poll;

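/*
 * DEVICE_POLLING handler: when polling is enabled on the interface, the
 * network stack calls this instead of relying on interrupts.  It simply
 * forwards the poll request to the underlying FireWire controller's
 * poll method.
 */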
static int
fwe_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct fwe_softc *fwe;
	struct firewire_comm *fc;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		return (0);

	fwe = ((struct fwe_eth_softc *)if_getsoftc(ifp))->fwe;
	fc = fwe->fd.fc;
	fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS) ? 0 : 1, count);
	return (0);
}
#endif /* DEVICE_POLLING */

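/*
 * Bus glue: create one fwe child per FireWire bus and accept only the
 * instance whose unit number matches its parent's.
 */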
static void
fwe_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "fwe", device_get_unit(parent));
}

static int
fwe_probe(device_t dev)
{
	device_t pa;

	pa = device_get_parent(dev);
	if (device_get_unit(dev) != device_get_unit(pa)) {
		return (ENXIO);
	}

	device_set_desc(dev, "Ethernet over FireWire");
	return (0);
}

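/*
 * Attach: initialize the softc, derive a fake Ethernet address from the
 * controller's EUI-64, and attach an Ethernet interface on top of the
 * FireWire device.
 */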
static int
fwe_attach(device_t dev)
{
	struct fwe_softc *fwe;
	if_t ifp;
	int unit, s;
	u_char eaddr[6];
	struct fw_eui64 *eui;

	fwe = ((struct fwe_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);

	bzero(fwe, sizeof(struct fwe_softc));
	mtx_init(&fwe->mtx, "fwe", NULL, MTX_DEF);
	/* XXX */
	fwe->stream_ch = stream_ch;
	fwe->dma_ch = -1;

	fwe->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwe->fd.fc->speed;

	fwe->fd.dev = dev;
	fwe->fd.post_explore = NULL;
	fwe->eth_softc.fwe = fwe;

	fwe->pkt_hdr.mode.stream.tcode = FWTCODE_STREAM;
	fwe->pkt_hdr.mode.stream.sy = 0;
	fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch;

	/* generate a fake MAC address: first and last 3 bytes of the EUI-64 */
#define LOCAL (0x02)
#define GROUP (0x01)

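	/* mark the address locally administered (LOCAL) and unicast (~GROUP) */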
	eui = &fwe->fd.fc->eui;
	eaddr[0] = (FW_EUI64_BYTE(eui, 0) | LOCAL) & ~GROUP;
	eaddr[1] = FW_EUI64_BYTE(eui, 1);
	eaddr[2] = FW_EUI64_BYTE(eui, 2);
	eaddr[3] = FW_EUI64_BYTE(eui, 5);
	eaddr[4] = FW_EUI64_BYTE(eui, 6);
	eaddr[5] = FW_EUI64_BYTE(eui, 7);
	printf("if_fwe%d: Fake Ethernet address: "
	    "%02x:%02x:%02x:%02x:%02x:%02x\n", unit,
	    eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);

	/* fill the rest and attach interface */
	ifp = fwe->eth_softc.ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, &fwe->eth_softc);

	if_initname(ifp, device_get_name(dev), unit);
	if_setinitfn(ifp, fwe_init);
	if_setstartfn(ifp, fwe_start);
	if_setioctlfn(ifp, fwe_ioctl);
	if_setflags(ifp, (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST));
	if_setsendqlen(ifp, TX_MAX_QUEUE);

	s = splimp();
	ether_ifattach(ifp, eaddr);
	splx(s);

	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_POLLING, 0);
	if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);

	FWEDEBUG(ifp, "interface created\n");
	return (0);
}

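/*
 * Stop: shut down the receive isochronous DMA channel, release its
 * receive buffers and the preallocated transmit xfers, and clear the
 * interface's running state.
 */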
static void
fwe_stop(struct fwe_softc *fwe)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	if_t ifp = fwe->eth_softc.ifp;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwe->fd.fc;

	if (fwe->dma_ch >= 0) {
		xferq = fc->ir[fwe->dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwe->dma_ch);
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand = NULL;

		for (i = 0; i < xferq->bnchunk; i++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWE);

		for (xfer = STAILQ_FIRST(&fwe->xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwe->xferlist);

		xferq->bulkxfer = NULL;
		fwe->dma_ch = -1;
	}

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}

static int
fwe_detach(device_t dev)
{
	struct fwe_softc *fwe;
	if_t ifp;
	int s;

	fwe = device_get_softc(dev);
	ifp = fwe->eth_softc.ifp;

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	s = splimp();

	fwe_stop(fwe);
	ether_ifdetach(ifp);
	if_free(ifp);

	splx(s);
	mtx_destroy(&fwe->mtx);
	return (0);
}

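/*
 * Init: open a receive isochronous DMA channel on the first call, hook
 * fwe_as_input as its handler, preallocate cluster mbufs for the
 * receive ring and fw_xfer structures for the transmit queue, then
 * enable reception and mark the interface running.
 */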
static void
fwe_init(void *arg)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)arg)->fwe;
	struct firewire_comm *fc;
	if_t ifp = fwe->eth_softc.ifp;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWEDEBUG(ifp, "initializing\n");

	/* XXX keep promiscuous mode */
	if_setflagbits(ifp, IFF_PROMISC, 0);

	fc = fwe->fd.fc;
	if (fwe->dma_ch < 0) {
		fwe->dma_ch = fw_open_isodma(fc, /* tx */0);
		if (fwe->dma_ch < 0)
			return;
		xferq = fc->ir[fwe->dma_ch];
		xferq->flag |= FWXFERQ_EXTBUF |
		    FWXFERQ_HANDLER | FWXFERQ_STREAM;
		fwe->stream_ch = stream_ch;
		fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch;
		xferq->flag &= ~0xff;
		xferq->flag |= fwe->stream_ch & 0xff;
		/* register fwe_input handler */
		xferq->sc = (caddr_t) fwe;
		xferq->hand = fwe_as_input;
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
			sizeof(struct fw_bulkxfer) * xferq->bnchunk,
			M_FWE, M_WAITOK);
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		for (i = 0; i < xferq->bnchunk; i++) {
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xferq->bulkxfer[i].mbuf = m;
			m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree,
			    &xferq->bulkxfer[i], link);
		}
		STAILQ_INIT(&fwe->xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FWE);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = fwe->fd.fc;
			xfer->sc = (caddr_t)fwe;
			xfer->hand = fwe_output_callback;
			STAILQ_INSERT_TAIL(&fwe->xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[fwe->dma_ch];

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, fwe->dma_ch);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

#if 0
	/* attempt to start output */
	fwe_start(ifp);
#endif
}

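/*
 * Ioctl: handle interface up/down, report the stream channel and DMA
 * channel via SIOCGIFSTATUS, and toggle DEVICE_POLLING on SIOCSIFCAP;
 * everything else is passed to ether_ioctl().
 */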
static int
fwe_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)if_getsoftc(ifp))->fwe;
	struct ifstat *ifs = NULL;
	int s, error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		s = splimp();
		if (if_getflags(ifp) & IFF_UP) {
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				fwe_init(&fwe->eth_softc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				fwe_stop(fwe);
		}
		/* XXX keep promiscuous mode */
		if_setflagbits(ifp, IFF_PROMISC, 0);
		splx(s);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCGIFSTATUS:
		s = splimp();
		ifs = (struct ifstat *)data;
		snprintf(ifs->ascii, sizeof(ifs->ascii),
		    "\tch %d dma %d\n", fwe->stream_ch, fwe->dma_ch);
		splx(s);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
	    {
		struct ifreq *ifr = (struct ifreq *) data;
		struct firewire_comm *fc = fwe->fd.fc;

		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(if_getcapenable(ifp) & IFCAP_POLLING)) {
			error = ether_poll_register(fwe_poll, ifp);
			if (error)
				return (error);
			/* Disable interrupts */
			fc->set_intr(fc, 0);
			if_setcapenablebit(ifp, IFCAP_POLLING, 0);
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    if_getcapenable(ifp) & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			fc->set_intr(fc, 1);
			if_setcapenablebit(ifp, 0, IFCAP_POLLING);
			return (error);
		}
	    }
#endif /* DEVICE_POLLING */
		break;
	default:
		s = splimp();
		error = ether_ioctl(ifp, cmd, data);
		splx(s);
		return (error);
	}

	return (0);
}

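/*
 * Transmit completion callback: count errors, free the transmitted
 * mbuf, return the xfer to the free list, and kick the start routine
 * in case packets are still queued.
 */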
static void
fwe_output_callback(struct fw_xfer *xfer)
{
	struct fwe_softc *fwe;
	if_t ifp;
	int s;

	fwe = (struct fwe_softc *)xfer->sc;
	ifp = fwe->eth_softc.ifp;
	/* XXX error check */
	FWEDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	s = splimp();
	FWE_LOCK(fwe);
	STAILQ_INSERT_TAIL(&fwe->xferlist, xfer, link);
	FWE_UNLOCK(fwe);
	splx(s);

	/* for queue full */
	if (!if_sendq_empty(ifp))
		fwe_start(ifp);
}

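/*
 * Start: if no DMA channel has been opened yet, drop anything queued
 * for transmission; otherwise hand the send queue to fwe_as_output()
 * with IFF_DRV_OACTIVE held across the transfer.
 */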
static void
fwe_start(if_t ifp)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)if_getsoftc(ifp))->fwe;
	int s;

	FWEDEBUG(ifp, "starting\n");

	if (fwe->dma_ch < 0) {
		struct mbuf *m = NULL;

		FWEDEBUG(ifp, "not ready\n");

		s = splimp();
		do {
			m = if_dequeue(ifp);
			if (m != NULL) {
				m_freem(m);
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			}
		} while (m != NULL);
		splx(s);

		return;
	}

	s = splimp();
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);

	if (!if_sendq_empty(ifp))
		fwe_as_output(fwe, ifp);

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	splx(s);
}

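/*
 * Each frame is carried in an asynchronous stream packet: HDR_LEN is
 * the size of the stream packet header at the head of a received
 * buffer, and ETHER_ALIGN pads the Ethernet header so that the IP
 * header following it is 32-bit aligned.
 */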
#define HDR_LEN 4
#ifndef ETHER_ALIGN
#define ETHER_ALIGN 2
#endif
/* Async. stream output */
static void
fwe_as_output(struct fwe_softc *fwe, if_t ifp)
{
	struct mbuf *m;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	int i = 0;

	xfer = NULL;
	xferq = fwe->fd.fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
	    !if_sendq_empty(ifp)) {
		FWE_LOCK(fwe);
		xfer = STAILQ_FIRST(&fwe->xferlist);
		if (xfer == NULL) {
#if 0
			printf("if_fwe: lack of xfer\n");
#endif
			FWE_UNLOCK(fwe);
			break;
		}
		STAILQ_REMOVE_HEAD(&fwe->xferlist, link);
		FWE_UNLOCK(fwe);

		m = if_dequeue(ifp);
		if (m == NULL) {
			FWE_LOCK(fwe);
			STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link);
			FWE_UNLOCK(fwe);
			break;
		}
		BPF_MTAP(ifp, m);

		/* keep ip packet alignment for alpha */
		M_PREPEND(m, ETHER_ALIGN, M_NOWAIT);
		if (m == NULL) {
			/* M_PREPEND freed the chain on failure */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			FWE_LOCK(fwe);
			STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link);
			FWE_UNLOCK(fwe);
			break;
		}
		fp = &xfer->send.hdr;
		*(uint32_t *)&xfer->send.hdr = *(int32_t *)&fwe->pkt_hdr;
		fp->mode.stream.len = m->m_pkthdr.len;
		xfer->mbuf = m;
		xfer->send.pay_len = m->m_pkthdr.len;

		if (fw_asyreq(fwe->fd.fc, -1, xfer) != 0) {
			/* error */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			/* XXX set error code */
			fwe_output_callback(xfer);
		} else {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fwe->fd.fc);
}

/* Async. stream input */
static void
fwe_as_input(struct fw_xferq *xferq)
{
	struct mbuf *m, *m0;
	if_t ifp;
	struct fwe_softc *fwe;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
#if 0
	u_char *c;
#endif

	fwe = (struct fwe_softc *)xferq->sc;
	ifp = fwe->eth_softc.ifp;

	/* We do not need a lock here because the bottom half is serialized */
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwe->fd.fc->irx_post != NULL)
			fwe->fd.fc->irx_post(fwe->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			printf("%s: m_getcl failed\n", __FUNCTION__);

		if (sxfer->resp != 0 || fp->mode.stream.len <
		    ETHER_ALIGN + sizeof(struct ether_header)) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

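		/*
		 * Strip the stream packet header and the alignment pad so
		 * the mbuf starts at the Ethernet header.
		 */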
		m->m_data += HDR_LEN + ETHER_ALIGN;
#if 0
		c = mtod(m, u_char *);
#endif
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len - ETHER_ALIGN;
		m->m_pkthdr.rcvif = ifp;
#if 0
		FWEDEBUG(ifp, "%02x %02x %02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n",
		    c[0], c[1], c[2], c[3], c[4], c[5],
		    c[6], c[7], c[8], c[9], c[10], c[11],
		    c[12], c[13], c[14], c[15],
		    c[16], c[17], c[18], c[19],
		    c[20], c[21], c[22], c[23],
		    c[24], c[25], c[26], c[27]
		    );
#endif
		if_input(ifp, m);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	}
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwe->fd.fc->irx_enable(fwe->fd.fc, fwe->dma_ch);
}

static device_method_t fwe_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify, fwe_identify),
	DEVMETHOD(device_probe, fwe_probe),
	DEVMETHOD(device_attach, fwe_attach),
	DEVMETHOD(device_detach, fwe_detach),
	{ 0, 0 }
};

static driver_t fwe_driver = {
	"fwe",
	fwe_methods,
	sizeof(struct fwe_softc),
};

DRIVER_MODULE(fwe, firewire, fwe_driver, 0, 0);
MODULE_VERSION(fwe, 1);
MODULE_DEPEND(fwe, firewire, 1, 1, 1);