xref: /freebsd/sys/powerpc/ps3/if_glc.c (revision 884a2a699669ec61e2366e3e358342dbc94be24a)
/*-
 * Copyright (C) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/pio.h>
#include <machine/bus.h>
#include <machine/platform.h>
#include <machine/pmap.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "ps3bus.h"
#include "ps3-hvcall.h"
#include "if_glcreg.h"

static int	glc_probe(device_t);
static int	glc_attach(device_t);
static void	glc_init(void *xsc);
static void	glc_start(struct ifnet *ifp);
static int	glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	glc_set_multicast(struct glc_softc *sc);
static int	glc_add_rxbuf(struct glc_softc *sc, int idx);
static int	glc_add_rxbuf_dma(struct glc_softc *sc, int idx);
static int	glc_encap(struct glc_softc *sc, struct mbuf **m_head,
		    bus_addr_t *pktdesc);
static int	glc_intr_filter(void *xsc);
static void	glc_intr(void *xsc);
static void	glc_tick(void *xsc);
static void	glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	glc_media_change(struct ifnet *ifp);

static MALLOC_DEFINE(M_GLC, "gelic", "PS3 GELIC ethernet");

static device_method_t glc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		glc_probe),
	DEVMETHOD(device_attach,	glc_attach),

	{ 0, 0 }
};

static driver_t glc_driver = {
	"glc",
	glc_methods,
	sizeof(struct glc_softc)
};

static devclass_t glc_devclass;

DRIVER_MODULE(glc, ps3bus, glc_driver, glc_devclass, 0, 0);

static int
glc_probe(device_t dev)
{

	if (ps3bus_get_bustype(dev) != PS3_BUSTYPE_SYSBUS ||
	    ps3bus_get_devtype(dev) != PS3_DEVTYPE_GELIC)
		return (ENXIO);

	device_set_desc(dev, "PlayStation 3 GELIC Network Controller");
	return (BUS_PROBE_SPECIFIC);
}

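/*
 * Callback for bus_dmamap_load(): stash the bus address of the first
 * (and only) mapped segment for the caller.
 */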
static void
glc_getphys(void *xaddr, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;

	*(bus_addr_t *)xaddr = segs[0].ds_addr;
}

static int
glc_attach(device_t dev)
{
	struct glc_softc *sc;
	struct glc_txsoft *txs;
	uint64_t mac64, val, junk;
	int i, err;

	sc = device_get_softc(dev);

	sc->sc_bus = ps3bus_get_bus(dev);
	sc->sc_dev = ps3bus_get_device(dev);
	sc->sc_self = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	sc->next_txdma_slot = 0;
	sc->bsy_txdma_slots = 0;
	sc->first_used_txdma_slot = -1;

	/*
	 * Shut down existing tasks.
	 */

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

	sc->sc_ifp = if_alloc(IFT_ETHER);
	sc->sc_ifp->if_softc = sc;

	/*
	 * Get MAC address and VLAN id
	 */

	lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_MAC_ADDRESS,
	    0, 0, 0, &mac64, &junk);
	memcpy(sc->sc_enaddr, &((uint8_t *)&mac64)[2], sizeof(sc->sc_enaddr));
	sc->sc_tx_vlan = sc->sc_rx_vlan = -1;
	err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
	    GELIC_VLAN_TX_ETHERNET, 0, 0, &val, &junk);
	if (err == 0)
		sc->sc_tx_vlan = val;
	err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
	    GELIC_VLAN_RX_ETHERNET, 0, 0, &val, &junk);
	if (err == 0)
		sc->sc_rx_vlan = val;

	/*
	 * Set up interrupt handler
	 */
	sc->sc_irqid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "Could not allocate IRQ!\n");
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	bus_setup_intr(dev, sc->sc_irq,
	    INTR_TYPE_MISC | INTR_MPSAFE | INTR_ENTROPY,
	    glc_intr_filter, glc_intr, sc, &sc->sc_irqctx);
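	/*
	 * The hypervisor posts interrupt status as a 64-bit word in
	 * memory; hand it an 8-byte-aligned doubleword below 4 GB
	 * (glc_intr_filter() reads it back on each interrupt).
	 */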
	sc->sc_hwirq_status = (uint64_t *)contigmalloc(8, M_GLC, M_ZERO, 0,
	    BUS_SPACE_MAXADDR_32BIT, 8, PAGE_SIZE);
	lv1_net_set_interrupt_status_indicator(sc->sc_bus, sc->sc_dev,
	    vtophys(sc->sc_hwirq_status), 0);
	lv1_net_set_interrupt_mask(sc->sc_bus, sc->sc_dev,
	    GELIC_INT_RXDONE | GELIC_INT_RXFRAME | GELIC_INT_PHY |
	    GELIC_INT_TX_CHAIN_END, 0);

	/*
	 * Set up DMA.
	 */

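	/* One tag serves both 128-entry descriptor rings (TX and RX). */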
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 32, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    129*sizeof(struct glc_dmadesc), 1, 128*sizeof(struct glc_dmadesc),
	    0, NULL, NULL, &sc->sc_dmadesc_tag);

	err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_txdmadesc,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_txdmadesc_map);
	err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
	    sc->sc_txdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
	    &sc->sc_txdmadesc_phys, 0);
	err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_rxdmadesc,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_rxdmadesc_map);
	err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    sc->sc_rxdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
	    &sc->sc_rxdmadesc_phys, 0);

	err = bus_dma_tag_create(bus_get_dma_tag(dev), 128, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_rxdma_tag);
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 16, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_txdma_tag);

	/* init transmit descriptors */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/* create TX DMA maps */
	err = ENOMEM;
	for (i = 0; i < GLC_MAX_TX_PACKETS; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		err = bus_dmamap_create(sc->sc_txdma_tag, 0, &txs->txs_dmamap);
		if (err) {
			device_printf(dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, err);
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
		err = bus_dmamap_create(sc->sc_rxdma_tag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (err) {
			device_printf(dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, err);
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Attach to network stack
	 */

	if_initname(sc->sc_ifp, device_get_name(dev), device_get_unit(dev));
	sc->sc_ifp->if_mtu = ETHERMTU;
	sc->sc_ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
	sc->sc_ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_RXCSUM;
	sc->sc_ifp->if_capenable = IFCAP_HWCSUM | IFCAP_RXCSUM;
	sc->sc_ifp->if_start = glc_start;
	sc->sc_ifp->if_ioctl = glc_ioctl;
	sc->sc_ifp->if_init = glc_init;

	ifmedia_init(&sc->sc_media, IFM_IMASK, glc_media_change,
	    glc_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	IFQ_SET_MAXLEN(&sc->sc_ifp->if_snd, GLC_MAX_TX_PACKETS);
	sc->sc_ifp->if_snd.ifq_drv_maxlen = GLC_MAX_TX_PACKETS;
	IFQ_SET_READY(&sc->sc_ifp->if_snd);

	ether_ifattach(sc->sc_ifp, sc->sc_enaddr);
	sc->sc_ifp->if_hwassist = 0;

	return (0);
}

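/*
 * Bring the interface to a running state: program the multicast filter,
 * fill the RX ring, discard any pending transmits, and restart RX DMA.
 * Called with the softc mutex held.
 */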
static void
glc_init_locked(struct glc_softc *sc)
{
	int i, error;
	struct glc_rxsoft *rxs;
	struct glc_txsoft *txs;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

	glc_set_multicast(sc);

	for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
		rxs = &sc->sc_rxsoft[i];
		rxs->rxs_desc_slot = i;

		if (rxs->rxs_mbuf == NULL) {
			glc_add_rxbuf(sc, i);

			if (rxs->rxs_mbuf == NULL) {
				rxs->rxs_desc_slot = -1;
				break;
			}
		}

		glc_add_rxbuf_dma(sc, i);
		bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* Clear TX dirty queue */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);

		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
	sc->first_used_txdma_slot = -1;
	sc->bsy_txdma_slots = 0;

	error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
	    sc->sc_rxsoft[0].rxs_desc, 0);
	if (error != 0)
		device_printf(sc->sc_self,
		    "lv1_net_start_rx_dma error: %d\n", error);

	sc->sc_ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_ifpflags = sc->sc_ifp->if_flags;

	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
}

static void
glc_stop(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);
}

static void
glc_init(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_lock(&sc->sc_mtx);
	glc_init_locked(sc);
	mtx_unlock(&sc->sc_mtx);
}

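/*
 * Once-per-second watchdog: glc_start_locked() arms sc_wdog_timer; if it
 * counts down to zero before the transmit completes, reset the device.
 */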
static void
glc_tick(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) {
		callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
		return;
	}

	/* Problems */
	device_printf(sc->sc_self, "device timeout\n");

	glc_init_locked(sc);
}

static void
glc_start_locked(struct ifnet *ifp)
{
	struct glc_softc *sc = ifp->if_softc;
	bus_addr_t first, pktdesc;
	int kickstart = 0;
	int error;
	struct mbuf *mb_head;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	first = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (STAILQ_EMPTY(&sc->sc_txdirtyq))
		kickstart = 1;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);

		if (mb_head == NULL)
			break;

		/* Check if the ring buffer is full */
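		/*
		 * (128 slots exist; the 125 threshold presumably leaves a
		 * few slots of headroom rather than filling the ring.)
		 */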
		if (sc->bsy_txdma_slots > 125) {
			/* Put the packet back and stop */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, mb_head);
			break;
		}

		BPF_MTAP(ifp, mb_head);

		if (sc->sc_tx_vlan >= 0)
			mb_head = ether_vlanencap(mb_head, sc->sc_tx_vlan);

		if (glc_encap(sc, &mb_head, &pktdesc)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		if (first == 0)
			first = pktdesc;
	}

	if (kickstart && first != 0) {
		error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev, first, 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_tx_dma error: %d\n", error);
		sc->sc_wdog_timer = 5;
	}
}

static void
glc_start(struct ifnet *ifp)
{
	struct glc_softc *sc = ifp->if_softc;

	mtx_lock(&sc->sc_mtx);
	glc_start_locked(ifp);
	mtx_unlock(&sc->sc_mtx);
}

static int
glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct glc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		mtx_lock(&sc->sc_mtx);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			   ((ifp->if_flags ^ sc->sc_ifpflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				glc_set_multicast(sc);
			else
				glc_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			glc_stop(sc);
		sc->sc_ifpflags = ifp->if_flags;
		mtx_unlock(&sc->sc_mtx);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		mtx_lock(&sc->sc_mtx);
		glc_set_multicast(sc);
		mtx_unlock(&sc->sc_mtx);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}

static void
glc_set_multicast(struct glc_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	uint64_t addr;
	int naddrs;

	/* Clear multicast filter */
	lv1_net_remove_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);

	/* Add broadcast */
	lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
	    0xffffffffffffL, 0);

	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);
	} else {
		if_maddr_rlock(ifp);
		naddrs = 1; /* Include broadcast */
		TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
			if (inm->ifma_addr->sa_family != AF_LINK)
				continue;
			addr = 0;
			memcpy(&((uint8_t *)(&addr))[2],
			    LLADDR((struct sockaddr_dl *)inm->ifma_addr),
			    ETHER_ADDR_LEN);

			lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
			    addr, 0);

			/*
			 * Filter can only hold 32 addresses, so fall back to
			 * the IFF_ALLMULTI case if we have too many.
			 */
			if (++naddrs >= 32) {
				lv1_net_add_multicast_address(sc->sc_bus,
				    sc->sc_dev, 0, 1);
				break;
			}
		}
		if_maddr_runlock(ifp);
	}
}

static int
glc_add_rxbuf(struct glc_softc *sc, int idx)
{
	struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rxdma_tag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rxdma_tag, rxs->rxs_dmamap, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_self,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->rxs_mbuf = m;
	rxs->segment = segs[0];

	bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	return (0);
}

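/*
 * Fill in the RX descriptor for slot idx, chain it to the following slot,
 * and pass ownership of the descriptor to the hardware.
 */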
static int
glc_add_rxbuf_dma(struct glc_softc *sc, int idx)
{
	struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];

	bzero(&sc->sc_rxdmadesc[idx], sizeof(sc->sc_rxdmadesc[idx]));
	sc->sc_rxdmadesc[idx].paddr = rxs->segment.ds_addr;
	sc->sc_rxdmadesc[idx].len = rxs->segment.ds_len;
	sc->sc_rxdmadesc[idx].next = sc->sc_rxdmadesc_phys +
	    ((idx + 1) % GLC_MAX_RX_PACKETS)*sizeof(sc->sc_rxdmadesc[idx]);
	sc->sc_rxdmadesc[idx].cmd_stat = GELIC_DESCR_OWNED;

	rxs->rxs_desc_slot = idx;
	rxs->rxs_desc = sc->sc_rxdmadesc_phys + idx*sizeof(struct glc_dmadesc);

	return (0);
}

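/*
 * Map a packet into TX DMA descriptors, one per segment, and link the
 * chain into the ring. The bus address of the first descriptor is
 * returned through pktdesc.
 */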
static int
glc_encap(struct glc_softc *sc, struct mbuf **m_head, bus_addr_t *pktdesc)
{
	bus_dma_segment_t segs[16];
	struct glc_txsoft *txs;
	struct mbuf *m;
	bus_addr_t firstslotphys;
	int i, idx, nsegs, nsegs_max;
	int err = 0;

	/* Max number of segments is the number of free DMA slots */
	nsegs_max = 128 - sc->bsy_txdma_slots;

	if (nsegs_max > 16 || sc->first_used_txdma_slot < 0)
		nsegs_max = 16;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	nsegs = 0;
	for (m = *m_head; m != NULL; m = m->m_next)
		nsegs++;

	if (nsegs > nsegs_max) {
		m = m_collapse(*m_head, M_DONTWAIT, nsegs_max);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	err = bus_dmamap_load_mbuf_sg(sc->sc_txdma_tag, txs->txs_dmamap,
	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (err);
	}

	KASSERT(nsegs <= 128 - sc->bsy_txdma_slots,
	    ("GLC: Mapped too many (%d) DMA segments with %d available",
	    nsegs, 128 - sc->bsy_txdma_slots));

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->next_txdma_slot;

	idx = txs->txs_firstdesc;
	firstslotphys = sc->sc_txdmadesc_phys +
	    txs->txs_firstdesc*sizeof(struct glc_dmadesc);

	for (i = 0; i < nsegs; i++) {
		bzero(&sc->sc_txdmadesc[idx], sizeof(sc->sc_txdmadesc[idx]));
		sc->sc_txdmadesc[idx].paddr = segs[i].ds_addr;
		sc->sc_txdmadesc[idx].len = segs[i].ds_len;
		sc->sc_txdmadesc[idx].next = sc->sc_txdmadesc_phys +
		    ((idx + 1) % GLC_MAX_TX_PACKETS)*sizeof(struct glc_dmadesc);
		sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_NOIPSEC;

		if (i+1 == nsegs) {
			txs->txs_lastdesc = idx;
			sc->sc_txdmadesc[idx].next = 0;
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_LAST;
		}

		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_TCP;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_UDP;
		sc->sc_txdmadesc[idx].cmd_stat |= GELIC_DESCR_OWNED;

		idx = (idx + 1) % GLC_MAX_TX_PACKETS;
	}
	sc->next_txdma_slot = idx;
	sc->bsy_txdma_slots += nsegs;
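	/*
	 * Link the new chain into the ring: point the descriptor just
	 * before our first slot at it, presumably so a still-running
	 * DMA engine can flow directly into the new packet.
	 */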
	if (txs->txs_firstdesc != 0)
		idx = txs->txs_firstdesc - 1;
	else
		idx = GLC_MAX_TX_PACKETS - 1;

	if (sc->first_used_txdma_slot < 0)
		sc->first_used_txdma_slot = txs->txs_firstdesc;

	bus_dmamap_sync(sc->sc_txdma_tag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);
	sc->sc_txdmadesc[idx].next = firstslotphys;

	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;
	*pktdesc = firstslotphys;

	return (0);
}

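/*
 * Service the RX ring: hand completed frames to the stack, recycle their
 * slots, and restart DMA if the chip stopped at the end of the chain.
 */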
static void
glc_rxintr(struct glc_softc *sc)
{
	int i, restart_rxdma, error;
	struct mbuf *m;
	struct ifnet *ifp = sc->sc_ifp;

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    BUS_DMASYNC_PREWRITE);

	restart_rxdma = 0;
	while ((sc->sc_rxdmadesc[sc->sc_next_rxdma_slot].cmd_stat &
	   GELIC_DESCR_OWNED) == 0) {
		i = sc->sc_next_rxdma_slot;
		if (sc->sc_rxdmadesc[i].rxerror & GELIC_RXERRORS) {
			ifp->if_ierrors++;
			goto requeue;
		}

		m = sc->sc_rxsoft[i].rxs_mbuf;
		if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_IPCSUM) {
			m->m_pkthdr.csum_flags |=
			    CSUM_IP_CHECKED | CSUM_IP_VALID;
		}
		if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_TCPUDPCSUM) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		if (glc_add_rxbuf(sc, i)) {
			ifp->if_ierrors++;
			goto requeue;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_len = sc->sc_rxdmadesc[i].valid_size;
		m->m_pkthdr.len = m->m_len;
		sc->sc_next_rxdma_slot++;
		if (sc->sc_next_rxdma_slot >= GLC_MAX_RX_PACKETS)
			sc->sc_next_rxdma_slot = 0;

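		/*
		 * With an RX VLAN id configured, frames apparently arrive
		 * with two extra bytes at the front; trim them before
		 * handing the mbuf up the stack.
		 */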
		if (sc->sc_rx_vlan >= 0)
			m_adj(m, 2);

		mtx_unlock(&sc->sc_mtx);
		(*ifp->if_input)(ifp, m);
		mtx_lock(&sc->sc_mtx);

	    requeue:
		if (sc->sc_rxdmadesc[i].cmd_stat & GELIC_CMDSTAT_CHAIN_END)
			restart_rxdma = 1;
		glc_add_rxbuf_dma(sc, i);
		if (restart_rxdma) {
			error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
			    sc->sc_rxsoft[i].rxs_desc, 0);
			if (error != 0)
				device_printf(sc->sc_self,
				    "lv1_net_start_rx_dma error: %d\n", error);
		}
	}
}

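/*
 * Reclaim completed TX descriptors, then restart the DMA engine if it
 * stopped early because of an error or a chain end.
 */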
static void
glc_txintr(struct glc_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct glc_txsoft *txs;
	int progress = 0, kickstart = 0, error;

	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat
		    & GELIC_DESCR_OWNED)
			break;

		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);
		sc->bsy_txdma_slots -= txs->txs_ndescs;

		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

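		/*
		 * The high four bits of cmd_stat carry the descriptor's
		 * DMA completion status; nonzero appears to mean the
		 * transmit failed, so stop DMA and arrange a restart below.
		 */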
		if ((sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat & 0xf0000000)
		    != 0) {
			lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
			kickstart = 1;
			ifp->if_oerrors++;
		}

		if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat &
		    GELIC_CMDSTAT_CHAIN_END)
			kickstart = 1;

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
		ifp->if_opackets++;
		progress = 1;
	}

	if (txs != NULL)
		sc->first_used_txdma_slot = txs->txs_firstdesc;
	else
		sc->first_used_txdma_slot = -1;

	if (kickstart && txs != NULL) {
		error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev,
		    sc->sc_txdmadesc_phys +
		    txs->txs_firstdesc*sizeof(struct glc_dmadesc), 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_tx_dma error: %d\n", error);
	}

	if (progress) {
		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			glc_start_locked(ifp);
	}
}

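/*
 * Interrupt filter: atomically latch the status word the hypervisor
 * wrote to sc_hwirq_status and leave the real work to glc_intr().
 */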
static int
glc_intr_filter(void *xsc)
{
	struct glc_softc *sc = xsc;

	powerpc_sync();
	atomic_set_64(&sc->sc_interrupt_status, *sc->sc_hwirq_status);
	return (FILTER_SCHEDULE_THREAD);
}

static void
glc_intr(void *xsc)
{
	struct glc_softc *sc = xsc;
	uint64_t status, linkstat, junk;

	mtx_lock(&sc->sc_mtx);

	status = atomic_readandclear_64(&sc->sc_interrupt_status);

	if (status == 0) {
		mtx_unlock(&sc->sc_mtx);
		return;
	}

	if (status & (GELIC_INT_RXDONE | GELIC_INT_RXFRAME))
		glc_rxintr(sc);

	if (status & (GELIC_INT_TXDONE | GELIC_INT_TX_CHAIN_END))
		glc_txintr(sc);

	if (status & GELIC_INT_PHY) {
		lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
		    GELIC_VLAN_TX_ETHERNET, 0, 0, &linkstat, &junk);

		linkstat = (linkstat & GELIC_LINK_UP) ?
		    LINK_STATE_UP : LINK_STATE_DOWN;
		if (linkstat != sc->sc_ifp->if_link_state)
			if_link_state_change(sc->sc_ifp, linkstat);
	}

	mtx_unlock(&sc->sc_mtx);
}

static void
glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct glc_softc *sc = ifp->if_softc;
	uint64_t status, junk;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
	    GELIC_VLAN_TX_ETHERNET, 0, 0, &status, &junk);

	if (status & GELIC_LINK_UP)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (status & GELIC_SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else if (status & GELIC_SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (status & GELIC_SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;

	if (status & GELIC_FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

static int
glc_media_change(struct ifnet *ifp)
{
	struct glc_softc *sc = ifp->if_softc;
	uint64_t mode, junk;
	int result;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
	case IFM_AUTO:
		mode = GELIC_AUTO_NEG;
		break;
	case IFM_10_T:
		mode = GELIC_SPEED_10;
		break;
	case IFM_100_TX:
		mode = GELIC_SPEED_100;
		break;
	case IFM_1000_T:
		mode = GELIC_SPEED_1000 | GELIC_FULL_DUPLEX;
		break;
	default:
		return (EINVAL);
	}

	if (IFM_OPTIONS(sc->sc_media.ifm_media) & IFM_FDX)
		mode |= GELIC_FULL_DUPLEX;

	result = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_SET_LINK_MODE,
	    GELIC_VLAN_TX_ETHERNET, mode, 0, &junk, &junk);

	return (result ? EIO : 0);
}
939