/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_dl.h>

#include <machine/pio.h>
#include <machine/bus.h>
#include <machine/platform.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "ps3bus.h"
#include "ps3-hvcall.h"
#include "if_glcreg.h"

static int	glc_probe(device_t);
static int	glc_attach(device_t);
static void	glc_init(void *xsc);
static void	glc_start(if_t ifp);
static int	glc_ioctl(if_t ifp, u_long cmd, caddr_t data);
static void	glc_set_multicast(struct glc_softc *sc);
static int	glc_add_rxbuf(struct glc_softc *sc, int idx);
static int	glc_add_rxbuf_dma(struct glc_softc *sc, int idx);
static int	glc_encap(struct glc_softc *sc, struct mbuf **m_head,
		    bus_addr_t *pktdesc);
static int	glc_intr_filter(void *xsc);
static void	glc_intr(void *xsc);
static void	glc_tick(void *xsc);
static void	glc_media_status(if_t ifp, struct ifmediareq *ifmr);
static int	glc_media_change(if_t ifp);

static MALLOC_DEFINE(M_GLC, "gelic", "PS3 GELIC ethernet");

static device_method_t glc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		glc_probe),
	DEVMETHOD(device_attach,	glc_attach),
	{ 0, 0 }
};

static driver_t glc_driver = {
	"glc",
	glc_methods,
	sizeof(struct glc_softc)
};

DRIVER_MODULE(glc, ps3bus, glc_driver, 0, 0);

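/*
 * Probe: match the GELIC network device on the PS3 system bus.
 */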
static int
glc_probe(device_t dev)
{

	if (ps3bus_get_bustype(dev) != PS3_BUSTYPE_SYSBUS ||
	    ps3bus_get_devtype(dev) != PS3_DEVTYPE_GELIC)
		return (ENXIO);

	device_set_desc(dev, "PlayStation 3 GELIC Network Controller");
	return (BUS_PROBE_SPECIFIC);
}

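/*
 * bus_dmamap_load() callback: record the bus address of a mapping
 * that is known to occupy a single DMA segment.
 */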
static void
glc_getphys(void *xaddr, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;

	*(bus_addr_t *)xaddr = segs[0].ds_addr;
}

static int
glc_attach(device_t dev)
{
	struct glc_softc *sc;
	struct glc_txsoft *txs;
	uint64_t mac64, val, junk;
	int i, err;

	sc = device_get_softc(dev);

	sc->sc_bus = ps3bus_get_bus(dev);
	sc->sc_dev = ps3bus_get_device(dev);
	sc->sc_self = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	sc->next_txdma_slot = 0;
	sc->bsy_txdma_slots = 0;
	sc->sc_next_rxdma_slot = 0;
	sc->first_used_txdma_slot = -1;

	/*
	 * Shut down existing tasks.
	 */

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

	sc->sc_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->sc_ifp, sc);

	/*
	 * Get the MAC address and VLAN IDs.
	 */

	lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_MAC_ADDRESS,
	    0, 0, 0, &mac64, &junk);
	memcpy(sc->sc_enaddr, &((uint8_t *)&mac64)[2], sizeof(sc->sc_enaddr));
	sc->sc_tx_vlan = sc->sc_rx_vlan = -1;
	err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
	    GELIC_VLAN_TX_ETHERNET, 0, 0, &val, &junk);
	if (err == 0)
		sc->sc_tx_vlan = val;
	err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
	    GELIC_VLAN_RX_ETHERNET, 0, 0, &val, &junk);
	if (err == 0)
		sc->sc_rx_vlan = val;

	/*
	 * Set up interrupt handler
	 */
	sc->sc_irqid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "Could not allocate IRQ!\n");
		if_free(sc->sc_ifp);
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	bus_setup_intr(dev, sc->sc_irq,
	    INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
	    glc_intr_filter, glc_intr, sc, &sc->sc_irqctx);
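	/*
	 * Allocate a 64-bit status word and hand its physical address to
	 * the hypervisor, which posts the raw interrupt status there for
	 * glc_intr_filter() to read.
	 */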
	sc->sc_hwirq_status = (uint64_t *)contigmalloc(8, M_GLC, M_ZERO, 0,
	    BUS_SPACE_MAXADDR_32BIT, 8, PAGE_SIZE);
	lv1_net_set_interrupt_status_indicator(sc->sc_bus, sc->sc_dev,
	    vtophys(sc->sc_hwirq_status), 0);
	lv1_net_set_interrupt_mask(sc->sc_bus, sc->sc_dev,
	    GELIC_INT_RXDONE | GELIC_INT_RXFRAME | GELIC_INT_PHY |
	    GELIC_INT_TX_CHAIN_END, 0);

	/*
	 * Set up DMA.
	 */

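	/*
	 * One tag backs both 128-entry descriptor rings (32-byte aligned,
	 * coherent); separate tags back the RX buffers and the
	 * scatter/gather TX mbuf chains (at most 16 segments per packet).
	 */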
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 32, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    129*sizeof(struct glc_dmadesc), 1, 128*sizeof(struct glc_dmadesc),
	    0, NULL, NULL, &sc->sc_dmadesc_tag);

	err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_txdmadesc,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_txdmadesc_map);
	err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
	    sc->sc_txdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
	    &sc->sc_txdmadesc_phys, 0);
	err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_rxdmadesc,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_rxdmadesc_map);
	err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    sc->sc_rxdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
	    &sc->sc_rxdmadesc_phys, 0);

	err = bus_dma_tag_create(bus_get_dma_tag(dev), 128, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_rxdma_tag);
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 16, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_txdma_tag);

	/* init transmit descriptors */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/* create TX DMA maps */
	err = ENOMEM;
	for (i = 0; i < GLC_MAX_TX_PACKETS; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		err = bus_dmamap_create(sc->sc_txdma_tag, 0, &txs->txs_dmamap);
		if (err) {
			device_printf(dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, err);
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
		err = bus_dmamap_create(sc->sc_rxdma_tag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (err) {
			device_printf(dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, err);
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Attach to network stack
	 */

	if_initname(sc->sc_ifp, device_get_name(dev), device_get_unit(dev));
	if_setmtu(sc->sc_ifp, ETHERMTU);
	if_setflags(sc->sc_ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_sethwassist(sc->sc_ifp, CSUM_TCP | CSUM_UDP);
	if_setcapabilities(sc->sc_ifp, IFCAP_HWCSUM | IFCAP_RXCSUM);
	if_setcapenable(sc->sc_ifp, IFCAP_HWCSUM | IFCAP_RXCSUM);
	if_setstartfn(sc->sc_ifp, glc_start);
	if_setioctlfn(sc->sc_ifp, glc_ioctl);
	if_setinitfn(sc->sc_ifp, glc_init);

	ifmedia_init(&sc->sc_media, IFM_IMASK, glc_media_change,
	    glc_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_setsendqlen(sc->sc_ifp, GLC_MAX_TX_PACKETS);
	if_setsendqready(sc->sc_ifp);

	ether_ifattach(sc->sc_ifp, sc->sc_enaddr);
	if_sethwassist(sc->sc_ifp, 0);

	return (0);
}

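/*
 * Reset the chip to a running state with the softc lock held: refill
 * the RX ring, reclaim any dirty TX descriptors, and restart RX DMA.
 */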
static void
glc_init_locked(struct glc_softc *sc)
{
	int i, error;
	struct glc_rxsoft *rxs;
	struct glc_txsoft *txs;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

	glc_set_multicast(sc);

	for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
		rxs = &sc->sc_rxsoft[i];
		rxs->rxs_desc_slot = i;

		if (rxs->rxs_mbuf == NULL) {
			glc_add_rxbuf(sc, i);

			if (rxs->rxs_mbuf == NULL) {
				rxs->rxs_desc_slot = -1;
				break;
			}
		}

		glc_add_rxbuf_dma(sc, i);
		bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* Clear TX dirty queue */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);

		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
	sc->first_used_txdma_slot = -1;
	sc->bsy_txdma_slots = 0;

	error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
	    sc->sc_rxsoft[0].rxs_desc, 0);
	if (error != 0)
		device_printf(sc->sc_self,
		    "lv1_net_start_rx_dma error: %d\n", error);

	if_setdrvflagbits(sc->sc_ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_OACTIVE);
	sc->sc_ifpflags = if_getflags(sc->sc_ifp);

	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
}

static void
glc_stop(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);
}

static void
glc_init(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_lock(&sc->sc_mtx);
	glc_init_locked(sc);
	mtx_unlock(&sc->sc_mtx);
}

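/*
 * Once-a-second callout, run with the softc lock held: re-kick the RX
 * engine and count down the TX watchdog, reinitializing the chip if a
 * transmit has been pending too long.
 */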
static void
glc_tick(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/*
	 * XXX: Sometimes the RX queue gets stuck. Poke it periodically until
	 * we figure out why. This will fail harmlessly if the RX queue is
	 * already running.
	 */
	lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
	    sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) {
		callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
		return;
	}

	/* Problems */
	device_printf(sc->sc_self, "device timeout\n");

	glc_init_locked(sc);
}

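/*
 * Drain the interface send queue onto the TX descriptor ring.  If the
 * ring was empty, the DMA engine must be kicked explicitly on the
 * first packet queued; otherwise glc_encap() chains onto the tail.
 */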
static void
glc_start_locked(if_t ifp)
{
	struct glc_softc *sc = if_getsoftc(ifp);
	bus_addr_t first, pktdesc;
	int kickstart = 0;
	int error;
	struct mbuf *mb_head;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	first = 0;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (STAILQ_EMPTY(&sc->sc_txdirtyq))
		kickstart = 1;

	while (!if_sendq_empty(ifp)) {
		mb_head = if_dequeue(ifp);

		if (mb_head == NULL)
			break;

		/*
		 * Stop short of a completely full ring, leaving a few
		 * of the 128 descriptor slots as headroom.
		 */
		if (sc->bsy_txdma_slots > 125) {
			/* Put the packet back and stop */
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if_sendq_prepend(ifp, mb_head);
			break;
		}

		BPF_MTAP(ifp, mb_head);

		if (sc->sc_tx_vlan >= 0)
			mb_head = ether_vlanencap(mb_head, sc->sc_tx_vlan);

		if (glc_encap(sc, &mb_head, &pktdesc)) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		if (first == 0)
			first = pktdesc;
	}

	if (kickstart && first != 0) {
		error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev, first, 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_tx_dma error: %d\n", error);
		sc->sc_wdog_timer = 5;
	}
}

static void
glc_start(if_t ifp)
{
	struct glc_softc *sc = if_getsoftc(ifp);

	mtx_lock(&sc->sc_mtx);
	glc_start_locked(ifp);
	mtx_unlock(&sc->sc_mtx);
}

static int
glc_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct glc_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	int err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		mtx_lock(&sc->sc_mtx);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			   ((if_getflags(ifp) ^ sc->sc_ifpflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				glc_set_multicast(sc);
			else
				glc_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			glc_stop(sc);
		sc->sc_ifpflags = if_getflags(ifp);
		mtx_unlock(&sc->sc_mtx);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		mtx_lock(&sc->sc_mtx);
		glc_set_multicast(sc);
		mtx_unlock(&sc->sc_mtx);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}

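/*
 * if_foreach_llmaddr() callback: program one link-layer multicast
 * address into the hypervisor's filter, or bail out to the catch-all
 * entry when the 32-slot filter fills up.
 */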
static u_int
glc_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct glc_softc *sc = arg;
	uint64_t addr;

	/*
	 * Filter can only hold 32 addresses, so fall back to
	 * the IFF_ALLMULTI case if we have too many. +1 is for
	 * broadcast.
	 */
	if (cnt + 1 == 32)
		return (0);

	addr = 0;
	memcpy(&((uint8_t *)(&addr))[2], LLADDR(sdl), ETHER_ADDR_LEN);
	lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, addr, 0);

	return (1);
}

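/*
 * Reprogram the multicast filter: flush it, re-add broadcast, then
 * either enable the catch-all entry (for IFF_ALLMULTI or filter
 * overflow) or add each subscribed group address individually.
 */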
static void
glc_set_multicast(struct glc_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	int naddrs;

	/* Clear multicast filter */
	lv1_net_remove_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);

	/* Add broadcast */
	lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
	    0xffffffffffffL, 0);

	if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
		lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);
	} else {
		naddrs = if_foreach_llmaddr(ifp, glc_add_maddr, sc);
		if (naddrs + 1 == 32)
			lv1_net_add_multicast_address(sc->sc_bus,
			    sc->sc_dev, 0, 1);
	}
}

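/*
 * Attach a fresh mbuf cluster to RX slot idx, unloading and replacing
 * any buffer already mapped there.
 */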
static int
glc_add_rxbuf(struct glc_softc *sc, int idx)
{
	struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rxdma_tag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rxdma_tag, rxs->rxs_dmamap, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_self,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->rxs_mbuf = m;
	rxs->segment = segs[0];

	bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	return (0);
}

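/*
 * Rewrite the RX DMA descriptor for slot idx, link it to the next
 * slot to keep the ring circular, and hand it back to the hardware.
 */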
static int
glc_add_rxbuf_dma(struct glc_softc *sc, int idx)
{
	struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];

	bzero(&sc->sc_rxdmadesc[idx], sizeof(sc->sc_rxdmadesc[idx]));
	sc->sc_rxdmadesc[idx].paddr = rxs->segment.ds_addr;
	sc->sc_rxdmadesc[idx].len = rxs->segment.ds_len;
	sc->sc_rxdmadesc[idx].next = sc->sc_rxdmadesc_phys +
	    ((idx + 1) % GLC_MAX_RX_PACKETS)*sizeof(sc->sc_rxdmadesc[idx]);
	sc->sc_rxdmadesc[idx].cmd_stat = GELIC_DESCR_OWNED;

	rxs->rxs_desc_slot = idx;
	rxs->rxs_desc = sc->sc_rxdmadesc_phys + idx*sizeof(struct glc_dmadesc);

	return (0);
}

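/*
 * Map a packet onto free TX descriptors: one descriptor per mbuf
 * segment (collapsing the chain if needed), each linked to the next
 * by physical address.  The previous chain's tail is pointed at the
 * new descriptors only after they are fully built, so the hardware
 * never follows a link into a half-written packet.
 */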
static int
glc_encap(struct glc_softc *sc, struct mbuf **m_head, bus_addr_t *pktdesc)
{
	bus_dma_segment_t segs[16];
	struct glc_txsoft *txs;
	struct mbuf *m;
	bus_addr_t firstslotphys;
	int i, idx, nsegs, nsegs_max;
	int err = 0;

	/* Max number of segments is the number of free DMA slots */
	nsegs_max = 128 - sc->bsy_txdma_slots;

	if (nsegs_max > 16 || sc->first_used_txdma_slot < 0)
		nsegs_max = 16;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	nsegs = 0;
	for (m = *m_head; m != NULL; m = m->m_next)
		nsegs++;

	if (nsegs > nsegs_max) {
		m = m_collapse(*m_head, M_NOWAIT, nsegs_max);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	err = bus_dmamap_load_mbuf_sg(sc->sc_txdma_tag, txs->txs_dmamap,
	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (err);
	}

	KASSERT(nsegs <= 128 - sc->bsy_txdma_slots,
	    ("GLC: Mapped too many (%d) DMA segments with %d available",
	    nsegs, 128 - sc->bsy_txdma_slots));

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->next_txdma_slot;

	idx = txs->txs_firstdesc;
	firstslotphys = sc->sc_txdmadesc_phys +
	    txs->txs_firstdesc*sizeof(struct glc_dmadesc);

	for (i = 0; i < nsegs; i++) {
		bzero(&sc->sc_txdmadesc[idx], sizeof(sc->sc_txdmadesc[idx]));
		sc->sc_txdmadesc[idx].paddr = segs[i].ds_addr;
		sc->sc_txdmadesc[idx].len = segs[i].ds_len;
		sc->sc_txdmadesc[idx].next = sc->sc_txdmadesc_phys +
		    ((idx + 1) % GLC_MAX_TX_PACKETS)*sizeof(struct glc_dmadesc);
		sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_NOIPSEC;

		if (i + 1 == nsegs) {
			txs->txs_lastdesc = idx;
			sc->sc_txdmadesc[idx].next = 0;
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_LAST;
		}

		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_TCP;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_UDP;
		sc->sc_txdmadesc[idx].cmd_stat |= GELIC_DESCR_OWNED;

		idx = (idx + 1) % GLC_MAX_TX_PACKETS;
	}
	sc->next_txdma_slot = idx;
	sc->bsy_txdma_slots += nsegs;
	if (txs->txs_firstdesc != 0)
		idx = txs->txs_firstdesc - 1;
	else
		idx = GLC_MAX_TX_PACKETS - 1;

	if (sc->first_used_txdma_slot < 0)
		sc->first_used_txdma_slot = txs->txs_firstdesc;

	bus_dmamap_sync(sc->sc_txdma_tag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);
	sc->sc_txdmadesc[idx].next = firstslotphys;

	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;
	*pktdesc = firstslotphys;

	return (0);
}

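/*
 * Reap RX descriptors the hardware has released (GELIC_DESCR_OWNED
 * cleared), feed the frames up the stack, and restart RX DMA if the
 * engine reported the end of its descriptor chain.
 */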
static void
glc_rxintr(struct glc_softc *sc)
{
	int i, restart_rxdma, error;
	struct mbuf *m;
	if_t ifp = sc->sc_ifp;

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    BUS_DMASYNC_POSTREAD);

	restart_rxdma = 0;
	while ((sc->sc_rxdmadesc[sc->sc_next_rxdma_slot].cmd_stat &
	    GELIC_DESCR_OWNED) == 0) {
		i = sc->sc_next_rxdma_slot;
		sc->sc_next_rxdma_slot++;
		if (sc->sc_next_rxdma_slot >= GLC_MAX_RX_PACKETS)
			sc->sc_next_rxdma_slot = 0;

		if (sc->sc_rxdmadesc[i].cmd_stat & GELIC_CMDSTAT_CHAIN_END)
			restart_rxdma = 1;

		if (sc->sc_rxdmadesc[i].rxerror & GELIC_RXERRORS) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			goto requeue;
		}

		m = sc->sc_rxsoft[i].rxs_mbuf;
		if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_IPCSUM) {
			m->m_pkthdr.csum_flags |=
			    CSUM_IP_CHECKED | CSUM_IP_VALID;
		}
		if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_TCPUDPCSUM) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		if (glc_add_rxbuf(sc, i)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			goto requeue;
		}

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		m->m_pkthdr.rcvif = ifp;
		m->m_len = sc->sc_rxdmadesc[i].valid_size;
		m->m_pkthdr.len = m->m_len;

		/*
		 * Remove VLAN tag. Even on early firmwares that do not allow
		 * multiple VLANs, the VLAN tag is still in place here.
		 */
		m_adj(m, 2);

		mtx_unlock(&sc->sc_mtx);
		if_input(ifp, m);
		mtx_lock(&sc->sc_mtx);

requeue:
		glc_add_rxbuf_dma(sc, i);
	}

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    BUS_DMASYNC_PREWRITE);

	if (restart_rxdma) {
		error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
		    sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_rx_dma error: %d\n", error);
	}
}

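/*
 * Reap completed TX descriptors, count errors, and restart the TX
 * engine if it stopped on an error or at the end of the chain.
 */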
static void
glc_txintr(struct glc_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	struct glc_txsoft *txs;
	int progress = 0, kickstart = 0, error;

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
	    BUS_DMASYNC_POSTREAD);

	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat
		    & GELIC_DESCR_OWNED)
			break;

		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);
		sc->bsy_txdma_slots -= txs->txs_ndescs;

		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		if ((sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat & 0xf0000000)
		    != 0) {
			lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
			kickstart = 1;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		}

		if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat &
		    GELIC_CMDSTAT_CHAIN_END)
			kickstart = 1;

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		progress = 1;
	}

	if (txs != NULL)
		sc->first_used_txdma_slot = txs->txs_firstdesc;
	else
		sc->first_used_txdma_slot = -1;

	if (kickstart || txs != NULL) {
		/* Speculatively (or necessarily) start the TX queue again */
		error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev,
		    sc->sc_txdmadesc_phys +
		    ((txs == NULL) ? 0 : txs->txs_firstdesc)*
		     sizeof(struct glc_dmadesc), 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_tx_dma error: %d\n", error);
	}

	if (progress) {
		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
		    !if_sendq_empty(ifp))
			glc_start_locked(ifp);
	}
}

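/*
 * Interrupt filter: accumulate (OR) the hypervisor-posted status word
 * into sc_interrupt_status, to be claimed by the threaded handler via
 * atomic_readandclear_64().
 */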
static int
glc_intr_filter(void *xsc)
{
	struct glc_softc *sc = xsc;

	powerpc_sync();
	atomic_set_64(&sc->sc_interrupt_status, *sc->sc_hwirq_status);
	return (FILTER_SCHEDULE_THREAD);
}

static void
glc_intr(void *xsc)
{
	struct glc_softc *sc = xsc;
	uint64_t status, linkstat, junk;

	mtx_lock(&sc->sc_mtx);

	status = atomic_readandclear_64(&sc->sc_interrupt_status);

	if (status == 0) {
		mtx_unlock(&sc->sc_mtx);
		return;
	}

	if (status & (GELIC_INT_RXDONE | GELIC_INT_RXFRAME))
		glc_rxintr(sc);

	if (status & (GELIC_INT_TXDONE | GELIC_INT_TX_CHAIN_END))
		glc_txintr(sc);

	if (status & GELIC_INT_PHY) {
		lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
		    GELIC_VLAN_TX_ETHERNET, 0, 0, &linkstat, &junk);

		linkstat = (linkstat & GELIC_LINK_UP) ?
		    LINK_STATE_UP : LINK_STATE_DOWN;
		if_link_state_change(sc->sc_ifp, linkstat);
	}

	mtx_unlock(&sc->sc_mtx);
}

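/*
 * ifmedia handlers: link state and speed/duplex are queried and set
 * through the GELIC_GET_LINK_STATUS/GELIC_SET_LINK_MODE hypervisor
 * controls rather than a MII PHY.
 */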
static void
glc_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct glc_softc *sc = if_getsoftc(ifp);
	uint64_t status, junk;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
	    GELIC_VLAN_TX_ETHERNET, 0, 0, &status, &junk);

	if (status & GELIC_LINK_UP)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (status & GELIC_SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else if (status & GELIC_SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (status & GELIC_SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;

	if (status & GELIC_FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

static int
glc_media_change(if_t ifp)
{
	struct glc_softc *sc = if_getsoftc(ifp);
	uint64_t mode, junk;
	int result;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
	case IFM_AUTO:
		mode = GELIC_AUTO_NEG;
		break;
	case IFM_10_T:
		mode = GELIC_SPEED_10;
		break;
	case IFM_100_TX:
		mode = GELIC_SPEED_100;
		break;
	case IFM_1000_T:
		mode = GELIC_SPEED_1000 | GELIC_FULL_DUPLEX;
		break;
	default:
		return (EINVAL);
	}

	if (IFM_OPTIONS(sc->sc_media.ifm_media) & IFM_FDX)
		mode |= GELIC_FULL_DUPLEX;

	result = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_SET_LINK_MODE,
	    GELIC_VLAN_TX_ETHERNET, mode, 0, &junk, &junk);

	return (result ? EIO : 0);
}
955