/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/buf_ring.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_rx.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_tx.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_gmac.h>

static int
qcom_ess_edma_probe(device_t dev)
{

        if (! ofw_bus_status_okay(dev))
                return (ENXIO);

        if (ofw_bus_is_compatible(dev, "qcom,ess-edma") == 0)
                return (ENXIO);

        device_set_desc(dev,
            "Qualcomm Atheros IPQ4018/IPQ4019 Ethernet driver");
        return (0);
}

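/*
 * Tear down the interrupt handler and release the IRQ resource
 * for the given interrupt slot, if they were allocated.
 */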
static int
qcom_ess_edma_release_intr(struct qcom_ess_edma_softc *sc,
    struct qcom_ess_edma_intr *intr)
{

        if (intr->irq_res == NULL)
                return (0);

        if (intr->irq_intr != NULL)
                bus_teardown_intr(sc->sc_dev, intr->irq_res, intr->irq_intr);
        if (intr->irq_res != NULL)
                bus_release_resource(sc->sc_dev, SYS_RES_IRQ, intr->irq_rid,
                    intr->irq_res);

        return (0);
}

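/*
 * Drain the given TX queue's staging buf_ring into the hardware
 * transmit ring, then push a single descriptor ring update to the
 * hardware once everything has been queued.
 *
 * Must be called with the TX ring lock held.
 */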
static void
qcom_ess_edma_tx_queue_xmit(struct qcom_ess_edma_softc *sc, int queue_id)
{
        struct qcom_ess_edma_tx_state *txs = &sc->sc_tx_state[queue_id];
        int n = 0;
        int ret;

        QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_TASK,
            "%s: called; TX queue %d\n", __func__, queue_id);

        EDMA_RING_LOCK_ASSERT(&sc->sc_tx_ring[queue_id]);

        sc->sc_tx_ring[queue_id].stats.num_tx_xmit_defer++;

        (void) atomic_cmpset_int(&txs->enqueue_is_running, 1, 0);

        /* Don't do any work if the ring is empty */
        if (buf_ring_empty(txs->br))
                return;

        /*
         * The ring isn't empty, so dequeue frames and hand
         * them to the hardware; defer updating the
         * transmit ring pointer until we're done.
         */
        while (! buf_ring_empty(txs->br)) {
                if_t ifp;
                struct qcom_ess_edma_gmac *gmac;
                struct mbuf *m;

                m = buf_ring_peek_clear_sc(txs->br);
                if (m == NULL)
                        break;

                ifp = m->m_pkthdr.rcvif;
                gmac = if_getsoftc(ifp);

                /*
                 * The only way we'll know if we have space is to
                 * try and transmit it.
                 */
                ret = qcom_ess_edma_tx_ring_frame(sc, queue_id, &m,
                    gmac->port_mask, gmac->vlan_id);
                if (ret == 0) {
                        if_inc_counter(gmac->ifp, IFCOUNTER_OPACKETS, 1);
                        buf_ring_advance_sc(txs->br);
                } else {
                        /* Put whatever we tried to transmit back */
                        if_inc_counter(gmac->ifp, IFCOUNTER_OERRORS, 1);
                        buf_ring_putback_sc(txs->br, m);
                        break;
                }
                n++;
        }

        /*
         * Only push the updated descriptor ring state to the hardware
         * if we actually queued something.
         */
        if (n != 0)
                (void) qcom_ess_edma_tx_ring_frame_update(sc, queue_id);
}

/*
 * Enqueued when a deferred TX needs to happen.
 */
static void
qcom_ess_edma_tx_queue_xmit_task(void *arg, int npending)
{
        struct qcom_ess_edma_tx_state *txs = arg;
        struct qcom_ess_edma_softc *sc = txs->sc;

        QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
            "%s: called; TX queue %d\n", __func__, txs->queue_id);

        EDMA_RING_LOCK(&sc->sc_tx_ring[txs->queue_id]);

        sc->sc_tx_ring[txs->queue_id].stats.num_tx_xmit_task++;
        qcom_ess_edma_tx_queue_xmit(sc, txs->queue_id);

        EDMA_RING_UNLOCK(&sc->sc_tx_ring[txs->queue_id]);
}

/*
 * Enqueued when a TX completion interrupt occurs.
 */
static void
qcom_ess_edma_tx_queue_complete_task(void *arg, int npending)
{
        struct qcom_ess_edma_tx_state *txs = arg;
        struct qcom_ess_edma_softc *sc = txs->sc;

        /* Transmit queue */
        QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
            "%s: called; TX queue %d\n", __func__, txs->queue_id);

        EDMA_RING_LOCK(&sc->sc_tx_ring[txs->queue_id]);

        /*
         * Complete/free TX mbufs.
         */
        (void) qcom_ess_edma_tx_ring_complete(sc, txs->queue_id);

        /*
         * ACK the interrupt.
         */
        (void) qcom_ess_edma_hw_intr_tx_ack(sc, txs->queue_id);

        /*
         * Re-enable the interrupt.
         */
        (void) qcom_ess_edma_hw_intr_tx_intr_set_enable(sc, txs->queue_id,
            true);

        /*
         * Do any pending TX work if there are any buffers in the ring.
         */
        if (! buf_ring_empty(txs->br))
                qcom_ess_edma_tx_queue_xmit(sc, txs->queue_id);

        EDMA_RING_UNLOCK(&sc->sc_tx_ring[txs->queue_id]);
}

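/*
 * Set up the per-TX-queue software state: the completion taskqueue,
 * the completion/xmit tasks and the staging buf_ring.
 */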
static int
qcom_ess_edma_setup_tx_state(struct qcom_ess_edma_softc *sc, int txq, int cpu)
{
        struct qcom_ess_edma_tx_state *txs;
        struct qcom_ess_edma_desc_ring *ring;
        cpuset_t mask;

        txs = &sc->sc_tx_state[txq];
        ring = &sc->sc_tx_ring[txq];

        snprintf(txs->label, QCOM_ESS_EDMA_LABEL_SZ - 1, "txq%d_compl", txq);

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);

        txs->queue_id = txq;
        txs->sc = sc;
        txs->completion_tq = taskqueue_create_fast(txs->label, M_NOWAIT,
            taskqueue_thread_enqueue, &txs->completion_tq);
#if 0
        taskqueue_start_threads_cpuset(&txs->completion_tq, 1, PI_NET,
            &mask, "%s", txs->label);
#else
        taskqueue_start_threads(&txs->completion_tq, 1, PI_NET,
            "%s", txs->label);
#endif

        TASK_INIT(&txs->completion_task, 0,
            qcom_ess_edma_tx_queue_complete_task, txs);
        TASK_INIT(&txs->xmit_task, 0,
            qcom_ess_edma_tx_queue_xmit_task, txs);

        txs->br = buf_ring_alloc(EDMA_TX_BUFRING_SIZE, M_DEVBUF, M_WAITOK,
            &ring->mtx);

        return (0);
}

/*
 * Free the transmit ring state.
 *
 * This assumes that the taskqueues have been drained and DMA has
 * stopped - all we're doing here is freeing the allocated resources.
 */
static int
qcom_ess_edma_free_tx_state(struct qcom_ess_edma_softc *sc, int txq)
{
        struct qcom_ess_edma_tx_state *txs;

        txs = &sc->sc_tx_state[txq];

        taskqueue_free(txs->completion_tq);

        while (! buf_ring_empty(txs->br)) {
                struct mbuf *m;

                m = buf_ring_dequeue_sc(txs->br);
                m_freem(m);
        }

        buf_ring_free(txs->br, M_DEVBUF);

        return (0);
}

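/*
 * Enqueued when an RX completion interrupt occurs.
 */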
static void
qcom_ess_edma_rx_queue_complete_task(void *arg, int npending)
{
        struct qcom_ess_edma_rx_state *rxs = arg;
        struct qcom_ess_edma_softc *sc = rxs->sc;
        struct mbufq mq;

        mbufq_init(&mq, EDMA_RX_RING_SIZE);

        /* Receive queue */
        QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
            "%s: called; RX queue %d\n",
            __func__, rxs->queue_id);

        EDMA_RING_LOCK(&sc->sc_rx_ring[rxs->queue_id]);

        /*
         * Do receive work, get completed mbufs.
         */
        (void) qcom_ess_edma_rx_ring_complete(sc, rxs->queue_id, &mq);

        /*
         * ACK the interrupt.
         */
        (void) qcom_ess_edma_hw_intr_rx_ack(sc, rxs->queue_id);

        /*
         * Re-enable interrupt for this ring.
         */
        (void) qcom_ess_edma_hw_intr_rx_intr_set_enable(sc, rxs->queue_id,
            true);

        EDMA_RING_UNLOCK(&sc->sc_rx_ring[rxs->queue_id]);

        /* Push frames into networking stack */
        (void) qcom_ess_edma_gmac_receive_frames(sc, rxs->queue_id, &mq);
}

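/*
 * Set up the per-RX-queue software state: the completion taskqueue
 * and completion task.
 */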
static int
qcom_ess_edma_setup_rx_state(struct qcom_ess_edma_softc *sc, int rxq, int cpu)
{
        struct qcom_ess_edma_rx_state *rxs;
        cpuset_t mask;

        rxs = &sc->sc_rx_state[rxq];

        snprintf(rxs->label, QCOM_ESS_EDMA_LABEL_SZ - 1, "rxq%d_compl", rxq);

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);

        rxs->queue_id = rxq;
        rxs->sc = sc;
        rxs->completion_tq = taskqueue_create_fast(rxs->label, M_NOWAIT,
            taskqueue_thread_enqueue, &rxs->completion_tq);
#if 0
        taskqueue_start_threads_cpuset(&rxs->completion_tq, 1, PI_NET,
            &mask, "%s", rxs->label);
#else
        taskqueue_start_threads(&rxs->completion_tq, 1, PI_NET,
            "%s", rxs->label);
#endif

        TASK_INIT(&rxs->completion_task, 0,
            qcom_ess_edma_rx_queue_complete_task, rxs);
        return (0);
}

/*
 * Free the receive ring state.
 *
 * This assumes that the taskqueues have been drained and DMA has
 * stopped - all we're doing here is freeing the allocated resources.
 */
static int
qcom_ess_edma_free_rx_state(struct qcom_ess_edma_softc *sc, int rxq)
{
        struct qcom_ess_edma_rx_state *rxs;

        rxs = &sc->sc_rx_state[rxq];

        taskqueue_free(rxs->completion_tq);

        return (0);
}

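/*
 * Detach the driver instance: release the interrupts, free the
 * TX/RX ring state and descriptor rings, then tear down the DMA tag,
 * register mapping and driver lock.
 */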
static int
qcom_ess_edma_detach(device_t dev)
{
        struct qcom_ess_edma_softc *sc = device_get_softc(dev);
        int i;

        for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
                (void) qcom_ess_edma_release_intr(sc, &sc->sc_tx_irq[i]);
        }
        for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
                (void) qcom_ess_edma_release_intr(sc, &sc->sc_rx_irq[i]);
        }

        for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
                (void) qcom_ess_edma_free_tx_state(sc, i);
                (void) qcom_ess_edma_tx_ring_clean(sc, &sc->sc_tx_ring[i]);
                (void) qcom_ess_edma_desc_ring_free(sc, &sc->sc_tx_ring[i]);
        }

        for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
                (void) qcom_ess_edma_free_rx_state(sc, i);
                (void) qcom_ess_edma_rx_ring_clean(sc, &sc->sc_rx_ring[i]);
                (void) qcom_ess_edma_desc_ring_free(sc, &sc->sc_rx_ring[i]);
        }

        if (sc->sc_dma_tag) {
                bus_dma_tag_destroy(sc->sc_dma_tag);
                sc->sc_dma_tag = NULL;
        }

        if (sc->sc_mem_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
                    sc->sc_mem_res);
        mtx_destroy(&sc->sc_mtx);

        return (0);
}

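/*
 * Interrupt filter routine.
 *
 * TX completion interrupts use resource IDs [0, QCOM_ESS_EDMA_NUM_TX_IRQS);
 * the RX completion interrupts follow.  Mask the interrupt for the given
 * ring and defer the actual completion work to that ring's taskqueue.
 */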
static int
qcom_ess_edma_filter(void *arg)
{
        struct qcom_ess_edma_intr *intr = arg;
        struct qcom_ess_edma_softc *sc = intr->sc;

        if (intr->irq_rid < QCOM_ESS_EDMA_NUM_TX_IRQS) {
                int tx_queue = intr->irq_rid;

                intr->stats.num_intr++;

                /*
                 * Disable the interrupt for this ring.
                 */
                (void) qcom_ess_edma_hw_intr_tx_intr_set_enable(sc, tx_queue,
                    false);

                /*
                 * Schedule taskqueue to run for this queue.
                 */
                taskqueue_enqueue(sc->sc_tx_state[tx_queue].completion_tq,
                    &sc->sc_tx_state[tx_queue].completion_task);

                return (FILTER_HANDLED);
        } else {
                int rx_queue = intr->irq_rid - QCOM_ESS_EDMA_NUM_TX_IRQS;

                intr->stats.num_intr++;

                /*
                 * Disable the interrupt for this ring.
                 */
                (void) qcom_ess_edma_hw_intr_rx_intr_set_enable(sc, rx_queue,
                    false);

                /*
                 * Schedule taskqueue to run for this queue.
                 */
                taskqueue_enqueue(sc->sc_rx_state[rx_queue].completion_tq,
                    &sc->sc_rx_state[rx_queue].completion_task);

                return (FILTER_HANDLED);
        }
}

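/*
 * Allocate and establish a single TX/RX completion interrupt,
 * optionally binding it to the given CPU.
 */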
static int
qcom_ess_edma_setup_intr(struct qcom_ess_edma_softc *sc,
    struct qcom_ess_edma_intr *intr, int rid, int cpu_id)
{

        QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
            "%s: setting up interrupt id %d\n", __func__, rid);
        intr->sc = sc;
        intr->irq_rid = rid;
        intr->irq_res = bus_alloc_resource_any(sc->sc_dev,
            SYS_RES_IRQ, &intr->irq_rid, RF_ACTIVE);
        if (intr->irq_res == NULL) {
                device_printf(sc->sc_dev,
                    "ERROR: couldn't allocate IRQ %d\n",
                    rid);
                return (ENXIO);
        }

        if ((bus_setup_intr(sc->sc_dev, intr->irq_res,
            INTR_TYPE_NET | INTR_MPSAFE,
            qcom_ess_edma_filter, NULL, intr,
            &intr->irq_intr))) {
                device_printf(sc->sc_dev,
                    "ERROR: unable to register interrupt handler for"
                    " IRQ %d\n", rid);
                return (ENXIO);
        }

        /* If requested, bind the interrupt to the given CPU. */
        if (cpu_id != -1) {
                if (intr_bind_irq(sc->sc_dev, intr->irq_res, cpu_id) != 0) {
                        device_printf(sc->sc_dev,
                            "ERROR: unable to bind IRQ %d to CPU %d\n",
                            rid, cpu_id);
                }
                /* Note: don't completely error out here */
        }

        return (0);
}

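/*
 * Sysctl handler; writing a non-zero value dumps the current TX/RX
 * ring indices and a selection of EDMA registers to the console.
 */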
static int
qcom_ess_edma_sysctl_dump_state(SYSCTL_HANDLER_ARGS)
{
        struct qcom_ess_edma_softc *sc = arg1;
        int val = 0;
        int error;
        int i;

        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
        if (val == 0)
                return (0);

        EDMA_LOCK(sc);
        for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
                device_printf(sc->sc_dev,
                    "RXQ[%d]: prod=%u, cons=%u, hw prod=%u, hw cons=%u,"
                    " REG_SW_CONS_IDX=0x%08x\n",
                    i,
                    sc->sc_rx_ring[i].next_to_fill,
                    sc->sc_rx_ring[i].next_to_clean,
                    EDMA_REG_READ(sc,
                        EDMA_REG_RFD_IDX_Q(i)) & EDMA_RFD_PROD_IDX_BITS,
                    qcom_ess_edma_hw_rfd_get_cons_index(sc, i),
                    EDMA_REG_READ(sc, EDMA_REG_RX_SW_CONS_IDX_Q(i)));
        }

        for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
                device_printf(sc->sc_dev,
                    "TXQ[%d]: prod=%u, cons=%u, hw prod=%u, hw cons=%u\n",
                    i,
                    sc->sc_tx_ring[i].next_to_fill,
                    sc->sc_tx_ring[i].next_to_clean,
                    (EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(i))
                        >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK,
                    EDMA_REG_READ(sc, EDMA_REG_TX_SW_CONS_IDX_Q(i)));
        }

        device_printf(sc->sc_dev, "EDMA_REG_TXQ_CTRL=0x%08x\n",
            EDMA_REG_READ(sc, EDMA_REG_TXQ_CTRL));
        device_printf(sc->sc_dev, "EDMA_REG_RXQ_CTRL=0x%08x\n",
            EDMA_REG_READ(sc, EDMA_REG_RXQ_CTRL));
        device_printf(sc->sc_dev, "EDMA_REG_RX_DESC0=0x%08x\n",
            EDMA_REG_READ(sc, EDMA_REG_RX_DESC0));
        device_printf(sc->sc_dev, "EDMA_REG_RX_DESC1=0x%08x\n",
            EDMA_REG_READ(sc, EDMA_REG_RX_DESC1));
        device_printf(sc->sc_dev, "EDMA_REG_RX_ISR=0x%08x\n",
            EDMA_REG_READ(sc, EDMA_REG_RX_ISR));
        device_printf(sc->sc_dev, "EDMA_REG_TX_ISR=0x%08x\n",
            EDMA_REG_READ(sc, EDMA_REG_TX_ISR));
        device_printf(sc->sc_dev, "EDMA_REG_MISC_ISR=0x%08x\n",
            EDMA_REG_READ(sc, EDMA_REG_MISC_ISR));
        device_printf(sc->sc_dev, "EDMA_REG_WOL_ISR=0x%08x\n",
            EDMA_REG_READ(sc, EDMA_REG_WOL_ISR));

        EDMA_UNLOCK(sc);

        return (0);
}

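/*
 * Sysctl handler; writing a non-zero value dumps the per-ring and
 * per-interrupt statistics to the console.
 */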
static int
qcom_ess_edma_sysctl_dump_stats(SYSCTL_HANDLER_ARGS)
{
        struct qcom_ess_edma_softc *sc = arg1;
        int val = 0;
        int error;
        int i;

        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
        if (val == 0)
                return (0);

        EDMA_LOCK(sc);
        for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
                device_printf(sc->sc_dev,
                    "RXQ[%d]: num_added=%llu, num_cleaned=%llu,"
                    " num_dropped=%llu, num_enqueue_full=%llu,"
                    " num_rx_no_gmac=%llu, tx_mapfail=%llu,"
                    " num_tx_maxfrags=%llu, num_rx_ok=%llu\n",
                    i,
                    sc->sc_rx_ring[i].stats.num_added,
                    sc->sc_rx_ring[i].stats.num_cleaned,
                    sc->sc_rx_ring[i].stats.num_dropped,
                    sc->sc_rx_ring[i].stats.num_enqueue_full,
                    sc->sc_rx_ring[i].stats.num_rx_no_gmac,
                    sc->sc_rx_ring[i].stats.num_tx_mapfail,
                    sc->sc_rx_ring[i].stats.num_tx_maxfrags,
                    sc->sc_rx_ring[i].stats.num_rx_ok);
        }

        for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
                device_printf(sc->sc_dev,
                    "TXQ[%d]: num_added=%llu, num_cleaned=%llu,"
                    " num_dropped=%llu, num_enqueue_full=%llu,"
                    " tx_mapfail=%llu, tx_complete=%llu, tx_xmit_defer=%llu,"
                    " tx_xmit_task=%llu, num_tx_maxfrags=%llu,"
                    " num_tx_ok=%llu\n",
                    i,
                    sc->sc_tx_ring[i].stats.num_added,
                    sc->sc_tx_ring[i].stats.num_cleaned,
                    sc->sc_tx_ring[i].stats.num_dropped,
                    sc->sc_tx_ring[i].stats.num_enqueue_full,
                    sc->sc_tx_ring[i].stats.num_tx_mapfail,
                    sc->sc_tx_ring[i].stats.num_tx_complete,
                    sc->sc_tx_ring[i].stats.num_tx_xmit_defer,
                    sc->sc_tx_ring[i].stats.num_tx_xmit_task,
                    sc->sc_tx_ring[i].stats.num_tx_maxfrags,
                    sc->sc_tx_ring[i].stats.num_tx_ok);
        }

        for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
                device_printf(sc->sc_dev, "INTR_RXQ[%d]: num_intr=%llu\n",
                    i,
                    sc->sc_rx_irq[i].stats.num_intr);
        }

        for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
                device_printf(sc->sc_dev, "INTR_TXQ[%d]: num_intr=%llu\n",
                    i,
                    sc->sc_tx_irq[i].stats.num_intr);
        }

        EDMA_UNLOCK(sc);

        return (0);
}

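/*
 * Sysctl handler for reading and setting the TX interrupt moderation
 * timer, in microseconds.
 */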
static int
qcom_ess_edma_sysctl_tx_intmit(SYSCTL_HANDLER_ARGS)
{
        struct qcom_ess_edma_softc *sc = arg1;
        uint32_t usec;
        int val = 0;
        int error;

        EDMA_LOCK(sc);
        (void) qcom_ess_edma_hw_get_tx_intr_moderation(sc, &usec);
        EDMA_UNLOCK(sc);

        val = usec;

        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                goto finish;

        EDMA_LOCK(sc);
        error = qcom_ess_edma_hw_set_tx_intr_moderation(sc, (uint32_t) val);
        EDMA_UNLOCK(sc);
finish:
        return (error);
}

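/*
 * Attach the debug, state, stats and tx_intmit sysctl nodes.
 */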
static int
qcom_ess_edma_attach_sysctl(struct qcom_ess_edma_softc *sc)
{
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
        struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

        SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "debug", CTLFLAG_RW, &sc->sc_debug, 0,
            "debugging flags");

        SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "state", CTLTYPE_INT | CTLFLAG_RW, sc,
            0, qcom_ess_edma_sysctl_dump_state, "I", "");

        SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "stats", CTLTYPE_INT | CTLFLAG_RW, sc,
            0, qcom_ess_edma_sysctl_dump_stats, "I", "");

        SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "tx_intmit", CTLTYPE_INT | CTLFLAG_RW, sc,
            0, qcom_ess_edma_sysctl_tx_intmit, "I", "");

        return (0);
}

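/*
 * Attach the driver: create the parent DMA tag, map the registers,
 * allocate the TX/RX interrupts and rings, parse the gmac
 * configuration from the device tree, then program and enable the
 * EDMA hardware.
 */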
static int
qcom_ess_edma_attach(device_t dev)
{
        struct qcom_ess_edma_softc *sc = device_get_softc(dev);
        int i, ret;

        mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

        sc->sc_dev = dev;
        sc->sc_debug = 0;

        (void) qcom_ess_edma_attach_sysctl(sc);

        /* Create parent DMA tag. */
        ret = bus_dma_tag_create(
            bus_get_dma_tag(sc->sc_dev),        /* parent */
            1, 0,                               /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,            /* maxsize */
            0,                                  /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,            /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &sc->sc_dma_tag);
        if (ret != 0) {
                device_printf(sc->sc_dev,
                    "ERROR: failed to create parent DMA tag\n");
                goto error;
        }

        /* Map control/status registers. */
        sc->sc_mem_rid = 0;
        sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &sc->sc_mem_rid, RF_ACTIVE);

        if (sc->sc_mem_res == NULL) {
                device_printf(dev, "ERROR: couldn't map MMIO space\n");
                goto error;
        }

        sc->sc_mem_res_size = (size_t) bus_get_resource_count(dev,
            SYS_RES_MEMORY, sc->sc_mem_rid);
        if (sc->sc_mem_res_size == 0) {
                device_printf(dev, "%s: failed to get device memory size\n",
                    __func__);
                goto error;
        }

        /*
         * How many TX queues per CPU, for figuring out flowid/CPU
         * mapping.
         */
        sc->sc_config.num_tx_queue_per_cpu =
            QCOM_ESS_EDMA_NUM_TX_RINGS / mp_ncpus;

        /* Allocate TX IRQs */
        for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
                int cpu_id;

                /*
                 * The current mapping in the if_transmit() path
                 * will map mp_ncpus groups of flowids to the TXQs.
                 * So for a 4 CPU system the first four will be CPU 0,
                 * the second four will be CPU 1, etc.
                 */
                cpu_id = qcom_ess_edma_tx_queue_to_cpu(sc, i);
                if (qcom_ess_edma_setup_intr(sc, &sc->sc_tx_irq[i],
                    i, cpu_id) != 0)
                        goto error;
                if (bootverbose)
                        device_printf(sc->sc_dev,
                            "mapping TX IRQ %d to CPU %d\n",
                            i, cpu_id);
        }

        /* Allocate RX IRQs */
        for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
                int cpu_id = qcom_ess_edma_rx_queue_to_cpu(sc, i);
                if (qcom_ess_edma_setup_intr(sc, &sc->sc_rx_irq[i],
                    i + QCOM_ESS_EDMA_NUM_TX_IRQS, cpu_id) != 0)
                        goto error;
                if (bootverbose)
                        device_printf(sc->sc_dev,
                            "mapping RX IRQ %d to CPU %d\n",
                            i, cpu_id);
        }

        /* Default receive frame size - before ETHER_ALIGN hack */
        sc->sc_config.rx_buf_size = 2048;
        sc->sc_config.rx_buf_ether_align = true;

        /* Default RSS parameters */
        sc->sc_config.rss_type =
            EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP
            | EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP
            | EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;

        /* Default queue parameters */
        sc->sc_config.tx_ring_count = EDMA_TX_RING_SIZE;
        sc->sc_config.rx_ring_count = EDMA_RX_RING_SIZE;

        /* Default interrupt masks */
        sc->sc_config.rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
        sc->sc_config.tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
        sc->sc_state.misc_intr_mask = 0;
        sc->sc_state.wol_intr_mask = 0;
        sc->sc_state.intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;

        /*
         * Parse out the gmac count so we can start parsing out
         * the gmac list and create us some ifnets.
         */
        if (OF_getencprop(ofw_bus_get_node(dev), "qcom,num_gmac",
            &sc->sc_config.num_gmac, sizeof(uint32_t)) > 0) {
                device_printf(sc->sc_dev, "Creating %d GMACs\n",
                    sc->sc_config.num_gmac);
        } else {
                device_printf(sc->sc_dev, "Defaulting to 1 GMAC\n");
                sc->sc_config.num_gmac = 1;
        }
        if (sc->sc_config.num_gmac > QCOM_ESS_EDMA_MAX_NUM_GMACS) {
                device_printf(sc->sc_dev, "Capping GMACs to %d\n",
                    QCOM_ESS_EDMA_MAX_NUM_GMACS);
                sc->sc_config.num_gmac = QCOM_ESS_EDMA_MAX_NUM_GMACS;
        }

        /*
         * And now, create some gmac entries here; we'll create the
         * ifnets once this is all done.
         */
        for (i = 0; i < sc->sc_config.num_gmac; i++) {
                ret = qcom_ess_edma_gmac_parse(sc, i);
                if (ret != 0) {
                        device_printf(sc->sc_dev,
                            "Failed to parse gmac%d\n", i);
                        goto error;
                }
        }

        /* allocate tx rings */
        for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
                char label[QCOM_ESS_EDMA_LABEL_SZ];
                int cpu_id;

                snprintf(label, QCOM_ESS_EDMA_LABEL_SZ - 1, "tx_ring%d", i);
                if (qcom_ess_edma_desc_ring_setup(sc, &sc->sc_tx_ring[i],
                    label,
                    sc->sc_config.tx_ring_count,
                    sizeof(struct qcom_ess_edma_sw_desc_tx),
                    sizeof(struct qcom_ess_edma_tx_desc),
                    QCOM_ESS_EDMA_MAX_TXFRAGS,
                    ESS_EDMA_TX_BUFFER_ALIGN) != 0)
                        goto error;
                if (qcom_ess_edma_tx_ring_setup(sc, &sc->sc_tx_ring[i]) != 0)
                        goto error;

                /* Same CPU as the interrupts for now */
                cpu_id = qcom_ess_edma_tx_queue_to_cpu(sc, i);

                if (qcom_ess_edma_setup_tx_state(sc, i, cpu_id) != 0)
                        goto error;
        }

        /* allocate rx rings */
        for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
                char label[QCOM_ESS_EDMA_LABEL_SZ];
                int cpu_id;

                snprintf(label, QCOM_ESS_EDMA_LABEL_SZ - 1, "rx_ring%d", i);
                if (qcom_ess_edma_desc_ring_setup(sc, &sc->sc_rx_ring[i],
                    label,
                    sc->sc_config.rx_ring_count,
                    sizeof(struct qcom_ess_edma_sw_desc_rx),
                    sizeof(struct qcom_ess_edma_rx_free_desc),
                    1,
                    ESS_EDMA_RX_BUFFER_ALIGN) != 0)
                        goto error;
                if (qcom_ess_edma_rx_ring_setup(sc, &sc->sc_rx_ring[i]) != 0)
                        goto error;

                /* Same CPU as the interrupts for now */
                cpu_id = qcom_ess_edma_rx_queue_to_cpu(sc, i);

                if (qcom_ess_edma_setup_rx_state(sc, i, cpu_id) != 0)
                        goto error;
        }

        /*
         * map the gmac instances <-> port masks, so incoming frames know
         * where they need to be forwarded to.
         */
        for (i = 0; i < QCOM_ESS_EDMA_MAX_NUM_PORTS; i++)
                sc->sc_gmac_port_map[i] = -1;
        for (i = 0; i < sc->sc_config.num_gmac; i++) {
                ret = qcom_ess_edma_gmac_setup_port_mapping(sc, i);
                if (ret != 0) {
                        device_printf(sc->sc_dev,
883 "Failed to setup port mpapping for gmac%d\n", i);
                        goto error;
                }
        }

        /* Create ifnets */
        for (i = 0; i < sc->sc_config.num_gmac; i++) {
                ret = qcom_ess_edma_gmac_create_ifnet(sc, i);
                if (ret != 0) {
                        device_printf(sc->sc_dev,
                            "Failed to create ifnet for gmac%d\n", i);
                        goto error;
                }
        }

        /*
         * NOTE: If there's no ess-switch / we're a single phy, we
         * still need to reset the ess fabric to a fixed useful state.
         * Otherwise we won't be able to pass packets to anything.
         *
         * Worry about this later.
         */

        EDMA_LOCK(sc);

        /* disable all interrupts */
        ret = qcom_ess_edma_hw_intr_disable(sc);
        if (ret != 0) {
                device_printf(sc->sc_dev,
                    "Failed to disable interrupts (%d)\n",
                    ret);
                goto error_locked;
        }

        /* reset edma */
        ret = qcom_ess_edma_hw_stop(sc);

        /* fill RX ring here, explicitly */
        for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
                EDMA_RING_LOCK(&sc->sc_rx_ring[i]);
                (void) qcom_ess_edma_rx_ring_fill(sc, i,
                    sc->sc_config.rx_ring_count);
                EDMA_RING_UNLOCK(&sc->sc_rx_ring[i]);
        }

        /* configure TX/RX rings; RSS config; initial interrupt rates, etc */
        ret = qcom_ess_edma_hw_setup(sc);
        ret = qcom_ess_edma_hw_setup_tx(sc);
        ret = qcom_ess_edma_hw_setup_rx(sc);
        ret = qcom_ess_edma_hw_setup_txrx_desc_rings(sc);

        /* setup rss indirection table */
        ret = qcom_ess_edma_hw_configure_rss_table(sc);

        /* setup load balancing table */
        ret = qcom_ess_edma_hw_configure_load_balance_table(sc);

        /* configure virtual queue */
        ret = qcom_ess_edma_hw_configure_tx_virtual_queue(sc);

        /* configure AXI burst max */
        ret = qcom_ess_edma_hw_configure_default_axi_transaction_size(sc);

        /* enable IRQs */
        ret = qcom_ess_edma_hw_intr_enable(sc);

        /* enable TX control */
        ret = qcom_ess_edma_hw_tx_enable(sc);

        /* enable RX control */
        ret = qcom_ess_edma_hw_rx_enable(sc);

        EDMA_UNLOCK(sc);

        return (0);

error_locked:
        EDMA_UNLOCK(sc);
error:
        qcom_ess_edma_detach(dev);
        return (ENXIO);
}

966
967 static device_method_t qcom_ess_edma_methods[] = {
968 /* Driver */
969 DEVMETHOD(device_probe, qcom_ess_edma_probe),
970 DEVMETHOD(device_attach, qcom_ess_edma_attach),
971 DEVMETHOD(device_detach, qcom_ess_edma_detach),
972
973 {0, 0},
974 };
975
976 static driver_t qcom_ess_edma_driver = {
977 "essedma",
978 qcom_ess_edma_methods,
979 sizeof(struct qcom_ess_edma_softc),
980 };
981
982 DRIVER_MODULE(qcom_ess_edma, simplebus, qcom_ess_edma_driver, NULL, 0);
983 DRIVER_MODULE(qcom_ess_edma, ofwbus, qcom_ess_edma_driver, NULL, 0);
984 MODULE_DEPEND(qcom_ess_edma, ether, 1, 1, 1);
985 MODULE_VERSION(qcom_ess_edma, 1);
986