xref: /freebsd/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c (revision 9f32893b05dabedc7f8332ec12e2a944b6543158)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>

/*
 * Reset the ESS EDMA core.
 *
 * This is ... problematic.  There's only a single clock control
 * for the ESS core - and that covers both the EDMA (ethernet)
 * and switch hardware.
 *
 * It's also a placeholder for what the linux ess-edma driver
 * does directly to the ess core: in some setups with only a
 * single PHY hooked up, ess-switch may never be initialised,
 * and the hardware falls back to a very minimal switch config.
 * That's honestly pretty bad; that kind of awareness should
 * instead live in ar40xx_switch.
 *
 * So, for now this is a big no-op, at least until everything
 * is implemented enough that the switch/phy code and this
 * EDMA driver code can co-exist.
 */
int
qcom_ess_edma_hw_reset(struct qcom_ess_edma_softc *sc)
{

	EDMA_LOCK_ASSERT(sc);

	device_printf(sc->sc_dev, "%s: called, TODO!\n", __func__);

	/*
	 * This is where the linux ess-edma driver would reset the
	 * ESS core.
	 */

	/*
	 * And here's where the linux ess-edma driver would program
	 * in the initial port config, rgmii control, traffic
	 * port forwarding and broadcast/multicast traffic forwarding.
	 *
	 * Instead, this should be done by the ar40xx_switch driver!
	 */

	return (0);
}

/*
 * Get the TX interrupt moderation timer.
 *
 * The resolution of this register is 2uS.
 */
int
qcom_ess_edma_hw_get_tx_intr_moderation(struct qcom_ess_edma_softc *sc,
    uint32_t *usec)
{
	uint32_t reg;

	EDMA_LOCK_ASSERT(sc);

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT);
	reg = reg >> EDMA_IRQ_MODRT_TX_TIMER_SHIFT;
	reg &= EDMA_IRQ_MODRT_TIMER_MASK;

	*usec = reg * 2;

	return (0);
}

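/*
 * Worked example: the timer field counts in 2uS units, so a field
 * value of 50 reads back as *usec = 50 * 2 = 100.  Conversely, the
 * setter below divides the requested value by two, so odd requests
 * are rounded down (e.g. 101uS -> 50 ticks -> 100uS).
 */
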
/*
 * Set the TX interrupt moderation timer.
 *
 * The resolution of this register is 2uS.
 */
int
qcom_ess_edma_hw_set_tx_intr_moderation(struct qcom_ess_edma_softc *sc,
    uint32_t usec)
{
	uint32_t reg;

	usec = usec / 2;

	EDMA_LOCK_ASSERT(sc);

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT);
	reg &= ~(EDMA_IRQ_MODRT_TIMER_MASK << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
	reg |= (usec & EDMA_IRQ_MODRT_TIMER_MASK)
	    << EDMA_IRQ_MODRT_TX_TIMER_SHIFT;
	EDMA_REG_WRITE(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT, reg);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

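/*
 * Example usage (a sketch; EDMA_LOCK()/EDMA_UNLOCK() are assumed to
 * be the lock/unlock pair matching the EDMA_LOCK_ASSERT() above):
 *
 *	EDMA_LOCK(sc);
 *	(void) qcom_ess_edma_hw_set_tx_intr_moderation(sc, 100);
 *	EDMA_UNLOCK(sc);
 */
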
/*
 * Set the RX interrupt moderation timer.
 *
 * The resolution of this register is 2uS.
 */
int
qcom_ess_edma_hw_set_rx_intr_moderation(struct qcom_ess_edma_softc *sc,
    uint32_t usec)
{
	uint32_t reg;

	/* Convert to 2uS hardware ticks, as in the TX path above. */
	usec = usec / 2;

	EDMA_LOCK_ASSERT(sc);

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT);
	reg &= ~(EDMA_IRQ_MODRT_TIMER_MASK << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
	reg |= (usec & EDMA_IRQ_MODRT_TIMER_MASK)
	    << EDMA_IRQ_MODRT_RX_TIMER_SHIFT;
	EDMA_REG_WRITE(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT, reg);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Disable all interrupts.
 */
int
qcom_ess_edma_hw_intr_disable(struct qcom_ess_edma_softc *sc)
{
	int i;

	/* Disable TX interrupts */
	for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
		EDMA_REG_WRITE(sc, EDMA_REG_TX_INT_MASK_Q(i), 0);
	}

	/* Disable RX interrupts */
	for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
		EDMA_REG_WRITE(sc, EDMA_REG_RX_INT_MASK_Q(i), 0);
	}

	/* Disable misc/WOL interrupts */
	EDMA_REG_WRITE(sc, EDMA_REG_MISC_IMR, 0);
	EDMA_REG_WRITE(sc, EDMA_REG_WOL_IMR, 0);

	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Enable/disable the given RX ring interrupt.
 */
int
qcom_ess_edma_hw_intr_rx_intr_set_enable(struct qcom_ess_edma_softc *sc,
    int rxq, bool state)
{

	EDMA_REG_WRITE(sc, EDMA_REG_RX_INT_MASK_Q(rxq), state ? 1 : 0);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Enable/disable the given TX ring interrupt.
 */
int
qcom_ess_edma_hw_intr_tx_intr_set_enable(struct qcom_ess_edma_softc *sc,
    int txq, bool state)
{

	EDMA_REG_WRITE(sc, EDMA_REG_TX_INT_MASK_Q(txq), state ? 1 : 0);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

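/*
 * Typical usage sketch (hypothetical handler flow): mask the queue's
 * interrupt, service the ring, then unmask it:
 *
 *	(void) qcom_ess_edma_hw_intr_rx_intr_set_enable(sc, rxq, false);
 *	... ACK the ISR and process the RX ring ...
 *	(void) qcom_ess_edma_hw_intr_rx_intr_set_enable(sc, rxq, true);
 */
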
/*
 * Enable interrupts.
 */
int
qcom_ess_edma_hw_intr_enable(struct qcom_ess_edma_softc *sc)
{
	int i;

	/* ACK, then enable TX interrupts */
	EDMA_REG_WRITE(sc, EDMA_REG_TX_ISR, 0xffff);
	for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
		EDMA_REG_WRITE(sc, EDMA_REG_TX_INT_MASK_Q(i),
		    sc->sc_config.tx_intr_mask);
	}

	/* ACK, then enable RX interrupts */
	EDMA_REG_WRITE(sc, EDMA_REG_RX_ISR, 0xff);
	for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
		EDMA_REG_WRITE(sc, EDMA_REG_RX_INT_MASK_Q(i),
		    sc->sc_config.rx_intr_mask);
	}

	/* Leave misc/WOL interrupts disabled */
	EDMA_REG_WRITE(sc, EDMA_REG_MISC_IMR, 0);
	EDMA_REG_WRITE(sc, EDMA_REG_WOL_IMR, 0);

	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Clear interrupt status.
 */
int
qcom_ess_edma_hw_intr_status_clear(struct qcom_ess_edma_softc *sc)
{

	EDMA_REG_WRITE(sc, EDMA_REG_RX_ISR, 0xff);
	EDMA_REG_WRITE(sc, EDMA_REG_TX_ISR, 0xffff);
	EDMA_REG_WRITE(sc, EDMA_REG_MISC_ISR, 0x1fff);
	EDMA_REG_WRITE(sc, EDMA_REG_WOL_ISR, 0x1);

	return (0);
}

/*
 * ACK the given RX queue ISR.
 *
 * Must be called with the RX ring lock held!
 */
int
qcom_ess_edma_hw_intr_rx_ack(struct qcom_ess_edma_softc *sc, int rx_queue)
{

	EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[rx_queue]);
	EDMA_REG_WRITE(sc, EDMA_REG_RX_ISR, (1U << rx_queue));
	(void) EDMA_REG_READ(sc, EDMA_REG_RX_ISR);

	return (0);
}

/*
 * ACK the given TX queue ISR.
 *
 * Must be called with the TX ring lock held!
 */
int
qcom_ess_edma_hw_intr_tx_ack(struct qcom_ess_edma_softc *sc, int tx_queue)
{

	EDMA_RING_LOCK_ASSERT(&sc->sc_tx_ring[tx_queue]);
	EDMA_REG_WRITE(sc, EDMA_REG_TX_ISR, (1U << tx_queue));
	(void) EDMA_REG_READ(sc, EDMA_REG_TX_ISR);

	return (0);
}

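/*
 * Note: the read-back after each ISR write above is presumably there
 * to flush the posted write, so the ACK has reached the hardware
 * before the queue interrupt is re-enabled.  A minimal RX service
 * sketch (EDMA_RING_LOCK()/EDMA_RING_UNLOCK() are assumed to match
 * the EDMA_RING_LOCK_ASSERT() above):
 *
 *	EDMA_RING_LOCK(&sc->sc_rx_ring[rx_queue]);
 *	(void) qcom_ess_edma_hw_intr_rx_ack(sc, rx_queue);
 *	... refill/complete RX descriptors ...
 *	EDMA_RING_UNLOCK(&sc->sc_rx_ring[rx_queue]);
 */
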
/*
 * Configure the default RSS indirection table.
 */
int
qcom_ess_edma_hw_configure_rss_table(struct qcom_ess_edma_softc *sc)
{
	int i;

	/*
	 * The default IDT value configures the hash buckets
	 * to a repeating pattern of q0, q2, q4, q6.
	 */
	for (i = 0; i < EDMA_NUM_IDT; i++) {
		EDMA_REG_WRITE(sc, EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
	}
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

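/*
 * Layout sketch: assuming each 32-bit IDT register packs eight 4-bit
 * queue numbers (as in the linux ess-edma driver, where the value is
 * 0x64206420), hash bucket N selects its queue as:
 *
 *	queue = (EDMA_RSS_IDT_VALUE >> ((N % 8) * 4)) & 0xf;
 *
 * which yields the repeating q0, q2, q4, q6 pattern described above.
 */
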
/*
 * Configure the default load balance mapping table.
 */
int
qcom_ess_edma_hw_configure_load_balance_table(struct qcom_ess_edma_softc *sc)
{

	/*
	 * I think this is mapping things to queues 0,2,4,6.
	 * Linux says it's 0,1,3,4 but that doesn't match the
	 * EDMA_LB_REG_VALUE field.
	 */
	EDMA_REG_WRITE(sc, EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);
	EDMA_REG_BARRIER_WRITE(sc);
	return (0);
}

/*
 * Configure the default virtual tx ring queues.
 */
int
qcom_ess_edma_hw_configure_tx_virtual_queue(struct qcom_ess_edma_softc *sc)
{

	EDMA_REG_WRITE(sc, EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
	EDMA_REG_WRITE(sc, EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);

	EDMA_REG_BARRIER_WRITE(sc);
	return (0);
}

/*
 * Configure the default maximum AXI bus transaction size.
 */
int
qcom_ess_edma_hw_configure_default_axi_transaction_size(
    struct qcom_ess_edma_softc *sc)
{

	EDMA_REG_WRITE(sc, EDMA_REG_AXIW_CTRL_MAXWRSIZE,
	    EDMA_AXIW_MAXWRSIZE_VALUE);
	/* Flush the write, matching the other register setup paths */
	EDMA_REG_BARRIER_WRITE(sc);
	return (0);
}

/*
 * Stop the TX/RX queues.
 */
int
qcom_ess_edma_hw_stop_txrx_queues(struct qcom_ess_edma_softc *sc)
{
	uint32_t reg;

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_RXQ_CTRL);
	reg &= ~EDMA_RXQ_CTRL_EN;
	EDMA_REG_WRITE(sc, EDMA_REG_RXQ_CTRL, reg);
	EDMA_REG_BARRIER_WRITE(sc);

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_TXQ_CTRL);
	reg &= ~EDMA_TXQ_CTRL_TXQ_EN;
	EDMA_REG_WRITE(sc, EDMA_REG_TXQ_CTRL, reg);
	EDMA_REG_BARRIER_WRITE(sc);
	return (0);
}

/*
 * Stop the EDMA block, disable interrupts.
 */
int
qcom_ess_edma_hw_stop(struct qcom_ess_edma_softc *sc)
{
	int ret;

	EDMA_LOCK_ASSERT(sc);

	ret = qcom_ess_edma_hw_intr_disable(sc);
	if (ret != 0) {
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
		    "%s: hw_intr_disable failed (%d)\n",
		    __func__,
		    ret);
	}

	ret = qcom_ess_edma_hw_intr_status_clear(sc);
	if (ret != 0) {
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
		    "%s: hw_intr_status_clear failed (%d)\n",
		    __func__,
		    ret);
	}

	ret = qcom_ess_edma_hw_stop_txrx_queues(sc);
	if (ret != 0) {
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
		    "%s: hw_stop_txrx_queues failed (%d)\n",
		    __func__,
		    ret);
	}

	return (0);
}

/*
 * Update the producer index for the given receive queue.
 *
 * Note: the RX ring lock must be held!
 *
 * Return 0 if OK, an error number if there's an error.
 */
int
qcom_ess_edma_hw_rfd_prod_index_update(struct qcom_ess_edma_softc *sc,
    int queue, int idx)
{
	uint32_t reg;

	EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[queue]);

	QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
	    "%s: called; q=%d idx=0x%x\n",
	    __func__, queue, idx);

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_RFD_IDX_Q(queue));
	QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
	    "%s: q=%d reg was 0x%08x\n", __func__, queue, reg);
	reg &= ~EDMA_RFD_PROD_IDX_BITS;
	reg |= idx;
	EDMA_REG_WRITE(sc, EDMA_REG_RFD_IDX_Q(queue), reg);
	QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
	    "%s: q=%d reg now 0x%08x\n", __func__, queue, reg);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

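/*
 * Example (sketch): an RX refill path that has just pushed 'n' fresh
 * buffers would advance its producer index modulo the ring size and
 * hand it to the hardware:
 *
 *	idx = (idx + n) % sc->sc_config.rx_ring_count;
 *	(void) qcom_ess_edma_hw_rfd_prod_index_update(sc, queue, idx);
 */
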
/*
 * Fetch the consumer index for the given receive queue.
 *
 * Returns the current consumer index.
 *
 * Note - since it's used in statistics/debugging it isn't asserting the
 * RX ring lock, so be careful when/how you use this!
 */
int
qcom_ess_edma_hw_rfd_get_cons_index(struct qcom_ess_edma_softc *sc, int queue)
{
	uint32_t reg;

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_RFD_IDX_Q(queue));
	return ((reg >> EDMA_RFD_CONS_IDX_SHIFT) & EDMA_RFD_CONS_IDX_MASK);
}

/*
 * Update the software consumed index to the hardware, so
 * it knows what we've read.
 *
 * Note: the RX ring lock must be held when calling this!
 *
 * Returns 0 if OK, error number if error.
 */
int
qcom_ess_edma_hw_rfd_sw_cons_index_update(struct qcom_ess_edma_softc *sc,
    int queue, int idx)
{

	EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[queue]);

	EDMA_REG_WRITE(sc, EDMA_REG_RX_SW_CONS_IDX_Q(queue), idx);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Setup initial hardware configuration.
 */
int
qcom_ess_edma_hw_setup(struct qcom_ess_edma_softc *sc)
{
	uint32_t reg;

	EDMA_LOCK_ASSERT(sc);

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_INTR_CTRL);
	reg &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
	reg |= sc->sc_state.intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
	EDMA_REG_WRITE(sc, EDMA_REG_INTR_CTRL, reg);

	/* Clear wake-on-lan config */
	EDMA_REG_WRITE(sc, EDMA_REG_WOL_CTRL, 0);

	/* Configure initial interrupt moderation */
	reg = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
	reg |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
	EDMA_REG_WRITE(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT, reg);

	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

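/*
 * Note: the moderation defaults above are written in hardware ticks,
 * so with the 2uS register resolution described earlier the effective
 * values are EDMA_TX_IMT * 2 and EDMA_RX_IMT * 2 microseconds; e.g. a
 * subsequent qcom_ess_edma_hw_get_tx_intr_moderation(sc, &usec) should
 * report usec == EDMA_TX_IMT * 2.
 */
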
/*
 * Setup TX DMA burst configuration.
 */
int
qcom_ess_edma_hw_setup_tx(struct qcom_ess_edma_softc *sc)
{
	uint32_t reg;

	EDMA_LOCK_ASSERT(sc);

	reg = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
	reg |= EDMA_TXQ_CTRL_TPD_BURST_EN;
	reg |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
	EDMA_REG_WRITE(sc, EDMA_REG_TXQ_CTRL, reg);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Setup default RSS, RX burst/prefetch/interrupt thresholds.
 *
 * Strip VLANs - those are offloaded into the RX descriptor.
 */
int
qcom_ess_edma_hw_setup_rx(struct qcom_ess_edma_softc *sc)
{
	uint32_t reg;

	EDMA_LOCK_ASSERT(sc);

	/* Configure RSS types */
	EDMA_REG_WRITE(sc, EDMA_REG_RSS_TYPE, sc->sc_config.rss_type);

	/* Configure RFD burst */
	reg = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
	/* .. and RFD prefetch threshold */
	reg |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
	/* ... and threshold to generate RFD interrupt */
	reg |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
	EDMA_REG_WRITE(sc, EDMA_REG_RX_DESC1, reg);

	/* Set RX FIFO threshold to begin DMAing data to host */
	reg = EDMA_FIFO_THRESH_128_BYTE;
	/* Strip VLAN tags; the tag is reported in the RX descriptor */
	reg |= EDMA_RXQ_CTRL_RMV_VLAN;
	EDMA_REG_WRITE(sc, EDMA_REG_RXQ_CTRL, reg);

	EDMA_REG_BARRIER_WRITE(sc);
	return (0);
}

/*
 * XXX TODO: this particular routine is a bit big and likely should be split
 * across main, hw, desc, rx and tx.  But to expedite initial bring-up,
 * let's just commit the sins here and get receive up and going.
 */
int
qcom_ess_edma_hw_setup_txrx_desc_rings(struct qcom_ess_edma_softc *sc)
{
	uint32_t reg, i, idx;
	int len;

	EDMA_LOCK_ASSERT(sc);

	/*
	 * Setup base addresses for each transmit ring, and
	 * read in the initial index to use for transmit.
	 */
	for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
		/* Descriptor ring base address */
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING_MGMT,
		    "TXQ[%d]: ring paddr=0x%08lx\n",
		    i, sc->sc_tx_ring[i].hw_desc_paddr);
		EDMA_REG_WRITE(sc, EDMA_REG_TPD_BASE_ADDR_Q(i),
		    sc->sc_tx_ring[i].hw_desc_paddr);

		/* And now, grab the consumer index */
		reg = EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(i));
		idx = (reg >> EDMA_TPD_CONS_IDX_SHIFT)
		    & EDMA_TPD_CONS_IDX_MASK;

		sc->sc_tx_ring[i].next_to_fill = idx;
		sc->sc_tx_ring[i].next_to_clean = idx;

		/* Update prod and sw consumer indexes */
		reg &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
		reg |= idx;
		EDMA_REG_WRITE(sc, EDMA_REG_TPD_IDX_Q(i), reg);
		EDMA_REG_WRITE(sc, EDMA_REG_TX_SW_CONS_IDX_Q(i), idx);

		/* Set the ring size */
		EDMA_REG_WRITE(sc, EDMA_REG_TPD_RING_SIZE,
		    sc->sc_config.tx_ring_count & EDMA_TPD_RING_SIZE_MASK);
	}

	/* Set base addresses for each RFD ring */
	for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
		    "RXQ[%d]: ring paddr=0x%08lx\n",
		    i, sc->sc_rx_ring[i].hw_desc_paddr);
		EDMA_REG_WRITE(sc, EDMA_REG_RFD_BASE_ADDR_Q(i),
		    sc->sc_rx_ring[i].hw_desc_paddr);
	}
	EDMA_REG_BARRIER_WRITE(sc);

	/* Configure RX buffer size */
	len = sc->sc_config.rx_buf_size;
	if (sc->sc_config.rx_buf_ether_align)
		len -= ETHER_ALIGN;
	reg = (len & EDMA_RX_BUF_SIZE_MASK)
	    << EDMA_RX_BUF_SIZE_SHIFT;
	/* .. and RFD ring size */
	reg |= (sc->sc_config.rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
	    << EDMA_RFD_RING_SIZE_SHIFT;
	EDMA_REG_WRITE(sc, EDMA_REG_RX_DESC0, reg);

	/* Disable the TX low/high watermark (for interrupts?) */
	EDMA_REG_WRITE(sc, EDMA_REG_TXF_WATER_MARK, 0);

	EDMA_REG_BARRIER_WRITE(sc);

	/* Load all the ring base addresses into the hardware */
	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_TX_SRAM_PART);
	reg |= 1 << EDMA_LOAD_PTR_SHIFT;
	EDMA_REG_WRITE(sc, EDMA_REG_TX_SRAM_PART, reg);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

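/*
 * Register layout note (inferred from the shifts and masks used here
 * and in the TPD index helpers below): EDMA_REG_TPD_IDX_Q(i) packs
 * both ring indexes into one 32-bit register, so:
 *
 *	cons = (reg >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
 *	prod = (reg >> EDMA_TPD_PROD_IDX_SHIFT) & EDMA_TPD_PROD_IDX_MASK;
 */
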
/*
 * Enable general MAC TX DMA.
 */
int
qcom_ess_edma_hw_tx_enable(struct qcom_ess_edma_softc *sc)
{
	uint32_t reg;

	EDMA_LOCK_ASSERT(sc);

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_TXQ_CTRL);
	reg |= EDMA_TXQ_CTRL_TXQ_EN;
	EDMA_REG_WRITE(sc, EDMA_REG_TXQ_CTRL, reg);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Enable general MAC RX DMA.
 */
int
qcom_ess_edma_hw_rx_enable(struct qcom_ess_edma_softc *sc)
{
	uint32_t reg;

	EDMA_LOCK_ASSERT(sc);

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_RXQ_CTRL);
	reg |= EDMA_RXQ_CTRL_EN;
	EDMA_REG_WRITE(sc, EDMA_REG_RXQ_CTRL, reg);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Read the TPD consumer index register for the given transmit ring.
 */
int
qcom_ess_edma_hw_tx_read_tpd_cons_idx(struct qcom_ess_edma_softc *sc,
    int queue_id, uint16_t *idx)
{
	uint32_t reg;

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(queue_id));
	*idx = (reg >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;

	return (0);
}

/*
 * Update the TPD producer index for the given transmit ring.
 */
int
qcom_ess_edma_hw_tx_update_tpd_prod_idx(struct qcom_ess_edma_softc *sc,
    int queue_id, uint16_t idx)
{
	uint32_t reg;

	EDMA_REG_BARRIER_READ(sc);
	reg = EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(queue_id));
	reg &= ~EDMA_TPD_PROD_IDX_BITS;
	reg |= (idx & EDMA_TPD_PROD_IDX_MASK) << EDMA_TPD_PROD_IDX_SHIFT;
	EDMA_REG_WRITE(sc, EDMA_REG_TPD_IDX_Q(queue_id), reg);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Update the TPD software consumer index register for the given
 * transmit ring - ie, what software has cleaned.
 */
int
qcom_ess_edma_hw_tx_update_cons_idx(struct qcom_ess_edma_softc *sc,
    int queue_id, uint16_t idx)
{

	EDMA_REG_WRITE(sc, EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), idx);
	EDMA_REG_BARRIER_WRITE(sc);

	return (0);
}
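
/*
 * Putting the TPD index helpers together, a TX completion path might
 * look like this (a sketch only; EDMA_RING_LOCK()/EDMA_RING_UNLOCK()
 * are assumed to match the ring lock asserted elsewhere in this file):
 *
 *	uint16_t hw_cons;
 *
 *	EDMA_RING_LOCK(&sc->sc_tx_ring[queue_id]);
 *	(void) qcom_ess_edma_hw_tx_read_tpd_cons_idx(sc, queue_id, &hw_cons);
 *	... unmap and free completed mbufs up to hw_cons ...
 *	(void) qcom_ess_edma_hw_tx_update_cons_idx(sc, queue_id, hw_cons);
 *	EDMA_RING_UNLOCK(&sc->sc_tx_ring[queue_id]);
 */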
753