/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/endian.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_rx.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
/*
 * Map the given RX queue to a given CPU.
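 *
 * For example, with mp_ncpus == 4, RX queues 0..7 map to
 * CPUs 0, 1, 2, 3, 0, 1, 2, 3.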
 */
int
qcom_ess_edma_rx_queue_to_cpu(struct qcom_ess_edma_softc *sc, int queue)
{
	return (queue % mp_ncpus);
}

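/*
 * Set up the software state for the given RX ring.
 *
 * This creates one busdma map per ring slot up-front; the maps are
 * then reused as buffers are allocated and cleaned over the ring's
 * lifetime.
 */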
int
qcom_ess_edma_rx_ring_setup(struct qcom_ess_edma_softc *sc,
    struct qcom_ess_edma_desc_ring *ring)
{
	struct qcom_ess_edma_sw_desc_rx *rxd;
	int i, ret;

	for (i = 0; i < EDMA_RX_RING_SIZE; i++) {
		rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, i);
		if (rxd == NULL) {
			device_printf(sc->sc_dev,
			    "ERROR: couldn't get sw desc (idx %d)\n", i);
			return (EINVAL);
		}
		rxd->m = NULL;
		ret = bus_dmamap_create(ring->buffer_dma_tag,
		    BUS_DMA_NOWAIT,
		    &rxd->m_dmamap);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to create dmamap (%d)\n",
			    __func__, ret);
			return (ret);
		}
	}

	return (0);
}

int
qcom_ess_edma_rx_ring_clean(struct qcom_ess_edma_softc *sc,
    struct qcom_ess_edma_desc_ring *ring)
{
	device_printf(sc->sc_dev, "%s: TODO\n", __func__);
	return (0);
}

/*
 * Allocate a receive buffer for the given ring/index and set up DMA.
 *
 * The caller must have called the ring prewrite routine in order
 * to flush the ring memory if needed before writing to it.
 * It's not done here so we don't do it on /every/ ring update.
 *
 * Returns an error if the slot is already full or if it couldn't
 * be filled; the caller should then figure out how to cope.
 */
int
qcom_ess_edma_rx_buf_alloc(struct qcom_ess_edma_softc *sc,
    struct qcom_ess_edma_desc_ring *ring, int idx)
{
	struct mbuf *m;
	struct qcom_ess_edma_sw_desc_rx *rxd;
	struct qcom_ess_edma_rx_free_desc *ds;
	bus_dma_segment_t segs[1];
	int error;
	int nsegs;

	/* Get the software/hardware descriptors we're going to update */
	rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, idx);
	if (rxd == NULL) {
		device_printf(sc->sc_dev,
		    "ERROR: couldn't get sw desc (idx %d)\n", idx);
		return (EINVAL);
	}
	ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring, idx);
	if (ds == NULL) {
		device_printf(sc->sc_dev,
		    "ERROR: couldn't get hw desc (idx %d)\n", idx);
		return (EINVAL);
	}

	/* If this slot already has an mbuf then return an error */
	if (rxd->m != NULL) {
		device_printf(sc->sc_dev,
		    "ERROR: sw desc idx %d already has an mbuf\n",
		    idx);
		return (EINVAL); /* XXX */
	}

	/* Allocate mbuf */
	m = m_get2(sc->sc_config.rx_buf_size, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		/* XXX keep statistics */
		device_printf(sc->sc_dev, "ERROR: failed to allocate mbuf\n");
		return (ENOMEM);
	}

	/* Load the DMA map; get the physical memory address of the mbuf */
	nsegs = 1;
	m->m_pkthdr.len = m->m_len = sc->sc_config.rx_buf_size;

	/*
	 * ETHER_ALIGN hack: shift the payload by two bytes so the
	 * IP header ends up 32-bit aligned after the 14 byte
	 * Ethernet header.
	 */
	if (sc->sc_config.rx_buf_ether_align)
		m_adj(m, ETHER_ALIGN);
	error = bus_dmamap_load_mbuf_sg(ring->buffer_dma_tag, rxd->m_dmamap,
	    m, segs, &nsegs, 0);
	if (error != 0 || nsegs != 1) {
		device_printf(sc->sc_dev,
		    "ERROR: couldn't load mbuf dmamap (%d) (nsegs=%d)\n",
		    error, nsegs);
		m_freem(m);
		return (error);
	}

	/* Populate the sw and hw descriptors */
	rxd->m = m;
	rxd->m_physaddr = segs[0].ds_addr;

	ds->addr = htole32(segs[0].ds_addr);

	ring->stats.num_added++;

	return (0);
}

/*
 * Remove a receive buffer from the given ring/index.
 *
 * This clears the software/hardware descriptor entries and unmaps
 * the mbuf; the returned mbuf will be owned by the caller.
 */
struct mbuf *
qcom_ess_edma_rx_buf_clean(struct qcom_ess_edma_softc *sc,
    struct qcom_ess_edma_desc_ring *ring, int idx)
{
	struct mbuf *m;
	struct qcom_ess_edma_sw_desc_rx *rxd;
	struct qcom_ess_edma_rx_free_desc *ds;

	/* Get the software/hardware descriptors we're going to update */
	rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, idx);
	if (rxd == NULL) {
		device_printf(sc->sc_dev,
		    "ERROR: couldn't get sw desc (idx %d)\n", idx);
		return (NULL);
	}
	ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring, idx);
	if (ds == NULL) {
		device_printf(sc->sc_dev,
		    "ERROR: couldn't get hw desc (idx %d)\n", idx);
		return (NULL);
	}

	QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING,
	    "%s: idx=%u, rxd=%p, ds=%p, maddr=0x%08x/0x%08lx\n",
	    __func__, idx, rxd, ds, ds->addr, rxd->m_physaddr);

	/* No mbuf? Return NULL; it's fine */
	if (rxd->m == NULL) {
		return (NULL);
	}

	/* Flush the mbuf */
	bus_dmamap_sync(ring->buffer_dma_tag, rxd->m_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Unload */
	bus_dmamap_unload(ring->buffer_dma_tag, rxd->m_dmamap);

	/* Remove the sw/hw descriptor entries */
	m = rxd->m;
	rxd->m = NULL;

#ifdef	ESS_EDMA_DEBUG_CLEAR_DESC
	/*
	 * Note: clearing hw entries is purely for correctness; it may be
	 * VERY SLOW!
	 */
	ds->addr = 0;
#endif

	ring->stats.num_cleaned++;

	return (m);
}

/*
 * Fill the given ring with buffers, up to 'num' entries or until
 * the ring is full.  It will also update the producer index for
 * the given queue.
 *
 * Returns 0 if OK, an error if there's a problem.
 */
int
qcom_ess_edma_rx_ring_fill(struct qcom_ess_edma_softc *sc,
    int queue, int num)
{
	struct qcom_ess_edma_desc_ring *ring;
	int num_fill;
	int idx;
	int error;
	int prod_index;
	int n = 0;

	ring = &sc->sc_rx_ring[queue];

	EDMA_RING_LOCK_ASSERT(ring);

	num_fill = num;
	if (num_fill > ring->ring_count - 1)
		num_fill = ring->ring_count - 1;
	idx = ring->next_to_fill;

	while (num_fill != 0) {
		error = qcom_ess_edma_rx_buf_alloc(sc, ring, idx);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: queue %d: failed to alloc rx buf (%d)\n",
			    queue, error);
			break;
		}
		num_fill--;

		/* Update the ring index, wrapping at ring_count */
		idx++;
		if (idx >= ring->ring_count)
			idx = 0;
		n++;
	}

	ring->next_to_fill = idx;

	/* Flush ring updates before the HW index is updated */
	qcom_ess_edma_desc_ring_flush_preupdate(sc, ring);

	/*
	 * The producer index is the slot BEFORE next_to_fill, ie the
	 * last slot that was actually filled.  For example, with a
	 * ring_count of 256, idx == 10 yields a producer index of 9,
	 * and idx == 0 wraps back around to 255.
	 */
	if (idx == 0)
		prod_index = ring->ring_count - 1;
	else
		prod_index = idx - 1;
	(void) qcom_ess_edma_hw_rfd_prod_index_update(sc, queue, prod_index);

	QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING,
	    "%s: queue %d: added %d bufs, prod_idx=%u\n",
	    __func__, queue, n, prod_index);

	return (0);
}
/*
 * Run through the RX ring, completing frames.
 *
 * Completed frames are enqueued onto the caller-supplied mbufq,
 * for the caller to then dispatch up to the network stack;
 * frames that can't be enqueued there are freed.
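 *
 * The loop reads the hardware RFD consumer index and cleans
 * software descriptors until the software index catches up to it,
 * then refills the ring and pushes the new software consumer
 * index back to the hardware.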
 */
int
qcom_ess_edma_rx_ring_complete(struct qcom_ess_edma_softc *sc, int queue,
    struct mbufq *mq)
{
	struct qcom_ess_edma_desc_ring *ring;
	struct qcom_ess_edma_sw_desc_rx *rxd;
	int n, cleaned_count, len;
	uint16_t sw_next_to_clean, hw_next_to_clean;
	struct mbuf *m;
	struct qcom_edma_rx_return_desc *rrd;
	int num_rfds, port_id, priority, hash_type, hash_val, flow_cookie, vlan;
	bool rx_checksum = false;
	int port_vlan = -1;

	ring = &sc->sc_rx_ring[queue];

	EDMA_RING_LOCK_ASSERT(ring);

	qcom_ess_edma_desc_ring_flush_postupdate(sc, ring);

	sw_next_to_clean = ring->next_to_clean;
	hw_next_to_clean = 0;
	cleaned_count = 0;

	for (n = 0; n < EDMA_RX_RING_SIZE - 1; n++) {
		rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring,
		    sw_next_to_clean);
		if (rxd == NULL) {
			device_printf(sc->sc_dev,
			    "ERROR: couldn't get sw desc (idx %d)\n",
			    sw_next_to_clean);
			return (EINVAL);
		}

		hw_next_to_clean = qcom_ess_edma_hw_rfd_get_cons_index(sc,
		    queue);
		if (hw_next_to_clean == sw_next_to_clean)
			break;

		/* Unmap the mbuf at this index */
		m = qcom_ess_edma_rx_buf_clean(sc, ring, sw_next_to_clean);
		sw_next_to_clean = (sw_next_to_clean + 1) % ring->ring_count;
		cleaned_count++;

		/* An empty slot yields no mbuf; don't try to parse it */
		if (m == NULL)
			continue;

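		/*
		 * The hardware prepends an RX return descriptor (RRD)
		 * to each completed buffer; rrd0..rrd7 are 16-bit
		 * words in this layout (the masks and shifts used
		 * below come from qcom_ess_edma_reg.h.)
		 */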
		/* Get the RRD header */
		rrd = mtod(m, struct qcom_edma_rx_return_desc *);
		if (rrd->rrd7 & EDMA_RRD_DESC_VALID) {
			len = rrd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
			num_rfds = rrd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
			port_id = (rrd->rrd1 >> EDMA_PORT_ID_SHIFT)
			    & EDMA_PORT_ID_MASK;
			priority = (rrd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
			    & EDMA_RRD_PRIORITY_MASK;
			hash_type = (rrd->rrd5 >> EDMA_HASH_TYPE_SHIFT)
			    & EDMA_HASH_TYPE_MASK;
			hash_val = rrd->rrd2;
			flow_cookie = rrd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
			vlan = rrd->rrd4;
			QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_FRAME,
			    "%s: len=%d, num_rfds=%d, port_id=%d,"
			    " priority=%d, hash_type=%d, hash_val=%d,"
			    " flow_cookie=%d, vlan=%d\n",
			    __func__,
			    len,
			    num_rfds,
			    port_id,
			    priority,
			    hash_type,
			    hash_val,
			    flow_cookie,
			    vlan);
			QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_FRAME,
			    "%s: flags: L4 checksum"
			    " fail=%d, 802.1q vlan=%d, 802.1ad vlan=%d\n",
			    __func__,
			    !! (rrd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK),
			    !! (rrd->rrd7 & EDMA_RRD_CVLAN),
			    !! (rrd->rrd1 & EDMA_RRD_SVLAN));
		} else {
			/* Invalid RRD; use safe defaults */
			len = 0;
			num_rfds = 0;
			port_id = 0;
			priority = 0;
			hash_type = EDMA_RRD_RSS_TYPE_NONE;
			hash_val = 0;
			flow_cookie = 0;
			vlan = 0;
		}

408
409 /* Payload starts after the RRD header */
410 m_adj(m, sizeof(struct qcom_edma_rx_return_desc));
411
412 /* Set mbuf length now */
413 m->m_len = m->m_pkthdr.len = len;
414
415 /*
416 * Set rcvif to the relevant GMAC ifp; GMAC receive will
417 * check the field to receive it to the right place, or
418 * if it's NULL it'll drop it for us.
419 */
420 m->m_pkthdr.rcvif = NULL;
421 if (sc->sc_gmac_port_map[port_id] != -1) {
422 struct qcom_ess_edma_gmac *gmac;
423 gmac = &sc->sc_gmac[sc->sc_gmac_port_map[port_id]];
424 QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_FRAME,
425 "%s: port_id=%d gmac=%d\n", __func__,
426 port_id, gmac->id);
427 if (gmac->enabled == true) {
428 m->m_pkthdr.rcvif = gmac->ifp;
429 if ((if_getcapenable(gmac->ifp) & IFCAP_RXCSUM) != 0)
430 rx_checksum = true;
431 }
432 port_vlan = gmac->vlan_id;
433 }
434
		/* XXX TODO: handle multi-frame packets (ie, jumbos!) */
		/* XXX TODO: handle the 802.1ad VLAN offload field */
		/* XXX TODO: flow offload */

		/*
		 * For now we don't support disabling VLAN offload;
		 * tags are always stripped by the hardware.  Handle
		 * the outer VLAN tag here; worry about 802.1ad
		 * later on (and hopefully by something other than
		 * adding another mbuf.)
		 */
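		/*
		 * Note: in ether_vtag the 802.1Q PCP occupies bits
		 * 15:13, DEI bit 12, and the VID bits 11:0.
		 */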
		if ((rrd->rrd7 & EDMA_RRD_CVLAN) != 0) {
			/*
			 * There's an outer VLAN tag that has been
			 * decapped by the hardware.  Compare it to the
			 * current port vlan, and if they don't match,
			 * add an offloaded VLAN tag to the mbuf.
			 *
			 * And yes, care about the priority field too.
			 */
			if ((port_vlan == -1) || (port_vlan != vlan)) {
				m->m_pkthdr.ether_vtag = (vlan & 0xfff)
				    | ((priority & 0x7) << 13);
				m->m_flags |= M_VLANTAG;
			}
		}

		/*
		 * Store the hash info in the mbuf if it's there.
		 *
		 * XXX TODO: decode the RSS field and translate it to
		 * the mbuf hash entry.  For now, just treat it as OPAQUE.
		 */
		if (hash_type != EDMA_RRD_RSS_TYPE_NONE) {
			m->m_pkthdr.flowid = hash_val;
			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
		}

		/*
		 * Update the RX checksum flags if the destination ifp
		 * has RXCSUM enabled.
		 */
		if (rx_checksum) {
			if (rrd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK) {
				/* Fail */
				ring->stats.num_rx_csum_fail++;
			} else {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED
				    | CSUM_IP_VALID
				    | CSUM_DATA_VALID
				    | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
				ring->stats.num_rx_csum_ok++;
			}
		}

		/*
		 * Finally enqueue into the incoming receive queue
		 * to push up into the networking stack.
		 */
		if (mbufq_enqueue(mq, m) != 0) {
			ring->stats.num_enqueue_full++;
			m_freem(m);
		}
	}
	ring->next_to_clean = sw_next_to_clean;

	/* Refill the ring if needed */
	if (cleaned_count > 0) {
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING,
		    "%s: ring=%d, cleaned=%d\n",
		    __func__, queue, cleaned_count);
		(void) qcom_ess_edma_rx_ring_fill(sc, queue, cleaned_count);
		(void) qcom_ess_edma_hw_rfd_sw_cons_index_update(sc, queue,
		    ring->next_to_clean);
	}

	return (0);
}
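
/*
 * A minimal sketch of how a caller might drive this RX path.
 * The interrupt plumbing and the EDMA_RING_LOCK/EDMA_RING_UNLOCK
 * pairing are assumptions for illustration (only the LOCK_ASSERT
 * is visible in this file); the qcom_ess_edma_rx_ring_complete()
 * call and the mbufq/if_input KPIs are real:
 *
 *	struct mbufq mq;
 *	struct mbuf *m;
 *
 *	mbufq_init(&mq, EDMA_RX_RING_SIZE);
 *	EDMA_RING_LOCK(&sc->sc_rx_ring[queue]);
 *	(void) qcom_ess_edma_rx_ring_complete(sc, queue, &mq);
 *	EDMA_RING_UNLOCK(&sc->sc_rx_ring[queue]);
 *	while ((m = mbufq_dequeue(&mq)) != NULL) {
 *		if (m->m_pkthdr.rcvif != NULL)
 *			if_input(m->m_pkthdr.rcvif, m);
 *		else
 *			m_freem(m);
 *	}
 */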