// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 * Copyright (C) Siemens AG, 2024
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>

#include "icssg_prueth.h"
#include "../k3-cppi-desc-pool.h"

/* Netif debug messages possible */
#define PRUETH_EMAC_DEBUG	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR | \
				 NETIF_MSG_TX_QUEUED | \
				 NETIF_MSG_INTR | \
				 NETIF_MSG_TX_DONE | \
				 NETIF_MSG_RX_STATUS | \
				 NETIF_MSG_PKTDATA | \
				 NETIF_MSG_HW | \
				 NETIF_MSG_WOL)

#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)

void prueth_cleanup_rx_chns(struct prueth_emac *emac,
			    struct prueth_rx_chn *rx_chn,
			    int max_rflows)
{
	if (rx_chn->pg_pool) {
		page_pool_destroy(rx_chn->pg_pool);
		rx_chn->pg_pool = NULL;
	}

	if (rx_chn->desc_pool)
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (rx_chn->rx_chn)
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns);

void prueth_cleanup_tx_chns(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->desc_pool)
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		if (tx_chn->tx_chn)
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		/* Assume prueth_cleanup_tx_chns() is called at the
		 * end after all channel resources are freed
		 */
		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}
EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns);

void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->irq)
			free_irq(tx_chn->irq, tx_chn);
		netif_napi_del(&tx_chn->napi_tx);
	}
}
EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);

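/* AF_XDP zero-copy TX: push frames queued on the XSK TX ring of @q_idx
 * to the hardware queue. A reserve of MAX_SKB_FRAGS descriptors is left
 * untouched so the normal skb TX path cannot be starved. Returns the
 * number of frames submitted.
 */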
static int emac_xsk_xmit_zc(struct prueth_emac *emac,
			    unsigned int q_idx)
{
	struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx];
	struct xsk_buff_pool *pool = tx_chn->xsk_pool;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *host_desc;
	dma_addr_t dma_desc, dma_buf;
	struct prueth_swdata *swdata;
	struct xdp_desc xdp_desc;
	int num_tx = 0, pkt_len;
	int descs_avail, ret;
	u32 *epib;
	int i;

	descs_avail = k3_cppi_desc_pool_avail(tx_chn->desc_pool);
	/* Ensure that the TX ring is not filled up by XDP: MAX_SKB_FRAGS
	 * descriptors always remain available for the normal TX path,
	 * which stops the queue there if necessary.
	 */
	if (descs_avail <= MAX_SKB_FRAGS)
		return 0;

	descs_avail -= MAX_SKB_FRAGS;

	for (i = 0; i < descs_avail; i++) {
		if (!xsk_tx_peek_desc(pool, &xdp_desc))
			break;

		dma_buf = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
		pkt_len = xdp_desc.len;
		xsk_buff_raw_dma_sync_for_device(pool, dma_buf, pkt_len);

		host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (unlikely(!host_desc))
			break;

		cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
				 PRUETH_NAV_PS_DATA_SIZE);
		cppi5_hdesc_set_pkttype(host_desc, 0);
		epib = host_desc->epib;
		epib[0] = 0;
		epib[1] = 0;
		cppi5_hdesc_set_pktlen(host_desc, pkt_len);
		cppi5_desc_set_tags_ids(&host_desc->hdr, 0,
					(emac->port_id | (q_idx << 8)));

		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
		cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf,
				       pkt_len);

		swdata = cppi5_hdesc_get_swdata(host_desc);
		swdata->type = PRUETH_SWDATA_XSK;

		dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      host_desc);
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn,
					       host_desc, dma_desc);

		if (ret) {
			ndev->stats.tx_errors++;
			k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
			break;
		}

		num_tx++;
	}

	xsk_tx_release(tx_chn->xsk_pool);
	return num_tx;
}

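/* Unmap and free a (possibly chained) TX host descriptor. XSK buffers
 * are owned and mapped by the XSK pool, so for them only the descriptor
 * itself is returned to the pool.
 */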
void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
		      struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	struct prueth_swdata *swdata;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;
	swdata = cppi5_hdesc_get_swdata(first_desc);
	if (swdata->type == PRUETH_SWDATA_XSK)
		goto free_pool;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

free_pool:
	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);

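/* Reap completed TX descriptors on channel @chn: free the attached
 * skb/XDP/XSK buffer, update BQL accounting, wake the queue once enough
 * descriptors are free again and kick the XSK TX path if a pool is
 * attached. Sets *tdown when a teardown completion marker is popped.
 */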
int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
			     int budget, bool *tdown)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_tx;
	struct netdev_queue *netif_txq;
	struct prueth_swdata *swdata;
	struct prueth_tx_chn *tx_chn;
	unsigned int total_bytes = 0;
	int xsk_frames_done = 0;
	struct xdp_frame *xdpf;
	unsigned int pkt_len;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &emac->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		/* teardown completion */
		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&emac->tdown_cnt))
				complete(&emac->tdown_complete);
			*tdown = true;
			break;
		}

		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						     desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_tx);

		switch (swdata->type) {
		case PRUETH_SWDATA_SKB:
			skb = swdata->data.skb;
			dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
			total_bytes += skb->len;
			napi_consume_skb(skb, budget);
			break;
		case PRUETH_SWDATA_XDPF:
			xdpf = swdata->data.xdpf;
			dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
			total_bytes += xdpf->len;
			xdp_return_frame(xdpf);
			break;
		case PRUETH_SWDATA_XSK:
			pkt_len = cppi5_hdesc_get_pktlen(desc_tx);
			dev_sw_netstats_tx_add(ndev, 1, pkt_len);
			xsk_frames_done++;
			break;
		default:
			prueth_xmit_free(tx_chn, desc_tx);
			ndev->stats.tx_dropped++;
			continue;
		}

		prueth_xmit_free(tx_chn, desc_tx);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);
	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	if (netif_tx_queue_stopped(netif_txq)) {
		/* If the TX queue was stopped, wake it now
		 * if we have enough room.
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);
		__netif_tx_unlock(netif_txq);
	}

	if (tx_chn->xsk_pool) {
		if (xsk_frames_done)
			xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done);

		if (xsk_uses_need_wakeup(tx_chn->xsk_pool))
			xsk_set_tx_need_wakeup(tx_chn->xsk_pool);

		netif_txq = netdev_get_tx_queue(ndev, chn);
		txq_trans_cond_update(netif_txq);
		emac_xsk_xmit_zc(emac, chn);
	}

	return num_tx;
}

static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
{
	struct prueth_tx_chn *tx_chns =
		container_of(timer, struct prueth_tx_chn, tx_hrtimer);

	if (tx_chns->irq_disabled) {
		tx_chns->irq_disabled = false;
		enable_irq(tx_chns->irq);
	}
	return HRTIMER_NORESTART;
}

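/* NAPI TX poll: complete up to @budget packets. With TX pacing enabled
 * (tx_pace_timeout_ns != 0) the TX IRQ is re-enabled from the hrtimer
 * callback instead of immediately, batching completion interrupts.
 */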
static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
	struct prueth_emac *emac = tx_chn->emac;
	bool tdown = false;
	int num_tx_packets;

	num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget,
						  &tdown);

	if (num_tx_packets >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx_packets)) {
		if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) {
			hrtimer_start(&tx_chn->tx_hrtimer,
				      ns_to_ktime(tx_chn->tx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			if (tx_chn->irq_disabled) {
				tx_chn->irq_disabled = false;
				enable_irq(tx_chn->irq);
			}
		}
	}

	return num_tx_packets;
}

static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
	struct prueth_tx_chn *tx_chn = dev_id;

	tx_chn->irq_disabled = true;
	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int i, ret;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
		hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_PINNED);
		ret = request_irq(tx_chn->irq, prueth_tx_irq,
				  IRQF_TRIGGER_HIGH, tx_chn->name,
				  tx_chn);
		if (ret) {
			netif_napi_del(&tx_chn->napi_tx);
			dev_err(prueth->dev, "unable to request TX IRQ %d\n",
				tx_chn->irq);
			goto fail;
		}
	}

	return 0;
fail:
	prueth_ndev_del_tx_napi(emac, i);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi);

int prueth_init_tx_chns(struct prueth_emac *emac)
{
	static const struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0,
		.size = PRUETH_MAX_TX_DESC,
	};
	struct k3_udma_glue_tx_channel_cfg tx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	int ret, slice, i;
	u32 hdesc_size;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	init_completion(&emac->tdown_complete);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&tx_cfg, 0, sizeof(tx_cfg));
	tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		/* To differentiate channels for SLICE0 vs SLICE1 */
		snprintf(tx_chn->name, sizeof(tx_chn->name),
			 "tx%d-%d", slice, i);

		tx_chn->emac = emac;
		tx_chn->id = i;
		tx_chn->descs_num = PRUETH_MAX_TX_DESC;

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev, tx_chn->name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = PTR_ERR(tx_chn->tx_chn);
			tx_chn->tx_chn = NULL;
			netdev_err(ndev,
				   "Failed to request tx dma ch: %d\n", ret);
			goto fail;
		}

		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			tx_chn->desc_pool = NULL;
			netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
			goto fail;
		}

		ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (ret < 0) {
			netdev_err(ndev, "failed to get tx irq\n");
			goto fail;
		}
		tx_chn->irq = ret;

		snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	return 0;

fail:
	prueth_cleanup_tx_chns(emac);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_init_tx_chns);

static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
						 struct device *dma_dev,
						 int size)
{
	struct page_pool_params pp_params = { 0 };
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = size;
	pp_params.nid = dev_to_node(emac->prueth->dev);
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = dma_dev;
	pp_params.napi = &emac->napi_rx;
	pp_params.max_len = PAGE_SIZE;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		netdev_err(emac->ndev, "cannot create rx page pool\n");

	return pool;
}

int prueth_init_rx_chns(struct prueth_emac *emac,
			struct prueth_rx_chn *rx_chn,
			char *name, u32 max_rflows,
			u32 max_desc_num)
{
	struct k3_udma_glue_rx_channel_cfg rx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	u32 fdqring_id, hdesc_size;
	struct page_pool *pool;
	int i, ret = 0, slice;
	int flow_id_base;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	/* To differentiate channels for SLICE0 vs SLICE1 */
	snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&rx_cfg, 0, sizeof(rx_cfg));
	rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = max_rflows;
	rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
						     &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = PTR_ERR(rx_chn->rx_chn);
		rx_chn->rx_chn = NULL;
		netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
		goto fail;
	}

	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size,
							  rx_chn->name);
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		rx_chn->desc_pool = NULL;
		netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
		goto fail;
	}

	pool = prueth_create_page_pool(emac, rx_chn->dma_dev, rx_chn->descs_num);
	if (IS_ERR(pool)) {
		ret = PTR_ERR(pool);
		goto fail;
	}

	rx_chn->pg_pool = pool;

	flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
		emac->rx_mgm_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base);
	} else {
		emac->rx_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "flow id base = %d\n", flow_id_base);
	}

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			netdev_err(ndev, "Failed to init rx flow%d %d\n",
				   i, ret);
			goto fail;
		}
		if (!i)
			fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								     i);
		ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
		if (ret < 0) {
			netdev_err(ndev, "Failed to get rx dma irq\n");
			goto fail;
		}
		rx_chn->irq[i] = ret;
	}

	return 0;

fail:
	prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_init_rx_chns);

int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
			      struct prueth_rx_chn *rx_chn,
			      struct page *page, u32 buf_len)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;

	buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	swdata->type = PRUETH_SWDATA_PAGE;
	swdata->data.page = page;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
					desc_rx, desc_dma);
}
EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);

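/* Convert the split IEP timestamp words into nanoseconds: @lo carries the
 * sub-cycle IEP count in its low 20 bits and the lower cycle-count bits
 * above them, @hi the remaining cycle-count bits and the rollover count,
 * and @hi_sw is a software-maintained upper word read from shared RAM.
 * The assembled cycle count is scaled by @cycle_time_ns and the sub-cycle
 * count added on top.
 */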
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
	u32 iepcount_lo, iepcount_hi, hi_rollover_count;
	u64 ns;

	iepcount_lo = lo & GENMASK(19, 0);
	iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
	hi_rollover_count = hi >> 11;

	ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
	ns = ns * cycle_time_ns + iepcount_lo;

	return ns;
}
EXPORT_SYMBOL_GPL(icssg_ts_to_ns);

void emac_rx_timestamp(struct prueth_emac *emac,
		       struct sk_buff *skb, u32 *psdata)
{
	struct skb_shared_hwtstamps *ssh;
	u64 ns;

	if (emac->is_sr1) {
		ns = (u64)psdata[1] << 32 | psdata[0];
	} else {
		u32 hi_sw = readl(emac->prueth->shram.va +
				  TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
				    IEP_DEFAULT_CYCLE_TIME_NS);
	}

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}

/**
 * emac_xmit_xdp_frame - transmits an XDP frame
 * @emac: emac device
 * @xdpf: data to transmit
 * @q_idx: queue id
 * @buff_type: type of buffer to be transmitted
 *
 * Return: XDP state
 */
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
			struct xdp_frame *xdpf,
			unsigned int q_idx,
			enum prueth_tx_buff_type buff_type)
{
	struct cppi5_host_desc_t *first_desc;
	struct net_device *ndev = emac->ndev;
	struct netdev_queue *netif_txq;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	struct prueth_swdata *swdata;
	struct page *page;
	u32 *epib;
	int ret;

	if (q_idx >= PRUETH_MAX_TX_QUEUES) {
		netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
		return ICSSG_XDP_CONSUMED;	/* drop */
	}

	tx_chn = &emac->tx_chns[q_idx];

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
		return ICSSG_XDP_CONSUMED;	/* drop */
	}

	if (buff_type == PRUETH_TX_BUFF_TYPE_XDP_TX) { /* already DMA mapped by page_pool */
		page = virt_to_head_page(xdpf->data);
		if (unlikely(!page)) {
			netdev_err(ndev, "xdp tx: failed to get page from xdpf\n");
			goto drop_free_descs;
		}
		buf_dma = page_pool_get_dma_addr(page);
		buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
	} else { /* Map the linear buffer */
		buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "xdp tx: failed to map data buffer\n");
			goto drop_free_descs;	/* drop */
		}
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation
	 */
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	swdata->type = PRUETH_SWDATA_XDPF;
	swdata->data.xdpf = xdpf;

	/* Report BQL before sending the packet */
	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
	netdev_tx_sent_queue(netif_txq, xdpf->len);

	cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);

	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
		netdev_tx_completed_queue(netif_txq, 1, xdpf->len);
		goto drop_free_descs;
	}

	return ICSSG_XDP_TX;

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);
	return ICSSG_XDP_CONSUMED;
}
EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);

/**
 * emac_run_xdp - run an XDP program
 * @emac: emac device
 * @xdp: XDP buffer containing the frame
 * @len: Rx descriptor packet length
 *
 * Return: XDP state
 */
static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *len)
{
	struct net_device *ndev = emac->ndev;
	struct netdev_queue *netif_txq;
	int cpu = smp_processor_id();
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 pkt_len = *len;
	u32 act, result;
	int q_idx, err;

	xdp_prog = READ_ONCE(emac->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICSSG_XDP_PASS;
	case XDP_TX:
		/* Send packet to TX ring for immediate transmission */
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			ndev->stats.tx_dropped++;
			goto drop;
		}

		q_idx = cpu % emac->tx_ch_num;
		netif_txq = netdev_get_tx_queue(ndev, q_idx);
		__netif_tx_lock(netif_txq, cpu);
		result = emac_xmit_xdp_frame(emac, xdpf, q_idx,
					     PRUETH_TX_BUFF_TYPE_XDP_TX);
		__netif_tx_unlock(netif_txq);
		if (result == ICSSG_XDP_CONSUMED) {
			ndev->stats.tx_dropped++;
			goto drop;
		}

		dev_sw_netstats_rx_add(ndev, xdpf->len);
		return result;
	case XDP_REDIRECT:
		err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
		if (err)
			goto drop;

		dev_sw_netstats_rx_add(ndev, pkt_len);
		return ICSSG_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
drop:
		trace_xdp_exception(emac->ndev, xdp_prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_DROP:
		ndev->stats.rx_dropped++;
		return ICSSG_XDP_CONSUMED;
	}
}

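/* Zero-copy variant of the RX push: the buffer comes from the XSK pool
 * and is already DMA mapped, so only a descriptor is allocated and
 * attached here.
 */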
static int prueth_dma_rx_push_mapped_zc(struct prueth_emac *emac,
					struct prueth_rx_chn *rx_chn,
					struct xdp_buff *xdp)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;
	int buf_len;

	buf_dma = xsk_buff_xdp_get_dma(xdp);
	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	swdata->type = PRUETH_SWDATA_XSK;
	swdata->data.xdp = xdp;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
					desc_rx, desc_dma);
}

static int prueth_rx_alloc_zc(struct prueth_emac *emac, int budget)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	struct xdp_buff *xdp;
	int i, ret;

	for (i = 0; i < budget; i++) {
		xdp = xsk_buff_alloc(rx_chn->xsk_pool);
		if (!xdp)
			break;

		ret = prueth_dma_rx_push_mapped_zc(emac, rx_chn, xdp);
		if (ret) {
			netdev_err(emac->ndev, "rx alloc: failed to map descriptors to xdp buff\n");
			xsk_buff_free(xdp);
			break;
		}
	}

	return i;
}

static void emac_dispatch_skb_zc(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *psdata)
{
	unsigned int headroom = xdp->data - xdp->data_hard_start;
	unsigned int pkt_len = xdp->data_end - xdp->data;
	struct net_device *ndev = emac->ndev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&emac->napi_rx, xdp->data_end - xdp->data_hard_start);
	if (unlikely(!skb)) {
		ndev->stats.rx_dropped++;
		return;
	}

	skb_reserve(skb, headroom);
	/* copy the frame out of the XSK buffer; the caller frees it */
	skb_put_data(skb, xdp->data, pkt_len);
	skb->dev = ndev;

	/* RX HW timestamp */
	if (emac->rx_ts_enabled)
		emac_rx_timestamp(emac, skb, psdata);

	if (emac->prueth->is_switch_mode)
		skb->offload_fwd_mark = emac->offload_fwd_mark;
	skb->protocol = eth_type_trans(skb, ndev);

	skb_mark_for_recycle(skb);
	napi_gro_receive(&emac->napi_rx, skb);
	ndev->stats.rx_bytes += pkt_len;
	ndev->stats.rx_packets++;
}

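/* Zero-copy RX: pop up to @budget completed descriptors from @flow_id,
 * run the XDP program (if any) on each XSK buffer, and pass, transmit,
 * redirect or drop the frame accordingly; the PASS case copies the frame
 * into an skb. Consumed buffers are replenished from the XSK pool.
 */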
static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id,
			     int budget)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	dma_addr_t desc_dma, buf_dma;
	struct xdp_buff *xdp;
	int xdp_status = 0;
	int count = 0;
	u32 *psdata;
	int ret;

	while (count < budget) {
		ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				netdev_err(ndev, "rx pop: failed: %d\n", ret);
			break;
		}

		if (cppi5_desc_is_tdcm(desc_dma)) {
			complete(&emac->tdown_complete);
			break;
		}

		desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_rx);
		if (swdata->type != PRUETH_SWDATA_XSK) {
			netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
			k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
			break;
		}

		xdp = swdata->data.xdp;
		cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
		k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
		pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
		/* firmware adds 4 CRC bytes, strip them */
		pkt_len -= 4;
		cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
		psdata = cppi5_hdesc_get_psdata(desc_rx);
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		count++;
		xsk_buff_set_size(xdp, pkt_len);
		xsk_buff_dma_sync_for_cpu(xdp);

		if (prueth_xdp_is_enabled(emac)) {
			ret = emac_run_xdp(emac, xdp, &pkt_len);
			switch (ret) {
			case ICSSG_XDP_PASS:
				/* prepare skb and send to n/w stack */
				emac_dispatch_skb_zc(emac, xdp, psdata);
				xsk_buff_free(xdp);
				break;
			case ICSSG_XDP_CONSUMED:
				xsk_buff_free(xdp);
				break;
			case ICSSG_XDP_TX:
			case ICSSG_XDP_REDIR:
				xdp_status |= ret;
				break;
			}
		} else {
			/* prepare skb and send to n/w stack */
			emac_dispatch_skb_zc(emac, xdp, psdata);
			xsk_buff_free(xdp);
		}
	}

	if (xdp_status & ICSSG_XDP_REDIR)
		xdp_do_flush();

	/* Allocate xsk buffers from the pool for the "count" number of
	 * packets processed in order to be able to receive more packets.
	 */
	ret = prueth_rx_alloc_zc(emac, count);

	if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) {
		/* If user space doesn't provide enough buffers then it must
		 * explicitly wake up the kernel when new buffers are available
		 */
		if (ret < count)
			xsk_set_rx_need_wakeup(rx_chn->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_chn->xsk_pool);
	}

	return count;
}

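/* Copy-mode RX: pop one completed descriptor from @flow_id, run XDP if
 * a program is attached, then build an skb around the page_pool page and
 * hand it up the stack. A fresh page is pushed back to the free queue;
 * if none can be allocated the old page is requeued and the packet
 * dropped.
 */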
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	dma_addr_t desc_dma, buf_dma;
	struct page *page, *new_page;
	struct page_pool *pool;
	struct sk_buff *skb;
	struct xdp_buff xdp;
	int headroom, ret;
	u32 *psdata;
	void *pa;

	*xdp_state = 0;
	pool = rx_chn->pg_pool;
	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			netdev_err(ndev, "rx pop: failed: %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) {
		complete(&emac->tdown_complete);
		return 0;
	}

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	if (swdata->type != PRUETH_SWDATA_PAGE) {
		netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		return 0;
	}

	page = swdata->data.page;
	page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	/* firmware adds 4 CRC bytes, strip them */
	pkt_len -= 4;
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	/* if allocation fails we drop the packet but push the
	 * descriptor back to the ring with old page to prevent a stall
	 */
	new_page = page_pool_dev_alloc_pages(pool);
	if (unlikely(!new_page)) {
		new_page = page;
		ndev->stats.rx_dropped++;
		goto requeue;
	}

	pa = page_address(page);
	if (prueth_xdp_is_enabled(emac)) {
		xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
		xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);

		*xdp_state = emac_run_xdp(emac, &xdp, &pkt_len);
		if (*xdp_state == ICSSG_XDP_CONSUMED) {
			page_pool_recycle_direct(pool, page);
			goto requeue;
		}

		if (*xdp_state != ICSSG_XDP_PASS)
			goto requeue;
		headroom = xdp.data - xdp.data_hard_start;
		pkt_len = xdp.data_end - xdp.data;
	} else {
		headroom = PRUETH_HEADROOM;
	}

	/* prepare skb and send to n/w stack */
	skb = napi_build_skb(pa, PAGE_SIZE);
	if (!skb) {
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(pool, page);
		goto requeue;
	}

	skb_reserve(skb, headroom);
	skb_put(skb, pkt_len);
	skb->dev = ndev;

	psdata = cppi5_hdesc_get_psdata(desc_rx);
	/* RX HW timestamp */
	if (emac->rx_ts_enabled)
		emac_rx_timestamp(emac, skb, psdata);

	if (emac->prueth->is_switch_mode)
		skb->offload_fwd_mark = emac->offload_fwd_mark;
	skb->protocol = eth_type_trans(skb, ndev);

	skb_mark_for_recycle(skb);
	napi_gro_receive(&emac->napi_rx, skb);
	ndev->stats.rx_bytes += pkt_len;
	ndev->stats.rx_packets++;

requeue:
	/* queue another RX DMA */
	ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
					PRUETH_MAX_PKT_SIZE);
	if (WARN_ON(ret < 0)) {
		page_pool_recycle_direct(pool, new_page);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

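/* Channel-reset callback: release the buffer still attached to a queued
 * RX descriptor (XSK buffer or page_pool page) and return the descriptor
 * to the pool.
 */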
void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	struct page_pool *pool;
	struct xdp_buff *xdp;
	struct page *page;

	pool = rx_chn->pg_pool;
	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	if (rx_chn->xsk_pool) {
		xdp = swdata->data.xdp;
		xsk_buff_free(xdp);
	} else {
		page = swdata->data.page;
		page_pool_recycle_direct(pool, page);
	}

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
}
EXPORT_SYMBOL_GPL(prueth_rx_cleanup);

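/* Reserve a free TX timestamp slot and return its index, or -EBUSY when
 * all PRUETH_MAX_TX_TS_REQUESTS slots are in flight. The slot holds an
 * ERR_PTR sentinel until the caller attaches the real skb.
 */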
static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
{
	int i;

	/* search and get the next free slot */
	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (!emac->tx_ts_skb[i]) {
			emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
			return i;
		}
	}

	return -EBUSY;
}

/**
 * icssg_ndo_start_xmit - EMAC Transmit function
 * @skb: SKB pointer
 * @ndev: EMAC network adapter
 *
 * Called by the system to transmit a packet - we queue the packet in
 * EMAC hardware transmit queue.
 * Doesn't wait for completion; we'll check for TX completion in
 * emac_tx_complete_packets().
 *
 * Return: enum netdev_tx
 */
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct netdev_queue *netif_txq;
	struct prueth_swdata *swdata;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	u32 pkt_len, dst_tag_id;
	int i, ret = 0, q_idx;
	bool in_tx_ts = false;
	int tx_ts_cookie;
	u32 *epib;

	pkt_len = skb_headlen(skb);
	q_idx = skb_get_queue_mapping(skb);

	tx_chn = &emac->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
		netdev_err(ndev, "tx: failed to map skb buffer\n");
		ret = NETDEV_TX_OK;
		goto drop_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto drop_stop_q_busy;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    emac->tx_ts_enabled) {
		tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
		if (tx_ts_cookie >= 0) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			/* Request TX timestamp */
			epib[0] = (u32)tx_ts_cookie;
			epib[1] = 0x80000000;	/* TX TS request */
			emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
			in_tx_ts = true;
		}
	}

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation and port num 0
	 * for undirected packets in case of HSR offload mode
	 */
	dst_tag_id = emac->port_id | (q_idx << 8);

	if (prueth->is_hsr_offload_mode &&
	    (ndev->features & NETIF_F_HW_HSR_DUP))
		dst_tag_id = PRUETH_UNDIRECTED_PKT_DST_TAG;

	if (prueth->is_hsr_offload_mode &&
	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
		epib[1] |= PRUETH_UNDIRECTED_PKT_TAG_INS;

	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, dst_tag_id);
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	swdata->type = PRUETH_SWDATA_SKB;
	swdata->data.skb = skb;

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			netdev_err(ndev,
				   "tx: failed to allocate frag. descriptor\n");
			goto free_desc_stop_q_busy_cleanup_tx_ts;
		}

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "tx: Failed to map skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ret = NETDEV_TX_OK;
			goto cleanup_tx_ts;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON_ONCE(pkt_len != skb->len);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	/* cppi5_desc_dump(first_desc, 64); */

	skb_tx_timestamp(skb);	/* SW timestamp if SKBTX_IN_PROGRESS not set */
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "tx: push failed: %d\n", ret);
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		goto drop_free_descs;
	}

	if (in_tx_ts)
		atomic_inc(&emac->tx_ts_pending);

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue is visible to other CPUs */
		smp_mb__after_atomic();

		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS)
			netif_tx_wake_queue(netif_txq);
	}

	return NETDEV_TX_OK;

cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);

drop_free_skb:
	dev_kfree_skb_any(skb);

	/* error */
	ndev->stats.tx_dropped++;
	netdev_err(ndev, "tx: error: %d\n", ret);

	return ret;

free_desc_stop_q_busy_cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}
	prueth_xmit_free(tx_chn, first_desc);

drop_stop_q_busy:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);

void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct xsk_buff_pool *xsk_pool;
	struct prueth_swdata *swdata;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);

	switch (swdata->type) {
	case PRUETH_SWDATA_SKB:
		skb = swdata->data.skb;
		dev_kfree_skb_any(skb);
		break;
	case PRUETH_SWDATA_XDPF:
		xdpf = swdata->data.xdpf;
		xdp_return_frame(xdpf);
		break;
	case PRUETH_SWDATA_XSK:
		xsk_pool = tx_chn->xsk_pool;
		xsk_tx_completed(xsk_pool, 1);
		break;
	default:
		break;
	}

	prueth_xmit_free(tx_chn, desc_tx);
}
EXPORT_SYMBOL_GPL(prueth_tx_cleanup);

irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	emac->rx_chns.irq_disabled = true;
	disable_irq_nosync(irq);
	napi_schedule(&emac->napi_rx);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);

void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (emac->tx_ts_skb[i]) {
			dev_kfree_skb_any(emac->tx_ts_skb[i]);
			emac->tx_ts_skb[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts);

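/* NAPI RX poll: service the port's RX flows within @budget, using the
 * zero-copy path when an XSK pool is attached and the page_pool path
 * otherwise. The RX IRQ is re-enabled directly or, with RX pacing
 * enabled, from the rx_hrtimer callback.
 */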
int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
	int rx_flow = emac->is_sr1 ?
		PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
	int flow = emac->is_sr1 ?
		PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	int xdp_state_or = 0;
	int num_rx = 0;
	int cur_budget;
	u32 xdp_state;
	int ret;

	while (flow--) {
		if (rx_chn->xsk_pool) {
			num_rx = emac_rx_packet_zc(emac, flow, budget);
		} else {
			cur_budget = budget - num_rx;

			while (cur_budget--) {
				ret = emac_rx_packet(emac, flow, &xdp_state);
				xdp_state_or |= xdp_state;
				if (ret)
					break;
				num_rx++;
			}
		}

		if (num_rx >= budget)
			break;
	}

	if (xdp_state_or & ICSSG_XDP_REDIR)
		xdp_do_flush();

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
		if (unlikely(emac->rx_pace_timeout_ns)) {
			hrtimer_start(&emac->rx_hrtimer,
				      ns_to_ktime(emac->rx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			if (emac->rx_chns.irq_disabled) {
				/* re-enable the RX IRQ */
				emac->rx_chns.irq_disabled = false;
				enable_irq(emac->rx_chns.irq[rx_flow]);
			}
		}
	}

	return num_rx;
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);

int prueth_prepare_rx_chan(struct prueth_emac *emac,
			   struct prueth_rx_chn *chn,
			   int buf_size)
{
	struct page *page;
	int desc_avail;
	int i, ret;

	desc_avail = k3_cppi_desc_pool_avail(chn->desc_pool);
	if (desc_avail < chn->descs_num)
		netdev_warn(emac->ndev,
			    "not enough RX descriptors available %d < %d\n",
			    desc_avail, chn->descs_num);

	if (chn->xsk_pool) {
		/* get buffers from the xsk_pool and push as many as
		 * possible to the RX ring
		 */
		ret = prueth_rx_alloc_zc(emac, desc_avail);
		if (!ret)
			goto recycle_alloc_pg;
	} else {
		for (i = 0; i < desc_avail; i++) {
			/* NOTE: we're not using memory efficiently here.
			 * 1 full page (4KB?) used here instead of
			 * PRUETH_MAX_PKT_SIZE (~1.5KB?)
			 */
			page = page_pool_dev_alloc_pages(chn->pg_pool);
			if (!page) {
				netdev_err(emac->ndev, "couldn't allocate rx page\n");
				ret = -ENOMEM;
				goto recycle_alloc_pg;
			}

			ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
			if (ret < 0) {
				netdev_err(emac->ndev,
					   "cannot submit page for rx chan %s ret %d\n",
					   chn->name, ret);
				page_pool_recycle_direct(chn->pg_pool, page);
				goto recycle_alloc_pg;
			}
		}
	}

	return 0;

recycle_alloc_pg:
	prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);

	return ret;
}
EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);

void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
			  bool free_skb)
{
	int i;

	for (i = 0; i < ch_num; i++) {
		if (free_skb)
			k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
						  &emac->tx_chns[i],
						  prueth_tx_cleanup);
		k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
	}
}
EXPORT_SYMBOL_GPL(prueth_reset_tx_chan);

void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
			  int num_flows, bool disable)
{
	int i;

	for (i = 0; i < num_flows; i++)
		k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
					  prueth_rx_cleanup);
	if (disable)
		k3_udma_glue_disable_rx_chn(chn->rx_chn);
}
EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);

void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	ndev->stats.tx_errors++;
}
EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);

int icssg_ndo_set_ts_config(struct net_device *ndev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		emac->tx_ts_enabled = 0;
		break;
	case HWTSTAMP_TX_ON:
		emac->tx_ts_enabled = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		emac->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		emac->rx_ts_enabled = 1;
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_ndo_set_ts_config);

int icssg_ndo_get_ts_config(struct net_device *ndev,
			    struct kernel_hwtstamp_config *config)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	config->flags = 0;
	config->tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config->rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_ts_config);

void icssg_ndo_get_stats64(struct net_device *ndev,
			   struct rtnl_link_stats64 *stats)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	emac_update_hardware_stats(emac);

	stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
	stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
	stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
	stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
	stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
	stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
	stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");

	stats->rx_errors = ndev->stats.rx_errors +
			   emac_get_stat_by_name(emac, "FW_RX_ERROR") +
			   emac_get_stat_by_name(emac, "FW_RX_EOF_SHORT_FRMERR") +
			   emac_get_stat_by_name(emac, "FW_RX_B0_DROP_EARLY_EOF") +
			   emac_get_stat_by_name(emac, "FW_RX_EXP_FRAG_Q_DROP") +
			   emac_get_stat_by_name(emac, "FW_RX_FIFO_OVERRUN");
	stats->rx_dropped = ndev->stats.rx_dropped +
			    emac_get_stat_by_name(emac, "FW_DROPPED_PKT") +
			    emac_get_stat_by_name(emac, "FW_INF_PORT_DISABLED") +
			    emac_get_stat_by_name(emac, "FW_INF_SAV") +
			    emac_get_stat_by_name(emac, "FW_INF_SA_DL") +
			    emac_get_stat_by_name(emac, "FW_INF_PORT_BLOCKED") +
			    emac_get_stat_by_name(emac, "FW_INF_DROP_TAGGED") +
			    emac_get_stat_by_name(emac, "FW_INF_DROP_PRIOTAGGED") +
			    emac_get_stat_by_name(emac, "FW_INF_DROP_NOTAG") +
			    emac_get_stat_by_name(emac, "FW_INF_DROP_NOTMEMBER");
	stats->tx_errors = ndev->stats.tx_errors;
	stats->tx_dropped = ndev->stats.tx_dropped +
			    emac_get_stat_by_name(emac, "FW_RTU_PKT_DROP") +
			    emac_get_stat_by_name(emac, "FW_TX_DROPPED_PACKET") +
			    emac_get_stat_by_name(emac, "FW_TX_TS_DROPPED_PACKET") +
			    emac_get_stat_by_name(emac, "FW_TX_JUMBO_FRM_CUTOFF");
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);

int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
				 size_t len)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret;

	ret = snprintf(name, len, "p%d", emac->port_id);
	if (ret >= len)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name);

/* get emac_port corresponding to eth_node name */
int prueth_node_port(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_PORT_MII0;
	else if (port_id == 1)
		return PRUETH_PORT_MII1;
	else
		return PRUETH_PORT_INVALID;
}
EXPORT_SYMBOL_GPL(prueth_node_port);

/* get MAC instance corresponding to eth_node name */
int prueth_node_mac(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_MAC0;
	else if (port_id == 1)
		return PRUETH_MAC1;
	else
		return PRUETH_MAC_INVALID;
}
EXPORT_SYMBOL_GPL(prueth_node_mac);

void prueth_netdev_exit(struct prueth *prueth,
			struct device_node *eth_node)
{
	struct prueth_emac *emac;
	enum prueth_mac mac;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return;

	emac = prueth->emac[mac];
	if (!emac)
		return;

	if (of_phy_is_fixed_link(emac->phy_node))
		of_phy_deregister_fixed_link(emac->phy_node);

	netif_napi_del(&emac->napi_rx);

	pruss_release_mem_region(prueth->pruss, &emac->dram);
	free_netdev(emac->ndev);
	prueth->emac[mac] = NULL;
}
EXPORT_SYMBOL_GPL(prueth_netdev_exit);

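/* Acquire the PRU remoteproc cores for @slice. The rproc index passed to
 * pru_rproc_get() assumes consecutive per-slice entries in the device
 * tree: PRU and RTU per slice on SR1.0 silicon, PRU, RTU and TX_PRU per
 * slice otherwise.
 */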
int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
{
	struct device *dev = prueth->dev;
	enum pruss_pru_id pruss_id;
	struct device_node *np;
	int idx = -1, ret;

	np = dev->of_node;

	switch (slice) {
	case ICSS_SLICE0:
		idx = 0;
		break;
	case ICSS_SLICE1:
		idx = is_sr1 ? 2 : 3;
		break;
	default:
		return -EINVAL;
	}

	prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
	if (IS_ERR(prueth->pru[slice])) {
		ret = PTR_ERR(prueth->pru[slice]);
		prueth->pru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
	}
	prueth->pru_id[slice] = pruss_id;

	idx++;
	prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->rtu[slice])) {
		ret = PTR_ERR(prueth->rtu[slice]);
		prueth->rtu[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
	}

	if (is_sr1)
		return 0;

	idx++;
	prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->txpru[slice])) {
		ret = PTR_ERR(prueth->txpru[slice]);
		prueth->txpru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(prueth_get_cores);

void prueth_put_cores(struct prueth *prueth, int slice)
{
	if (prueth->txpru[slice])
		pru_rproc_put(prueth->txpru[slice]);

	if (prueth->rtu[slice])
		pru_rproc_put(prueth->rtu[slice]);

	if (prueth->pru[slice])
		pru_rproc_put(prueth->pru[slice]);
}
EXPORT_SYMBOL_GPL(prueth_put_cores);

#ifdef CONFIG_PM_SLEEP
static int prueth_suspend(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			ret = ndev->netdev_ops->ndo_stop(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to stop: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}

static int prueth_resume(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			ret = ndev->netdev_ops->ndo_open(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to start: %d\n", ret);
				return ret;
			}
			netif_device_attach(ndev);
		}
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

const struct dev_pm_ops prueth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
};
EXPORT_SYMBOL_GPL(prueth_dev_pm_ops);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module");
MODULE_LICENSE("GPL");