1 // SPDX-License-Identifier: GPL-2.0
2
3 /* Texas Instruments ICSSG Ethernet Driver
4 *
5 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6 * Copyright (C) Siemens AG, 2024
7 *
8 */
9
10 #include <linux/dma-mapping.h>
11 #include <linux/dma/ti-cppi5.h>
12 #include <linux/etherdevice.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/phy.h>
18 #include <linux/remoteproc/pruss.h>
19 #include <linux/regmap.h>
20 #include <linux/remoteproc.h>
21
22 #include "icssg_prueth.h"
23 #include "../k3-cppi-desc-pool.h"
24
25 /* Netif debug messages possible */
26 #define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \
27 NETIF_MSG_PROBE | \
28 NETIF_MSG_LINK | \
29 NETIF_MSG_TIMER | \
30 NETIF_MSG_IFDOWN | \
31 NETIF_MSG_IFUP | \
32 NETIF_MSG_RX_ERR | \
33 NETIF_MSG_TX_ERR | \
34 NETIF_MSG_TX_QUEUED | \
35 NETIF_MSG_INTR | \
36 NETIF_MSG_TX_DONE | \
37 NETIF_MSG_RX_STATUS | \
38 NETIF_MSG_PKTDATA | \
39 NETIF_MSG_HW | \
40 NETIF_MSG_WOL)
41
42 #define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)
43
44 void prueth_cleanup_rx_chns(struct prueth_emac *emac,
45 struct prueth_rx_chn *rx_chn,
46 int max_rflows)
47 {
48 if (rx_chn->pg_pool) {
49 page_pool_destroy(rx_chn->pg_pool);
50 rx_chn->pg_pool = NULL;
51 }
52
53 if (rx_chn->desc_pool)
54 k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
55
56 if (rx_chn->rx_chn)
57 k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
58 }
59 EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns);
60
61 void prueth_cleanup_tx_chns(struct prueth_emac *emac)
62 {
63 int i;
64
65 for (i = 0; i < emac->tx_ch_num; i++) {
66 struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
67
68 if (tx_chn->desc_pool)
69 k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
70
71 if (tx_chn->tx_chn)
72 k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
73
74 /* Assume prueth_cleanup_tx_chns() is called at the
75 * end after all channel resources are freed
76 */
77 memset(tx_chn, 0, sizeof(*tx_chn));
78 }
79 }
80 EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns);
81
82 void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
83 {
84 int i;
85
86 for (i = 0; i < num; i++) {
87 struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
88
89 if (tx_chn->irq)
90 free_irq(tx_chn->irq, tx_chn);
91 netif_napi_del(&tx_chn->napi_tx);
92 }
93 }
94 EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
95
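/* Unmap and free a (possibly chained) TX host descriptor: the first
 * descriptor carries the linear buffer (mapped with dma_map_single()),
 * each linked hbdesc carries one page fragment (mapped with
 * dma_map_page()); every descriptor is returned to the pool.
 */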
96 void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
97 struct cppi5_host_desc_t *desc)
98 {
99 struct cppi5_host_desc_t *first_desc, *next_desc;
100 dma_addr_t buf_dma, next_desc_dma;
101 u32 buf_dma_len;
102
103 first_desc = desc;
104 next_desc = first_desc;
105
106 cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
107 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
108
109 dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
110 DMA_TO_DEVICE);
111
112 next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
113 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
114 while (next_desc_dma) {
115 next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
116 next_desc_dma);
117 cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
118 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
119
120 dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
121 DMA_TO_DEVICE);
122
123 next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
124 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
125
126 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
127 }
128
129 k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
130 }
131 EXPORT_SYMBOL_GPL(prueth_xmit_free);
132
133 int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
134 int budget, bool *tdown)
135 {
136 struct net_device *ndev = emac->ndev;
137 struct cppi5_host_desc_t *desc_tx;
138 struct netdev_queue *netif_txq;
139 struct prueth_swdata *swdata;
140 struct prueth_tx_chn *tx_chn;
141 unsigned int total_bytes = 0;
142 struct xdp_frame *xdpf;
143 struct sk_buff *skb;
144 dma_addr_t desc_dma;
145 int res, num_tx = 0;
146
147 tx_chn = &emac->tx_chns[chn];
148
149 while (true) {
150 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
151 if (res == -ENODATA)
152 break;
153
154 /* teardown completion */
155 if (cppi5_desc_is_tdcm(desc_dma)) {
156 if (atomic_dec_and_test(&emac->tdown_cnt))
157 complete(&emac->tdown_complete);
158 *tdown = true;
159 break;
160 }
161
162 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
163 desc_dma);
164 swdata = cppi5_hdesc_get_swdata(desc_tx);
165
166 switch (swdata->type) {
167 case PRUETH_SWDATA_SKB:
168 skb = swdata->data.skb;
169 dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
170 total_bytes += skb->len;
171 napi_consume_skb(skb, budget);
172 break;
173 case PRUETH_SWDATA_XDPF:
174 xdpf = swdata->data.xdpf;
175 dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
176 total_bytes += xdpf->len;
177 xdp_return_frame(xdpf);
178 break;
179 default:
180 prueth_xmit_free(tx_chn, desc_tx);
181 ndev->stats.tx_dropped++;
182 continue;
183 }
184
185 prueth_xmit_free(tx_chn, desc_tx);
186 num_tx++;
187 }
188
189 if (!num_tx)
190 return 0;
191
192 netif_txq = netdev_get_tx_queue(ndev, chn);
193 netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
194
195 if (netif_tx_queue_stopped(netif_txq)) {
196 /* If the TX queue was stopped, wake it now
197 * if we have enough room.
198 */
199 __netif_tx_lock(netif_txq, smp_processor_id());
200 if (netif_running(ndev) &&
201 (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
202 MAX_SKB_FRAGS))
203 netif_tx_wake_queue(netif_txq);
204 __netif_tx_unlock(netif_txq);
205 }
206
207 return num_tx;
208 }
209
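/* TX completion interrupt pacing: when tx_pace_timeout_ns is set,
 * emac_napi_tx_poll() leaves the channel IRQ disabled on NAPI completion
 * (except during teardown) and arms tx_hrtimer instead; this callback
 * re-enables the IRQ once the pacing interval has elapsed.
 */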
210 static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
211 {
212 struct prueth_tx_chn *tx_chns =
213 container_of(timer, struct prueth_tx_chn, tx_hrtimer);
214
215 enable_irq(tx_chns->irq);
216 return HRTIMER_NORESTART;
217 }
218
219 static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
220 {
221 struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
222 struct prueth_emac *emac = tx_chn->emac;
223 bool tdown = false;
224 int num_tx_packets;
225
226 num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget,
227 &tdown);
228
229 if (num_tx_packets >= budget)
230 return budget;
231
232 if (napi_complete_done(napi_tx, num_tx_packets)) {
233 if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) {
234 hrtimer_start(&tx_chn->tx_hrtimer,
235 ns_to_ktime(tx_chn->tx_pace_timeout_ns),
236 HRTIMER_MODE_REL_PINNED);
237 } else {
238 enable_irq(tx_chn->irq);
239 }
240 }
241
242 return num_tx_packets;
243 }
244
245 static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
246 {
247 struct prueth_tx_chn *tx_chn = dev_id;
248
249 disable_irq_nosync(irq);
250 napi_schedule(&tx_chn->napi_tx);
251
252 return IRQ_HANDLED;
253 }
254
255 int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
256 {
257 struct prueth *prueth = emac->prueth;
258 int i, ret;
259
260 for (i = 0; i < emac->tx_ch_num; i++) {
261 struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
262
263 netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
264 hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
265 HRTIMER_MODE_REL_PINNED);
266 ret = request_irq(tx_chn->irq, prueth_tx_irq,
267 IRQF_TRIGGER_HIGH, tx_chn->name,
268 tx_chn);
269 if (ret) {
270 netif_napi_del(&tx_chn->napi_tx);
271 dev_err(prueth->dev, "unable to request TX IRQ %d\n",
272 tx_chn->irq);
273 goto fail;
274 }
275 }
276
277 return 0;
278 fail:
279 prueth_ndev_del_tx_napi(emac, i);
280 return ret;
281 }
282 EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi);
283
284 int prueth_init_tx_chns(struct prueth_emac *emac)
285 {
286 static const struct k3_ring_cfg ring_cfg = {
287 .elm_size = K3_RINGACC_RING_ELSIZE_8,
288 .mode = K3_RINGACC_RING_MODE_RING,
289 .flags = 0,
290 .size = PRUETH_MAX_TX_DESC,
291 };
292 struct k3_udma_glue_tx_channel_cfg tx_cfg;
293 struct device *dev = emac->prueth->dev;
294 struct net_device *ndev = emac->ndev;
295 int ret, slice, i;
296 u32 hdesc_size;
297
298 slice = prueth_emac_slice(emac);
299 if (slice < 0)
300 return slice;
301
302 init_completion(&emac->tdown_complete);
303
304 hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
305 PRUETH_NAV_SW_DATA_SIZE);
306 memset(&tx_cfg, 0, sizeof(tx_cfg));
307 tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
308 tx_cfg.tx_cfg = ring_cfg;
309 tx_cfg.txcq_cfg = ring_cfg;
310
311 for (i = 0; i < emac->tx_ch_num; i++) {
312 struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
313
314 /* To differentiate channels for SLICE0 vs SLICE1 */
315 snprintf(tx_chn->name, sizeof(tx_chn->name),
316 "tx%d-%d", slice, i);
317
318 tx_chn->emac = emac;
319 tx_chn->id = i;
320 tx_chn->descs_num = PRUETH_MAX_TX_DESC;
321
322 tx_chn->tx_chn =
323 k3_udma_glue_request_tx_chn(dev, tx_chn->name,
324 &tx_cfg);
325 if (IS_ERR(tx_chn->tx_chn)) {
326 ret = PTR_ERR(tx_chn->tx_chn);
327 tx_chn->tx_chn = NULL;
328 netdev_err(ndev,
329 "Failed to request tx dma ch: %d\n", ret);
330 goto fail;
331 }
332
333 tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
334 tx_chn->desc_pool =
335 k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
336 tx_chn->descs_num,
337 hdesc_size,
338 tx_chn->name);
339 if (IS_ERR(tx_chn->desc_pool)) {
340 ret = PTR_ERR(tx_chn->desc_pool);
341 tx_chn->desc_pool = NULL;
342 netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
343 goto fail;
344 }
345
346 ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
347 if (ret < 0) {
348 netdev_err(ndev, "failed to get tx irq\n");
349 goto fail;
350 }
351 tx_chn->irq = ret;
352
353 snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
354 dev_name(dev), tx_chn->id);
355 }
356
357 return 0;
358
359 fail:
360 prueth_cleanup_tx_chns(emac);
361 return ret;
362 }
363 EXPORT_SYMBOL_GPL(prueth_init_tx_chns);
364
365 int prueth_init_rx_chns(struct prueth_emac *emac,
366 struct prueth_rx_chn *rx_chn,
367 char *name, u32 max_rflows,
368 u32 max_desc_num)
369 {
370 struct k3_udma_glue_rx_channel_cfg rx_cfg;
371 struct device *dev = emac->prueth->dev;
372 struct net_device *ndev = emac->ndev;
373 u32 fdqring_id, hdesc_size;
374 int i, ret = 0, slice;
375 int flow_id_base;
376
377 slice = prueth_emac_slice(emac);
378 if (slice < 0)
379 return slice;
380
381 /* To differentiate channels for SLICE0 vs SLICE1 */
382 snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);
383
384 hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
385 PRUETH_NAV_SW_DATA_SIZE);
386 memset(&rx_cfg, 0, sizeof(rx_cfg));
387 rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
388 rx_cfg.flow_id_num = max_rflows;
389 rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */
390
391 /* init all flows */
392 rx_chn->dev = dev;
393 rx_chn->descs_num = max_desc_num;
394
395 rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
396 &rx_cfg);
397 if (IS_ERR(rx_chn->rx_chn)) {
398 ret = PTR_ERR(rx_chn->rx_chn);
399 rx_chn->rx_chn = NULL;
400 netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
401 goto fail;
402 }
403
404 rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
405 rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
406 rx_chn->descs_num,
407 hdesc_size,
408 rx_chn->name);
409 if (IS_ERR(rx_chn->desc_pool)) {
410 ret = PTR_ERR(rx_chn->desc_pool);
411 rx_chn->desc_pool = NULL;
412 netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
413 goto fail;
414 }
415
416 flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
417 if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
418 emac->rx_mgm_flow_id_base = flow_id_base;
419 netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base);
420 } else {
421 emac->rx_flow_id_base = flow_id_base;
422 netdev_dbg(ndev, "flow id base = %d\n", flow_id_base);
423 }
424
425 fdqring_id = K3_RINGACC_RING_ID_ANY;
426 for (i = 0; i < rx_cfg.flow_id_num; i++) {
427 struct k3_ring_cfg rxring_cfg = {
428 .elm_size = K3_RINGACC_RING_ELSIZE_8,
429 .mode = K3_RINGACC_RING_MODE_RING,
430 .flags = 0,
431 };
432 struct k3_ring_cfg fdqring_cfg = {
433 .elm_size = K3_RINGACC_RING_ELSIZE_8,
434 .flags = K3_RINGACC_RING_SHARED,
435 };
436 struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
437 .rx_cfg = rxring_cfg,
438 .rxfdq_cfg = fdqring_cfg,
439 .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
440 .src_tag_lo_sel =
441 K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
442 };
443
444 rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
445 rx_flow_cfg.rx_cfg.size = max_desc_num;
446 rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
447 rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;
448
449 ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
450 i, &rx_flow_cfg);
451 if (ret) {
452 netdev_err(ndev, "Failed to init rx flow%d %d\n",
453 i, ret);
454 goto fail;
455 }
456 if (!i)
457 fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
458 i);
459 ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
460 if (ret < 0) {
461 netdev_err(ndev, "Failed to get rx dma irq");
462 goto fail;
463 }
464 rx_chn->irq[i] = ret;
465 }
466
467 return 0;
468
469 fail:
470 prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
471 return ret;
472 }
473 EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
474
475 int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
476 struct prueth_rx_chn *rx_chn,
477 struct page *page, u32 buf_len)
478 {
479 struct net_device *ndev = emac->ndev;
480 struct cppi5_host_desc_t *desc_rx;
481 struct prueth_swdata *swdata;
482 dma_addr_t desc_dma;
483 dma_addr_t buf_dma;
484
485 buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
486 desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
487 if (!desc_rx) {
488 netdev_err(ndev, "rx push: failed to allocate descriptor\n");
489 return -ENOMEM;
490 }
491 desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
492
493 cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
494 PRUETH_NAV_PS_DATA_SIZE);
495 k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
496 cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
497
498 swdata = cppi5_hdesc_get_swdata(desc_rx);
499 swdata->type = PRUETH_SWDATA_PAGE;
500 swdata->data.page = page;
501
502 return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
503 desc_rx, desc_dma);
504 }
505 EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);
506
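/* Reconstruct a nanosecond timestamp from the split IEP fields as consumed
 * below: bits 19:0 of @lo are the nanosecond count within the current IEP
 * cycle, the remaining bits of @lo together with bits 11:0 of @hi form the
 * cycle count (summed with the software-maintained high word @hi_sw), and
 * the upper bits of @hi carry the rollover count folded in above bit 23.
 */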
507 u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
508 {
509 u32 iepcount_lo, iepcount_hi, hi_rollover_count;
510 u64 ns;
511
512 iepcount_lo = lo & GENMASK(19, 0);
513 iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
514 hi_rollover_count = hi >> 11;
515
516 ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
517 ns = ns * cycle_time_ns + iepcount_lo;
518
519 return ns;
520 }
521 EXPORT_SYMBOL_GPL(icssg_ts_to_ns);
522
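/* Fill in the RX hardware timestamp: on SR1.0 silicon (is_sr1) the firmware
 * places the full 64-bit nanosecond value directly in the two psdata words;
 * on later silicon the IEP pieces are recombined via icssg_ts_to_ns() using
 * the firmware-maintained high word read from shared RAM.
 */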
523 void emac_rx_timestamp(struct prueth_emac *emac,
524 struct sk_buff *skb, u32 *psdata)
525 {
526 struct skb_shared_hwtstamps *ssh;
527 u64 ns;
528
529 if (emac->is_sr1) {
530 ns = (u64)psdata[1] << 32 | psdata[0];
531 } else {
532 u32 hi_sw = readl(emac->prueth->shram.va +
533 TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
534 ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
535 IEP_DEFAULT_CYCLE_TIME_NS);
536 }
537
538 ssh = skb_hwtstamps(skb);
539 memset(ssh, 0, sizeof(*ssh));
540 ssh->hwtstamp = ns_to_ktime(ns);
541 }
542
543 /**
544 * emac_xmit_xdp_frame - transmits an XDP frame
545 * @emac: emac device
546 * @xdpf: data to transmit
547 * @page: page from page pool if already DMA mapped
548 * @q_idx: queue id
549 *
550 * Return: XDP state
551 */
552 u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
553 struct xdp_frame *xdpf,
554 struct page *page,
555 unsigned int q_idx)
556 {
557 struct cppi5_host_desc_t *first_desc;
558 struct net_device *ndev = emac->ndev;
559 struct netdev_queue *netif_txq;
560 struct prueth_tx_chn *tx_chn;
561 dma_addr_t desc_dma, buf_dma;
562 struct prueth_swdata *swdata;
563 u32 *epib;
564 int ret;
565
566 if (q_idx >= PRUETH_MAX_TX_QUEUES) {
567 netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
568 return ICSSG_XDP_CONSUMED; /* drop */
569 }
570
571 tx_chn = &emac->tx_chns[q_idx];
572
573 first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
574 if (!first_desc) {
575 netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
576 return ICSSG_XDP_CONSUMED; /* drop */
577 }
578
579 if (page) { /* already DMA mapped by page_pool */
580 buf_dma = page_pool_get_dma_addr(page);
581 buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
582 } else { /* Map the linear buffer */
583 buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
584 if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
585 netdev_err(ndev, "xdp tx: failed to map data buffer\n");
586 goto drop_free_descs; /* drop */
587 }
588 }
589
590 cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
591 PRUETH_NAV_PS_DATA_SIZE);
592 cppi5_hdesc_set_pkttype(first_desc, 0);
593 epib = first_desc->epib;
594 epib[0] = 0;
595 epib[1] = 0;
596
597 /* set dst tag to indicate internal qid at the firmware which is at
598 * bit8..bit15. bit0..bit7 indicates port num for directed
599 * packets in case of switch mode operation
600 */
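/* e.g. port_id 1 with q_idx 2 encodes as dst tag 0x0201 (illustration only) */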
601 cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
602 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
603 cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
604 swdata = cppi5_hdesc_get_swdata(first_desc);
605 swdata->type = PRUETH_SWDATA_XDPF;
606 swdata->data.xdpf = xdpf;
607
608 /* Report BQL before sending the packet */
609 netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
610 netdev_tx_sent_queue(netif_txq, xdpf->len);
611
612 cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
613 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
614
615 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
616 if (ret) {
617 netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
618 netdev_tx_completed_queue(netif_txq, 1, xdpf->len);
619 goto drop_free_descs;
620 }
621
622 return ICSSG_XDP_TX;
623
624 drop_free_descs:
625 prueth_xmit_free(tx_chn, first_desc);
626 return ICSSG_XDP_CONSUMED;
627 }
628 EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
629
630 /**
631 * emac_run_xdp - run an XDP program
632 * @emac: emac device
633 * @xdp: XDP buffer containing the frame
634 * @page: page with RX data if already DMA mapped
635 * @len: Rx descriptor packet length
636 *
637 * Return: XDP state
638 */
639 static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
640 struct page *page, u32 *len)
641 {
642 struct net_device *ndev = emac->ndev;
643 struct netdev_queue *netif_txq;
644 int cpu = smp_processor_id();
645 struct bpf_prog *xdp_prog;
646 struct xdp_frame *xdpf;
647 u32 pkt_len = *len;
648 u32 act, result;
649 int q_idx, err;
650
651 xdp_prog = READ_ONCE(emac->xdp_prog);
652 act = bpf_prog_run_xdp(xdp_prog, xdp);
653 switch (act) {
654 case XDP_PASS:
655 return ICSSG_XDP_PASS;
656 case XDP_TX:
657 /* Send packet to TX ring for immediate transmission */
658 xdpf = xdp_convert_buff_to_frame(xdp);
659 if (unlikely(!xdpf)) {
660 ndev->stats.tx_dropped++;
661 goto drop;
662 }
663
664 q_idx = cpu % emac->tx_ch_num;
665 netif_txq = netdev_get_tx_queue(ndev, q_idx);
666 __netif_tx_lock(netif_txq, cpu);
667 result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
668 __netif_tx_unlock(netif_txq);
669 if (result == ICSSG_XDP_CONSUMED) {
670 ndev->stats.tx_dropped++;
671 goto drop;
672 }
673
674 dev_sw_netstats_rx_add(ndev, xdpf->len);
675 return result;
676 case XDP_REDIRECT:
677 err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
678 if (err)
679 goto drop;
680
681 dev_sw_netstats_rx_add(ndev, pkt_len);
682 return ICSSG_XDP_REDIR;
683 default:
684 bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
685 fallthrough;
686 case XDP_ABORTED:
687 drop:
688 trace_xdp_exception(emac->ndev, xdp_prog, act);
689 fallthrough; /* handle aborts by dropping packet */
690 case XDP_DROP:
691 ndev->stats.rx_dropped++;
692 page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
693 return ICSSG_XDP_CONSUMED;
694 }
695 }
696
697 static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
698 {
699 struct prueth_rx_chn *rx_chn = &emac->rx_chns;
700 u32 buf_dma_len, pkt_len, port_id = 0;
701 struct net_device *ndev = emac->ndev;
702 struct cppi5_host_desc_t *desc_rx;
703 struct prueth_swdata *swdata;
704 dma_addr_t desc_dma, buf_dma;
705 struct page *page, *new_page;
706 struct page_pool *pool;
707 struct sk_buff *skb;
708 struct xdp_buff xdp;
709 u32 *psdata;
710 void *pa;
711 int ret;
712
713 *xdp_state = 0;
714 pool = rx_chn->pg_pool;
715 ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
716 if (ret) {
717 if (ret != -ENODATA)
718 netdev_err(ndev, "rx pop: failed: %d\n", ret);
719 return ret;
720 }
721
722 if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
723 return 0;
724
725 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
726 swdata = cppi5_hdesc_get_swdata(desc_rx);
727 if (swdata->type != PRUETH_SWDATA_PAGE) {
728 netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
729 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
730 return 0;
731 }
732
733 page = swdata->data.page;
734 page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
735 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
736 k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
737 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
738 /* firmware adds 4 CRC bytes, strip them */
739 pkt_len -= 4;
740 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
741
742 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
743
744 /* if allocation fails we drop the packet but push the
745 * descriptor back to the ring with the old page to prevent a stall
746 */
747 new_page = page_pool_dev_alloc_pages(pool);
748 if (unlikely(!new_page)) {
749 new_page = page;
750 ndev->stats.rx_dropped++;
751 goto requeue;
752 }
753
754 pa = page_address(page);
755 if (emac->xdp_prog) {
756 xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
757 xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
758
759 *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
760 if (*xdp_state == ICSSG_XDP_PASS)
761 skb = xdp_build_skb_from_buff(&xdp);
762 else
763 goto requeue;
764 } else {
765 /* prepare skb and send to n/w stack */
766 skb = napi_build_skb(pa, PAGE_SIZE);
767 }
768
769 if (!skb) {
770 ndev->stats.rx_dropped++;
771 page_pool_recycle_direct(pool, page);
772 goto requeue;
773 }
774
775 skb_reserve(skb, PRUETH_HEADROOM);
776 skb_put(skb, pkt_len);
777 skb->dev = ndev;
778
779 psdata = cppi5_hdesc_get_psdata(desc_rx);
780 /* RX HW timestamp */
781 if (emac->rx_ts_enabled)
782 emac_rx_timestamp(emac, skb, psdata);
783
784 if (emac->prueth->is_switch_mode)
785 skb->offload_fwd_mark = emac->offload_fwd_mark;
786 skb->protocol = eth_type_trans(skb, ndev);
787
788 skb_mark_for_recycle(skb);
789 napi_gro_receive(&emac->napi_rx, skb);
790 ndev->stats.rx_bytes += pkt_len;
791 ndev->stats.rx_packets++;
792
793 requeue:
794 /* queue another RX DMA */
795 ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
796 PRUETH_MAX_PKT_SIZE);
797 if (WARN_ON(ret < 0)) {
798 page_pool_recycle_direct(pool, new_page);
799 ndev->stats.rx_errors++;
800 ndev->stats.rx_dropped++;
801 }
802
803 return ret;
804 }
805
806 static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
807 {
808 struct prueth_rx_chn *rx_chn = data;
809 struct cppi5_host_desc_t *desc_rx;
810 struct prueth_swdata *swdata;
811 struct page_pool *pool;
812 struct page *page;
813
814 pool = rx_chn->pg_pool;
815 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
816 swdata = cppi5_hdesc_get_swdata(desc_rx);
817 if (swdata->type == PRUETH_SWDATA_PAGE) {
818 page = swdata->data.page;
819 page_pool_recycle_direct(pool, page);
820 }
821
822 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
823 }
824
825 static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
826 {
827 int i;
828
829 /* search and get the next free slot */
830 for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
831 if (!emac->tx_ts_skb[i]) {
832 emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
833 return i;
834 }
835 }
836
837 return -EBUSY;
838 }
839
840 /**
841 * icssg_ndo_start_xmit - EMAC Transmit function
842 * @skb: SKB pointer
843 * @ndev: EMAC network adapter
844 *
845 * Called by the network stack to transmit a packet; the packet is queued
846 * to the EMAC hardware transmit queue.
847 * Doesn't wait for completion; TX completion is handled later in
848 * emac_tx_complete_packets().
849 *
850 * Return: enum netdev_tx
851 */
852 enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
853 {
854 struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
855 struct prueth_emac *emac = netdev_priv(ndev);
856 struct prueth *prueth = emac->prueth;
857 struct netdev_queue *netif_txq;
858 struct prueth_swdata *swdata;
859 struct prueth_tx_chn *tx_chn;
860 dma_addr_t desc_dma, buf_dma;
861 u32 pkt_len, dst_tag_id;
862 int i, ret = 0, q_idx;
863 bool in_tx_ts = false;
864 int tx_ts_cookie;
865 u32 *epib;
866
867 pkt_len = skb_headlen(skb);
868 q_idx = skb_get_queue_mapping(skb);
869
870 tx_chn = &emac->tx_chns[q_idx];
871 netif_txq = netdev_get_tx_queue(ndev, q_idx);
872
873 /* Map the linear buffer */
874 buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
875 if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
876 netdev_err(ndev, "tx: failed to map skb buffer\n");
877 ret = NETDEV_TX_OK;
878 goto drop_free_skb;
879 }
880
881 first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
882 if (!first_desc) {
883 netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
884 dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
885 goto drop_stop_q_busy;
886 }
887
888 cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
889 PRUETH_NAV_PS_DATA_SIZE);
890 cppi5_hdesc_set_pkttype(first_desc, 0);
891 epib = first_desc->epib;
892 epib[0] = 0;
893 epib[1] = 0;
894 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
895 emac->tx_ts_enabled) {
896 tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
897 if (tx_ts_cookie >= 0) {
898 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
899 /* Request TX timestamp */
900 epib[0] = (u32)tx_ts_cookie;
901 epib[1] = 0x80000000; /* TX TS request */
902 emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
903 in_tx_ts = true;
904 }
905 }
906
907 /* set dst tag to indicate internal qid at the firmware which is at
908 * bit8..bit15. bit0..bit7 indicates port num for directed
909 * packets in case of switch mode operation and port num 0
910 * for undirected packets in case of HSR offload mode
911 */
912 dst_tag_id = emac->port_id | (q_idx << 8);
913
914 if (prueth->is_hsr_offload_mode &&
915 (ndev->features & NETIF_F_HW_HSR_DUP))
916 dst_tag_id = PRUETH_UNDIRECTED_PKT_DST_TAG;
917
918 if (prueth->is_hsr_offload_mode &&
919 (ndev->features & NETIF_F_HW_HSR_TAG_INS))
920 epib[1] |= PRUETH_UNDIRECTED_PKT_TAG_INS;
921
922 cppi5_desc_set_tags_ids(&first_desc->hdr, 0, dst_tag_id);
923 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
924 cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
925 swdata = cppi5_hdesc_get_swdata(first_desc);
926 swdata->type = PRUETH_SWDATA_SKB;
927 swdata->data.skb = skb;
928
929 /* Handle the case where skb is fragmented in pages */
930 cur_desc = first_desc;
931 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
932 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
933 u32 frag_size = skb_frag_size(frag);
934
935 next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
936 if (!next_desc) {
937 netdev_err(ndev,
938 "tx: failed to allocate frag. descriptor\n");
939 goto free_desc_stop_q_busy_cleanup_tx_ts;
940 }
941
942 buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
943 DMA_TO_DEVICE);
944 if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
945 netdev_err(ndev, "tx: Failed to map skb page\n");
946 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
947 ret = NETDEV_TX_OK;
948 goto cleanup_tx_ts;
949 }
950
951 cppi5_hdesc_reset_hbdesc(next_desc);
952 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
953 cppi5_hdesc_attach_buf(next_desc,
954 buf_dma, frag_size, buf_dma, frag_size);
955
956 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
957 next_desc);
958 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
959 cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
960
961 pkt_len += frag_size;
962 cur_desc = next_desc;
963 }
964 WARN_ON_ONCE(pkt_len != skb->len);
965
966 /* report bql before sending packet */
967 netdev_tx_sent_queue(netif_txq, pkt_len);
968
969 cppi5_hdesc_set_pktlen(first_desc, pkt_len);
970 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
971 /* cppi5_desc_dump(first_desc, 64); */
972
973 skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */
974 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
975 if (ret) {
976 netdev_err(ndev, "tx: push failed: %d\n", ret);
977 netdev_tx_completed_queue(netif_txq, 1, pkt_len);
978 goto drop_free_descs;
979 }
980
981 if (in_tx_ts)
982 atomic_inc(&emac->tx_ts_pending);
983
984 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
985 netif_tx_stop_queue(netif_txq);
986 /* Barrier, so that the stop_queue update is visible to other CPUs */
987 smp_mb__after_atomic();
988
989 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
990 MAX_SKB_FRAGS)
991 netif_tx_wake_queue(netif_txq);
992 }
993
994 return NETDEV_TX_OK;
995
996 cleanup_tx_ts:
997 if (in_tx_ts) {
998 dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
999 emac->tx_ts_skb[tx_ts_cookie] = NULL;
1000 }
1001
1002 drop_free_descs:
1003 prueth_xmit_free(tx_chn, first_desc);
1004
1005 drop_free_skb:
1006 dev_kfree_skb_any(skb);
1007
1008 /* error */
1009 ndev->stats.tx_dropped++;
1010 netdev_err(ndev, "tx: error: %d\n", ret);
1011
1012 return ret;
1013
1014 free_desc_stop_q_busy_cleanup_tx_ts:
1015 if (in_tx_ts) {
1016 dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
1017 emac->tx_ts_skb[tx_ts_cookie] = NULL;
1018 }
1019 prueth_xmit_free(tx_chn, first_desc);
1020
1021 drop_stop_q_busy:
1022 netif_tx_stop_queue(netif_txq);
1023 return NETDEV_TX_BUSY;
1024 }
1025 EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);
1026
1027 static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
1028 {
1029 struct prueth_tx_chn *tx_chn = data;
1030 struct cppi5_host_desc_t *desc_tx;
1031 struct prueth_swdata *swdata;
1032 struct xdp_frame *xdpf;
1033 struct sk_buff *skb;
1034
1035 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
1036 swdata = cppi5_hdesc_get_swdata(desc_tx);
1037
1038 switch (swdata->type) {
1039 case PRUETH_SWDATA_SKB:
1040 skb = swdata->data.skb;
1041 dev_kfree_skb_any(skb);
1042 break;
1043 case PRUETH_SWDATA_XDPF:
1044 xdpf = swdata->data.xdpf;
1045 xdp_return_frame(xdpf);
1046 break;
1047 default:
1048 break;
1049 }
1050
1051 prueth_xmit_free(tx_chn, desc_tx);
1052 }
1053
1054 irqreturn_t prueth_rx_irq(int irq, void *dev_id)
1055 {
1056 struct prueth_emac *emac = dev_id;
1057
1058 disable_irq_nosync(irq);
1059 napi_schedule(&emac->napi_rx);
1060
1061 return IRQ_HANDLED;
1062 }
1063 EXPORT_SYMBOL_GPL(prueth_rx_irq);
1064
1065 void prueth_cleanup_tx_ts(struct prueth_emac *emac)
1066 {
1067 int i;
1068
1069 for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
1070 if (emac->tx_ts_skb[i]) {
1071 dev_kfree_skb_any(emac->tx_ts_skb[i]);
1072 emac->tx_ts_skb[i] = NULL;
1073 }
1074 }
1075 }
1076 EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts);
1077
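/* RX NAPI poll: per-packet XDP verdicts are OR-combined so that
 * xdp_do_flush() runs at most once per poll when any frame was redirected;
 * as on the TX side, IRQ re-enable is deferred to rx_hrtimer when RX
 * interrupt pacing is enabled.
 */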
1078 int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
1079 {
1080 struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
1081 int rx_flow = emac->is_sr1 ?
1082 PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
1083 int flow = emac->is_sr1 ?
1084 PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
1085 int xdp_state_or = 0;
1086 int num_rx = 0;
1087 int cur_budget;
1088 u32 xdp_state;
1089 int ret;
1090
1091 while (flow--) {
1092 cur_budget = budget - num_rx;
1093
1094 while (cur_budget--) {
1095 ret = emac_rx_packet(emac, flow, &xdp_state);
1096 xdp_state_or |= xdp_state;
1097 if (ret)
1098 break;
1099 num_rx++;
1100 }
1101
1102 if (num_rx >= budget)
1103 break;
1104 }
1105
1106 if (xdp_state_or & ICSSG_XDP_REDIR)
1107 xdp_do_flush();
1108
1109 if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
1110 if (unlikely(emac->rx_pace_timeout_ns)) {
1111 hrtimer_start(&emac->rx_hrtimer,
1112 ns_to_ktime(emac->rx_pace_timeout_ns),
1113 HRTIMER_MODE_REL_PINNED);
1114 } else {
1115 enable_irq(emac->rx_chns.irq[rx_flow]);
1116 }
1117 }
1118
1119 return num_rx;
1120 }
1121 EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
1122
1123 static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
1124 struct device *dma_dev,
1125 int size)
1126 {
1127 struct page_pool_params pp_params = { 0 };
1128 struct page_pool *pool;
1129
1130 pp_params.order = 0;
1131 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1132 pp_params.pool_size = size;
1133 pp_params.nid = dev_to_node(emac->prueth->dev);
1134 pp_params.dma_dir = DMA_BIDIRECTIONAL;
1135 pp_params.dev = dma_dev;
1136 pp_params.napi = &emac->napi_rx;
1137 pp_params.max_len = PAGE_SIZE;
1138
1139 pool = page_pool_create(&pp_params);
1140 if (IS_ERR(pool))
1141 netdev_err(emac->ndev, "cannot create rx page pool\n");
1142
1143 return pool;
1144 }
1145
1146 int prueth_prepare_rx_chan(struct prueth_emac *emac,
1147 struct prueth_rx_chn *chn,
1148 int buf_size)
1149 {
1150 struct page_pool *pool;
1151 struct page *page;
1152 int i, ret;
1153
1154 pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
1155 if (IS_ERR(pool))
1156 return PTR_ERR(pool);
1157
1158 chn->pg_pool = pool;
1159
1160 for (i = 0; i < chn->descs_num; i++) {
1161 /* NOTE: we're not using memory efficiently here:
1162 * one full page (typically 4KB) is used per RX buffer instead of
1163 * PRUETH_MAX_PKT_SIZE (~1.5KB)
1164 */
1165 page = page_pool_dev_alloc_pages(pool);
1166 if (!page) {
1167 netdev_err(emac->ndev, "couldn't allocate rx page\n");
1168 ret = -ENOMEM;
1169 goto recycle_alloc_pg;
1170 }
1171
1172 ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
1173 if (ret < 0) {
1174 netdev_err(emac->ndev,
1175 "cannot submit page for rx chan %s ret %d\n",
1176 chn->name, ret);
1177 page_pool_recycle_direct(pool, page);
1178 goto recycle_alloc_pg;
1179 }
1180 }
1181
1182 return 0;
1183
1184 recycle_alloc_pg:
1185 prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
1186
1187 return ret;
1188 }
1189 EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
1190
1191 void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
1192 bool free_skb)
1193 {
1194 int i;
1195
1196 for (i = 0; i < ch_num; i++) {
1197 if (free_skb)
1198 k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
1199 &emac->tx_chns[i],
1200 prueth_tx_cleanup);
1201 k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
1202 }
1203 }
1204 EXPORT_SYMBOL_GPL(prueth_reset_tx_chan);
1205
1206 void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
1207 int num_flows, bool disable)
1208 {
1209 int i;
1210
1211 for (i = 0; i < num_flows; i++)
1212 k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
1213 prueth_rx_cleanup);
1214 if (disable)
1215 k3_udma_glue_disable_rx_chn(chn->rx_chn);
1216 }
1217 EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
1218
1219 void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1220 {
1221 ndev->stats.tx_errors++;
1222 }
1223 EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);
1224
1225 static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
1226 {
1227 struct prueth_emac *emac = netdev_priv(ndev);
1228 struct hwtstamp_config config;
1229
1230 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1231 return -EFAULT;
1232
1233 switch (config.tx_type) {
1234 case HWTSTAMP_TX_OFF:
1235 emac->tx_ts_enabled = 0;
1236 break;
1237 case HWTSTAMP_TX_ON:
1238 emac->tx_ts_enabled = 1;
1239 break;
1240 default:
1241 return -ERANGE;
1242 }
1243
1244 switch (config.rx_filter) {
1245 case HWTSTAMP_FILTER_NONE:
1246 emac->rx_ts_enabled = 0;
1247 break;
1248 case HWTSTAMP_FILTER_ALL:
1249 case HWTSTAMP_FILTER_SOME:
1250 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1251 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1252 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1253 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1254 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1255 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1256 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1257 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1258 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1259 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1260 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1261 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1262 case HWTSTAMP_FILTER_NTP_ALL:
1263 emac->rx_ts_enabled = 1;
1264 config.rx_filter = HWTSTAMP_FILTER_ALL;
1265 break;
1266 default:
1267 return -ERANGE;
1268 }
1269
1270 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1271 -EFAULT : 0;
1272 }
1273
1274 static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
1275 {
1276 struct prueth_emac *emac = netdev_priv(ndev);
1277 struct hwtstamp_config config;
1278
1279 config.flags = 0;
1280 config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1281 config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
1282
1283 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1284 -EFAULT : 0;
1285 }
1286
1287 int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1288 {
1289 switch (cmd) {
1290 case SIOCGHWTSTAMP:
1291 return emac_get_ts_config(ndev, ifr);
1292 case SIOCSHWTSTAMP:
1293 return emac_set_ts_config(ndev, ifr);
1294 default:
1295 break;
1296 }
1297
1298 return phy_do_ioctl(ndev, ifr, cmd);
1299 }
1300 EXPORT_SYMBOL_GPL(icssg_ndo_ioctl);
1301
1302 void icssg_ndo_get_stats64(struct net_device *ndev,
1303 struct rtnl_link_stats64 *stats)
1304 {
1305 struct prueth_emac *emac = netdev_priv(ndev);
1306
1307 emac_update_hardware_stats(emac);
1308
1309 stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
1310 stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
1311 stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
1312 stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
1313 stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
1314 stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
1315 stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
1316
1317 stats->rx_errors = ndev->stats.rx_errors +
1318 emac_get_stat_by_name(emac, "FW_RX_ERROR") +
1319 emac_get_stat_by_name(emac, "FW_RX_EOF_SHORT_FRMERR") +
1320 emac_get_stat_by_name(emac, "FW_RX_B0_DROP_EARLY_EOF") +
1321 emac_get_stat_by_name(emac, "FW_RX_EXP_FRAG_Q_DROP") +
1322 emac_get_stat_by_name(emac, "FW_RX_FIFO_OVERRUN");
1323 stats->rx_dropped = ndev->stats.rx_dropped +
1324 emac_get_stat_by_name(emac, "FW_DROPPED_PKT") +
1325 emac_get_stat_by_name(emac, "FW_INF_PORT_DISABLED") +
1326 emac_get_stat_by_name(emac, "FW_INF_SAV") +
1327 emac_get_stat_by_name(emac, "FW_INF_SA_DL") +
1328 emac_get_stat_by_name(emac, "FW_INF_PORT_BLOCKED") +
1329 emac_get_stat_by_name(emac, "FW_INF_DROP_TAGGED") +
1330 emac_get_stat_by_name(emac, "FW_INF_DROP_PRIOTAGGED") +
1331 emac_get_stat_by_name(emac, "FW_INF_DROP_NOTAG") +
1332 emac_get_stat_by_name(emac, "FW_INF_DROP_NOTMEMBER");
1333 stats->tx_errors = ndev->stats.tx_errors;
1334 stats->tx_dropped = ndev->stats.tx_dropped +
1335 emac_get_stat_by_name(emac, "FW_RTU_PKT_DROP") +
1336 emac_get_stat_by_name(emac, "FW_TX_DROPPED_PACKET") +
1337 emac_get_stat_by_name(emac, "FW_TX_TS_DROPPED_PACKET") +
1338 emac_get_stat_by_name(emac, "FW_TX_JUMBO_FRM_CUTOFF");
1339 }
1340 EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);
1341
1342 int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
1343 size_t len)
1344 {
1345 struct prueth_emac *emac = netdev_priv(ndev);
1346 int ret;
1347
1348 ret = snprintf(name, len, "p%d", emac->port_id);
1349 if (ret >= len)
1350 return -EINVAL;
1351
1352 return 0;
1353 }
1354 EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name);
1355
1356 /* get emac_port corresponding to eth_node name */
1357 int prueth_node_port(struct device_node *eth_node)
1358 {
1359 u32 port_id;
1360 int ret;
1361
1362 ret = of_property_read_u32(eth_node, "reg", &port_id);
1363 if (ret)
1364 return ret;
1365
1366 if (port_id == 0)
1367 return PRUETH_PORT_MII0;
1368 else if (port_id == 1)
1369 return PRUETH_PORT_MII1;
1370 else
1371 return PRUETH_PORT_INVALID;
1372 }
1373 EXPORT_SYMBOL_GPL(prueth_node_port);
1374
1375 /* get MAC instance corresponding to eth_node name */
1376 int prueth_node_mac(struct device_node *eth_node)
1377 {
1378 u32 port_id;
1379 int ret;
1380
1381 ret = of_property_read_u32(eth_node, "reg", &port_id);
1382 if (ret)
1383 return ret;
1384
1385 if (port_id == 0)
1386 return PRUETH_MAC0;
1387 else if (port_id == 1)
1388 return PRUETH_MAC1;
1389 else
1390 return PRUETH_MAC_INVALID;
1391 }
1392 EXPORT_SYMBOL_GPL(prueth_node_mac);
1393
1394 void prueth_netdev_exit(struct prueth *prueth,
1395 struct device_node *eth_node)
1396 {
1397 struct prueth_emac *emac;
1398 enum prueth_mac mac;
1399
1400 mac = prueth_node_mac(eth_node);
1401 if (mac == PRUETH_MAC_INVALID)
1402 return;
1403
1404 emac = prueth->emac[mac];
1405 if (!emac)
1406 return;
1407
1408 if (of_phy_is_fixed_link(emac->phy_node))
1409 of_phy_deregister_fixed_link(emac->phy_node);
1410
1411 netif_napi_del(&emac->napi_rx);
1412
1413 pruss_release_mem_region(prueth->pruss, &emac->dram);
1414 destroy_workqueue(emac->cmd_wq);
1415 free_netdev(emac->ndev);
1416 prueth->emac[mac] = NULL;
1417 }
1418 EXPORT_SYMBOL_GPL(prueth_netdev_exit);
1419
1420 int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
1421 {
1422 struct device *dev = prueth->dev;
1423 enum pruss_pru_id pruss_id;
1424 struct device_node *np;
1425 int idx = -1, ret;
1426
1427 np = dev->of_node;
1428
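/* Pick the starting index into the PRU core phandle list referenced by
 * this node; the index arithmetic below assumes the cores are listed as
 * PRU0, RTU0[, TX_PRU0], PRU1, RTU1[, TX_PRU1], with the TX PRUs absent
 * on SR1.0 hardware.
 */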
1429 switch (slice) {
1430 case ICSS_SLICE0:
1431 idx = 0;
1432 break;
1433 case ICSS_SLICE1:
1434 idx = is_sr1 ? 2 : 3;
1435 break;
1436 default:
1437 return -EINVAL;
1438 }
1439
1440 prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
1441 if (IS_ERR(prueth->pru[slice])) {
1442 ret = PTR_ERR(prueth->pru[slice]);
1443 prueth->pru[slice] = NULL;
1444 return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
1445 }
1446 prueth->pru_id[slice] = pruss_id;
1447
1448 idx++;
1449 prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
1450 if (IS_ERR(prueth->rtu[slice])) {
1451 ret = PTR_ERR(prueth->rtu[slice]);
1452 prueth->rtu[slice] = NULL;
1453 return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
1454 }
1455
1456 if (is_sr1)
1457 return 0;
1458
1459 idx++;
1460 prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
1461 if (IS_ERR(prueth->txpru[slice])) {
1462 ret = PTR_ERR(prueth->txpru[slice]);
1463 prueth->txpru[slice] = NULL;
1464 return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
1465 }
1466
1467 return 0;
1468 }
1469 EXPORT_SYMBOL_GPL(prueth_get_cores);
1470
1471 void prueth_put_cores(struct prueth *prueth, int slice)
1472 {
1473 if (prueth->txpru[slice])
1474 pru_rproc_put(prueth->txpru[slice]);
1475
1476 if (prueth->rtu[slice])
1477 pru_rproc_put(prueth->rtu[slice]);
1478
1479 if (prueth->pru[slice])
1480 pru_rproc_put(prueth->pru[slice]);
1481 }
1482 EXPORT_SYMBOL_GPL(prueth_put_cores);
1483
1484 #ifdef CONFIG_PM_SLEEP
1485 static int prueth_suspend(struct device *dev)
1486 {
1487 struct prueth *prueth = dev_get_drvdata(dev);
1488 struct net_device *ndev;
1489 int i, ret;
1490
1491 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1492 ndev = prueth->registered_netdevs[i];
1493
1494 if (!ndev)
1495 continue;
1496
1497 if (netif_running(ndev)) {
1498 netif_device_detach(ndev);
1499 ret = ndev->netdev_ops->ndo_stop(ndev);
1500 if (ret < 0) {
1501 netdev_err(ndev, "failed to stop: %d", ret);
1502 return ret;
1503 }
1504 }
1505 }
1506
1507 return 0;
1508 }
1509
1510 static int prueth_resume(struct device *dev)
1511 {
1512 struct prueth *prueth = dev_get_drvdata(dev);
1513 struct net_device *ndev;
1514 int i, ret;
1515
1516 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1517 ndev = prueth->registered_netdevs[i];
1518
1519 if (!ndev)
1520 continue;
1521
1522 if (netif_running(ndev)) {
1523 ret = ndev->netdev_ops->ndo_open(ndev);
1524 if (ret < 0) {
1525 netdev_err(ndev, "failed to start: %d", ret);
1526 return ret;
1527 }
1528 netif_device_attach(ndev);
1529 }
1530 }
1531
1532 return 0;
1533 }
1534 #endif /* CONFIG_PM_SLEEP */
1535
1536 const struct dev_pm_ops prueth_dev_pm_ops = {
1537 SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
1538 };
1539 EXPORT_SYMBOL_GPL(prueth_dev_pm_ops);
1540
1541 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
1542 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
1543 MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module");
1544 MODULE_LICENSE("GPL");
1545