// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <net/vxlan.h>

#include "xgbe.h"
#include "xgbe-common.h"

static unsigned int ecc_sec_info_threshold = 10;
static unsigned int ecc_sec_warn_threshold = 10000;
static unsigned int ecc_sec_period = 600;
static unsigned int ecc_ded_threshold = 2;
static unsigned int ecc_ded_period = 600;

#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
module_param(ecc_sec_info_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_info_threshold,
		 " ECC corrected error informational threshold setting");

module_param(ecc_sec_warn_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
		 " ECC corrected error warning threshold setting");

module_param(ecc_sec_period, uint, 0644);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");

module_param(ecc_ded_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");

module_param(ecc_ded_period, uint, 0644);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);

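/* Allocate zeroed memory, preferring the given NUMA node and falling
 * back to an any-node allocation if node-local memory is unavailable.
 */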
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
		if (!pdata->channel[i])
			continue;

		kfree(pdata->channel[i]->rx_ring);
		kfree(pdata->channel[i]->tx_ring);
		kfree(pdata->channel[i]);

		pdata->channel[i] = NULL;
	}

	pdata->channel_count = 0;
}

static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	unsigned int count, i;
	unsigned int cpu;
	int node;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
	for (i = 0; i < count; i++) {
		/* Attempt to use a CPU on the node the device is on */
		cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));

		/* Set the allocation node based on the returned CPU */
		node = cpu_to_node(cpu);

		channel = xgbe_alloc_node(sizeof(*channel), node);
		if (!channel)
			goto err_mem;
		pdata->channel[i] = channel;

		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);
		channel->node = node;
		cpumask_set_cpu(cpu, &channel->affinity_mask);

		if (pdata->per_channel_irq)
			channel->dma_irq = pdata->channel_irq[i];

		if (i < pdata->tx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->tx_ring = ring;
		}

		if (i < pdata->rx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->rx_ring = ring;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: cpu=%u, node=%d\n", channel->name, cpu, node);

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel_count = count;

	return 0;

err_mem:
	xgbe_free_channels(pdata);

	return -ENOMEM;
}

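/* Tx descriptors still available: the difference between 'cur' and
 * 'dirty' is the number of descriptors currently in use.
 */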
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}

static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

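/* Size the Rx buffer for the MTU plus Ethernet/FCS/VLAN overhead,
 * clamped to [XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE] and rounded up to the
 * alignment the DMA engine requires.
 */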
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
				  struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->enable_int(channel, int_id);
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
}

static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
				   struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->disable_int(channel, int_id);
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}

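/* Count single-bit corrected (SEC) ECC errors within a window of
 * ecc_sec_period seconds; returns true once the warning threshold is
 * crossed so the caller can disable corrected-error reporting.
 */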
static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_sec_period * HZ);
		*count = 1;
	}

	if (*count > ecc_sec_info_threshold)
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed informational threshold\n",
			      area);

	if (*count > ecc_sec_warn_threshold) {
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed warning threshold\n",
			      area);
		return true;
	}

	return false;
}

static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_ded_period * HZ);
		*count = 1;
	}

	if (*count > ecc_ded_threshold) {
		netdev_alert(pdata->netdev,
			     "%s ECC detected errors exceed threshold\n",
			     area);
		return true;
	}

	return false;
}

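/* Bottom-half ECC interrupt handling: double-bit detected (DED) errors
 * above threshold stop the device; excessive single-bit corrected
 * errors only turn off further corrected-error reporting per area.
 */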
static void xgbe_ecc_isr_bh_work(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = from_work(pdata, work, ecc_bh_work);
	unsigned int ecc_isr;
	bool stop = false;

	/* Mask status with only the interrupts we care about */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
	netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
				     &pdata->tx_ded_count, "TX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
				     &pdata->rx_ded_count, "RX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
				     &pdata->desc_ded_count,
				     "descriptor cache");
	}

	if (stop) {
		pdata->hw_if.disable_ecc_ded(pdata);
		schedule_work(&pdata->stopdev_work);
		goto out;
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
		if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
				 &pdata->tx_sec_count, "TX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
				 &pdata->rx_sec_count, "RX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
				 &pdata->desc_sec_count, "descriptor cache"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);

out:
	/* Clear all ECC interrupts */
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support)
		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
}

static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_bh_work)
		queue_work(system_bh_wq, &pdata->ecc_bh_work);
	else
		xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);

	return IRQ_HANDLED;
}

static void xgbe_isr_bh_work(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr, mac_mdioisr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel[i];

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		} else {
			/* Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts, these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
			  mac_isr);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_TSSR=%#010x\n", mac_tssr);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_MDIOISR=%#010x\n", mac_mdioisr);

			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
					   SNGLCOMPINT))
				complete(&pdata->mdio_complete);
		}
	}

isr_done:
	/* If there is not a separate AN irq, handle it here */
	if (pdata->dev_irq == pdata->an_irq)
		pdata->phy_if.an_isr(pdata);

	/* If there is not a separate ECC irq, handle it here */
	if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
		xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);

	/* If there is not a separate I2C irq, handle it here */
	if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
		pdata->i2c_if.i2c_isr(pdata);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support) {
		unsigned int reissue_mask;

		reissue_mask = 1 << 0;
		if (!pdata->per_channel_irq)
			reissue_mask |= 0xffff << 4;

		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
	}
}

static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_bh_work)
		queue_work(system_bh_wq, &pdata->dev_bh_work);
	else
		xgbe_isr_bh_work(&pdata->dev_bh_work);

	return IRQ_HANDLED;
}

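/* Per-channel DMA interrupt handler: switch the channel over to NAPI
 * polling and acknowledge the Tx/Rx status bits.
 */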
static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_disable_rx_tx_int(pdata, channel);
		else
			disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	/* Clear Tx/Rx signals */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return IRQ_HANDLED;
}

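/* Tx timer callback, armed from xgbe_service_timer when tx_usecs
 * coalescing is in use: schedule NAPI so pending Tx completions are
 * processed even if no interrupt has fired.
 */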
static void xgbe_tx_timer(struct timer_list *t)
{
	struct xgbe_channel *channel = timer_container_of(channel, t,
							  tx_timer);
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq) {
			if (pdata->channel_irq_mode)
				xgbe_disable_rx_tx_int(pdata, channel);
			else
				disable_irq_nosync(channel->dma_irq);
		} else {
			xgbe_disable_rx_tx_ints(pdata);
		}

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}

static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(struct timer_list *t)
{
	struct xgbe_prv_data *pdata = timer_container_of(pdata, t,
							 service_timer);
	struct xgbe_channel *channel;
	unsigned int i;

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);

	if (!pdata->tx_usecs)
		return;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring || channel->tx_timer_active)
			break;
		channel->tx_timer_active = 1;
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
	}
}

static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	timer_setup(&pdata->service_timer, xgbe_service_timer, 0);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
	}
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	timer_delete_sync(&pdata->service_timer);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		/* Deactivate the Tx timer */
		timer_delete_sync(&channel->tx_timer);
		channel->tx_timer_active = 0;
	}
}

void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					   ADDMACADRSEL);
	hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
	hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       RXFIFOSIZE);
	hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       TXFIFOSIZE);
	hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "Hardware features:\n");

		/* Hardware feature register 0 */
		dev_dbg(pdata->dev, " 1GbE support : %s\n",
			hw_feat->gmii ? "yes" : "no");
		dev_dbg(pdata->dev, " VLAN hash filter : %s\n",
			hw_feat->vlhash ? "yes" : "no");
		dev_dbg(pdata->dev, " MDIO interface : %s\n",
			hw_feat->sma ? "yes" : "no");
		dev_dbg(pdata->dev, " Wake-up packet support : %s\n",
			hw_feat->rwk ? "yes" : "no");
		dev_dbg(pdata->dev, " Magic packet support : %s\n",
			hw_feat->mgk ? "yes" : "no");
		dev_dbg(pdata->dev, " Management counters : %s\n",
			hw_feat->mmc ? "yes" : "no");
		dev_dbg(pdata->dev, " ARP offload : %s\n",
			hw_feat->aoe ? "yes" : "no");
		dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n",
			hw_feat->ts ? "yes" : "no");
		dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n",
			hw_feat->eee ? "yes" : "no");
		dev_dbg(pdata->dev, " TX checksum offload : %s\n",
			hw_feat->tx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, " RX checksum offload : %s\n",
			hw_feat->rx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, " Additional MAC addresses : %u\n",
			hw_feat->addn_mac);
		dev_dbg(pdata->dev, " Timestamp source : %s\n",
			(hw_feat->ts_src == 1) ? "internal" :
			(hw_feat->ts_src == 2) ? "external" :
			(hw_feat->ts_src == 3) ? "internal/external" : "n/a");
		dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n",
			hw_feat->sa_vlan_ins ? "yes" : "no");
		dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n",
			hw_feat->vxn ? "yes" : "no");

		/* Hardware feature register 1 */
		dev_dbg(pdata->dev, " RX fifo size : %u\n",
			hw_feat->rx_fifo_size);
		dev_dbg(pdata->dev, " TX fifo size : %u\n",
			hw_feat->tx_fifo_size);
		dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n",
			hw_feat->adv_ts_hi ? "yes" : "no");
		dev_dbg(pdata->dev, " DMA width : %u\n",
			hw_feat->dma_width);
		dev_dbg(pdata->dev, " Data Center Bridging : %s\n",
			hw_feat->dcb ? "yes" : "no");
		dev_dbg(pdata->dev, " Split header : %s\n",
			hw_feat->sph ? "yes" : "no");
		dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n",
			hw_feat->tso ? "yes" : "no");
		dev_dbg(pdata->dev, " Debug memory interface : %s\n",
			hw_feat->dma_debug ? "yes" : "no");
		dev_dbg(pdata->dev, " Receive Side Scaling : %s\n",
			hw_feat->rss ? "yes" : "no");
		dev_dbg(pdata->dev, " Traffic Class count : %u\n",
			hw_feat->tc_cnt);
		dev_dbg(pdata->dev, " Hash table size : %u\n",
			hw_feat->hash_table_size);
		dev_dbg(pdata->dev, " L3/L4 Filters : %u\n",
			hw_feat->l3l4_filter_num);

		/* Hardware feature register 2 */
		dev_dbg(pdata->dev, " RX queue count : %u\n",
			hw_feat->rx_q_cnt);
		dev_dbg(pdata->dev, " TX queue count : %u\n",
			hw_feat->tx_q_cnt);
		dev_dbg(pdata->dev, " RX DMA channel count : %u\n",
			hw_feat->rx_ch_cnt);
807 dev_dbg(pdata->dev, " TX DMA channel count : %u\n",
808 hw_feat->rx_ch_cnt);
		dev_dbg(pdata->dev, " PPS outputs : %u\n",
			hw_feat->pps_out_num);
		dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n",
			hw_feat->aux_snap_num);
	}
}

static int xgbe_vxlan_set_port(struct net_device *netdev, unsigned int table,
			       unsigned int entry, struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	pdata->vxlan_port = be16_to_cpu(ti->port);
	pdata->hw_if.enable_vxlan(pdata);

	return 0;
}

static int xgbe_vxlan_unset_port(struct net_device *netdev, unsigned int table,
				 unsigned int entry, struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	pdata->hw_if.disable_vxlan(pdata);
	pdata->vxlan_port = 0;

	return 0;
}

static const struct udp_tunnel_nic_info xgbe_udp_tunnels = {
	.set_port	= xgbe_vxlan_set_port,
	.unset_port	= xgbe_vxlan_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void)
{
	return &xgbe_udp_tunnels;
}

static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

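/* Request the device, ECC and (optionally) per-channel DMA interrupts,
 * unwinding everything already requested on failure.
 */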
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	INIT_WORK(&pdata->dev_bh_work, xgbe_isr_bh_work);
	INIT_WORK(&pdata->ecc_bh_work, xgbe_ecc_isr_bh_work);

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev_name(netdev), pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
		ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
				       0, pdata->ecc_name, pdata);
		if (ret) {
			netdev_alert(netdev, "error requesting ecc irq %d\n",
				     pdata->ecc_irq);
			goto err_dev_irq;
		}
	}

	if (!pdata->per_channel_irq)
		return 0;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_dma_irq;
		}

		irq_set_affinity_hint(channel->dma_irq,
				      &channel->affinity_mask);
	}

	return 0;

err_dma_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--; i < pdata->channel_count; i--) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

err_dev_irq:
	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	cancel_work_sync(&pdata->dev_bh_work);
	cancel_work_sync(&pdata->ecc_bh_work);

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}

static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}

int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static void xgbe_free_memory(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);
}

static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	if (pdata->new_tx_ring_count) {
		pdata->tx_ring_count = pdata->new_tx_ring_count;
		pdata->tx_q_count = pdata->tx_ring_count;
		pdata->new_tx_ring_count = 0;
	}

	if (pdata->new_rx_ring_count) {
		pdata->rx_ring_count = pdata->new_rx_ring_count;
		pdata->new_rx_ring_count = 0;
	}

	/* Calculate the Rx buffer size before allocating rings */
	pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		return ret;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	/* Initialize the service and Tx timers */
	xgbe_init_timers(pdata);

	return 0;

err_channels:
	xgbe_free_memory(pdata);

	return ret;
}

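/* Bring the interface up: program queue counts and the RSS lookup
 * table, initialize the hardware, enable NAPI and interrupts, start the
 * PHY, then enable the Tx/Rx paths and the service timers.
 */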
static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	/* Set the number of queues */
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		netdev_err(netdev, "error setting real tx queue count\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		netdev_err(netdev, "error setting real rx queue count\n");
		return ret;
	}

	/* Set RSS lookup table data for programming */
	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	ret = hw_if->init(pdata);
	if (ret)
		return ret;

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_irqs;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	udp_tunnel_nic_reset_ntf(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	clear_bit(XGBE_STOPPED, &pdata->dev_state);

	return 0;

err_irqs:
	xgbe_free_irqs(pdata);

err_napi:
	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	return ret;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	if (test_bit(XGBE_STOPPED, &pdata->dev_state))
		return;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(pdata->netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	xgbe_vxlan_unset_port(netdev, 0, 0, NULL);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	phy_if->phy_stop(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	set_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_stopdev(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   stopdev_work);

	rtnl_lock();

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	rtnl_unlock();

	netdev_alert(pdata->netdev, "device stopped\n");
}

void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_memory(pdata);
	xgbe_alloc_memory(pdata);

	xgbe_start(pdata);
}

void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	if (!pdata->tx_tstamp_skb)
		goto unlock;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	pdata->tx_tstamp_skb = NULL;

unlock:
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}

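/* Translate the SIOCSHWTSTAMP hwtstamp_config into MAC_TSCR timestamp
 * control bits, rejecting modes the hardware cannot filter.
 */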
static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		fallthrough;	/* to PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		fallthrough;	/* to PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		fallthrough;	/* to PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}

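/* Capture at most one outstanding Tx hardware timestamp: if a timestamp
 * is already pending, the PTP attribute is dropped for this skb.
 */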
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
		packet->header_len = skb_inner_tcp_all_headers(skb);
		packet->tcp_header_len = inner_tcp_hdrlen(skb);
	} else {
		packet->header_len = skb_tcp_all_headers(skb);
		packet->tcp_header_len = tcp_hdrlen(skb);
	}
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;

	DBGPR(" packet->header_len=%u\n", packet->header_len);
	DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR(" packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}

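/* Identify a VXLAN packet: an encapsulated, checksum-offloaded UDP
 * frame whose inner protocol is Ethernet (TEB) at the expected
 * UDP + VXLAN header offset.
 */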
static bool xgbe_is_vxlan(struct sk_buff *skb)
{
	if (!skb->encapsulation)
		return false;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (ip_hdr(skb)->protocol != IPPROTO_UDP)
			return false;
		break;

	case htons(ETH_P_IPV6):
		if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
			return false;
		break;

	default:
		return false;
	}

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	     sizeof(struct udphdr) + sizeof(struct vxlanhdr)))
		return false;

	return true;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR(" TSO packet to be processed\n");

	return 1;
}

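/* Pre-compute the descriptor count and Tx attributes (TSO, VXLAN,
 * VLAN, PTP) for an skb before it is mapped for DMA.
 */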
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	skb_frag_t *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	packet->skb = skb;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (xgbe_is_vxlan(skb))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VXLAN, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}

static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	/* Create the various names based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
		 netdev_name(netdev));

	snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		return -ENOMEM;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_dev_wq;
	}

	/* Reset the phy settings */
	ret = xgbe_phy_reset(pdata);
	if (ret)
		goto err_an_wq;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_an_wq;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	INIT_WORK(&pdata->service_work, xgbe_service);
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

	ret = xgbe_alloc_memory(pdata);
	if (ret)
		goto err_ptpclk;

	ret = xgbe_start(pdata);
	if (ret)
		goto err_mem;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	return 0;

err_mem:
	xgbe_free_memory(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_an_wq:
	destroy_workqueue(pdata->an_workqueue);

err_dev_wq:
	destroy_workqueue(pdata->dev_workqueue);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	/* Stop the device */
	xgbe_stop(pdata);

	xgbe_free_memory(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	destroy_workqueue(pdata->an_workqueue);

	destroy_workqueue(pdata->dev_workqueue);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	return 0;
}

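/* Main transmit path: validate the skb, reserve descriptors, perform
 * TSO/VLAN/timestamp prep, map for DMA and hand off to the hardware.
 */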
static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	netdev_tx_t ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel[skb->queue_mapping];
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}

static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_set_rx_mode\n");

	hw_if->config_rx_mode(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

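/* ndo_set_mac_address callback: validate the requested address, store
 * it in the netdev and program it into the MAC address registers.
 */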
static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, saddr->sa_data);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

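/* ndo_change_mtu callback: the Rx buffer size is derived from the MTU,
 * so recalculate it first and then restart the device so the new
 * buffer size takes effect on the rings.
 */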
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	WRITE_ONCE(netdev->mtu, mtu);

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

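/* ndo_get_stats64 callback: the counters live in the MMC (MAC
 * management counters) hardware block, so refresh the cached snapshot
 * from the registers before translating it into rtnl_link_stats64.
 */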
static void xgbe_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);
}

static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

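/* netpoll support: with per-channel interrupts each channel ISR can be
 * invoked directly; otherwise the shared device ISR is called with its
 * interrupt temporarily disabled to avoid re-entrance.
 */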
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			xgbe_dma_isr(channel->dma_irq, channel);
		}
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 tc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	tc = mqprio->num_tc;

	if (tc > pdata->hw_feat.tc_cnt)
		return -EINVAL;

	pdata->num_tcs = tc;
	pdata->hw_if.config_tc(pdata);

	return 0;
}

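/* ndo_fix_features callback: the VXLAN offloads are only usable as a
 * group on this hardware, so resolve any user-requested combination
 * into one the device can actually honor before it is committed.
 */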
static netdev_features_t xgbe_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	netdev_features_t vxlan_base;

	vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;

	if (!pdata->hw_feat.vxn)
		return features;

	/* VXLAN CSUM requires VXLAN base */
	if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
	    !(features & NETIF_F_GSO_UDP_TUNNEL)) {
		netdev_notice(netdev,
			      "forcing tx udp tunnel support\n");
		features |= NETIF_F_GSO_UDP_TUNNEL;
	}

	/* Can't do one without doing the other */
	if ((features & vxlan_base) != vxlan_base) {
		netdev_notice(netdev,
			      "forcing both tx and rx udp tunnel support\n");
		features |= vxlan_base;
	}

	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming on\n");
			features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	} else {
		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming off\n");
			features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	}

	return features;
}

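/* ndo_set_features callback: compare the requested features against the
 * cached state and enable/disable RSS, Rx checksum offload and VLAN
 * stripping/filtering in hardware accordingly.  Toggling Rx checksum
 * also toggles split-header and VXLAN offload and schedules a restart.
 */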
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum) {
		hw_if->enable_sph(pdata);
		hw_if->enable_vxlan(pdata);
		hw_if->enable_rx_csum(pdata);
		schedule_work(&pdata->restart_work);
	} else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
		hw_if->disable_sph(pdata);
		hw_if->disable_vxlan(pdata);
		hw_if->disable_rx_csum(pdata);
		schedule_work(&pdata->restart_work);
	}

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

static netdev_features_t xgbe_features_check(struct sk_buff *skb,
					     struct net_device *netdev,
					     netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	return features;
}

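/* Table of net_device callbacks for this driver.  It is kept file-local
 * and exposed through xgbe_get_netdev_ops() so bus-specific probe code
 * can attach it.  A minimal sketch of such a caller (illustrative only;
 * everything except netdev_ops and xgbe_get_netdev_ops() below is an
 * assumption, not code from this driver):
 *
 *	struct net_device *netdev = ...;  // allocated by the glue layer
 *
 *	netdev->netdev_ops = xgbe_get_netdev_ops();
 *	ret = register_netdev(netdev);
 */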
static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_tx_timeout		= xgbe_tx_timeout,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_fix_features	= xgbe_fix_features,
	.ndo_set_features	= xgbe_set_features,
	.ndo_features_check	= xgbe_features_check,
};

const struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return &xgbe_netdev_ops;
}

static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}

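/* Allocate an skb for a received packet and copy the (split) header
 * portion out of the DMA header buffer into the linear area.  Any
 * remaining payload is attached later as a page fragment.
 */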
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct napi_struct *napi,
				       struct xgbe_ring_data *rdata,
				       unsigned int len)
{
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Pull in the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	skb_copy_to_linear_data(skb, packet, len);
	skb_put(skb, len);

	return skb;
}

static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet)
{
	/* Always zero if not the first descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
		return 0;

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len)
		return rdata->rx.hdr_len;

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.hdr.dma_len;

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used
	 */
	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
}

static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet,
				     unsigned int len)
{
	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.buf.dma_len;

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return rdata->rx.len - len;
}

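/* Reclaim completed Tx descriptors for a channel: free the mapped skbs,
 * report the completed packet/byte counts to BQL and wake the queue if
 * it was stopped and enough descriptors are free again.  Processing is
 * bounded by XGBE_TX_DESC_MAX_PROC per invocation.
 */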
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}

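/* Receive processing for a channel, up to the NAPI budget.  A packet
 * may span several descriptors (plus a trailing context descriptor when
 * timestamping), so partially assembled skb state is saved in the ring
 * data if the budget expires mid-packet and restored on the next poll.
 */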
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int last, error, context_next, context;
	unsigned int len, buf1_len, buf2_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	last = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				      LAST);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((!last || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(rdata, packet);
			len += buf1_len;
			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
			len += buf2_len;

			if (buf2_len > rdata->rx.buf.dma_len) {
				/* Hardware inconsistency within the
				 * descriptors that has resulted in a
				 * length underflow.
				 */
				error = 1;
				goto skip_data;
			}

			if (!skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      buf1_len);
				if (!skb) {
					error = 1;
					goto skip_data;
				}
			}

			if (buf2_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						buf2_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

skip_data:
		if (!last || context_next)
			goto read_again;

		if (!skb || error) {
			dev_kfree_skb(skb);
			goto next_packet;
		}

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, TNP)) {
			skb->encapsulation = 1;

			if (XGMAC_GET_BITS(packet->attributes,
					   RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
				skb->csum_level = 1;
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (!last || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}

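/* NAPI poll handlers: xgbe_one_poll() services a single channel when
 * per-channel interrupts are in use, while xgbe_all_poll() walks every
 * channel from the shared interrupt, splitting the budget evenly across
 * the Rx rings.  Interrupts are re-enabled only once a poll completes
 * under budget.
 */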
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	struct xgbe_prv_data *pdata = channel->pdata;
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_enable_rx_tx_int(pdata, channel);
		else
			enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}

static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];

			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}

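/* Debug helpers: dump raw Tx/Rx descriptor words and packet contents to
 * the kernel log.  These are only invoked when the corresponding
 * netif_msg_* message levels are enabled.
 */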
void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}

void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char buffer[128];
	unsigned int i;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto));

	for (i = 0; i < skb->len; i += 32) {
		unsigned int len = min(skb->len - i, 32U);

		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
				   buffer, sizeof(buffer), false);
		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
	}

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}