xref: /linux/drivers/net/ethernet/amd/xgbe/xgbe-drv.c (revision 634ec1fc7982efeeeeed4a7688b0004827b43a21)
1 // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
2 /*
3  * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
4  * Copyright (c) 2014, Synopsys, Inc.
5  * All rights reserved
6  */
7 
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/tcp.h>
11 #include <linux/if_vlan.h>
12 #include <linux/interrupt.h>
13 #include <linux/clk.h>
14 #include <linux/if_ether.h>
15 #include <linux/net_tstamp.h>
16 #include <linux/phy.h>
17 #include <net/vxlan.h>
18 
19 #include "xgbe.h"
20 #include "xgbe-common.h"
21 
22 static unsigned int ecc_sec_info_threshold = 10;
23 static unsigned int ecc_sec_warn_threshold = 10000;
24 static unsigned int ecc_sec_period = 600;
25 static unsigned int ecc_ded_threshold = 2;
26 static unsigned int ecc_ded_period = 600;
27 
28 #ifdef CONFIG_AMD_XGBE_HAVE_ECC
29 /* Only expose the ECC parameters if supported */
30 module_param(ecc_sec_info_threshold, uint, 0644);
31 MODULE_PARM_DESC(ecc_sec_info_threshold,
32 		 " ECC corrected error informational threshold setting");
33 
34 module_param(ecc_sec_warn_threshold, uint, 0644);
35 MODULE_PARM_DESC(ecc_sec_warn_threshold,
36 		 " ECC corrected error warning threshold setting");
37 
38 module_param(ecc_sec_period, uint, 0644);
39 MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");
40 
41 module_param(ecc_ded_threshold, uint, 0644);
42 MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");
43 
44 module_param(ecc_ded_period, uint, 0644);
45 MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
46 #endif
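/* When ECC support is built in, these parameters can typically be set at
 * module load time, e.g.:
 *
 *   modprobe amd-xgbe ecc_sec_info_threshold=20 ecc_sec_period=300
 *
 * and, as they are registered with mode 0644, adjusted afterwards through
 * /sys/module/amd_xgbe/parameters/.
 */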
47 
48 static int xgbe_one_poll(struct napi_struct *, int);
49 static int xgbe_all_poll(struct napi_struct *, int);
50 static void xgbe_stop(struct xgbe_prv_data *);
51 
52 static void *xgbe_alloc_node(size_t size, int node)
53 {
54 	void *mem;
55 
56 	mem = kzalloc_node(size, GFP_KERNEL, node);
57 	if (!mem)
58 		mem = kzalloc(size, GFP_KERNEL);
59 
60 	return mem;
61 }
62 
63 static void xgbe_free_channels(struct xgbe_prv_data *pdata)
64 {
65 	unsigned int i;
66 
67 	for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
68 		if (!pdata->channel[i])
69 			continue;
70 
71 		kfree(pdata->channel[i]->rx_ring);
72 		kfree(pdata->channel[i]->tx_ring);
73 		kfree(pdata->channel[i]);
74 
75 		pdata->channel[i] = NULL;
76 	}
77 
78 	pdata->channel_count = 0;
79 }
80 
81 static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
82 {
83 	struct xgbe_channel *channel;
84 	struct xgbe_ring *ring;
85 	unsigned int count, i;
86 	unsigned int cpu;
87 	int node;
88 
89 	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
90 	for (i = 0; i < count; i++) {
91 		/* Attempt to use a CPU on the node the device is on */
92 		cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));
93 
94 		/* Set the allocation node based on the returned CPU */
95 		node = cpu_to_node(cpu);
96 
97 		channel = xgbe_alloc_node(sizeof(*channel), node);
98 		if (!channel)
99 			goto err_mem;
100 		pdata->channel[i] = channel;
101 
102 		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
103 		channel->pdata = pdata;
104 		channel->queue_index = i;
105 		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
106 				    (DMA_CH_INC * i);
107 		channel->node = node;
108 		cpumask_set_cpu(cpu, &channel->affinity_mask);
109 
110 		if (pdata->per_channel_irq)
111 			channel->dma_irq = pdata->channel_irq[i];
112 
113 		if (i < pdata->tx_ring_count) {
114 			ring = xgbe_alloc_node(sizeof(*ring), node);
115 			if (!ring)
116 				goto err_mem;
117 
118 			spin_lock_init(&ring->lock);
119 			ring->node = node;
120 
121 			channel->tx_ring = ring;
122 		}
123 
124 		if (i < pdata->rx_ring_count) {
125 			ring = xgbe_alloc_node(sizeof(*ring), node);
126 			if (!ring)
127 				goto err_mem;
128 
129 			spin_lock_init(&ring->lock);
130 			ring->node = node;
131 
132 			channel->rx_ring = ring;
133 		}
134 
135 		netif_dbg(pdata, drv, pdata->netdev,
136 			  "%s: cpu=%u, node=%d\n", channel->name, cpu, node);
137 
138 		netif_dbg(pdata, drv, pdata->netdev,
139 			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
140 			  channel->name, channel->dma_regs, channel->dma_irq,
141 			  channel->tx_ring, channel->rx_ring);
142 	}
143 
144 	pdata->channel_count = count;
145 
146 	return 0;
147 
148 err_mem:
149 	xgbe_free_channels(pdata);
150 
151 	return -ENOMEM;
152 }
153 
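/* Ring occupancy accounting: 'cur' counts descriptors submitted and 'dirty'
 * counts descriptors reclaimed; both are free-running unsigned counters, so
 * the subtractions below stay correct across 32-bit wraparound. For example,
 * with rdesc_count = 512, cur = 1030 and dirty = 1020, ten descriptors are
 * still in flight and 502 are available for new Tx work.
 */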
154 static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
155 {
156 	return (ring->rdesc_count - (ring->cur - ring->dirty));
157 }
158 
159 static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
160 {
161 	return (ring->cur - ring->dirty);
162 }
163 
164 static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
165 				    struct xgbe_ring *ring, unsigned int count)
166 {
167 	struct xgbe_prv_data *pdata = channel->pdata;
168 
169 	if (count > xgbe_tx_avail_desc(ring)) {
170 		netif_info(pdata, drv, pdata->netdev,
171 			   "Tx queue stopped, not enough descriptors available\n");
172 		netif_stop_subqueue(pdata->netdev, channel->queue_index);
173 		ring->tx.queue_stopped = 1;
174 
175 		/* If we haven't notified the hardware because of xmit_more
176 		 * support, tell it now
177 		 */
178 		if (ring->tx.xmit_more)
179 			pdata->hw_if.tx_start_xmit(channel, ring);
180 
181 		return NETDEV_TX_BUSY;
182 	}
183 
184 	return 0;
185 }
186 
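/* Illustrative sizing, assuming the usual XGBE_RX_BUF_ALIGN of 64: a 1500
 * byte MTU gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) =
 * 1522 bytes, which survives the clamp to [XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE]
 * and is then rounded up to the next multiple of the alignment, yielding a
 * 1536 byte Rx buffer.
 */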
187 static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
188 {
189 	unsigned int rx_buf_size;
190 
191 	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
192 	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
193 
194 	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
195 		      ~(XGBE_RX_BUF_ALIGN - 1);
196 
197 	return rx_buf_size;
198 }
199 
200 static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
201 				  struct xgbe_channel *channel)
202 {
203 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
204 	enum xgbe_int int_id;
205 
206 	if (channel->tx_ring && channel->rx_ring)
207 		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
208 	else if (channel->tx_ring)
209 		int_id = XGMAC_INT_DMA_CH_SR_TI;
210 	else if (channel->rx_ring)
211 		int_id = XGMAC_INT_DMA_CH_SR_RI;
212 	else
213 		return;
214 
215 	hw_if->enable_int(channel, int_id);
216 }
217 
218 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
219 {
220 	unsigned int i;
221 
222 	for (i = 0; i < pdata->channel_count; i++)
223 		xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
224 }
225 
226 static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
227 				   struct xgbe_channel *channel)
228 {
229 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
230 	enum xgbe_int int_id;
231 
232 	if (channel->tx_ring && channel->rx_ring)
233 		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
234 	else if (channel->tx_ring)
235 		int_id = XGMAC_INT_DMA_CH_SR_TI;
236 	else if (channel->rx_ring)
237 		int_id = XGMAC_INT_DMA_CH_SR_RI;
238 	else
239 		return;
240 
241 	hw_if->disable_int(channel, int_id);
242 }
243 
244 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
245 {
246 	unsigned int i;
247 
248 	for (i = 0; i < pdata->channel_count; i++)
249 		xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
250 }
251 
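/* Corrected (single-bit) ECC errors are counted within a rolling window of
 * ecc_sec_period seconds; a quiet period restarts the window with a count
 * of one. With the defaults above, more than 10 corrected errors inside a
 * 600 second window produces a one-time informational warning, and more
 * than 10000 makes this function return true so the caller can disable
 * single-bit error reporting for that area.
 */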
252 static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
253 			 unsigned int *count, const char *area)
254 {
255 	if (time_before(jiffies, *period)) {
256 		(*count)++;
257 	} else {
258 		*period = jiffies + (ecc_sec_period * HZ);
259 		*count = 1;
260 	}
261 
262 	if (*count > ecc_sec_info_threshold)
263 		dev_warn_once(pdata->dev,
264 			      "%s ECC corrected errors exceed informational threshold\n",
265 			      area);
266 
267 	if (*count > ecc_sec_warn_threshold) {
268 		dev_warn_once(pdata->dev,
269 			      "%s ECC corrected errors exceed warning threshold\n",
270 			      area);
271 		return true;
272 	}
273 
274 	return false;
275 }
276 
277 static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
278 			 unsigned int *count, const char *area)
279 {
280 	if (time_before(jiffies, *period)) {
281 		(*count)++;
282 	} else {
283 		*period = jiffies + (ecc_ded_period * HZ);
284 		*count = 1;
285 	}
286 
287 	if (*count > ecc_ded_threshold) {
288 		netdev_alert(pdata->netdev,
289 			     "%s ECC detected errors exceed threshold\n",
290 			     area);
291 		return true;
292 	}
293 
294 	return false;
295 }
296 
297 static void xgbe_ecc_isr_bh_work(struct work_struct *work)
298 {
299 	struct xgbe_prv_data *pdata = from_work(pdata, work, ecc_bh_work);
300 	unsigned int ecc_isr;
301 	bool stop = false;
302 
303 	/* Mask status with only the interrupts we care about */
304 	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
305 	ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
306 	netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);
307 
308 	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
309 		stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
310 				     &pdata->tx_ded_count, "TX fifo");
311 	}
312 
313 	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
314 		stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
315 				     &pdata->rx_ded_count, "RX fifo");
316 	}
317 
318 	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
319 		stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
320 				     &pdata->desc_ded_count,
321 				     "descriptor cache");
322 	}
323 
324 	if (stop) {
325 		pdata->hw_if.disable_ecc_ded(pdata);
326 		schedule_work(&pdata->stopdev_work);
327 		goto out;
328 	}
329 
330 	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
331 		if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
332 				 &pdata->tx_sec_count, "TX fifo"))
333 			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
334 	}
335 
336 	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
337 		if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
338 				 &pdata->rx_sec_count, "RX fifo"))
339 			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);
340 
341 	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
342 		if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
343 				 &pdata->desc_sec_count, "descriptor cache"))
344 			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);
345 
346 out:
347 	/* Clear all ECC interrupts */
348 	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
349 
350 	/* Reissue interrupt if status is not clear */
351 	if (pdata->vdata->irq_reissue_support)
352 		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
353 }
354 
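/* Hard interrupt entry points: when the platform sets isr_as_bh_work, the
 * register-heavy handling above is deferred to the system bottom-half
 * workqueue; otherwise it runs synchronously in hard-irq context.
 */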
355 static irqreturn_t xgbe_ecc_isr(int irq, void *data)
356 {
357 	struct xgbe_prv_data *pdata = data;
358 
359 	if (pdata->isr_as_bh_work)
360 		queue_work(system_bh_wq, &pdata->ecc_bh_work);
361 	else
362 		xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
363 
364 	return IRQ_HANDLED;
365 }
366 
367 static void xgbe_isr_bh_work(struct work_struct *work)
368 {
369 	struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work);
370 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
371 	struct xgbe_channel *channel;
372 	unsigned int dma_isr, dma_ch_isr;
373 	unsigned int mac_isr, mac_tssr, mac_mdioisr;
374 	unsigned int i;
375 
376 	/* The DMA interrupt status register also reports MAC and MTL
377 	 * interrupts. So for polling mode, we just need to check for
378 	 * this register to be non-zero
379 	 */
380 	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
381 	if (!dma_isr)
382 		goto isr_done;
383 
384 	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
385 
386 	for (i = 0; i < pdata->channel_count; i++) {
387 		if (!(dma_isr & (1 << i)))
388 			continue;
389 
390 		channel = pdata->channel[i];
391 
392 		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
393 		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
394 			  i, dma_ch_isr);
395 
396 		/* The TI or RI interrupt bits may still be set even if using
397 		 * per channel DMA interrupts. Check to be sure those are not
398 		 * enabled before using the private data napi structure.
399 		 */
400 		if (!pdata->per_channel_irq &&
401 		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
402 		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
403 			if (napi_schedule_prep(&pdata->napi)) {
404 				/* Disable Tx and Rx interrupts */
405 				xgbe_disable_rx_tx_ints(pdata);
406 
407 				/* Turn on polling */
408 				__napi_schedule(&pdata->napi);
409 			}
410 		} else {
411 			/* Don't clear Rx/Tx status if doing per channel DMA
412 			 * interrupts, these will be cleared by the ISR for
413 			 * per channel DMA interrupts.
414 			 */
415 			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
416 			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
417 		}
418 
419 		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
420 			pdata->ext_stats.rx_buffer_unavailable++;
421 
422 		/* Restart the device on a Fatal Bus Error */
423 		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
424 			schedule_work(&pdata->restart_work);
425 
426 		/* Clear interrupt signals */
427 		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
428 	}
429 
430 	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
431 		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
432 
433 		netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
434 			  mac_isr);
435 
436 		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
437 			hw_if->tx_mmc_int(pdata);
438 
439 		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
440 			hw_if->rx_mmc_int(pdata);
441 
442 		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
443 			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
444 
445 			netif_dbg(pdata, intr, pdata->netdev,
446 				  "MAC_TSSR=%#010x\n", mac_tssr);
447 
448 			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
449 				/* Read Tx Timestamp to clear interrupt */
450 				pdata->tx_tstamp =
451 					xgbe_get_tx_tstamp(pdata);
452 				queue_work(pdata->dev_workqueue,
453 					   &pdata->tx_tstamp_work);
454 			}
455 		}
456 
457 		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
458 			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);
459 
460 			netif_dbg(pdata, intr, pdata->netdev,
461 				  "MAC_MDIOISR=%#010x\n", mac_mdioisr);
462 
463 			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
464 					   SNGLCOMPINT))
465 				complete(&pdata->mdio_complete);
466 		}
467 	}
468 
469 isr_done:
470 	/* If there is not a separate AN irq, handle it here */
471 	if (pdata->dev_irq == pdata->an_irq)
472 		pdata->phy_if.an_isr(pdata);
473 
474 	/* If there is not a separate ECC irq, handle it here */
475 	if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
476 		xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
477 
478 	/* If there is not a separate I2C irq, handle it here */
479 	if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
480 		pdata->i2c_if.i2c_isr(pdata);
481 
482 	/* Reissue interrupt if status is not clear */
483 	if (pdata->vdata->irq_reissue_support) {
484 		unsigned int reissue_mask;
485 
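		/* Judging from the uses in this file, bit 0 of
		 * XP_INT_REISSUE_EN re-arms the device interrupt, bit 1 the
		 * ECC interrupt (see xgbe_ecc_isr_bh_work()), and bits 4-19
		 * the sixteen possible DMA channels, which only need
		 * re-arming here when they are routed through this handler.
		 */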
486 		reissue_mask = 1 << 0;
487 		if (!pdata->per_channel_irq)
488 			reissue_mask |= 0xffff << 4;
489 
490 		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
491 	}
492 }
493 
494 static irqreturn_t xgbe_isr(int irq, void *data)
495 {
496 	struct xgbe_prv_data *pdata = data;
497 
498 	if (pdata->isr_as_bh_work)
499 		queue_work(system_bh_wq, &pdata->dev_bh_work);
500 	else
501 		xgbe_isr_bh_work(&pdata->dev_bh_work);
502 
503 	return IRQ_HANDLED;
504 }
505 
506 static irqreturn_t xgbe_dma_isr(int irq, void *data)
507 {
508 	struct xgbe_channel *channel = data;
509 	struct xgbe_prv_data *pdata = channel->pdata;
510 	unsigned int dma_status;
511 
512 	/* Per channel DMA interrupts are enabled, so we use the per
513 	 * channel napi structure and not the private data napi structure
514 	 */
515 	if (napi_schedule_prep(&channel->napi)) {
516 		/* Disable Tx and Rx interrupts */
517 		if (pdata->channel_irq_mode)
518 			xgbe_disable_rx_tx_int(pdata, channel);
519 		else
520 			disable_irq_nosync(channel->dma_irq);
521 
522 		/* Turn on polling */
523 		__napi_schedule_irqoff(&channel->napi);
524 	}
525 
526 	/* Clear Tx/Rx signals */
527 	dma_status = 0;
528 	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
529 	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
530 	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);
531 
532 	return IRQ_HANDLED;
533 }
534 
535 static void xgbe_tx_timer(struct timer_list *t)
536 {
537 	struct xgbe_channel *channel = timer_container_of(channel, t,
538 							  tx_timer);
539 	struct xgbe_prv_data *pdata = channel->pdata;
540 	struct napi_struct *napi;
541 
542 	DBGPR("-->xgbe_tx_timer\n");
543 
544 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
545 
546 	if (napi_schedule_prep(napi)) {
547 		/* Disable Tx and Rx interrupts */
548 		if (pdata->per_channel_irq)
549 			if (pdata->channel_irq_mode)
550 				xgbe_disable_rx_tx_int(pdata, channel);
551 			else
552 				disable_irq_nosync(channel->dma_irq);
553 		else
554 			xgbe_disable_rx_tx_ints(pdata);
555 
556 		/* Turn on polling */
557 		__napi_schedule(napi);
558 	}
559 
560 	channel->tx_timer_active = 0;
561 
562 	DBGPR("<--xgbe_tx_timer\n");
563 }
564 
565 static void xgbe_service(struct work_struct *work)
566 {
567 	struct xgbe_prv_data *pdata = container_of(work,
568 						   struct xgbe_prv_data,
569 						   service_work);
570 
571 	pdata->phy_if.phy_status(pdata);
572 }
573 
574 static void xgbe_service_timer(struct timer_list *t)
575 {
576 	struct xgbe_prv_data *pdata = timer_container_of(pdata, t,
577 							 service_timer);
578 	struct xgbe_channel *channel;
579 	unsigned int i;
580 
581 	queue_work(pdata->dev_workqueue, &pdata->service_work);
582 
583 	mod_timer(&pdata->service_timer, jiffies + HZ);
584 
585 	if (!pdata->tx_usecs)
586 		return;
587 
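	/* Re-arm the per-channel Tx timers so that, with Tx interrupt
	 * coalescing in effect, completed descriptors are still processed
	 * via NAPI within roughly tx_usecs even if the frame-count trigger
	 * is never reached.
	 */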
588 	for (i = 0; i < pdata->channel_count; i++) {
589 		channel = pdata->channel[i];
590 		if (!channel->tx_ring || channel->tx_timer_active)
591 			break;
592 		channel->tx_timer_active = 1;
593 		mod_timer(&channel->tx_timer,
594 			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
595 	}
596 }
597 
598 static void xgbe_init_timers(struct xgbe_prv_data *pdata)
599 {
600 	struct xgbe_channel *channel;
601 	unsigned int i;
602 
603 	timer_setup(&pdata->service_timer, xgbe_service_timer, 0);
604 
605 	for (i = 0; i < pdata->channel_count; i++) {
606 		channel = pdata->channel[i];
607 		if (!channel->tx_ring)
608 			break;
609 
610 		timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
611 	}
612 }
613 
614 static void xgbe_start_timers(struct xgbe_prv_data *pdata)
615 {
616 	mod_timer(&pdata->service_timer, jiffies + HZ);
617 }
618 
619 static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
620 {
621 	struct xgbe_channel *channel;
622 	unsigned int i;
623 
624 	timer_delete_sync(&pdata->service_timer);
625 
626 	for (i = 0; i < pdata->channel_count; i++) {
627 		channel = pdata->channel[i];
628 		if (!channel->tx_ring)
629 			break;
630 
631 		/* Deactivate the Tx timer */
632 		timer_delete_sync(&channel->tx_timer);
633 		channel->tx_timer_active = 0;
634 	}
635 }
636 
637 void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
638 {
639 	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
640 	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
641 
642 	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
643 	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
644 	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
645 
646 	memset(hw_feat, 0, sizeof(*hw_feat));
647 
648 	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
649 
650 	/* Hardware feature register 0 */
651 	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
652 	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
653 	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
654 	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
655 	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
656 	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
657 	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
658 	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
659 	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
660 	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
661 	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
662 	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
663 					      ADDMACADRSEL);
664 	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
665 	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
666 	hw_feat->vxn         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);
667 
668 	/* Hardware feature register 1 */
669 	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
670 						RXFIFOSIZE);
671 	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
672 						TXFIFOSIZE);
673 	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
674 	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
675 	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
676 	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
677 	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
678 	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
679 	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
680 	hw_feat->tc_cnt	       = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
681 	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
682 						  HASHTBLSZ);
683 	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
684 						  L3L4FNUM);
685 
686 	/* Hardware feature register 2 */
687 	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
688 	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
689 	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
690 	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
691 	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
692 	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
693 
694 	/* Sanity check and warn if hardware reports more than supported */
695 	if (hw_feat->pps_out_num > XGBE_MAX_PPS_OUT) {
696 		dev_warn(pdata->dev,
697 			 "Hardware reports %u PPS outputs, limiting to %u\n",
698 			 hw_feat->pps_out_num, XGBE_MAX_PPS_OUT);
699 		hw_feat->pps_out_num = XGBE_MAX_PPS_OUT;
700 	}
701 
702 	if (hw_feat->aux_snap_num > XGBE_MAX_AUX_SNAP) {
703 		dev_warn(pdata->dev,
704 			 "Hardware reports %u aux snapshot inputs, limiting to %u\n",
705 			 hw_feat->aux_snap_num, XGBE_MAX_AUX_SNAP);
706 		hw_feat->aux_snap_num = XGBE_MAX_AUX_SNAP;
707 	}
708 
709 	/* Translate the Hash Table size into actual number */
710 	switch (hw_feat->hash_table_size) {
711 	case 0:
712 		break;
713 	case 1:
714 		hw_feat->hash_table_size = 64;
715 		break;
716 	case 2:
717 		hw_feat->hash_table_size = 128;
718 		break;
719 	case 3:
720 		hw_feat->hash_table_size = 256;
721 		break;
722 	}
723 
724 	/* Translate the address width setting into actual number */
725 	switch (hw_feat->dma_width) {
726 	case 0:
727 		hw_feat->dma_width = 32;
728 		break;
729 	case 1:
730 		hw_feat->dma_width = 40;
731 		break;
732 	case 2:
733 		hw_feat->dma_width = 48;
734 		break;
735 	default:
736 		hw_feat->dma_width = 32;
737 	}
738 
739 	/* The Queue, Channel and TC counts are zero based so increment them
740 	 * to get the actual number
741 	 */
742 	hw_feat->rx_q_cnt++;
743 	hw_feat->tx_q_cnt++;
744 	hw_feat->rx_ch_cnt++;
745 	hw_feat->tx_ch_cnt++;
746 	hw_feat->tc_cnt++;
747 
748 	/* Translate the fifo sizes into actual numbers: an encoded value of N means a fifo of 2^(N + 7) bytes, e.g. N = 7 is 16KB */
749 	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
750 	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
751 
752 	if (netif_msg_probe(pdata)) {
753 		dev_dbg(pdata->dev, "Hardware features:\n");
754 
755 		/* Hardware feature register 0 */
756 		dev_dbg(pdata->dev, "  1GbE support              : %s\n",
757 			hw_feat->gmii ? "yes" : "no");
758 		dev_dbg(pdata->dev, "  VLAN hash filter          : %s\n",
759 			hw_feat->vlhash ? "yes" : "no");
760 		dev_dbg(pdata->dev, "  MDIO interface            : %s\n",
761 			hw_feat->sma ? "yes" : "no");
762 		dev_dbg(pdata->dev, "  Wake-up packet support    : %s\n",
763 			hw_feat->rwk ? "yes" : "no");
764 		dev_dbg(pdata->dev, "  Magic packet support      : %s\n",
765 			hw_feat->mgk ? "yes" : "no");
766 		dev_dbg(pdata->dev, "  Management counters       : %s\n",
767 			hw_feat->mmc ? "yes" : "no");
768 		dev_dbg(pdata->dev, "  ARP offload               : %s\n",
769 			hw_feat->aoe ? "yes" : "no");
770 		dev_dbg(pdata->dev, "  IEEE 1588-2008 Timestamp  : %s\n",
771 			hw_feat->ts ? "yes" : "no");
772 		dev_dbg(pdata->dev, "  Energy Efficient Ethernet : %s\n",
773 			hw_feat->eee ? "yes" : "no");
774 		dev_dbg(pdata->dev, "  TX checksum offload       : %s\n",
775 			hw_feat->tx_coe ? "yes" : "no");
776 		dev_dbg(pdata->dev, "  RX checksum offload       : %s\n",
777 			hw_feat->rx_coe ? "yes" : "no");
778 		dev_dbg(pdata->dev, "  Additional MAC addresses  : %u\n",
779 			hw_feat->addn_mac);
780 		dev_dbg(pdata->dev, "  Timestamp source          : %s\n",
781 			(hw_feat->ts_src == 1) ? "internal" :
782 			(hw_feat->ts_src == 2) ? "external" :
783 			(hw_feat->ts_src == 3) ? "internal/external" : "n/a");
784 		dev_dbg(pdata->dev, "  SA/VLAN insertion         : %s\n",
785 			hw_feat->sa_vlan_ins ? "yes" : "no");
786 		dev_dbg(pdata->dev, "  VXLAN/NVGRE support       : %s\n",
787 			hw_feat->vxn ? "yes" : "no");
788 
789 		/* Hardware feature register 1 */
790 		dev_dbg(pdata->dev, "  RX fifo size              : %u\n",
791 			hw_feat->rx_fifo_size);
792 		dev_dbg(pdata->dev, "  TX fifo size              : %u\n",
793 			hw_feat->tx_fifo_size);
794 		dev_dbg(pdata->dev, "  IEEE 1588 high word       : %s\n",
795 			hw_feat->adv_ts_hi ? "yes" : "no");
796 		dev_dbg(pdata->dev, "  DMA width                 : %u\n",
797 			hw_feat->dma_width);
798 		dev_dbg(pdata->dev, "  Data Center Bridging      : %s\n",
799 			hw_feat->dcb ? "yes" : "no");
800 		dev_dbg(pdata->dev, "  Split header              : %s\n",
801 			hw_feat->sph ? "yes" : "no");
802 		dev_dbg(pdata->dev, "  TCP Segmentation Offload  : %s\n",
803 			hw_feat->tso ? "yes" : "no");
804 		dev_dbg(pdata->dev, "  Debug memory interface    : %s\n",
805 			hw_feat->dma_debug ? "yes" : "no");
806 		dev_dbg(pdata->dev, "  Receive Side Scaling      : %s\n",
807 			hw_feat->rss ? "yes" : "no");
808 		dev_dbg(pdata->dev, "  Traffic Class count       : %u\n",
809 			hw_feat->tc_cnt);
810 		dev_dbg(pdata->dev, "  Hash table size           : %u\n",
811 			hw_feat->hash_table_size);
812 		dev_dbg(pdata->dev, "  L3/L4 Filters             : %u\n",
813 			hw_feat->l3l4_filter_num);
814 
815 		/* Hardware feature register 2 */
816 		dev_dbg(pdata->dev, "  RX queue count            : %u\n",
817 			hw_feat->rx_q_cnt);
818 		dev_dbg(pdata->dev, "  TX queue count            : %u\n",
819 			hw_feat->tx_q_cnt);
820 		dev_dbg(pdata->dev, "  RX DMA channel count      : %u\n",
821 			hw_feat->rx_ch_cnt);
822 		dev_dbg(pdata->dev, "  TX DMA channel count      : %u\n",
823 			hw_feat->tx_ch_cnt);
824 		dev_dbg(pdata->dev, "  PPS outputs               : %u\n",
825 			hw_feat->pps_out_num);
826 		dev_dbg(pdata->dev, "  Auxiliary snapshot inputs : %u\n",
827 			hw_feat->aux_snap_num);
828 	}
829 }
830 
831 static int xgbe_vxlan_set_port(struct net_device *netdev, unsigned int table,
832 			       unsigned int entry, struct udp_tunnel_info *ti)
833 {
834 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
835 
836 	pdata->vxlan_port = be16_to_cpu(ti->port);
837 	pdata->hw_if.enable_vxlan(pdata);
838 
839 	return 0;
840 }
841 
842 static int xgbe_vxlan_unset_port(struct net_device *netdev, unsigned int table,
843 				 unsigned int entry, struct udp_tunnel_info *ti)
844 {
845 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
846 
847 	pdata->hw_if.disable_vxlan(pdata);
848 	pdata->vxlan_port = 0;
849 
850 	return 0;
851 }
852 
853 static const struct udp_tunnel_nic_info xgbe_udp_tunnels = {
854 	.set_port	= xgbe_vxlan_set_port,
855 	.unset_port	= xgbe_vxlan_unset_port,
856 	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
857 	.tables		= {
858 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
859 	},
860 };
861 
862 const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void)
863 {
864 	return &xgbe_udp_tunnels;
865 }
866 
867 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
868 {
869 	struct xgbe_channel *channel;
870 	unsigned int i;
871 
872 	if (pdata->per_channel_irq) {
873 		for (i = 0; i < pdata->channel_count; i++) {
874 			channel = pdata->channel[i];
875 			if (add)
876 				netif_napi_add(pdata->netdev, &channel->napi,
877 					       xgbe_one_poll);
878 
879 			napi_enable(&channel->napi);
880 		}
881 	} else {
882 		if (add)
883 			netif_napi_add(pdata->netdev, &pdata->napi,
884 				       xgbe_all_poll);
885 
886 		napi_enable(&pdata->napi);
887 	}
888 }
889 
890 static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
891 {
892 	struct xgbe_channel *channel;
893 	unsigned int i;
894 
895 	if (pdata->per_channel_irq) {
896 		for (i = 0; i < pdata->channel_count; i++) {
897 			channel = pdata->channel[i];
898 			napi_disable(&channel->napi);
899 
900 			if (del)
901 				netif_napi_del(&channel->napi);
902 		}
903 	} else {
904 		napi_disable(&pdata->napi);
905 
906 		if (del)
907 			netif_napi_del(&pdata->napi);
908 	}
909 }
910 
911 static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
912 {
913 	struct xgbe_channel *channel;
914 	struct net_device *netdev = pdata->netdev;
915 	unsigned int i;
916 	int ret;
917 
918 	INIT_WORK(&pdata->dev_bh_work, xgbe_isr_bh_work);
919 	INIT_WORK(&pdata->ecc_bh_work, xgbe_ecc_isr_bh_work);
920 
921 	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
922 			       netdev_name(netdev), pdata);
923 	if (ret) {
924 		netdev_alert(netdev, "error requesting irq %d\n",
925 			     pdata->dev_irq);
926 		return ret;
927 	}
928 
929 	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
930 		ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
931 				       0, pdata->ecc_name, pdata);
932 		if (ret) {
933 			netdev_alert(netdev, "error requesting ecc irq %d\n",
934 				     pdata->ecc_irq);
935 			goto err_dev_irq;
936 		}
937 	}
938 
939 	if (!pdata->per_channel_irq)
940 		return 0;
941 
942 	for (i = 0; i < pdata->channel_count; i++) {
943 		channel = pdata->channel[i];
944 		snprintf(channel->dma_irq_name,
945 			 sizeof(channel->dma_irq_name) - 1,
946 			 "%s-TxRx-%u", netdev_name(netdev),
947 			 channel->queue_index);
948 
949 		ret = devm_request_irq(pdata->dev, channel->dma_irq,
950 				       xgbe_dma_isr, 0,
951 				       channel->dma_irq_name, channel);
952 		if (ret) {
953 			netdev_alert(netdev, "error requesting irq %d\n",
954 				     channel->dma_irq);
955 			goto err_dma_irq;
956 		}
957 
958 		irq_set_affinity_hint(channel->dma_irq,
959 				      &channel->affinity_mask);
960 	}
961 
962 	return 0;
963 
964 err_dma_irq:
965 	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
966 	for (i--; i < pdata->channel_count; i--) {
967 		channel = pdata->channel[i];
968 
969 		irq_set_affinity_hint(channel->dma_irq, NULL);
970 		devm_free_irq(pdata->dev, channel->dma_irq, channel);
971 	}
972 
973 	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
974 		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
975 
976 err_dev_irq:
977 	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
978 
979 	return ret;
980 }
981 
982 static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
983 {
984 	struct xgbe_channel *channel;
985 	unsigned int i;
986 
987 	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
988 
989 	cancel_work_sync(&pdata->dev_bh_work);
990 	cancel_work_sync(&pdata->ecc_bh_work);
991 
992 	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
993 		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
994 
995 	if (!pdata->per_channel_irq)
996 		return;
997 
998 	for (i = 0; i < pdata->channel_count; i++) {
999 		channel = pdata->channel[i];
1000 
1001 		irq_set_affinity_hint(channel->dma_irq, NULL);
1002 		devm_free_irq(pdata->dev, channel->dma_irq, channel);
1003 	}
1004 }
1005 
1006 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
1007 {
1008 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1009 
1010 	DBGPR("-->xgbe_init_tx_coalesce\n");
1011 
1012 	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
1013 	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
1014 
1015 	hw_if->config_tx_coalesce(pdata);
1016 
1017 	DBGPR("<--xgbe_init_tx_coalesce\n");
1018 }
1019 
1020 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
1021 {
1022 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1023 
1024 	DBGPR("-->xgbe_init_rx_coalesce\n");
1025 
1026 	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
1027 	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
1028 	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
1029 
1030 	hw_if->config_rx_coalesce(pdata);
1031 
1032 	DBGPR("<--xgbe_init_rx_coalesce\n");
1033 }
1034 
1035 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
1036 {
1037 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1038 	struct xgbe_ring *ring;
1039 	struct xgbe_ring_data *rdata;
1040 	unsigned int i, j;
1041 
1042 	DBGPR("-->xgbe_free_tx_data\n");
1043 
1044 	for (i = 0; i < pdata->channel_count; i++) {
1045 		ring = pdata->channel[i]->tx_ring;
1046 		if (!ring)
1047 			break;
1048 
1049 		for (j = 0; j < ring->rdesc_count; j++) {
1050 			rdata = XGBE_GET_DESC_DATA(ring, j);
1051 			desc_if->unmap_rdata(pdata, rdata);
1052 		}
1053 	}
1054 
1055 	DBGPR("<--xgbe_free_tx_data\n");
1056 }
1057 
1058 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
1059 {
1060 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1061 	struct xgbe_ring *ring;
1062 	struct xgbe_ring_data *rdata;
1063 	unsigned int i, j;
1064 
1065 	DBGPR("-->xgbe_free_rx_data\n");
1066 
1067 	for (i = 0; i < pdata->channel_count; i++) {
1068 		ring = pdata->channel[i]->rx_ring;
1069 		if (!ring)
1070 			break;
1071 
1072 		for (j = 0; j < ring->rdesc_count; j++) {
1073 			rdata = XGBE_GET_DESC_DATA(ring, j);
1074 			desc_if->unmap_rdata(pdata, rdata);
1075 		}
1076 	}
1077 
1078 	DBGPR("<--xgbe_free_rx_data\n");
1079 }
1080 
1081 static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
1082 {
1083 	pdata->phy_speed = SPEED_UNKNOWN;
1084 
1085 	return pdata->phy_if.phy_reset(pdata);
1086 }
1087 
1088 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
1089 {
1090 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1091 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1092 	unsigned long flags;
1093 
1094 	DBGPR("-->xgbe_powerdown\n");
1095 
1096 	if (!netif_running(netdev) ||
1097 	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
1098 		netdev_alert(netdev, "Device is already powered down\n");
1099 		DBGPR("<--xgbe_powerdown\n");
1100 		return -EINVAL;
1101 	}
1102 
1103 	spin_lock_irqsave(&pdata->lock, flags);
1104 
1105 	if (caller == XGMAC_DRIVER_CONTEXT)
1106 		netif_device_detach(netdev);
1107 
1108 	netif_tx_stop_all_queues(netdev);
1109 
1110 	xgbe_stop_timers(pdata);
1111 	flush_workqueue(pdata->dev_workqueue);
1112 
1113 	hw_if->powerdown_tx(pdata);
1114 	hw_if->powerdown_rx(pdata);
1115 
1116 	xgbe_napi_disable(pdata, 0);
1117 
1118 	pdata->power_down = 1;
1119 
1120 	spin_unlock_irqrestore(&pdata->lock, flags);
1121 
1122 	DBGPR("<--xgbe_powerdown\n");
1123 
1124 	return 0;
1125 }
1126 
1127 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
1128 {
1129 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1130 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1131 	unsigned long flags;
1132 
1133 	DBGPR("-->xgbe_powerup\n");
1134 
1135 	if (!netif_running(netdev) ||
1136 	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
1137 		netdev_alert(netdev, "Device is already powered up\n");
1138 		DBGPR("<--xgbe_powerup\n");
1139 		return -EINVAL;
1140 	}
1141 
1142 	spin_lock_irqsave(&pdata->lock, flags);
1143 
1144 	pdata->power_down = 0;
1145 
1146 	xgbe_napi_enable(pdata, 0);
1147 
1148 	hw_if->powerup_tx(pdata);
1149 	hw_if->powerup_rx(pdata);
1150 
1151 	if (caller == XGMAC_DRIVER_CONTEXT)
1152 		netif_device_attach(netdev);
1153 
1154 	netif_tx_start_all_queues(netdev);
1155 
1156 	xgbe_start_timers(pdata);
1157 
1158 	spin_unlock_irqrestore(&pdata->lock, flags);
1159 
1160 	DBGPR("<--xgbe_powerup\n");
1161 
1162 	return 0;
1163 }
1164 
1165 static void xgbe_free_memory(struct xgbe_prv_data *pdata)
1166 {
1167 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1168 
1169 	/* Free the ring descriptors and buffers */
1170 	desc_if->free_ring_resources(pdata);
1171 
1172 	/* Free the channel and ring structures */
1173 	xgbe_free_channels(pdata);
1174 }
1175 
1176 static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
1177 {
1178 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1179 	struct net_device *netdev = pdata->netdev;
1180 	int ret;
1181 
1182 	if (pdata->new_tx_ring_count) {
1183 		pdata->tx_ring_count = pdata->new_tx_ring_count;
1184 		pdata->tx_q_count = pdata->tx_ring_count;
1185 		pdata->new_tx_ring_count = 0;
1186 	}
1187 
1188 	if (pdata->new_rx_ring_count) {
1189 		pdata->rx_ring_count = pdata->new_rx_ring_count;
1190 		pdata->new_rx_ring_count = 0;
1191 	}
1192 
1193 	/* Calculate the Rx buffer size before allocating rings */
1194 	pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
1195 
1196 	/* Allocate the channel and ring structures */
1197 	ret = xgbe_alloc_channels(pdata);
1198 	if (ret)
1199 		return ret;
1200 
1201 	/* Allocate the ring descriptors and buffers */
1202 	ret = desc_if->alloc_ring_resources(pdata);
1203 	if (ret)
1204 		goto err_channels;
1205 
1206 	/* Initialize the service and Tx timers */
1207 	xgbe_init_timers(pdata);
1208 
1209 	return 0;
1210 
1211 err_channels:
1212 	xgbe_free_memory(pdata);
1213 
1214 	return ret;
1215 }
1216 
1217 static int xgbe_start(struct xgbe_prv_data *pdata)
1218 {
1219 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1220 	struct xgbe_phy_if *phy_if = &pdata->phy_if;
1221 	struct net_device *netdev = pdata->netdev;
1222 	unsigned int i;
1223 	int ret;
1224 
1225 	/* Set the number of queues */
1226 	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
1227 	if (ret) {
1228 		netdev_err(netdev, "error setting real tx queue count\n");
1229 		return ret;
1230 	}
1231 
1232 	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
1233 	if (ret) {
1234 		netdev_err(netdev, "error setting real rx queue count\n");
1235 		return ret;
1236 	}
1237 
1238 	/* Set RSS lookup table data for programming */
1239 	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
1240 		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
1241 			       i % pdata->rx_ring_count);
1242 
1243 	ret = hw_if->init(pdata);
1244 	if (ret)
1245 		return ret;
1246 
1247 	xgbe_napi_enable(pdata, 1);
1248 
1249 	ret = xgbe_request_irqs(pdata);
1250 	if (ret)
1251 		goto err_napi;
1252 
1253 	ret = phy_if->phy_start(pdata);
1254 	if (ret)
1255 		goto err_irqs;
1256 
1257 	hw_if->enable_tx(pdata);
1258 	hw_if->enable_rx(pdata);
1259 
1260 	udp_tunnel_nic_reset_ntf(netdev);
1261 
1262 	netif_tx_start_all_queues(netdev);
1263 
1264 	xgbe_start_timers(pdata);
1265 	queue_work(pdata->dev_workqueue, &pdata->service_work);
1266 
1267 	clear_bit(XGBE_STOPPED, &pdata->dev_state);
1268 
1269 	return 0;
1270 
1271 err_irqs:
1272 	xgbe_free_irqs(pdata);
1273 
1274 err_napi:
1275 	xgbe_napi_disable(pdata, 1);
1276 
1277 	hw_if->exit(pdata);
1278 
1279 	return ret;
1280 }
1281 
1282 static void xgbe_stop(struct xgbe_prv_data *pdata)
1283 {
1284 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1285 	struct xgbe_phy_if *phy_if = &pdata->phy_if;
1286 	struct xgbe_channel *channel;
1287 	struct net_device *netdev = pdata->netdev;
1288 	struct netdev_queue *txq;
1289 	unsigned int i;
1290 
1291 	DBGPR("-->xgbe_stop\n");
1292 
1293 	if (test_bit(XGBE_STOPPED, &pdata->dev_state))
1294 		return;
1295 
1296 	netif_tx_stop_all_queues(netdev);
1297 	netif_carrier_off(pdata->netdev);
1298 
1299 	xgbe_stop_timers(pdata);
1300 	flush_workqueue(pdata->dev_workqueue);
1301 
1302 	xgbe_vxlan_unset_port(netdev, 0, 0, NULL);
1303 
1304 	hw_if->disable_tx(pdata);
1305 	hw_if->disable_rx(pdata);
1306 
1307 	phy_if->phy_stop(pdata);
1308 
1309 	xgbe_free_irqs(pdata);
1310 
1311 	xgbe_napi_disable(pdata, 1);
1312 
1313 	hw_if->exit(pdata);
1314 
1315 	for (i = 0; i < pdata->channel_count; i++) {
1316 		channel = pdata->channel[i];
1317 		if (!channel->tx_ring)
1318 			continue;
1319 
1320 		txq = netdev_get_tx_queue(netdev, channel->queue_index);
1321 		netdev_tx_reset_queue(txq);
1322 	}
1323 
1324 	set_bit(XGBE_STOPPED, &pdata->dev_state);
1325 
1326 	DBGPR("<--xgbe_stop\n");
1327 }
1328 
1329 static void xgbe_stopdev(struct work_struct *work)
1330 {
1331 	struct xgbe_prv_data *pdata = container_of(work,
1332 						   struct xgbe_prv_data,
1333 						   stopdev_work);
1334 
1335 	rtnl_lock();
1336 
1337 	xgbe_stop(pdata);
1338 
1339 	xgbe_free_tx_data(pdata);
1340 	xgbe_free_rx_data(pdata);
1341 
1342 	rtnl_unlock();
1343 
1344 	netdev_alert(pdata->netdev, "device stopped\n");
1345 }
1346 
1347 void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
1348 {
1349 	/* If not running, "restart" will happen on open */
1350 	if (!netif_running(pdata->netdev))
1351 		return;
1352 
1353 	xgbe_stop(pdata);
1354 
1355 	xgbe_free_memory(pdata);
1356 	xgbe_alloc_memory(pdata);
1357 
1358 	xgbe_start(pdata);
1359 }
1360 
1361 void xgbe_restart_dev(struct xgbe_prv_data *pdata)
1362 {
1363 	/* If not running, "restart" will happen on open */
1364 	if (!netif_running(pdata->netdev))
1365 		return;
1366 
1367 	xgbe_stop(pdata);
1368 
1369 	xgbe_free_tx_data(pdata);
1370 	xgbe_free_rx_data(pdata);
1371 
1372 	xgbe_start(pdata);
1373 }
1374 
1375 static void xgbe_restart(struct work_struct *work)
1376 {
1377 	struct xgbe_prv_data *pdata = container_of(work,
1378 						   struct xgbe_prv_data,
1379 						   restart_work);
1380 
1381 	rtnl_lock();
1382 
1383 	xgbe_restart_dev(pdata);
1384 
1385 	rtnl_unlock();
1386 }
1387 
1388 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
1389 {
1390 	if (skb_vlan_tag_present(skb))
1391 		packet->vlan_ctag = skb_vlan_tag_get(skb);
1392 }
1393 
1394 static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
1395 {
1396 	int ret;
1397 
1398 	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1399 			    TSO_ENABLE))
1400 		return 0;
1401 
1402 	ret = skb_cow_head(skb, 0);
1403 	if (ret)
1404 		return ret;
1405 
1406 	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
1407 		packet->header_len = skb_inner_tcp_all_headers(skb);
1408 		packet->tcp_header_len = inner_tcp_hdrlen(skb);
1409 	} else {
1410 		packet->header_len = skb_tcp_all_headers(skb);
1411 		packet->tcp_header_len = tcp_hdrlen(skb);
1412 	}
1413 	packet->tcp_payload_len = skb->len - packet->header_len;
1414 	packet->mss = skb_shinfo(skb)->gso_size;
1415 
1416 	DBGPR("  packet->header_len=%u\n", packet->header_len);
1417 	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
1418 	      packet->tcp_header_len, packet->tcp_payload_len);
1419 	DBGPR("  packet->mss=%u\n", packet->mss);
1420 
1421 	/* Update the number of packets that will ultimately be transmitted
1422 	 * along with the extra bytes for each extra packet
1423 	 */
1424 	packet->tx_packets = skb_shinfo(skb)->gso_segs;
1425 	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
1426 
1427 	return 0;
1428 }
1429 
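/* A frame is treated as VXLAN-encapsulated only if it is a partially
 * checksummed UDP packet whose inner protocol is a full Ethernet frame
 * (ETH_P_TEB) and whose encapsulation consists of exactly a UDP header
 * followed by the 8-byte VXLAN header.
 */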
1430 static bool xgbe_is_vxlan(struct sk_buff *skb)
1431 {
1432 	if (!skb->encapsulation)
1433 		return false;
1434 
1435 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1436 		return false;
1437 
1438 	switch (skb->protocol) {
1439 	case htons(ETH_P_IP):
1440 		if (ip_hdr(skb)->protocol != IPPROTO_UDP)
1441 			return false;
1442 		break;
1443 
1444 	case htons(ETH_P_IPV6):
1445 		if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
1446 			return false;
1447 		break;
1448 
1449 	default:
1450 		return false;
1451 	}
1452 
1453 	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1454 	    skb->inner_protocol != htons(ETH_P_TEB) ||
1455 	    (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
1456 	     sizeof(struct udphdr) + sizeof(struct vxlanhdr)))
1457 		return false;
1458 
1459 	return true;
1460 }
1461 
1462 static int xgbe_is_tso(struct sk_buff *skb)
1463 {
1464 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1465 		return 0;
1466 
1467 	if (!skb_is_gso(skb))
1468 		return 0;
1469 
1470 	DBGPR("  TSO packet to be processed\n");
1471 
1472 	return 1;
1473 }
1474 
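/* Count the descriptors a frame will consume before it is mapped. As an
 * illustration, a TSO skb with a changed MSS, a 3000 byte linear area
 * (well under XGBE_TX_MAX_BUF_SIZE) and two page fragments needs one
 * context descriptor, one TSO header descriptor and three buffer
 * descriptors, so packet->rdesc_count ends up as 5.
 */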
1475 static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1476 			     struct xgbe_ring *ring, struct sk_buff *skb,
1477 			     struct xgbe_packet_data *packet)
1478 {
1479 	skb_frag_t *frag;
1480 	unsigned int context_desc;
1481 	unsigned int len;
1482 	unsigned int i;
1483 
1484 	packet->skb = skb;
1485 
1486 	context_desc = 0;
1487 	packet->rdesc_count = 0;
1488 
1489 	packet->tx_packets = 1;
1490 	packet->tx_bytes = skb->len;
1491 
1492 	if (xgbe_is_tso(skb)) {
1493 		/* TSO requires an extra descriptor if mss is different */
1494 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
1495 			context_desc = 1;
1496 			packet->rdesc_count++;
1497 		}
1498 
1499 		/* TSO requires an extra descriptor for TSO header */
1500 		packet->rdesc_count++;
1501 
1502 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1503 			       TSO_ENABLE, 1);
1504 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1505 			       CSUM_ENABLE, 1);
1506 	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
1507 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1508 			       CSUM_ENABLE, 1);
1509 
1510 	if (xgbe_is_vxlan(skb))
1511 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1512 			       VXLAN, 1);
1513 
1514 	if (skb_vlan_tag_present(skb)) {
1515 		/* VLAN requires an extra descriptor if tag is different */
1516 		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
1517 			/* We can share with the TSO context descriptor */
1518 			if (!context_desc) {
1519 				context_desc = 1;
1520 				packet->rdesc_count++;
1521 			}
1522 
1523 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1524 			       VLAN_CTAG, 1);
1525 	}
1526 
1527 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1528 	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
1529 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1530 			       PTP, 1);
1531 
1532 	for (len = skb_headlen(skb); len;) {
1533 		packet->rdesc_count++;
1534 		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1535 	}
1536 
1537 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1538 		frag = &skb_shinfo(skb)->frags[i];
1539 		for (len = skb_frag_size(frag); len; ) {
1540 			packet->rdesc_count++;
1541 			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1542 		}
1543 	}
1544 }
1545 
1546 static int xgbe_open(struct net_device *netdev)
1547 {
1548 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1549 	int ret;
1550 
1551 	/* Create the various names based on netdev name */
1552 	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
1553 		 netdev_name(netdev));
1554 
1555 	snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
1556 		 netdev_name(netdev));
1557 
1558 	snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
1559 		 netdev_name(netdev));
1560 
1561 	/* Create workqueues */
1562 	pdata->dev_workqueue =
1563 		create_singlethread_workqueue(netdev_name(netdev));
1564 	if (!pdata->dev_workqueue) {
1565 		netdev_err(netdev, "device workqueue creation failed\n");
1566 		return -ENOMEM;
1567 	}
1568 
1569 	pdata->an_workqueue =
1570 		create_singlethread_workqueue(pdata->an_name);
1571 	if (!pdata->an_workqueue) {
1572 		netdev_err(netdev, "phy workqueue creation failed\n");
1573 		ret = -ENOMEM;
1574 		goto err_dev_wq;
1575 	}
1576 
1577 	/* Reset the phy settings */
1578 	ret = xgbe_phy_reset(pdata);
1579 	if (ret)
1580 		goto err_an_wq;
1581 
1582 	/* Enable the clocks */
1583 	ret = clk_prepare_enable(pdata->sysclk);
1584 	if (ret) {
1585 		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
1586 		goto err_an_wq;
1587 	}
1588 
1589 	ret = clk_prepare_enable(pdata->ptpclk);
1590 	if (ret) {
1591 		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
1592 		goto err_sysclk;
1593 	}
1594 
1595 	INIT_WORK(&pdata->service_work, xgbe_service);
1596 	INIT_WORK(&pdata->restart_work, xgbe_restart);
1597 	INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
1598 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1599 
1600 	/* Initialize PTP timestamping and clock. */
1601 	xgbe_init_ptp(pdata);
1602 
1603 	ret = xgbe_alloc_memory(pdata);
1604 	if (ret)
1605 		goto err_ptpclk;
1606 
1607 	ret = xgbe_start(pdata);
1608 	if (ret)
1609 		goto err_mem;
1610 
1611 	clear_bit(XGBE_DOWN, &pdata->dev_state);
1612 
1613 	return 0;
1614 
1615 err_mem:
1616 	xgbe_free_memory(pdata);
1617 
1618 err_ptpclk:
1619 	clk_disable_unprepare(pdata->ptpclk);
1620 
1621 err_sysclk:
1622 	clk_disable_unprepare(pdata->sysclk);
1623 
1624 err_an_wq:
1625 	destroy_workqueue(pdata->an_workqueue);
1626 
1627 err_dev_wq:
1628 	destroy_workqueue(pdata->dev_workqueue);
1629 
1630 	return ret;
1631 }
1632 
1633 static int xgbe_close(struct net_device *netdev)
1634 {
1635 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1636 
1637 	/* Stop the device */
1638 	xgbe_stop(pdata);
1639 
1640 	xgbe_free_memory(pdata);
1641 
1642 	/* Disable the clocks */
1643 	clk_disable_unprepare(pdata->ptpclk);
1644 	clk_disable_unprepare(pdata->sysclk);
1645 
1646 	destroy_workqueue(pdata->an_workqueue);
1647 
1648 	destroy_workqueue(pdata->dev_workqueue);
1649 
1650 	set_bit(XGBE_DOWN, &pdata->dev_state);
1651 
1652 	return 0;
1653 }
1654 
1655 static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1656 {
1657 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1658 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1659 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1660 	struct xgbe_channel *channel;
1661 	struct xgbe_ring *ring;
1662 	struct xgbe_packet_data *packet;
1663 	struct netdev_queue *txq;
1664 	netdev_tx_t ret;
1665 
1666 	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
1667 
1668 	channel = pdata->channel[skb->queue_mapping];
1669 	txq = netdev_get_tx_queue(netdev, channel->queue_index);
1670 	ring = channel->tx_ring;
1671 	packet = &ring->packet_data;
1672 
1673 	ret = NETDEV_TX_OK;
1674 
1675 	if (skb->len == 0) {
1676 		netif_err(pdata, tx_err, netdev,
1677 			  "empty skb received from stack\n");
1678 		dev_kfree_skb_any(skb);
1679 		goto tx_netdev_return;
1680 	}
1681 
1682 	/* Calculate preliminary packet info */
1683 	memset(packet, 0, sizeof(*packet));
1684 	xgbe_packet_info(pdata, ring, skb, packet);
1685 
1686 	/* Check that there are enough descriptors available */
1687 	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
1688 	if (ret)
1689 		goto tx_netdev_return;
1690 
1691 	ret = xgbe_prep_tso(skb, packet);
1692 	if (ret) {
1693 		netif_err(pdata, tx_err, netdev,
1694 			  "error processing TSO packet\n");
1695 		dev_kfree_skb_any(skb);
1696 		goto tx_netdev_return;
1697 	}
1698 	xgbe_prep_vlan(skb, packet);
1699 
1700 	if (!desc_if->map_tx_skb(channel, skb)) {
1701 		dev_kfree_skb_any(skb);
1702 		goto tx_netdev_return;
1703 	}
1704 
1705 	xgbe_prep_tx_tstamp(pdata, skb, packet);
1706 
1707 	/* Report on the actual number of bytes (to be) sent */
1708 	netdev_tx_sent_queue(txq, packet->tx_bytes);
1709 
1710 	/* Configure required descriptor fields for transmission */
1711 	hw_if->dev_xmit(channel);
1712 
1713 	if (netif_msg_pktdata(pdata))
1714 		xgbe_print_pkt(netdev, skb, true);
1715 
1716 	/* Stop the queue in advance if there may not be enough descriptors */
1717 	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
1718 
1719 	ret = NETDEV_TX_OK;
1720 
1721 tx_netdev_return:
1722 	return ret;
1723 }
1724 
1725 static void xgbe_set_rx_mode(struct net_device *netdev)
1726 {
1727 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1728 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1729 
1730 	DBGPR("-->xgbe_set_rx_mode\n");
1731 
1732 	hw_if->config_rx_mode(pdata);
1733 
1734 	DBGPR("<--xgbe_set_rx_mode\n");
1735 }
1736 
1737 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
1738 {
1739 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1740 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1741 	struct sockaddr *saddr = addr;
1742 
1743 	DBGPR("-->xgbe_set_mac_address\n");
1744 
1745 	if (!is_valid_ether_addr(saddr->sa_data))
1746 		return -EADDRNOTAVAIL;
1747 
1748 	eth_hw_addr_set(netdev, saddr->sa_data);
1749 
1750 	hw_if->set_mac_address(pdata, netdev->dev_addr);
1751 
1752 	DBGPR("<--xgbe_set_mac_address\n");
1753 
1754 	return 0;
1755 }
1756 
1757 static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
1758 {
1759 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1760 	int ret;
1761 
1762 	switch (cmd) {
1763 	case SIOCGHWTSTAMP:
1764 		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
1765 		break;
1766 
1767 	case SIOCSHWTSTAMP:
1768 		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
1769 		break;
1770 
1771 	default:
1772 		ret = -EOPNOTSUPP;
1773 	}
1774 
1775 	return ret;
1776 }
1777 
1778 static int xgbe_change_mtu(struct net_device *netdev, int mtu)
1779 {
1780 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1781 	int ret;
1782 
1783 	DBGPR("-->xgbe_change_mtu\n");
1784 
1785 	ret = xgbe_calc_rx_buf_size(netdev, mtu);
1786 	if (ret < 0)
1787 		return ret;
1788 
1789 	pdata->rx_buf_size = ret;
1790 	WRITE_ONCE(netdev->mtu, mtu);
1791 
1792 	xgbe_restart_dev(pdata);
1793 
1794 	DBGPR("<--xgbe_change_mtu\n");
1795 
1796 	return 0;
1797 }
1798 
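/* ndo_tx_timeout callback: a Tx queue has stalled; defer a full
 * device restart to process context via the restart worker.
 */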
1799 static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1800 {
1801 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1802 
1803 	netdev_warn(netdev, "tx timeout, device restarting\n");
1804 	schedule_work(&pdata->restart_work);
1805 }
1806 
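/* ndo_get_stats64 callback: read the hardware MMC counters and map
 * them onto rtnl_link_stats64. Error counts are derived by
 * subtracting the "good" counters from the good-plus-bad (_gb)
 * counters.
 */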
1807 static void xgbe_get_stats64(struct net_device *netdev,
1808 			     struct rtnl_link_stats64 *s)
1809 {
1810 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1811 	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
1812 
1813 	DBGPR("-->%s\n", __func__);
1814 
1815 	pdata->hw_if.read_mmc_stats(pdata);
1816 
1817 	s->rx_packets = pstats->rxframecount_gb;
1818 	s->rx_bytes = pstats->rxoctetcount_gb;
1819 	s->rx_errors = pstats->rxframecount_gb -
1820 		       pstats->rxbroadcastframes_g -
1821 		       pstats->rxmulticastframes_g -
1822 		       pstats->rxunicastframes_g;
1823 	s->multicast = pstats->rxmulticastframes_g;
1824 	s->rx_length_errors = pstats->rxlengtherror;
1825 	s->rx_crc_errors = pstats->rxcrcerror;
1826 	s->rx_fifo_errors = pstats->rxfifooverflow;
1827 
1828 	s->tx_packets = pstats->txframecount_gb;
1829 	s->tx_bytes = pstats->txoctetcount_gb;
1830 	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
1831 	s->tx_dropped = netdev->stats.tx_dropped;
1832 
1833 	DBGPR("<--%s\n", __func__);
1834 }
1835 
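/* VLAN filter offload: active VIDs are tracked in the active_vlans
 * bitmap, and the hardware VLAN hash table is regenerated from that
 * bitmap whenever a VID is added or removed.
 */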
1836 static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1837 				u16 vid)
1838 {
1839 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1840 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1841 
1842 	DBGPR("-->%s\n", __func__);
1843 
1844 	set_bit(vid, pdata->active_vlans);
1845 	hw_if->update_vlan_hash_table(pdata);
1846 
1847 	DBGPR("<--%s\n", __func__);
1848 
1849 	return 0;
1850 }
1851 
1852 static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1853 				 u16 vid)
1854 {
1855 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1856 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1857 
1858 	DBGPR("-->%s\n", __func__);
1859 
1860 	clear_bit(vid, pdata->active_vlans);
1861 	hw_if->update_vlan_hash_table(pdata);
1862 
1863 	DBGPR("<--%s\n", __func__);
1864 
1865 	return 0;
1866 }
1867 
1868 #ifdef CONFIG_NET_POLL_CONTROLLER
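/* netpoll support: with per-channel interrupts, invoke each channel's
 * DMA ISR directly; otherwise run the shared ISR with the device
 * interrupt temporarily disabled.
 */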
1869 static void xgbe_poll_controller(struct net_device *netdev)
1870 {
1871 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1872 	struct xgbe_channel *channel;
1873 	unsigned int i;
1874 
1875 	DBGPR("-->xgbe_poll_controller\n");
1876 
1877 	if (pdata->per_channel_irq) {
1878 		for (i = 0; i < pdata->channel_count; i++) {
1879 			channel = pdata->channel[i];
1880 			xgbe_dma_isr(channel->dma_irq, channel);
1881 		}
1882 	} else {
1883 		disable_irq(pdata->dev_irq);
1884 		xgbe_isr(pdata->dev_irq, pdata);
1885 		enable_irq(pdata->dev_irq);
1886 	}
1887 
1888 	DBGPR("<--xgbe_poll_controller\n");
1889 }
1890 #endif /* End CONFIG_NET_POLL_CONTROLLER */
1891 
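/* ndo_setup_tc callback: only mqprio offload is supported, and the
 * requested number of traffic classes must not exceed what the
 * hardware advertises in hw_feat.tc_cnt.
 */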
1892 static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
1893 			 void *type_data)
1894 {
1895 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1896 	struct tc_mqprio_qopt *mqprio = type_data;
1897 	u8 tc;
1898 
1899 	if (type != TC_SETUP_QDISC_MQPRIO)
1900 		return -EOPNOTSUPP;
1901 
1902 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1903 	tc = mqprio->num_tc;
1904 
1905 	if (tc > pdata->hw_feat.tc_cnt)
1906 		return -EINVAL;
1907 
1908 	pdata->num_tcs = tc;
1909 	pdata->hw_if.config_tc(pdata);
1910 
1911 	return 0;
1912 }
1913 
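/* ndo_fix_features callback: enforce the hardware's VXLAN feature
 * dependencies. Tx and Rx tunnel offloads must be enabled as a pair,
 * and tunnel checksum offload is forced to follow the plain IP/IPv6
 * checksum offload setting.
 */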
1914 static netdev_features_t xgbe_fix_features(struct net_device *netdev,
1915 					   netdev_features_t features)
1916 {
1917 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1918 	netdev_features_t vxlan_base;
1919 
1920 	vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;
1921 
1922 	if (!pdata->hw_feat.vxn)
1923 		return features;
1924 
1925 	/* VXLAN CSUM requires VXLAN base */
1926 	if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
1927 	    !(features & NETIF_F_GSO_UDP_TUNNEL)) {
1928 		netdev_notice(netdev,
1929 			      "forcing tx udp tunnel support\n");
1930 		features |= NETIF_F_GSO_UDP_TUNNEL;
1931 	}
1932 
1933 	/* Can't do one without doing the other */
1934 	if ((features & vxlan_base) != vxlan_base) {
1935 		netdev_notice(netdev,
1936 			      "forcing both tx and rx udp tunnel support\n");
1937 		features |= vxlan_base;
1938 	}
1939 
1940 	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1941 		if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
1942 			netdev_notice(netdev,
1943 				      "forcing tx udp tunnel checksumming on\n");
1944 			features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1945 		}
1946 	} else {
1947 		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
1948 			netdev_notice(netdev,
1949 				      "forcing tx udp tunnel checksumming off\n");
1950 			features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
1951 		}
1952 	}
1953 
1954 	return features;
1955 }
1956 
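/* ndo_set_features callback: diff the requested features against the
 * cached state and toggle RSS, Rx checksum offload (together with
 * split header and VXLAN handling), VLAN stripping and VLAN filtering
 * as needed. Rx checksum changes also schedule a device restart.
 */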
1957 static int xgbe_set_features(struct net_device *netdev,
1958 			     netdev_features_t features)
1959 {
1960 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1961 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1962 	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
1963 	int ret = 0;
1964 
1965 	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
1966 	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
1967 	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
1968 	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
1969 
1970 	if ((features & NETIF_F_RXHASH) && !rxhash)
1971 		ret = hw_if->enable_rss(pdata);
1972 	else if (!(features & NETIF_F_RXHASH) && rxhash)
1973 		ret = hw_if->disable_rss(pdata);
1974 	if (ret)
1975 		return ret;
1976 
1977 	if ((features & NETIF_F_RXCSUM) && !rxcsum) {
1978 		hw_if->enable_sph(pdata);
1979 		hw_if->enable_vxlan(pdata);
1980 		hw_if->enable_rx_csum(pdata);
1981 		schedule_work(&pdata->restart_work);
1982 	} else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
1983 		hw_if->disable_sph(pdata);
1984 		hw_if->disable_vxlan(pdata);
1985 		hw_if->disable_rx_csum(pdata);
1986 		schedule_work(&pdata->restart_work);
1987 	}
1988 
1989 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
1990 		hw_if->enable_rx_vlan_stripping(pdata);
1991 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
1992 		hw_if->disable_rx_vlan_stripping(pdata);
1993 
1994 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
1995 		hw_if->enable_rx_vlan_filtering(pdata);
1996 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
1997 		hw_if->disable_rx_vlan_filtering(pdata);
1998 
1999 	pdata->netdev_features = features;
2000 
2001 	DBGPR("<--xgbe_set_features\n");
2002 
2003 	return 0;
2004 }
2005 
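/* ndo_features_check callback: let the generic VLAN and VXLAN helpers
 * mask out any offloads that cannot be used for this particular skb.
 */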
2006 static netdev_features_t xgbe_features_check(struct sk_buff *skb,
2007 					     struct net_device *netdev,
2008 					     netdev_features_t features)
2009 {
2010 	features = vlan_features_check(skb, features);
2011 	features = vxlan_features_check(skb, features);
2012 
2013 	return features;
2014 }
2015 
2016 static const struct net_device_ops xgbe_netdev_ops = {
2017 	.ndo_open		= xgbe_open,
2018 	.ndo_stop		= xgbe_close,
2019 	.ndo_start_xmit		= xgbe_xmit,
2020 	.ndo_set_rx_mode	= xgbe_set_rx_mode,
2021 	.ndo_set_mac_address	= xgbe_set_mac_address,
2022 	.ndo_validate_addr	= eth_validate_addr,
2023 	.ndo_eth_ioctl		= xgbe_ioctl,
2024 	.ndo_change_mtu		= xgbe_change_mtu,
2025 	.ndo_tx_timeout		= xgbe_tx_timeout,
2026 	.ndo_get_stats64	= xgbe_get_stats64,
2027 	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
2028 	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
2029 #ifdef CONFIG_NET_POLL_CONTROLLER
2030 	.ndo_poll_controller	= xgbe_poll_controller,
2031 #endif
2032 	.ndo_setup_tc		= xgbe_setup_tc,
2033 	.ndo_fix_features	= xgbe_fix_features,
2034 	.ndo_set_features	= xgbe_set_features,
2035 	.ndo_features_check	= xgbe_features_check,
2036 };
2037 
2038 const struct net_device_ops *xgbe_get_netdev_ops(void)
2039 {
2040 	return &xgbe_netdev_ops;
2041 }
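/* Usage sketch (illustrative only, not part of this file): the bus
 * probe paths are expected to attach the ops while setting up the
 * netdev, roughly as
 *
 *	netdev->netdev_ops = xgbe_get_netdev_ops();
 *
 * which is why xgbe_netdev_ops itself can stay static here.
 */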
2042 
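/* Refill the Rx ring: for each descriptor the hardware has completed
 * (dirty lags cur), unmap the old buffer state, map a fresh Rx buffer
 * and reset the descriptor, then publish the new tail pointer to the
 * DMA engine.
 */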
2043 static void xgbe_rx_refresh(struct xgbe_channel *channel)
2044 {
2045 	struct xgbe_prv_data *pdata = channel->pdata;
2046 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
2047 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
2048 	struct xgbe_ring *ring = channel->rx_ring;
2049 	struct xgbe_ring_data *rdata;
2050 
2051 	while (ring->dirty != ring->cur) {
2052 		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
2053 
2054 		/* Reset rdata values */
2055 		desc_if->unmap_rdata(pdata, rdata);
2056 
2057 		if (desc_if->map_rx_buffer(pdata, ring, rdata))
2058 			break;
2059 
2060 		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
2061 
2062 		ring->dirty++;
2063 	}
2064 
2065 	/* Make sure everything is written before the register write */
2066 	wmb();
2067 
2068 	/* Update the Rx Tail Pointer Register with the address of
2069 	 * the last cleaned entry */
2070 	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
2071 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
2072 			  lower_32_bits(rdata->rdesc_dma));
2073 }
2074 
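/* Allocate an skb for a received packet and copy the header buffer
 * (which may hold the header only, or header plus data) into its
 * linear area; remaining data is attached by the caller as page
 * fragments.
 */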
2075 static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
2076 				       struct napi_struct *napi,
2077 				       struct xgbe_ring_data *rdata,
2078 				       unsigned int len)
2079 {
2080 	struct sk_buff *skb;
2081 	u8 *packet;
2082 
2083 	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
2084 	if (!skb)
2085 		return NULL;
2086 
2087 	/* Pull in the header buffer which may contain just the header
2088 	 * or the header plus data
2089 	 */
2090 	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
2091 				      rdata->rx.hdr.dma_off,
2092 				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
2093 
2094 	packet = page_address(rdata->rx.hdr.pa.pages) +
2095 		 rdata->rx.hdr.pa.pages_offset;
2096 	skb_copy_to_linear_data(skb, packet, len);
2097 	skb_put(skb, len);
2098 
2099 	return skb;
2100 }
2101 
2102 static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
2103 				     struct xgbe_packet_data *packet)
2104 {
2105 	/* Always zero if not the first descriptor */
2106 	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
2107 		return 0;
2108 
2109 	/* First descriptor with split header, return header length */
2110 	if (rdata->rx.hdr_len)
2111 		return rdata->rx.hdr_len;
2112 
2113 	/* First descriptor but not the last descriptor and no split header,
2114 	 * so the full buffer was used
2115 	 */
2116 	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
2117 		return rdata->rx.hdr.dma_len;
2118 
2119 	/* First descriptor and last descriptor and no split header, so
2120 	 * calculate how much of the buffer was used
2121 	 */
2122 	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
2123 }
2124 
2125 static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
2126 				     struct xgbe_packet_data *packet,
2127 				     unsigned int len)
2128 {
2129 	/* Always the full buffer if not the last descriptor */
2130 	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
2131 		return rdata->rx.buf.dma_len;
2132 
2133 	/* Last descriptor so calculate how much of the buffer was used
2134 	 * for the last bit of data
2135 	 */
2136 	return rdata->rx.len - len;
2137 }
2138 
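/* Reclaim completed Tx descriptors: walk from dirty towards cur,
 * freeing skbs and resetting descriptors, report the completed work
 * to BQL, and wake the queue if it was stopped and enough descriptors
 * have become free.
 */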
2139 static int xgbe_tx_poll(struct xgbe_channel *channel)
2140 {
2141 	struct xgbe_prv_data *pdata = channel->pdata;
2142 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
2143 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
2144 	struct xgbe_ring *ring = channel->tx_ring;
2145 	struct xgbe_ring_data *rdata;
2146 	struct xgbe_ring_desc *rdesc;
2147 	struct net_device *netdev = pdata->netdev;
2148 	struct netdev_queue *txq;
2149 	int processed = 0;
2150 	unsigned int tx_packets = 0, tx_bytes = 0;
2151 	unsigned int cur;
2152 
2153 	DBGPR("-->xgbe_tx_poll\n");
2154 
2155 	/* Nothing to do if there isn't a Tx ring for this channel */
2156 	if (!ring)
2157 		return 0;
2158 
2159 	cur = ring->cur;
2160 
2161 	/* Be sure we get ring->cur before accessing descriptor data */
2162 	smp_rmb();
2163 
2164 	txq = netdev_get_tx_queue(netdev, channel->queue_index);
2165 
2166 	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
2167 	       (ring->dirty != cur)) {
2168 		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
2169 		rdesc = rdata->rdesc;
2170 
2171 		if (!hw_if->tx_complete(rdesc))
2172 			break;
2173 
2174 		/* Make sure descriptor fields are read after reading the OWN
2175 		 * bit */
2176 		dma_rmb();
2177 
2178 		if (netif_msg_tx_done(pdata))
2179 			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
2180 
2181 		if (hw_if->is_last_desc(rdesc)) {
2182 			tx_packets += rdata->tx.packets;
2183 			tx_bytes += rdata->tx.bytes;
2184 		}
2185 
2186 		/* Free the SKB and reset the descriptor for re-use */
2187 		desc_if->unmap_rdata(pdata, rdata);
2188 		hw_if->tx_desc_reset(rdata);
2189 
2190 		processed++;
2191 		ring->dirty++;
2192 	}
2193 
2194 	if (!processed)
2195 		return 0;
2196 
2197 	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
2198 
2199 	if ((ring->tx.queue_stopped == 1) &&
2200 	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
2201 		ring->tx.queue_stopped = 0;
2202 		netif_tx_wake_queue(txq);
2203 	}
2204 
2205 	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
2206 
2207 	return processed;
2208 }
2209 
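/* NAPI Rx processing for one channel. A packet may span several
 * descriptors (split header, additional buffers, and context
 * descriptors carrying timestamps), so if the budget runs out
 * mid-packet the partial skb/error/length state is saved in the ring
 * data and restored on the next poll.
 */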
2210 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
2211 {
2212 	struct xgbe_prv_data *pdata = channel->pdata;
2213 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
2214 	struct xgbe_ring *ring = channel->rx_ring;
2215 	struct xgbe_ring_data *rdata;
2216 	struct xgbe_packet_data *packet;
2217 	struct net_device *netdev = pdata->netdev;
2218 	struct napi_struct *napi;
2219 	struct sk_buff *skb;
2220 	struct skb_shared_hwtstamps *hwtstamps;
2221 	unsigned int last, error, context_next, context;
2222 	unsigned int len, buf1_len, buf2_len, max_len;
2223 	unsigned int received = 0;
2224 	int packet_count = 0;
2225 
2226 	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
2227 
2228 	/* Nothing to do if there isn't a Rx ring for this channel */
2229 	if (!ring)
2230 		return 0;
2231 
2232 	last = 0;
2233 	context_next = 0;
2234 
2235 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
2236 
2237 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2238 	packet = &ring->packet_data;
2239 	while (packet_count < budget) {
2240 		DBGPR("  cur = %d\n", ring->cur);
2241 
2242 		/* First time in loop see if we need to restore state */
2243 		if (!received && rdata->state_saved) {
2244 			skb = rdata->state.skb;
2245 			error = rdata->state.error;
2246 			len = rdata->state.len;
2247 		} else {
2248 			memset(packet, 0, sizeof(*packet));
2249 			skb = NULL;
2250 			error = 0;
2251 			len = 0;
2252 		}
2253 
2254 read_again:
2255 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2256 
2257 		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
2258 			xgbe_rx_refresh(channel);
2259 
2260 		if (hw_if->dev_read(channel))
2261 			break;
2262 
2263 		received++;
2264 		ring->cur++;
2265 
2266 		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2267 				      LAST);
2268 		context_next = XGMAC_GET_BITS(packet->attributes,
2269 					      RX_PACKET_ATTRIBUTES,
2270 					      CONTEXT_NEXT);
2271 		context = XGMAC_GET_BITS(packet->attributes,
2272 					 RX_PACKET_ATTRIBUTES,
2273 					 CONTEXT);
2274 
2275 		/* Earlier error, just drain the remaining data */
2276 		if ((!last || context_next) && error)
2277 			goto read_again;
2278 
2279 		if (error || packet->errors) {
2280 			if (packet->errors)
2281 				netif_err(pdata, rx_err, netdev,
2282 					  "error in received packet\n");
2283 			dev_kfree_skb(skb);
2284 			goto next_packet;
2285 		}
2286 
2287 		if (!context) {
2288 			/* Get the data length in the descriptor buffers */
2289 			buf1_len = xgbe_rx_buf1_len(rdata, packet);
2290 			len += buf1_len;
2291 			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
2292 			len += buf2_len;
2293 
2294 			if (buf2_len > rdata->rx.buf.dma_len) {
2295 				/* A hardware inconsistency within the descriptors
2296 				 * has resulted in a length underflow.
2297 				 */
2298 				error = 1;
2299 				goto skip_data;
2300 			}
2301 
2302 			if (!skb) {
2303 				skb = xgbe_create_skb(pdata, napi, rdata,
2304 						      buf1_len);
2305 				if (!skb) {
2306 					error = 1;
2307 					goto skip_data;
2308 				}
2309 			}
2310 
2311 			if (buf2_len) {
2312 				dma_sync_single_range_for_cpu(pdata->dev,
2313 							rdata->rx.buf.dma_base,
2314 							rdata->rx.buf.dma_off,
2315 							rdata->rx.buf.dma_len,
2316 							DMA_FROM_DEVICE);
2317 
2318 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2319 						rdata->rx.buf.pa.pages,
2320 						rdata->rx.buf.pa.pages_offset,
2321 						buf2_len,
2322 						rdata->rx.buf.dma_len);
2323 				rdata->rx.buf.pa.pages = NULL;
2324 			}
2325 		}
2326 
2327 skip_data:
2328 		if (!last || context_next)
2329 			goto read_again;
2330 
2331 		if (!skb || error) {
2332 			dev_kfree_skb(skb);
2333 			goto next_packet;
2334 		}
2335 
2336 		/* Be sure we don't exceed the configured MTU */
2337 		max_len = netdev->mtu + ETH_HLEN;
2338 		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2339 		    (skb->protocol == htons(ETH_P_8021Q)))
2340 			max_len += VLAN_HLEN;
2341 
2342 		if (skb->len > max_len) {
2343 			netif_err(pdata, rx_err, netdev,
2344 				  "packet length exceeds configured MTU\n");
2345 			dev_kfree_skb(skb);
2346 			goto next_packet;
2347 		}
2348 
2349 		if (netif_msg_pktdata(pdata))
2350 			xgbe_print_pkt(netdev, skb, false);
2351 
2352 		skb_checksum_none_assert(skb);
2353 		if (XGMAC_GET_BITS(packet->attributes,
2354 				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
2355 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2356 
2357 		if (XGMAC_GET_BITS(packet->attributes,
2358 				   RX_PACKET_ATTRIBUTES, TNP)) {
2359 			skb->encapsulation = 1;
2360 
2361 			if (XGMAC_GET_BITS(packet->attributes,
2362 					   RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
2363 				skb->csum_level = 1;
2364 		}
2365 
2366 		if (XGMAC_GET_BITS(packet->attributes,
2367 				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
2368 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2369 					       packet->vlan_ctag);
2370 
2371 		if (XGMAC_GET_BITS(packet->attributes,
2372 				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
2373 			hwtstamps = skb_hwtstamps(skb);
2374 			hwtstamps->hwtstamp = ns_to_ktime(packet->rx_tstamp);
2375 		}
2376 
2377 		if (XGMAC_GET_BITS(packet->attributes,
2378 				   RX_PACKET_ATTRIBUTES, RSS_HASH))
2379 			skb_set_hash(skb, packet->rss_hash,
2380 				     packet->rss_hash_type);
2381 
2382 		skb->dev = netdev;
2383 		skb->protocol = eth_type_trans(skb, netdev);
2384 		skb_record_rx_queue(skb, channel->queue_index);
2385 
2386 		napi_gro_receive(napi, skb);
2387 
2388 next_packet:
2389 		packet_count++;
2390 	}
2391 
2392 	/* Check if we need to save state before leaving */
2393 	if (received && (!last || context_next)) {
2394 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2395 		rdata->state_saved = 1;
2396 		rdata->state.skb = skb;
2397 		rdata->state.len = len;
2398 		rdata->state.error = error;
2399 	}
2400 
2401 	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
2402 
2403 	return packet_count;
2404 }
2405 
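/* NAPI poll used in per-channel interrupt mode: service this
 * channel's Tx ring, then its Rx ring, and re-arm the channel
 * interrupt once the poll completes under budget.
 */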
2406 static int xgbe_one_poll(struct napi_struct *napi, int budget)
2407 {
2408 	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
2409 						    napi);
2410 	struct xgbe_prv_data *pdata = channel->pdata;
2411 	int processed = 0;
2412 
2413 	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
2414 
2415 	/* Cleanup Tx ring first */
2416 	xgbe_tx_poll(channel);
2417 
2418 	/* Process Rx ring next */
2419 	processed = xgbe_rx_poll(channel, budget);
2420 
2421 	/* If we processed everything, we are done */
2422 	if ((processed < budget) && napi_complete_done(napi, processed)) {
2423 		/* Enable Tx and Rx interrupts */
2424 		if (pdata->channel_irq_mode)
2425 			xgbe_enable_rx_tx_int(pdata, channel);
2426 		else
2427 			enable_irq(channel->dma_irq);
2428 	}
2429 
2430 	DBGPR("<--xgbe_one_poll: received = %d\n", processed);
2431 
2432 	return processed;
2433 }
2434 
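/* NAPI poll used in shared interrupt mode: split the budget across
 * the Rx rings and sweep all channels repeatedly until the budget is
 * exhausted or a full sweep makes no progress.
 */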
2435 static int xgbe_all_poll(struct napi_struct *napi, int budget)
2436 {
2437 	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
2438 						   napi);
2439 	struct xgbe_channel *channel;
2440 	int ring_budget;
2441 	int processed, last_processed;
2442 	unsigned int i;
2443 
2444 	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
2445 
2446 	processed = 0;
2447 	ring_budget = budget / pdata->rx_ring_count;
2448 	do {
2449 		last_processed = processed;
2450 
2451 		for (i = 0; i < pdata->channel_count; i++) {
2452 			channel = pdata->channel[i];
2453 
2454 			/* Cleanup Tx ring first */
2455 			xgbe_tx_poll(channel);
2456 
2457 			/* Process Rx ring next */
2458 			if (ring_budget > (budget - processed))
2459 				ring_budget = budget - processed;
2460 			processed += xgbe_rx_poll(channel, ring_budget);
2461 		}
2462 	} while ((processed < budget) && (processed != last_processed));
2463 
2464 	/* If we processed everything, we are done */
2465 	if ((processed < budget) && napi_complete_done(napi, processed)) {
2466 		/* Enable Tx and Rx interrupts */
2467 		xgbe_enable_rx_tx_ints(pdata);
2468 	}
2469 
2470 	DBGPR("<--xgbe_all_poll: received = %d\n", processed);
2471 
2472 	return processed;
2473 }
2474 
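/* Debug helpers below: dump raw descriptor words and packet contents
 * through netdev_dbg(); typically reached via the netif_msg_* checks
 * in the fast paths above.
 */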
2475 void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
2476 		       unsigned int idx, unsigned int count, unsigned int flag)
2477 {
2478 	struct xgbe_ring_data *rdata;
2479 	struct xgbe_ring_desc *rdesc;
2480 
2481 	while (count--) {
2482 		rdata = XGBE_GET_DESC_DATA(ring, idx);
2483 		rdesc = rdata->rdesc;
2484 		netdev_dbg(pdata->netdev,
2485 			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
2486 			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
2487 			   le32_to_cpu(rdesc->desc0),
2488 			   le32_to_cpu(rdesc->desc1),
2489 			   le32_to_cpu(rdesc->desc2),
2490 			   le32_to_cpu(rdesc->desc3));
2491 		idx++;
2492 	}
2493 }
2494 
2495 void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
2496 		       unsigned int idx)
2497 {
2498 	struct xgbe_ring_data *rdata;
2499 	struct xgbe_ring_desc *rdesc;
2500 
2501 	rdata = XGBE_GET_DESC_DATA(ring, idx);
2502 	rdesc = rdata->rdesc;
2503 	netdev_dbg(pdata->netdev,
2504 		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
2505 		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
2506 		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
2507 }
2508 
2509 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
2510 {
2511 	struct ethhdr *eth = (struct ethhdr *)skb->data;
2512 	unsigned char buffer[128];
2513 	unsigned int i;
2514 
2515 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
2516 
2517 	netdev_dbg(netdev, "%s packet of %d bytes\n",
2518 		   (tx_rx ? "TX" : "RX"), skb->len);
2519 
2520 	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
2521 	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
2522 	netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto));
2523 
2524 	for (i = 0; i < skb->len; i += 32) {
2525 		unsigned int len = min(skb->len - i, 32U);
2526 
2527 		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
2528 				   buffer, sizeof(buffer), false);
2529 		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
2530 	}
2531 
2532 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
2533 }
2534