xref: /linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c (revision ee8287e068a3995b0f8001dd6931e221dfb7c530)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for Marvell PPv2 network controller for Armada 375 SoC.
4  *
5  * Copyright (C) 2014 Marvell
6  *
7  * Marcin Wojtas <mw@semihalf.com>
8  */
9 
10 #include <linux/acpi.h>
11 #include <linux/kernel.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/platform_device.h>
15 #include <linux/skbuff.h>
16 #include <linux/inetdevice.h>
17 #include <linux/mbus.h>
18 #include <linux/module.h>
19 #include <linux/mfd/syscon.h>
20 #include <linux/interrupt.h>
21 #include <linux/cpumask.h>
22 #include <linux/of.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/of_address.h>
27 #include <linux/phy.h>
28 #include <linux/phylink.h>
29 #include <linux/phy/phy.h>
30 #include <linux/ptp_classify.h>
31 #include <linux/clk.h>
32 #include <linux/hrtimer.h>
33 #include <linux/ktime.h>
34 #include <linux/regmap.h>
35 #include <uapi/linux/ppp_defs.h>
36 #include <net/ip.h>
37 #include <net/ipv6.h>
38 #include <net/page_pool/helpers.h>
39 #include <net/tso.h>
40 #include <linux/bpf_trace.h>
41 
42 #include "mvpp2.h"
43 #include "mvpp2_prs.h"
44 #include "mvpp2_cls.h"
45 
46 enum mvpp2_bm_pool_log_num {
47 	MVPP2_BM_SHORT,
48 	MVPP2_BM_LONG,
49 	MVPP2_BM_JUMBO,
50 	MVPP2_BM_POOLS_NUM
51 };
52 
53 static struct {
54 	int pkt_size;
55 	int buf_num;
56 } mvpp2_pools[MVPP2_BM_POOLS_NUM];
57 
58 /* The prototype is added here to be used in start_dev when using ACPI. This
59  * will be removed once phylink is used for all modes (dt+ACPI).
60  */
61 static void mvpp2_acpi_start(struct mvpp2_port *port);
62 
63 /* Queue modes */
64 #define MVPP2_QDIST_SINGLE_MODE	0
65 #define MVPP2_QDIST_MULTI_MODE	1
66 
67 static int queue_mode = MVPP2_QDIST_MULTI_MODE;
68 
69 module_param(queue_mode, int, 0444);
70 MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
71 
72 /* Utility/helper methods */
73 
74 void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
75 {
76 	writel(data, priv->swth_base[0] + offset);
77 }
78 
79 u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
80 {
81 	return readl(priv->swth_base[0] + offset);
82 }
83 
84 static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
85 {
86 	return readl_relaxed(priv->swth_base[0] + offset);
87 }
88 
89 static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
90 {
91 	return cpu % priv->nthreads;
92 }
93 
94 static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
95 {
96 	writel(data, priv->cm3_base + offset);
97 }
98 
99 static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
100 {
101 	return readl(priv->cm3_base + offset);
102 }
103 
104 static struct page_pool *
105 mvpp2_create_page_pool(struct device *dev, int num, int len,
106 		       enum dma_data_direction dma_dir)
107 {
108 	struct page_pool_params pp_params = {
109 		/* internal DMA mapping in page_pool */
110 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
111 		.pool_size = num,
112 		.nid = NUMA_NO_NODE,
113 		.dev = dev,
114 		.dma_dir = dma_dir,
115 		.offset = MVPP2_SKB_HEADROOM,
116 		.max_len = len,
117 	};
118 
119 	return page_pool_create(&pp_params);
120 }
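
/* Usage sketch (illustrative): mvpp2_bm_init() below creates one such
 * page pool per per-CPU BM pool and releases it with
 * page_pool_destroy() on teardown:
 *
 *	struct page_pool *pp;
 *
 *	pp = mvpp2_create_page_pool(dev, buf_num, pkt_size, DMA_FROM_DEVICE);
 *	if (IS_ERR(pp))
 *		return PTR_ERR(pp);
 */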
121 
122 /* These accessors should be used to access:
123  *
124  * - per-thread registers, where each thread has its own copy of the
125  *   register.
126  *
127  *   MVPP2_BM_VIRT_ALLOC_REG
128  *   MVPP2_BM_ADDR_HIGH_ALLOC
129  *   MVPP22_BM_ADDR_HIGH_RLS_REG
130  *   MVPP2_BM_VIRT_RLS_REG
131  *   MVPP2_ISR_RX_TX_CAUSE_REG
132  *   MVPP2_ISR_RX_TX_MASK_REG
133  *   MVPP2_TXQ_NUM_REG
134  *   MVPP2_AGGR_TXQ_UPDATE_REG
135  *   MVPP2_TXQ_RSVD_REQ_REG
136  *   MVPP2_TXQ_RSVD_RSLT_REG
137  *   MVPP2_TXQ_SENT_REG
138  *   MVPP2_RXQ_NUM_REG
139  *
140  * - global registers that must be accessed through a specific thread
141  *   window, because they are related to an access to a per-thread
142  *   register
143  *
144  *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
145  *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
146  *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
147  *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
148  *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
149  *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
150  *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
151  *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
152  *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
153  *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
154  *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
157  */
158 static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
159 			       u32 offset, u32 data)
160 {
161 	writel(data, priv->swth_base[thread] + offset);
162 }
163 
164 static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
165 			     u32 offset)
166 {
167 	return readl(priv->swth_base[thread] + offset);
168 }
169 
170 static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
171 				       u32 offset, u32 data)
172 {
173 	writel_relaxed(data, priv->swth_base[thread] + offset);
174 }
175 
176 static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
177 				     u32 offset)
178 {
179 	return readl_relaxed(priv->swth_base[thread] + offset);
180 }
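
/* Illustrative sketch of the indirect access pattern described above:
 * to reach a per-queue register, the queue is first selected by
 * writing its number through the same thread window, e.g.
 *
 *	mvpp2_thread_write(priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
 *	val = mvpp2_thread_read(priv, thread, MVPP2_TXQ_PENDING_REG);
 *
 * Both accesses must go through the same thread window, otherwise the
 * select/read pair may race with other threads.
 */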
181 
182 static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
183 					    struct mvpp2_tx_desc *tx_desc)
184 {
185 	if (port->priv->hw_version == MVPP21)
186 		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
187 	else
188 		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
189 		       MVPP2_DESC_DMA_MASK;
190 }
191 
192 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
193 				      struct mvpp2_tx_desc *tx_desc,
194 				      dma_addr_t dma_addr)
195 {
196 	dma_addr_t addr, offset;
197 
198 	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
199 	offset = dma_addr & MVPP2_TX_DESC_ALIGN;
200 
201 	if (port->priv->hw_version == MVPP21) {
202 		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
203 		tx_desc->pp21.packet_offset = offset;
204 	} else {
205 		__le64 val = cpu_to_le64(addr);
206 
207 		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
208 		tx_desc->pp22.buf_dma_addr_ptp |= val;
209 		tx_desc->pp22.packet_offset = offset;
210 	}
211 }
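
/* Worked example (illustrative, assuming MVPP2_TX_DESC_ALIGN is 0x3f):
 * a buffer at dma_addr 0x12345678 is split by the function above into
 *
 *	addr   = 0x12345678 & ~0x3f = 0x12345640 (64-byte aligned base)
 *	offset = 0x12345678 &  0x3f = 0x38       (stored in packet_offset)
 *
 * so that addr + offset reconstructs the original unaligned address.
 */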
212 
213 static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
214 				    struct mvpp2_tx_desc *tx_desc)
215 {
216 	if (port->priv->hw_version == MVPP21)
217 		return le16_to_cpu(tx_desc->pp21.data_size);
218 	else
219 		return le16_to_cpu(tx_desc->pp22.data_size);
220 }
221 
222 static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
223 				  struct mvpp2_tx_desc *tx_desc,
224 				  size_t size)
225 {
226 	if (port->priv->hw_version == MVPP21)
227 		tx_desc->pp21.data_size = cpu_to_le16(size);
228 	else
229 		tx_desc->pp22.data_size = cpu_to_le16(size);
230 }
231 
232 static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
233 				 struct mvpp2_tx_desc *tx_desc,
234 				 unsigned int txq)
235 {
236 	if (port->priv->hw_version == MVPP21)
237 		tx_desc->pp21.phys_txq = txq;
238 	else
239 		tx_desc->pp22.phys_txq = txq;
240 }
241 
242 static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
243 				 struct mvpp2_tx_desc *tx_desc,
244 				 unsigned int command)
245 {
246 	if (port->priv->hw_version == MVPP21)
247 		tx_desc->pp21.command = cpu_to_le32(command);
248 	else
249 		tx_desc->pp22.command = cpu_to_le32(command);
250 }
251 
252 static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
253 					    struct mvpp2_tx_desc *tx_desc)
254 {
255 	if (port->priv->hw_version == MVPP21)
256 		return tx_desc->pp21.packet_offset;
257 	else
258 		return tx_desc->pp22.packet_offset;
259 }
260 
261 static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
262 					    struct mvpp2_rx_desc *rx_desc)
263 {
264 	if (port->priv->hw_version == MVPP21)
265 		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
266 	else
267 		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
268 		       MVPP2_DESC_DMA_MASK;
269 }
270 
271 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
272 					     struct mvpp2_rx_desc *rx_desc)
273 {
274 	if (port->priv->hw_version == MVPP21)
275 		return le32_to_cpu(rx_desc->pp21.buf_cookie);
276 	else
277 		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
278 		       MVPP2_DESC_DMA_MASK;
279 }
280 
281 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
282 				    struct mvpp2_rx_desc *rx_desc)
283 {
284 	if (port->priv->hw_version == MVPP21)
285 		return le16_to_cpu(rx_desc->pp21.data_size);
286 	else
287 		return le16_to_cpu(rx_desc->pp22.data_size);
288 }
289 
290 static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
291 				   struct mvpp2_rx_desc *rx_desc)
292 {
293 	if (port->priv->hw_version == MVPP21)
294 		return le32_to_cpu(rx_desc->pp21.status);
295 	else
296 		return le32_to_cpu(rx_desc->pp22.status);
297 }
298 
299 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
300 {
301 	txq_pcpu->txq_get_index++;
302 	if (txq_pcpu->txq_get_index == txq_pcpu->size)
303 		txq_pcpu->txq_get_index = 0;
304 }
305 
306 static void mvpp2_txq_inc_put(struct mvpp2_port *port,
307 			      struct mvpp2_txq_pcpu *txq_pcpu,
308 			      void *data,
309 			      struct mvpp2_tx_desc *tx_desc,
310 			      enum mvpp2_tx_buf_type buf_type)
311 {
312 	struct mvpp2_txq_pcpu_buf *tx_buf =
313 		txq_pcpu->buffs + txq_pcpu->txq_put_index;
314 	tx_buf->type = buf_type;
315 	if (buf_type == MVPP2_TYPE_SKB)
316 		tx_buf->skb = data;
317 	else
318 		tx_buf->xdpf = data;
319 	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
320 	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
321 		mvpp2_txdesc_offset_get(port, tx_desc);
322 	txq_pcpu->txq_put_index++;
323 	if (txq_pcpu->txq_put_index == txq_pcpu->size)
324 		txq_pcpu->txq_put_index = 0;
325 }
326 
327 /* Get the maximum number of RXQs */
328 static int mvpp2_get_nrxqs(struct mvpp2 *priv)
329 {
330 	unsigned int nrxqs;
331 
332 	if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
333 		return 1;
334 
335 	/* According to the PPv2.2 datasheet and our experiments on
336 	 * PPv2.1, RX queues have an allocation granularity of 4 (when
337 	 * more than a single one on PPv2.2).
338 	 * Round up to nearest multiple of 4.
339 	 */
340 	nrxqs = (num_possible_cpus() + 3) & ~0x3;
341 	if (nrxqs > MVPP2_PORT_MAX_RXQ)
342 		nrxqs = MVPP2_PORT_MAX_RXQ;
343 
344 	return nrxqs;
345 }
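
/* Worked example (illustrative): in multi queue mode on a system with
 * 6 possible CPUs, nrxqs = (6 + 3) & ~0x3 = 8, i.e. the CPU count
 * rounded up to the next multiple of 4 and then capped at
 * MVPP2_PORT_MAX_RXQ.
 */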
346 
347 /* Get the physical egress port number */
348 static inline int mvpp2_egress_port(struct mvpp2_port *port)
349 {
350 	return MVPP2_MAX_TCONT + port->id;
351 }
352 
353 /* Get the physical TXQ number */
354 static inline int mvpp2_txq_phys(int port, int txq)
355 {
356 	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
357 }
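
/* Worked example (illustrative, assuming MVPP2_MAX_TCONT is 16 and
 * MVPP2_MAX_TXQ is 8): logical txq 2 of port 1 maps to physical TXQ
 * (16 + 1) * 8 + 2 = 138, consistent with the egress port numbering
 * returned by mvpp2_egress_port() above.
 */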
358 
359 /* Returns a struct page if page_pool is set, otherwise a buffer */
360 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
361 			      struct page_pool *page_pool)
362 {
363 	if (page_pool)
364 		return page_pool_dev_alloc_pages(page_pool);
365 
366 	if (likely(pool->frag_size <= PAGE_SIZE))
367 		return netdev_alloc_frag(pool->frag_size);
368 
369 	return kmalloc(pool->frag_size, GFP_ATOMIC);
370 }
371 
372 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
373 			    struct page_pool *page_pool, void *data)
374 {
375 	if (page_pool)
376 		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
377 	else if (likely(pool->frag_size <= PAGE_SIZE))
378 		skb_free_frag(data);
379 	else
380 		kfree(data);
381 }
382 
383 /* Buffer Manager configuration routines */
384 
385 /* Create pool */
386 static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
387 				struct mvpp2_bm_pool *bm_pool, int size)
388 {
389 	u32 val;
390 
391 	/* Number of buffer pointers must be a multiple of 16, as per
392 	 * hardware constraints
393 	 */
394 	if (!IS_ALIGNED(size, 16))
395 		return -EINVAL;
396 
397 	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
398 	 * bytes per buffer pointer
399 	 */
400 	if (priv->hw_version == MVPP21)
401 		bm_pool->size_bytes = 2 * sizeof(u32) * size;
402 	else
403 		bm_pool->size_bytes = 2 * sizeof(u64) * size;
404 
405 	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
406 						&bm_pool->dma_addr,
407 						GFP_KERNEL);
408 	if (!bm_pool->virt_addr)
409 		return -ENOMEM;
410 
411 	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
412 			MVPP2_BM_POOL_PTR_ALIGN)) {
413 		dma_free_coherent(dev, bm_pool->size_bytes,
414 				  bm_pool->virt_addr, bm_pool->dma_addr);
415 		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
416 			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
417 		return -ENOMEM;
418 	}
419 
420 	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
421 		    lower_32_bits(bm_pool->dma_addr));
422 	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
423 
424 	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
425 	val |= MVPP2_BM_START_MASK;
426 
427 	val &= ~MVPP2_BM_LOW_THRESH_MASK;
428 	val &= ~MVPP2_BM_HIGH_THRESH_MASK;
429 
430 	/* Set the 8-pool BPPI thresholds for PPv2.3 */
431 	if (priv->hw_version == MVPP23) {
432 		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
433 		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
434 	} else {
435 		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
436 		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
437 	}
438 
439 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
440 
441 	bm_pool->size = size;
442 	bm_pool->pkt_size = 0;
443 	bm_pool->buf_num = 0;
444 
445 	return 0;
446 }
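
/* Sizing example (illustrative): a pool of size = 1024 buffer pointers
 * needs 2 * sizeof(u64) * 1024 = 16 KiB of coherent memory on
 * PPv2.2/PPv2.3, and 2 * sizeof(u32) * 1024 = 8 KiB on PPv2.1.
 */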
447 
448 /* Set pool buffer size */
449 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
450 				      struct mvpp2_bm_pool *bm_pool,
451 				      int buf_size)
452 {
453 	u32 val;
454 
455 	bm_pool->buf_size = buf_size;
456 
457 	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
458 	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
459 }
460 
461 static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
462 				    struct mvpp2_bm_pool *bm_pool,
463 				    dma_addr_t *dma_addr,
464 				    phys_addr_t *phys_addr)
465 {
466 	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
467 
468 	*dma_addr = mvpp2_thread_read(priv, thread,
469 				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
470 	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
471 
472 	if (priv->hw_version >= MVPP22) {
473 		u32 val;
474 		u32 dma_addr_highbits, phys_addr_highbits;
475 
476 		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
477 		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
478 		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
479 			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
480 
481 		if (sizeof(dma_addr_t) == 8)
482 			*dma_addr |= (u64)dma_addr_highbits << 32;
483 
484 		if (sizeof(phys_addr_t) == 8)
485 			*phys_addr |= (u64)phys_addr_highbits << 32;
486 	}
487 
488 	put_cpu();
489 }
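
/* Worked example (illustrative): on PPv2.2+ with 64-bit dma_addr_t, a
 * 40-bit DMA address such as 0x9_8765_4320 comes back as 0x87654320
 * from MVPP2_BM_PHY_ALLOC_REG plus high bits 0x9 from
 * MVPP22_BM_ADDR_HIGH_ALLOC, recombined above as
 *
 *	dma_addr = 0x87654320 | ((u64)0x9 << 32);
 */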
490 
491 /* Free all buffers from the pool */
492 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
493 			       struct mvpp2_bm_pool *bm_pool, int buf_num)
494 {
495 	struct page_pool *pp = NULL;
496 	int i;
497 
498 	if (buf_num > bm_pool->buf_num) {
499 		WARN(1, "Pool %d does not have that many bufs (requested %d)\n",
500 		     bm_pool->id, buf_num);
501 		buf_num = bm_pool->buf_num;
502 	}
503 
504 	if (priv->percpu_pools)
505 		pp = priv->page_pool[bm_pool->id];
506 
507 	for (i = 0; i < buf_num; i++) {
508 		dma_addr_t buf_dma_addr;
509 		phys_addr_t buf_phys_addr;
510 		void *data;
511 
512 		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
513 					&buf_dma_addr, &buf_phys_addr);
514 
515 		if (!pp)
516 			dma_unmap_single(dev, buf_dma_addr,
517 					 bm_pool->buf_size, DMA_FROM_DEVICE);
518 
519 		data = (void *)phys_to_virt(buf_phys_addr);
520 		if (!data)
521 			break;
522 
523 		mvpp2_frag_free(bm_pool, pp, data);
524 	}
525 
526 	/* Update BM driver with number of buffers removed from pool */
527 	bm_pool->buf_num -= i;
528 }
529 
530 /* Check number of buffers in BM pool */
531 static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
532 {
533 	int buf_num = 0;
534 
535 	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
536 				    MVPP22_BM_POOL_PTRS_NUM_MASK;
537 	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
538 				    MVPP2_BM_BPPI_PTR_NUM_MASK;
539 
540 	/* HW has one buffer ready which is not reflected in the counters */
541 	if (buf_num)
542 		buf_num += 1;
543 
544 	return buf_num;
545 }
546 
547 /* Cleanup pool */
548 static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
549 				 struct mvpp2_bm_pool *bm_pool)
550 {
551 	int buf_num;
552 	u32 val;
553 
554 	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
555 	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);
556 
557 	/* Check buffer counters after free */
558 	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
559 	if (buf_num) {
560 		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
561 		     bm_pool->id, bm_pool->buf_num);
562 		return 0;
563 	}
564 
565 	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
566 	val |= MVPP2_BM_STOP_MASK;
567 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
568 
569 	if (priv->percpu_pools) {
570 		page_pool_destroy(priv->page_pool[bm_pool->id]);
571 		priv->page_pool[bm_pool->id] = NULL;
572 	}
573 
574 	dma_free_coherent(dev, bm_pool->size_bytes,
575 			  bm_pool->virt_addr,
576 			  bm_pool->dma_addr);
577 	return 0;
578 }
579 
580 static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
581 {
582 	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
583 	struct mvpp2_bm_pool *bm_pool;
584 
585 	if (priv->percpu_pools)
586 		poolnum = mvpp2_get_nrxqs(priv) * 2;
587 
588 	/* Create all pools with maximum size */
589 	size = MVPP2_BM_POOL_SIZE_MAX;
590 	for (i = 0; i < poolnum; i++) {
591 		bm_pool = &priv->bm_pools[i];
592 		bm_pool->id = i;
593 		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
594 		if (err)
595 			goto err_unroll_pools;
596 		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
597 	}
598 	return 0;
599 
600 err_unroll_pools:
601 	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
602 	for (i = i - 1; i >= 0; i--)
603 		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
604 	return err;
605 }
606 
607 /* Enable PPv2.3 8-pool mode */
608 static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
609 {
610 	int val;
611 
612 	val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
613 	val |= MVPP23_BM_8POOL_MODE;
614 	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
615 }
616 
617 /* Clean up a pool before its actual initialization by the OS */
618 static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
619 {
620 	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
621 	u32 val;
622 	int i;
623 
624 	/* Drain the BM from all possible residues left by firmware */
625 	for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
626 		mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
627 
628 	put_cpu();
629 
630 	/* Stop the BM pool */
631 	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
632 	val |= MVPP2_BM_STOP_MASK;
633 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
634 }
635 
636 static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
637 {
638 	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
639 	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
640 	struct mvpp2_port *port;
641 
642 	if (priv->percpu_pools)
643 		poolnum = mvpp2_get_nrxqs(priv) * 2;
644 
645 	/* Clean up the pool state in case it contains stale information */
646 	for (i = 0; i < poolnum; i++)
647 		mvpp2_bm_pool_cleanup(priv, i);
648 
649 	if (priv->percpu_pools) {
650 		for (i = 0; i < priv->port_count; i++) {
651 			port = priv->port_list[i];
652 			if (port->xdp_prog) {
653 				dma_dir = DMA_BIDIRECTIONAL;
654 				break;
655 			}
656 		}
657 
658 		for (i = 0; i < poolnum; i++) {
659 			/* first half of the pools are short, second half are long */
660 			int pn = i / (poolnum / 2);
661 
662 			priv->page_pool[i] =
663 				mvpp2_create_page_pool(dev,
664 						       mvpp2_pools[pn].buf_num,
665 						       mvpp2_pools[pn].pkt_size,
666 						       dma_dir);
667 			if (IS_ERR(priv->page_pool[i])) {
668 				int j;
669 
670 				for (j = 0; j < i; j++) {
671 					page_pool_destroy(priv->page_pool[j]);
672 					priv->page_pool[j] = NULL;
673 				}
674 				return PTR_ERR(priv->page_pool[i]);
675 			}
676 		}
677 	}
678 
679 	dev_info(dev, "using %d %s buffers\n", poolnum,
680 		 priv->percpu_pools ? "per-cpu" : "shared");
681 
682 	for (i = 0; i < poolnum; i++) {
683 		/* Mask BM all interrupts */
684 		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
685 		/* Clear BM cause register */
686 		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
687 	}
688 
689 	/* Allocate and initialize BM pools */
690 	priv->bm_pools = devm_kcalloc(dev, poolnum,
691 				      sizeof(*priv->bm_pools), GFP_KERNEL);
692 	if (!priv->bm_pools)
693 		return -ENOMEM;
694 
695 	if (priv->hw_version == MVPP23)
696 		mvpp23_bm_set_8pool_mode(priv);
697 
698 	err = mvpp2_bm_pools_init(dev, priv);
699 	if (err < 0)
700 		return err;
701 	return 0;
702 }
703 
704 static void mvpp2_setup_bm_pool(void)
705 {
706 	/* Short pool */
707 	mvpp2_pools[MVPP2_BM_SHORT].buf_num  = MVPP2_BM_SHORT_BUF_NUM;
708 	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
709 
710 	/* Long pool */
711 	mvpp2_pools[MVPP2_BM_LONG].buf_num  = MVPP2_BM_LONG_BUF_NUM;
712 	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
713 
714 	/* Jumbo pool */
715 	mvpp2_pools[MVPP2_BM_JUMBO].buf_num  = MVPP2_BM_JUMBO_BUF_NUM;
716 	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
717 }
718 
719 /* Attach long pool to rxq */
720 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
721 				    int lrxq, int long_pool)
722 {
723 	u32 val, mask;
724 	int prxq;
725 
726 	/* Get queue physical ID */
727 	prxq = port->rxqs[lrxq]->id;
728 
729 	if (port->priv->hw_version == MVPP21)
730 		mask = MVPP21_RXQ_POOL_LONG_MASK;
731 	else
732 		mask = MVPP22_RXQ_POOL_LONG_MASK;
733 
734 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
735 	val &= ~mask;
736 	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
737 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
738 }
739 
740 /* Attach short pool to rxq */
741 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
742 				     int lrxq, int short_pool)
743 {
744 	u32 val, mask;
745 	int prxq;
746 
747 	/* Get queue physical ID */
748 	prxq = port->rxqs[lrxq]->id;
749 
750 	if (port->priv->hw_version == MVPP21)
751 		mask = MVPP21_RXQ_POOL_SHORT_MASK;
752 	else
753 		mask = MVPP22_RXQ_POOL_SHORT_MASK;
754 
755 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
756 	val &= ~mask;
757 	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
758 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
759 }
760 
761 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
762 			     struct mvpp2_bm_pool *bm_pool,
763 			     struct page_pool *page_pool,
764 			     dma_addr_t *buf_dma_addr,
765 			     phys_addr_t *buf_phys_addr,
766 			     gfp_t gfp_mask)
767 {
768 	dma_addr_t dma_addr;
769 	struct page *page;
770 	void *data;
771 
772 	data = mvpp2_frag_alloc(bm_pool, page_pool);
773 	if (!data)
774 		return NULL;
775 
776 	if (page_pool) {
777 		page = (struct page *)data;
778 		dma_addr = page_pool_get_dma_addr(page);
779 		data = page_to_virt(page);
780 	} else {
781 		dma_addr = dma_map_single(port->dev->dev.parent, data,
782 					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
783 					  DMA_FROM_DEVICE);
784 		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
785 			mvpp2_frag_free(bm_pool, NULL, data);
786 			return NULL;
787 		}
788 	}
789 	*buf_dma_addr = dma_addr;
790 	*buf_phys_addr = virt_to_phys(data);
791 
792 	return data;
793 }
794 
795 /* Enable flow control for the port's RXQs */
796 static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
797 {
798 	int val, cm3_state, host_id, q;
799 	int fq = port->first_rxq;
800 	unsigned long flags;
801 
802 	spin_lock_irqsave(&port->priv->mss_spinlock, flags);
803 
804 	/* Clear the flow control enable bit to prevent a race between FW and
805 	 * the kernel; if flow control was enabled, it is re-enabled below.
806 	 */
807 	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
808 	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
809 	val &= ~FLOW_CONTROL_ENABLE_BIT;
810 	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
811 
812 	/* Set same Flow control for all RXQs */
813 	for (q = 0; q < port->nrxqs; q++) {
814 		/* Set stop and start Flow control RXQ thresholds */
815 		val = MSS_THRESHOLD_START;
816 		val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
817 		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
818 
819 		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
820 		/* Set RXQ port ID */
821 		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
822 		val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
823 		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
824 			+ MSS_RXQ_ASS_HOSTID_OFFS));
825 
826 		/* Calculate the RXQ host ID:
827 		 * In single queue mode: the host ID equals the one used
828 		 *			 for the shared RX interrupt
829 		 * In multi queue mode: the host ID equals the RXQ ID
830 		 *			divided by the number of CoS queues
831 		 * In single resource mode: the host ID is always 0
832 		 */
833 		if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
834 			host_id = port->nqvecs;
835 		else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
836 			host_id = q;
837 		else
838 			host_id = 0;
839 
840 		/* Set RXQ host ID */
841 		val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
842 			+ MSS_RXQ_ASS_HOSTID_OFFS));
843 
844 		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
845 	}
846 
847 	/* Notify firmware that the FC config space is ready for update */
848 	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
849 	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
850 	val |= cm3_state;
851 	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
852 
853 	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
854 }
855 
856 /* Disable flow control for the port's RXQs */
857 static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
858 {
859 	int val, cm3_state, q;
860 	unsigned long flags;
861 	int fq = port->first_rxq;
862 
863 	spin_lock_irqsave(&port->priv->mss_spinlock, flags);
864 
865 	/* Clear the flow control enable bit to prevent a race between FW and
866 	 * the kernel; if flow control was enabled, it is re-enabled below.
867 	 */
868 	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
869 	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
870 	val &= ~FLOW_CONTROL_ENABLE_BIT;
871 	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
872 
873 	/* Disable Flow control for all RXQs */
874 	for (q = 0; q < port->nrxqs; q++) {
875 		/* Set threshold 0 to disable Flow control */
876 		val = 0;
877 		val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
878 		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
879 
880 		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
881 
882 		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
883 
884 		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
885 			+ MSS_RXQ_ASS_HOSTID_OFFS));
886 
887 		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
888 	}
889 
890 	/* Notify firmware that the FC config space is ready for update */
891 	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
892 	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
893 	val |= cm3_state;
894 	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
895 
896 	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
897 }
898 
899 /* Enable/disable flow control for a BM pool */
900 static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
901 				    struct mvpp2_bm_pool *pool,
902 				    bool en)
903 {
904 	int val, cm3_state;
905 	unsigned long flags;
906 
907 	spin_lock_irqsave(&port->priv->mss_spinlock, flags);
908 
909 	/* Clear the flow control enable bit to prevent a race between FW and
910 	 * the kernel; if flow control was enabled, it is re-enabled below.
911 	 */
912 	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
913 	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
914 	val &= ~FLOW_CONTROL_ENABLE_BIT;
915 	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
916 
917 	/* Check if the BM pool should be enabled or disabled */
918 	if (en) {
919 		/* Set BM pool start and stop thresholds per port */
920 		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
921 		val |= MSS_BUF_POOL_PORT_OFFS(port->id);
922 		val &= ~MSS_BUF_POOL_START_MASK;
923 		val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
924 		val &= ~MSS_BUF_POOL_STOP_MASK;
925 		val |= MSS_THRESHOLD_STOP;
926 		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
927 	} else {
928 		/* Remove BM pool from the port */
929 		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
930 		val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);
931 
932 		/* Zero the BM pool start and stop thresholds to disable pool
933 		 * flow control when the pool is empty (not used by any port)
934 		 */
935 		if (!pool->buf_num) {
936 			val &= ~MSS_BUF_POOL_START_MASK;
937 			val &= ~MSS_BUF_POOL_STOP_MASK;
938 		}
939 
940 		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
941 	}
942 
943 	/* Notify firmware that the FC config space is ready for update */
944 	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
945 	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
946 	val |= cm3_state;
947 	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
948 
949 	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
950 }
951 
952 /* Enable/disable flow control for BM pools on all ports */
953 static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
954 {
955 	struct mvpp2_port *port;
956 	int i, j;
957 
958 	for (i = 0; i < priv->port_count; i++) {
959 		port = priv->port_list[i];
960 		if (port->priv->percpu_pools) {
961 			for (j = 0; j < port->nrxqs; j++)
962 				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
963 							port->tx_fc & en);
964 		} else {
965 			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
966 			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
967 		}
968 	}
969 }
970 
971 static int mvpp2_enable_global_fc(struct mvpp2 *priv)
972 {
973 	int val, timeout = 0;
974 
975 	/* Enable global flow control. At this stage flow control is
976 	 * enabled globally, but still disabled per port.
977 	 */
978 	val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
979 	val |= FLOW_CONTROL_ENABLE_BIT;
980 	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
981 
982 	/* Check if the firmware is running and disable FC if not */
983 	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
984 	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
985 
986 	while (timeout < MSS_FC_MAX_TIMEOUT) {
987 		val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
988 
989 		if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
990 			return 0;
991 		usleep_range(10, 20);
992 		timeout++;
993 	}
994 
995 	priv->global_tx_fc = false;
996 	return -EOPNOTSUPP;
997 }
998 
999 /* Release buffer to BM */
1000 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
1001 				     dma_addr_t buf_dma_addr,
1002 				     phys_addr_t buf_phys_addr)
1003 {
1004 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
1005 	unsigned long flags = 0;
1006 
1007 	if (test_bit(thread, &port->priv->lock_map))
1008 		spin_lock_irqsave(&port->bm_lock[thread], flags);
1009 
1010 	if (port->priv->hw_version >= MVPP22) {
1011 		u32 val = 0;
1012 
1013 		if (sizeof(dma_addr_t) == 8)
1014 			val |= upper_32_bits(buf_dma_addr) &
1015 				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
1016 
1017 		if (sizeof(phys_addr_t) == 8)
1018 			val |= (upper_32_bits(buf_phys_addr)
1019 				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
1020 				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
1021 
1022 		mvpp2_thread_write_relaxed(port->priv, thread,
1023 					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
1024 	}
1025 
1026 	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
1027 	 * returned in the "cookie" field of the RX
1028 	 * descriptor. Instead of storing the virtual address, we
1029 	 * store the physical address
1030 	 */
1031 	mvpp2_thread_write_relaxed(port->priv, thread,
1032 				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
1033 	mvpp2_thread_write_relaxed(port->priv, thread,
1034 				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
1035 
1036 	if (test_bit(thread, &port->priv->lock_map))
1037 		spin_unlock_irqrestore(&port->bm_lock[thread], flags);
1038 
1039 	put_cpu();
1040 }
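
/* Ordering example (illustrative): to release a buffer with
 * dma = phys = 0x1_2345_6780 on PPv2.2+, the function above first
 * latches the high bits (0x1 for both halves) in
 * MVPP22_BM_ADDR_HIGH_RLS_REG, then writes the low 32 bits of the
 * physical-address cookie to MVPP2_BM_VIRT_RLS_REG, and finally the
 * low 32 bits of the DMA address to MVPP2_BM_PHY_RLS_REG, which
 * completes the release; hence that write must come last.
 */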
1041 
1042 /* Allocate buffers for the pool */
1043 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
1044 			     struct mvpp2_bm_pool *bm_pool, int buf_num)
1045 {
1046 	int i, buf_size, total_size;
1047 	dma_addr_t dma_addr;
1048 	phys_addr_t phys_addr;
1049 	struct page_pool *pp = NULL;
1050 	void *buf;
1051 
1052 	if (port->priv->percpu_pools &&
1053 	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
1054 		netdev_err(port->dev,
1055 			   "attempted to use jumbo frames with per-cpu pools\n");
1056 		return 0;
1057 	}
1058 
1059 	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
1060 	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
1061 
1062 	if (buf_num < 0 ||
1063 	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
1064 		netdev_err(port->dev,
1065 			   "cannot allocate %d buffers for pool %d\n",
1066 			   buf_num, bm_pool->id);
1067 		return 0;
1068 	}
1069 
1070 	if (port->priv->percpu_pools)
1071 		pp = port->priv->page_pool[bm_pool->id];
1072 	for (i = 0; i < buf_num; i++) {
1073 		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
1074 				      &phys_addr, GFP_KERNEL);
1075 		if (!buf)
1076 			break;
1077 
1078 		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
1079 				  phys_addr);
1080 	}
1081 
1082 	/* Update BM driver with number of buffers added to pool */
1083 	bm_pool->buf_num += i;
1084 
1085 	netdev_dbg(port->dev,
1086 		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
1087 		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
1088 
1089 	netdev_dbg(port->dev,
1090 		   "pool %d: %d of %d buffers added\n",
1091 		   bm_pool->id, i, buf_num);
1092 	return i;
1093 }
1094 
1095 /* Notify the driver that the BM pool is being used as a specific type and
1096  * return the pool pointer on success
1097  */
1098 static struct mvpp2_bm_pool *
1099 mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
1100 {
1101 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
1102 	int num;
1103 
1104 	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
1105 	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
1106 		netdev_err(port->dev, "Invalid pool %d\n", pool);
1107 		return NULL;
1108 	}
1109 
1110 	/* Allocate buffers in case BM pool is used as long pool, but packet
1111 	 * size doesn't match the MTU or the BM pool hasn't been used yet
1112 	 */
1113 	if (new_pool->pkt_size == 0) {
1114 		int pkts_num;
1115 
1116 		/* Set default buffer number or free all the buffers in case
1117 		 * the pool is not empty
1118 		 */
1119 		pkts_num = new_pool->buf_num;
1120 		if (pkts_num == 0) {
1121 			if (port->priv->percpu_pools) {
1122 				if (pool < port->nrxqs)
1123 					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
1124 				else
1125 					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
1126 			} else {
1127 				pkts_num = mvpp2_pools[pool].buf_num;
1128 			}
1129 		} else {
1130 			mvpp2_bm_bufs_free(port->dev->dev.parent,
1131 					   port->priv, new_pool, pkts_num);
1132 		}
1133 
1134 		new_pool->pkt_size = pkt_size;
1135 		new_pool->frag_size =
1136 			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
1137 			MVPP2_SKB_SHINFO_SIZE;
1138 
1139 		/* Allocate buffers for this pool */
1140 		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
1141 		if (num != pkts_num) {
1142 			WARN(1, "pool %d: %d of %d allocated\n",
1143 			     new_pool->id, num, pkts_num);
1144 			return NULL;
1145 		}
1146 	}
1147 
1148 	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
1149 				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
1150 
1151 	return new_pool;
1152 }
1153 
1154 static struct mvpp2_bm_pool *
1155 mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
1156 			 unsigned int pool, int pkt_size)
1157 {
1158 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
1159 	int num;
1160 
1161 	if (pool > port->nrxqs * 2) {
1162 		netdev_err(port->dev, "Invalid pool %d\n", pool);
1163 		return NULL;
1164 	}
1165 
1166 	/* Allocate buffers in case BM pool is used as long pool, but packet
1167 	 * size doesn't match the MTU or the BM pool hasn't been used yet
1168 	 */
1169 	if (new_pool->pkt_size == 0) {
1170 		int pkts_num;
1171 
1172 		/* Set default buffer number or free all the buffers in case
1173 		 * the pool is not empty
1174 		 */
1175 		pkts_num = new_pool->buf_num;
1176 		if (pkts_num == 0)
1177 			pkts_num = mvpp2_pools[type].buf_num;
1178 		else
1179 			mvpp2_bm_bufs_free(port->dev->dev.parent,
1180 					   port->priv, new_pool, pkts_num);
1181 
1182 		new_pool->pkt_size = pkt_size;
1183 		new_pool->frag_size =
1184 			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
1185 			MVPP2_SKB_SHINFO_SIZE;
1186 
1187 		/* Allocate buffers for this pool */
1188 		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
1189 		if (num != pkts_num) {
1190 			WARN(1, "pool %d: %d of %d allocated\n",
1191 			     new_pool->id, num, pkts_num);
1192 			return NULL;
1193 		}
1194 	}
1195 
1196 	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
1197 				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
1198 
1199 	return new_pool;
1200 }
1201 
1202 /* Initialize pools for swf, shared buffers variant */
1203 static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
1204 {
1205 	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
1206 	int rxq;
1207 
1208 	/* If port pkt_size is higher than 1518B:
1209 	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
1210 	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
1211 	 */
1212 	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
1213 		long_log_pool = MVPP2_BM_JUMBO;
1214 		short_log_pool = MVPP2_BM_LONG;
1215 	} else {
1216 		long_log_pool = MVPP2_BM_LONG;
1217 		short_log_pool = MVPP2_BM_SHORT;
1218 	}
1219 
1220 	if (!port->pool_long) {
1221 		port->pool_long =
1222 			mvpp2_bm_pool_use(port, long_log_pool,
1223 					  mvpp2_pools[long_log_pool].pkt_size);
1224 		if (!port->pool_long)
1225 			return -ENOMEM;
1226 
1227 		port->pool_long->port_map |= BIT(port->id);
1228 
1229 		for (rxq = 0; rxq < port->nrxqs; rxq++)
1230 			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
1231 	}
1232 
1233 	if (!port->pool_short) {
1234 		port->pool_short =
1235 			mvpp2_bm_pool_use(port, short_log_pool,
1236 					  mvpp2_pools[short_log_pool].pkt_size);
1237 		if (!port->pool_short)
1238 			return -ENOMEM;
1239 
1240 		port->pool_short->port_map |= BIT(port->id);
1241 
1242 		for (rxq = 0; rxq < port->nrxqs; rxq++)
1243 			mvpp2_rxq_short_pool_set(port, rxq,
1244 						 port->pool_short->id);
1245 	}
1246 
1247 	return 0;
1248 }
1249 
1250 /* Initialize pools for swf, percpu buffers variant */
1251 static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
1252 {
1253 	struct mvpp2_bm_pool *bm_pool;
1254 	int i;
1255 
1256 	for (i = 0; i < port->nrxqs; i++) {
1257 		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
1258 						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
1259 		if (!bm_pool)
1260 			return -ENOMEM;
1261 
1262 		bm_pool->port_map |= BIT(port->id);
1263 		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
1264 	}
1265 
1266 	for (i = 0; i < port->nrxqs; i++) {
1267 		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
1268 						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
1269 		if (!bm_pool)
1270 			return -ENOMEM;
1271 
1272 		bm_pool->port_map |= BIT(port->id);
1273 		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
1274 	}
1275 
1276 	port->pool_long = NULL;
1277 	port->pool_short = NULL;
1278 
1279 	return 0;
1280 }
1281 
1282 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
1283 {
1284 	if (port->priv->percpu_pools)
1285 		return mvpp2_swf_bm_pool_init_percpu(port);
1286 	else
1287 		return mvpp2_swf_bm_pool_init_shared(port);
1288 }
1289 
1290 static void mvpp2_set_hw_csum(struct mvpp2_port *port,
1291 			      enum mvpp2_bm_pool_log_num new_long_pool)
1292 {
1293 	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1294 
1295 	/* Update L4 checksum offload when jumbo is enabled/disabled on the
1296 	 * port. Only port 0 supports hardware checksum offload due to
1297 	 * the Tx FIFO size limitation.
1298 	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in the TX
1299 	 * descriptor is only 7 bits wide (maximum L3 offset of 127 bytes).
1300 	 */
1301 	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
1302 		port->dev->features &= ~csums;
1303 		port->dev->hw_features &= ~csums;
1304 	} else {
1305 		port->dev->features |= csums;
1306 		port->dev->hw_features |= csums;
1307 	}
1308 }
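
/* Worked limit (illustrative): a 7-bit L3_offset field can encode
 * offsets 0..127, so an L3 header starting 128 or more bytes into the
 * frame cannot be described to the hardware; advertising only
 * NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM keeps requests within that range
 * for ordinary Ethernet/VLAN encapsulations.
 */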
1309 
1310 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
1311 {
1312 	struct mvpp2_port *port = netdev_priv(dev);
1313 	enum mvpp2_bm_pool_log_num new_long_pool;
1314 	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
1315 
1316 	if (port->priv->percpu_pools)
1317 		goto out_set;
1318 
1319 	/* If port MTU is higher than 1518B:
1320 	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
1321 	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
1322 	 */
1323 	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
1324 		new_long_pool = MVPP2_BM_JUMBO;
1325 	else
1326 		new_long_pool = MVPP2_BM_LONG;
1327 
1328 	if (new_long_pool != port->pool_long->id) {
1329 		if (port->tx_fc) {
1330 			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
1331 				mvpp2_bm_pool_update_fc(port,
1332 							port->pool_short,
1333 							false);
1334 			else
1335 				mvpp2_bm_pool_update_fc(port, port->pool_long,
1336 							false);
1337 		}
1338 
1339 		/* Remove port from old short & long pool */
1340 		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
1341 						    port->pool_long->pkt_size);
1342 		port->pool_long->port_map &= ~BIT(port->id);
1343 		port->pool_long = NULL;
1344 
1345 		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
1346 						     port->pool_short->pkt_size);
1347 		port->pool_short->port_map &= ~BIT(port->id);
1348 		port->pool_short = NULL;
1349 
1350 		port->pkt_size =  pkt_size;
1351 
1352 		/* Add port to new short & long pool */
1353 		mvpp2_swf_bm_pool_init(port);
1354 
1355 		mvpp2_set_hw_csum(port, new_long_pool);
1356 
1357 		if (port->tx_fc) {
1358 			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
1359 				mvpp2_bm_pool_update_fc(port, port->pool_long,
1360 							true);
1361 			else
1362 				mvpp2_bm_pool_update_fc(port, port->pool_short,
1363 							true);
1364 		}
1365 
1366 		/* Update L4 checksum offload when jumbo is enabled/disabled */
1367 		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
1368 			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
1369 			dev->hw_features &= ~(NETIF_F_IP_CSUM |
1370 					      NETIF_F_IPV6_CSUM);
1371 		} else {
1372 			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1373 			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1374 		}
1375 	}
1376 
1377 out_set:
1378 	WRITE_ONCE(dev->mtu, mtu);
1379 	dev->wanted_features = dev->features;
1380 
1381 	netdev_update_features(dev);
1382 	return 0;
1383 }
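
/* Example of the pool switch above (illustrative, assuming
 * MVPP2_BM_LONG_PKT_SIZE corresponds to a standard 1518-byte frame):
 * raising the MTU from 1500 to 9000 makes MVPP2_RX_PKT_SIZE(mtu)
 * exceed MVPP2_BM_LONG_PKT_SIZE, so the port detaches from the
 * LONG/SHORT pools and mvpp2_swf_bm_pool_init() re-attaches it with
 * JUMBO as the HW long pool and LONG as the HW short pool.
 */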
1384 
1385 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
1386 {
1387 	int i, sw_thread_mask = 0;
1388 
1389 	for (i = 0; i < port->nqvecs; i++)
1390 		sw_thread_mask |= port->qvecs[i].sw_thread_mask;
1391 
1392 	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1393 		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
1394 }
1395 
1396 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
1397 {
1398 	int i, sw_thread_mask = 0;
1399 
1400 	for (i = 0; i < port->nqvecs; i++)
1401 		sw_thread_mask |= port->qvecs[i].sw_thread_mask;
1402 
1403 	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1404 		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
1405 }
1406 
1407 static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
1408 {
1409 	struct mvpp2_port *port = qvec->port;
1410 
1411 	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1412 		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
1413 }
1414 
1415 static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
1416 {
1417 	struct mvpp2_port *port = qvec->port;
1418 
1419 	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1420 		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
1421 }
1422 
1423 /* Mask the current thread's Rx/Tx interrupts.
1424  * Called by on_each_cpu(), guaranteed to run with migration disabled,
1425  * so using smp_processor_id() is OK.
1426  */
1427 static void mvpp2_interrupts_mask(void *arg)
1428 {
1429 	struct mvpp2_port *port = arg;
1430 	int cpu = smp_processor_id();
1431 	u32 thread;
1432 
1433 	/* If the thread isn't used, don't do anything */
1434 	if (cpu >= port->priv->nthreads)
1435 		return;
1436 
1437 	thread = mvpp2_cpu_to_thread(port->priv, cpu);
1438 
1439 	mvpp2_thread_write(port->priv, thread,
1440 			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
1441 	mvpp2_thread_write(port->priv, thread,
1442 			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
1443 }
1444 
1445 /* Unmask the current thread's Rx/Tx interrupts.
1446  * Called by on_each_cpu(), guaranteed to run with migration disabled,
1447  * so using smp_processor_id() is OK.
1448  */
1449 static void mvpp2_interrupts_unmask(void *arg)
1450 {
1451 	struct mvpp2_port *port = arg;
1452 	int cpu = smp_processor_id();
1453 	u32 val, thread;
1454 
1455 	/* If the thread isn't used, don't do anything */
1456 	if (cpu >= port->priv->nthreads)
1457 		return;
1458 
1459 	thread = mvpp2_cpu_to_thread(port->priv, cpu);
1460 
1461 	val = MVPP2_CAUSE_MISC_SUM_MASK |
1462 		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
1463 	if (port->has_tx_irqs)
1464 		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
1465 
1466 	mvpp2_thread_write(port->priv, thread,
1467 			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
1468 	mvpp2_thread_write(port->priv, thread,
1469 			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
1470 			   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
1471 }
1472 
1473 static void
1474 mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
1475 {
1476 	u32 val;
1477 	int i;
1478 
1479 	if (port->priv->hw_version == MVPP21)
1480 		return;
1481 
1482 	if (mask)
1483 		val = 0;
1484 	else
1485 		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
1486 
1487 	for (i = 0; i < port->nqvecs; i++) {
1488 		struct mvpp2_queue_vector *v = port->qvecs + i;
1489 
1490 		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
1491 			continue;
1492 
1493 		mvpp2_thread_write(port->priv, v->sw_thread_id,
1494 				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
1495 		mvpp2_thread_write(port->priv, v->sw_thread_id,
1496 				   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
1497 				   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
1498 	}
1499 }
1500 
1501 /* Only GOP port 0 has an XLG MAC */
1502 static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
1503 {
1504 	return port->gop_id == 0;
1505 }
1506 
1507 static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
1508 {
1509 	return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
1510 }
1511 
1512 /* Port configuration routines */
1513 static bool mvpp2_is_xlg(phy_interface_t interface)
1514 {
1515 	return interface == PHY_INTERFACE_MODE_10GBASER ||
1516 	       interface == PHY_INTERFACE_MODE_5GBASER ||
1517 	       interface == PHY_INTERFACE_MODE_XAUI;
1518 }
1519 
1520 static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
1521 {
1522 	u32 old, val;
1523 
1524 	old = val = readl(ptr);
1525 	val &= ~mask;
1526 	val |= set;
1527 	if (old != val)
1528 		writel(val, ptr);
1529 }
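
/* Usage sketch: mvpp2_modify() is a read-modify-write helper that
 * skips the register write when nothing changes; e.g. the IRQ setup
 * below uses
 *
 *	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
 *		     MVPP22_GMAC_INT_SUM_MASK_PTP,
 *		     MVPP22_GMAC_INT_SUM_MASK_PTP);
 *
 * to clear the bits in "mask" and then set the bits in "set".
 */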
1530 
1531 static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
1532 {
1533 	struct mvpp2 *priv = port->priv;
1534 	u32 val;
1535 
1536 	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
1537 	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
1538 	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
1539 
1540 	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
1541 	if (port->gop_id == 2) {
1542 		val |= GENCONF_CTRL0_PORT2_RGMII;
1543 	} else if (port->gop_id == 3) {
1544 		val |= GENCONF_CTRL0_PORT3_RGMII_MII;
1545 
1546 		/* According to the specification, GENCONF_CTRL0_PORT3_RGMII
1547 		 * should be set to 1 for RGMII and 0 for MII. However, tests
1548 		 * show that it is the other way around. This is also what
1549 		 * U-Boot does for mvpp2, so it is assumed to be correct.
1550 		 */
1551 		if (port->phy_interface == PHY_INTERFACE_MODE_MII)
1552 			val |= GENCONF_CTRL0_PORT3_RGMII;
1553 		else
1554 			val &= ~GENCONF_CTRL0_PORT3_RGMII;
1555 	}
1556 	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
1557 }
1558 
1559 static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
1560 {
1561 	struct mvpp2 *priv = port->priv;
1562 	u32 val;
1563 
1564 	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
1565 	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
1566 	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
1567 	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
1568 
1569 	if (port->gop_id > 1) {
1570 		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
1571 		if (port->gop_id == 2)
1572 			val &= ~GENCONF_CTRL0_PORT2_RGMII;
1573 		else if (port->gop_id == 3)
1574 			val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
1575 		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
1576 	}
1577 }
1578 
1579 static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
1580 {
1581 	struct mvpp2 *priv = port->priv;
1582 	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
1583 	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
1584 	u32 val;
1585 
1586 	val = readl(xpcs + MVPP22_XPCS_CFG0);
1587 	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
1588 		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
1589 	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
1590 	writel(val, xpcs + MVPP22_XPCS_CFG0);
1591 
1592 	val = readl(mpcs + MVPP22_MPCS_CTRL);
1593 	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
1594 	writel(val, mpcs + MVPP22_MPCS_CTRL);
1595 
1596 	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
1597 	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
1598 	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
1599 	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
1600 }
1601 
1602 static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
1603 {
1604 	struct mvpp2 *priv = port->priv;
1605 	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
1606 	u32 val;
1607 
1608 	val = readl(fca + MVPP22_FCA_CONTROL_REG);
1609 	val &= ~MVPP22_FCA_ENABLE_PERIODIC;
1610 	if (en)
1611 		val |= MVPP22_FCA_ENABLE_PERIODIC;
1612 	writel(val, fca + MVPP22_FCA_CONTROL_REG);
1613 }
1614 
1615 static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
1616 {
1617 	struct mvpp2 *priv = port->priv;
1618 	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
1619 	u32 lsb, msb;
1620 
1621 	lsb = timer & MVPP22_FCA_REG_MASK;
1622 	msb = timer >> MVPP22_FCA_REG_SIZE;
1623 
1624 	writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
1625 	writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
1626 }
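
/* Worked example (illustrative, assuming MVPP22_FCA_REG_SIZE is 16 and
 * MVPP22_FCA_REG_MASK is 0xffff): timer = 196605 (0x2fffd) is split
 * into lsb = 0xfffd and msb = 0x2 before being written to the two
 * periodic counter registers.
 */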
1627 
1628 /* Set the flow control timer 100x faster than the pause quanta to ensure
1629  * that the link partner won't send traffic while the port is in XOFF mode.
1630  */
1631 static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
1632 {
1633 	u32 timer;
1634 
1635 	timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
1636 		* FC_QUANTA;
1637 
1638 	mvpp22_gop_fca_enable_periodic(port, false);
1639 
1640 	mvpp22_gop_fca_set_timer(port, timer);
1641 
1642 	mvpp22_gop_fca_enable_periodic(port, true);
1643 }
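
/* Worked example (illustrative, assuming FC_CLK_DIVIDER is 100 and
 * FC_QUANTA is 0xffff): with tclk = 333 MHz,
 *
 *	timer = (333000000 / (1000000 * 100)) * 65535
 *	      = 3 * 65535 = 196605
 *
 * which is the value split into LSB/MSB by mvpp22_gop_fca_set_timer()
 * above.
 */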
1644 
1645 static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
1646 {
1647 	struct mvpp2 *priv = port->priv;
1648 	u32 val;
1649 
1650 	if (!priv->sysctrl_base)
1651 		return 0;
1652 
1653 	switch (interface) {
1654 	case PHY_INTERFACE_MODE_MII:
1655 	case PHY_INTERFACE_MODE_RGMII:
1656 	case PHY_INTERFACE_MODE_RGMII_ID:
1657 	case PHY_INTERFACE_MODE_RGMII_RXID:
1658 	case PHY_INTERFACE_MODE_RGMII_TXID:
1659 		if (!mvpp2_port_supports_rgmii(port))
1660 			goto invalid_conf;
1661 		mvpp22_gop_init_rgmii(port);
1662 		break;
1663 	case PHY_INTERFACE_MODE_SGMII:
1664 	case PHY_INTERFACE_MODE_1000BASEX:
1665 	case PHY_INTERFACE_MODE_2500BASEX:
1666 		mvpp22_gop_init_sgmii(port);
1667 		break;
1668 	case PHY_INTERFACE_MODE_5GBASER:
1669 	case PHY_INTERFACE_MODE_10GBASER:
1670 		if (!mvpp2_port_supports_xlg(port))
1671 			goto invalid_conf;
1672 		mvpp22_gop_init_10gkr(port);
1673 		break;
1674 	default:
1675 		goto unsupported_conf;
1676 	}
1677 
1678 	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
1679 	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
1680 	       GENCONF_PORT_CTRL1_EN(port->gop_id);
1681 	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
1682 
1683 	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
1684 	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
1685 	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
1686 
1687 	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
1688 	val |= GENCONF_SOFT_RESET1_GOP;
1689 	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
1690 
1691 	mvpp22_gop_fca_set_periodic_timer(port);
1692 
1693 unsupported_conf:
1694 	return 0;
1695 
1696 invalid_conf:
1697 	netdev_err(port->dev, "Invalid port configuration\n");
1698 	return -EINVAL;
1699 }
1700 
1701 static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
1702 {
1703 	u32 val;
1704 
1705 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
1706 	    phy_interface_mode_is_8023z(port->phy_interface) ||
1707 	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1708 		/* Enable the GMAC link status irq for this port */
1709 		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
1710 		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
1711 		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
1712 	}
1713 
1714 	if (mvpp2_port_supports_xlg(port)) {
1715 		/* Enable the XLG/GIG irqs for this port */
1716 		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
1717 		if (mvpp2_is_xlg(port->phy_interface))
1718 			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
1719 		else
1720 			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
1721 		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
1722 	}
1723 }
1724 
1725 static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
1726 {
1727 	u32 val;
1728 
1729 	if (mvpp2_port_supports_xlg(port)) {
1730 		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
1731 		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
1732 			 MVPP22_XLG_EXT_INT_MASK_GIG);
1733 		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
1734 	}
1735 
1736 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
1737 	    phy_interface_mode_is_8023z(port->phy_interface) ||
1738 	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1739 		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
1740 		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
1741 		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
1742 	}
1743 }
1744 
1745 static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
1746 {
1747 	u32 val;
1748 
1749 	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
1750 		     MVPP22_GMAC_INT_SUM_MASK_PTP,
1751 		     MVPP22_GMAC_INT_SUM_MASK_PTP);
1752 
1753 	if (port->phylink ||
1754 	    phy_interface_mode_is_rgmii(port->phy_interface) ||
1755 	    phy_interface_mode_is_8023z(port->phy_interface) ||
1756 	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1757 		val = readl(port->base + MVPP22_GMAC_INT_MASK);
1758 		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
1759 		writel(val, port->base + MVPP22_GMAC_INT_MASK);
1760 	}
1761 
1762 	if (mvpp2_port_supports_xlg(port)) {
1763 		val = readl(port->base + MVPP22_XLG_INT_MASK);
1764 		val |= MVPP22_XLG_INT_MASK_LINK;
1765 		writel(val, port->base + MVPP22_XLG_INT_MASK);
1766 
1767 		mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
1768 			     MVPP22_XLG_EXT_INT_MASK_PTP,
1769 			     MVPP22_XLG_EXT_INT_MASK_PTP);
1770 	}
1771 
1772 	mvpp22_gop_unmask_irq(port);
1773 }
1774 
1775 /* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
1776  *
1777  * The PHY mode used by the PPv2 driver comes from the network subsystem, while
1778  * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
1779  * differ.
1780  *
1781  * The COMPHY configures the serdes lanes regardless of the actual use of the
1782  * lanes by the physical layer. This is why configurations like
1783  * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
1784  */
1785 static int mvpp22_comphy_init(struct mvpp2_port *port,
1786 			      phy_interface_t interface)
1787 {
1788 	int ret;
1789 
1790 	if (!port->comphy)
1791 		return 0;
1792 
1793 	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface);
1794 	if (ret)
1795 		return ret;
1796 
1797 	return phy_power_on(port->comphy);
1798 }
1799 
1800 static void mvpp2_port_enable(struct mvpp2_port *port)
1801 {
1802 	u32 val;
1803 
1804 	if (mvpp2_port_supports_xlg(port) &&
1805 	    mvpp2_is_xlg(port->phy_interface)) {
1806 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
1807 		val |= MVPP22_XLG_CTRL0_PORT_EN;
1808 		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
1809 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
1810 	} else {
1811 		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1812 		val |= MVPP2_GMAC_PORT_EN_MASK;
1813 		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
1814 		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1815 	}
1816 }
1817 
1818 static void mvpp2_port_disable(struct mvpp2_port *port)
1819 {
1820 	u32 val;
1821 
1822 	if (mvpp2_port_supports_xlg(port) &&
1823 	    mvpp2_is_xlg(port->phy_interface)) {
1824 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
1825 		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
1826 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
1827 	}
1828 
1829 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1830 	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
1831 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1832 }
1833 
1834 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
1835 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
1836 {
1837 	u32 val;
1838 
1839 	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
1840 		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
1841 	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
1842 }
1843 
1844 /* Configure loopback port */
1845 static void mvpp2_port_loopback_set(struct mvpp2_port *port,
1846 				    const struct phylink_link_state *state)
1847 {
1848 	u32 val;
1849 
1850 	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
1851 
1852 	if (state->speed == 1000)
1853 		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
1854 	else
1855 		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
1856 
1857 	if (phy_interface_mode_is_8023z(state->interface) ||
1858 	    state->interface == PHY_INTERFACE_MODE_SGMII)
1859 		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
1860 	else
1861 		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
1862 
1863 	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
1864 }
1865 
1866 enum {
1867 	ETHTOOL_XDP_REDIRECT,
1868 	ETHTOOL_XDP_PASS,
1869 	ETHTOOL_XDP_DROP,
1870 	ETHTOOL_XDP_TX,
1871 	ETHTOOL_XDP_TX_ERR,
1872 	ETHTOOL_XDP_XMIT,
1873 	ETHTOOL_XDP_XMIT_ERR,
1874 };
1875 
1876 struct mvpp2_ethtool_counter {
1877 	unsigned int offset;
1878 	const char string[ETH_GSTRING_LEN];
1879 	bool reg_is_64b;
1880 };
1881 
1882 static u64 mvpp2_read_count(struct mvpp2_port *port,
1883 			    const struct mvpp2_ethtool_counter *counter)
1884 {
1885 	u64 val;
1886 
1887 	val = readl(port->stats_base + counter->offset);
1888 	if (counter->reg_is_64b)
1889 		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
1890 
1891 	return val;
1892 }
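
/* Note on the 64-bit read above: a wide MIB counter occupies two
 * consecutive 32-bit registers, low word first, with the high word at
 * offset + 4 shifted into the upper half. Reading low then high is not
 * atomic, so a carry racing between the two reads could in principle
 * be observed; the periodic gathering below presumably tolerates this.
 */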
1893 
1894 /* Some counters are accessed indirectly by first writing an index to
1895  * MVPP2_CTRS_IDX. The index can represent various resources depending on
1896  * the register we access: it can be a hit counter for some classification
1897  * tables, or a counter specific to an rxq, a txq or a buffer pool.
1898  */
1899 static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
1900 {
1901 	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
1902 	return mvpp2_read(priv, reg);
1903 }
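
#if 0
/* Illustrative use of the indirect counter access above (a sketch, not
 * part of the driver): select the port's first rxq in MVPP2_CTRS_IDX,
 * then read its BM-drop counter. The register name is the one used in
 * the rxq stats table below.
 */
u32 bm_drops = mvpp2_read_index(port->priv, port->first_rxq,
				MVPP2_RX_PKTS_BM_DROP_CTR);
#endif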
1904 
1905 /* Software and hardware statistics are, by design, incremented at
1906  * different moments in the chain of packet processing. Incoming packets
1907  * may therefore be dropped after being counted by hardware but before
1908  * reaching the software statistics (most probably multicast packets),
1909  * while in the opposite direction FCS bytes are added during
1910  * transmission and TSO skbs are split with header bytes added.
1911  * Hence, statistics gathered from userspace with ifconfig (software) and
1912  * ethtool (hardware) cannot be compared.
1913  */
1914 static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
1915 	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
1916 	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
1917 	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
1918 	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
1919 	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
1920 	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
1921 	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
1922 	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
1923 	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
1924 	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
1925 	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
1926 	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
1927 	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
1928 	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
1929 	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
1930 	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
1931 	{ MVPP2_MIB_FC_SENT, "fc_sent" },
1932 	{ MVPP2_MIB_FC_RCVD, "fc_received" },
1933 	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
1934 	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
1935 	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
1936 	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
1937 	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
1938 	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
1939 	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
1940 	{ MVPP2_MIB_COLLISION, "collision" },
1941 	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
1942 };
1943 
1944 static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
1945 	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
1946 	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
1947 };
1948 
1949 static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
1950 	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
1951 	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
1952 	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
1953 	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
1954 	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
1955 	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
1956 	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
1957 	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
1958 	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
1959 };
1960 
1961 static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
1962 	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
1963 	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
1964 	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
1965 	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
1966 };
1967 
1968 static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
1969 	{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
1970 	{ ETHTOOL_XDP_PASS, "rx_xdp_pass", },
1971 	{ ETHTOOL_XDP_DROP, "rx_xdp_drop", },
1972 	{ ETHTOOL_XDP_TX, "rx_xdp_tx", },
1973 	{ ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
1974 	{ ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
1975 	{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
1976 };
1977 
1978 #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
1979 						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
1980 						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
1981 						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
1982 						 ARRAY_SIZE(mvpp2_ethtool_xdp))
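
/* Worked example: with the tables above (27 MIB, 2 port, 9 per-txq,
 * 4 per-rxq and 7 XDP counters), a port with 8 Tx and 8 Rx queues
 * exposes 27 + 2 + 9 * 8 + 4 * 8 + 7 = 140 ethtool statistics.
 */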
1983 
1984 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
1985 				      u8 *data)
1986 {
1987 	struct mvpp2_port *port = netdev_priv(netdev);
1988 	int i, q;
1989 
1990 	if (sset != ETH_SS_STATS)
1991 		return;
1992 
1993 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
1994 		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
1995 			ETH_GSTRING_LEN);
1996 		data += ETH_GSTRING_LEN;
1997 	}
1998 
1999 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
2000 		strscpy(data, mvpp2_ethtool_port_regs[i].string,
2001 			ETH_GSTRING_LEN);
2002 		data += ETH_GSTRING_LEN;
2003 	}
2004 
2005 	for (q = 0; q < port->ntxqs; q++) {
2006 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
2007 			snprintf(data, ETH_GSTRING_LEN,
2008 				 mvpp2_ethtool_txq_regs[i].string, q);
2009 			data += ETH_GSTRING_LEN;
2010 		}
2011 	}
2012 
2013 	for (q = 0; q < port->nrxqs; q++) {
2014 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
2015 			snprintf(data, ETH_GSTRING_LEN,
2016 				 mvpp2_ethtool_rxq_regs[i].string,
2017 				 q);
2018 			data += ETH_GSTRING_LEN;
2019 		}
2020 	}
2021 
2022 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
2023 		strscpy(data, mvpp2_ethtool_xdp[i].string,
2024 			ETH_GSTRING_LEN);
2025 		data += ETH_GSTRING_LEN;
2026 	}
2027 }
2028 
2029 static void
2030 mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
2031 {
2032 	unsigned int start;
2033 	unsigned int cpu;
2034 
2035 	/* Gather XDP Statistics */
2036 	for_each_possible_cpu(cpu) {
2037 		struct mvpp2_pcpu_stats *cpu_stats;
2038 		u64	xdp_redirect;
2039 		u64	xdp_pass;
2040 		u64	xdp_drop;
2041 		u64	xdp_xmit;
2042 		u64	xdp_xmit_err;
2043 		u64	xdp_tx;
2044 		u64	xdp_tx_err;
2045 
2046 		cpu_stats = per_cpu_ptr(port->stats, cpu);
2047 		do {
2048 			start = u64_stats_fetch_begin(&cpu_stats->syncp);
2049 			xdp_redirect = cpu_stats->xdp_redirect;
2050 			xdp_pass   = cpu_stats->xdp_pass;
2051 			xdp_drop = cpu_stats->xdp_drop;
2052 			xdp_xmit   = cpu_stats->xdp_xmit;
2053 			xdp_xmit_err   = cpu_stats->xdp_xmit_err;
2054 			xdp_tx   = cpu_stats->xdp_tx;
2055 			xdp_tx_err   = cpu_stats->xdp_tx_err;
2056 		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
2057 
2058 		xdp_stats->xdp_redirect += xdp_redirect;
2059 		xdp_stats->xdp_pass += xdp_pass;
2060 		xdp_stats->xdp_drop += xdp_drop;
2061 		xdp_stats->xdp_xmit += xdp_xmit;
2062 		xdp_stats->xdp_xmit_err += xdp_xmit_err;
2063 		xdp_stats->xdp_tx += xdp_tx;
2064 		xdp_stats->xdp_tx_err += xdp_tx_err;
2065 	}
2066 }
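
#if 0
/* Writer-side counterpart (sketch, for illustration only): the
 * datapath bumps its per-CPU copy inside a u64_stats write section,
 * which is what makes the fetch_begin/fetch_retry loop above return
 * tear-free 64-bit values on 32-bit hosts.
 */
struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

u64_stats_update_begin(&stats->syncp);
stats->xdp_pass++;
u64_stats_update_end(&stats->syncp);
#endif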
2067 
2068 static void mvpp2_read_stats(struct mvpp2_port *port)
2069 {
2070 	struct mvpp2_pcpu_stats xdp_stats = {};
2071 	const struct mvpp2_ethtool_counter *s;
2072 	u64 *pstats;
2073 	int i, q;
2074 
2075 	pstats = port->ethtool_stats;
2076 
2077 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
2078 		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
2079 
2080 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
2081 		*pstats++ += mvpp2_read(port->priv,
2082 					mvpp2_ethtool_port_regs[i].offset +
2083 					4 * port->id);
2084 
2085 	for (q = 0; q < port->ntxqs; q++)
2086 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
2087 			*pstats++ += mvpp2_read_index(port->priv,
2088 						      MVPP22_CTRS_TX_CTR(port->id, q),
2089 						      mvpp2_ethtool_txq_regs[i].offset);
2090 
2091 	/* Rxqs are numbered from 0 from the user standpoint, but not from the
2092 	 * driver's. We need to add the port->first_rxq offset.
2093 	 */
2094 	for (q = 0; q < port->nrxqs; q++)
2095 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
2096 			*pstats++ += mvpp2_read_index(port->priv,
2097 						      port->first_rxq + q,
2098 						      mvpp2_ethtool_rxq_regs[i].offset);
2099 
2100 	/* Gather XDP Statistics */
2101 	mvpp2_get_xdp_stats(port, &xdp_stats);
2102 
2103 	for (i = 0, s = mvpp2_ethtool_xdp;
2104 	     s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
2105 	     s++, i++) {
2106 		switch (s->offset) {
2107 		case ETHTOOL_XDP_REDIRECT:
2108 			*pstats++ = xdp_stats.xdp_redirect;
2109 			break;
2110 		case ETHTOOL_XDP_PASS:
2111 			*pstats++ = xdp_stats.xdp_pass;
2112 			break;
2113 		case ETHTOOL_XDP_DROP:
2114 			*pstats++ = xdp_stats.xdp_drop;
2115 			break;
2116 		case ETHTOOL_XDP_TX:
2117 			*pstats++ = xdp_stats.xdp_tx;
2118 			break;
2119 		case ETHTOOL_XDP_TX_ERR:
2120 			*pstats++ = xdp_stats.xdp_tx_err;
2121 			break;
2122 		case ETHTOOL_XDP_XMIT:
2123 			*pstats++ = xdp_stats.xdp_xmit;
2124 			break;
2125 		case ETHTOOL_XDP_XMIT_ERR:
2126 			*pstats++ = xdp_stats.xdp_xmit_err;
2127 			break;
2128 		}
2129 	}
2130 }
2131 
2132 static void mvpp2_gather_hw_statistics(struct work_struct *work)
2133 {
2134 	struct delayed_work *del_work = to_delayed_work(work);
2135 	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
2136 					       stats_work);
2137 
2138 	mutex_lock(&port->gather_stats_lock);
2139 
2140 	mvpp2_read_stats(port);
2141 
2142 	/* No need to read the counters again right after this function if it
2143 	 * was called asynchronously by the user (i.e. via ethtool).
2144 	 */
2145 	cancel_delayed_work(&port->stats_work);
2146 	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
2147 			   MVPP2_MIB_COUNTERS_STATS_DELAY);
2148 
2149 	mutex_unlock(&port->gather_stats_lock);
2150 }
2151 
2152 static void mvpp2_ethtool_get_stats(struct net_device *dev,
2153 				    struct ethtool_stats *stats, u64 *data)
2154 {
2155 	struct mvpp2_port *port = netdev_priv(dev);
2156 
2157 	/* Update statistics for the given port, then take the lock to avoid
2158 	 * concurrent accesses to the ethtool_stats structure while it is copied.
2159 	 */
2160 	mvpp2_gather_hw_statistics(&port->stats_work.work);
2161 
2162 	mutex_lock(&port->gather_stats_lock);
2163 	memcpy(data, port->ethtool_stats,
2164 	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
2165 	mutex_unlock(&port->gather_stats_lock);
2166 }
2167 
2168 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
2169 {
2170 	struct mvpp2_port *port = netdev_priv(dev);
2171 
2172 	if (sset == ETH_SS_STATS)
2173 		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
2174 
2175 	return -EOPNOTSUPP;
2176 }
2177 
2178 static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
2179 {
2180 	u32 val;
2181 
2182 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
2183 	      MVPP2_GMAC_PORT_RESET_MASK;
2184 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2185 
2186 	if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) {
2187 		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
2188 		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
2189 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
2190 	}
2191 }
2192 
2193 static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
2194 {
2195 	struct mvpp2 *priv = port->priv;
2196 	void __iomem *mpcs, *xpcs;
2197 	u32 val;
2198 
2199 	if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
2200 		return;
2201 
2202 	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
2203 	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
2204 
2205 	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
2206 	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
2207 	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
2208 	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
2209 
2210 	val = readl(xpcs + MVPP22_XPCS_CFG0);
2211 	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
2212 }
2213 
2214 static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
2215 				      phy_interface_t interface)
2216 {
2217 	struct mvpp2 *priv = port->priv;
2218 	void __iomem *mpcs, *xpcs;
2219 	u32 val;
2220 
2221 	if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
2222 		return;
2223 
2224 	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
2225 	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
2226 
2227 	switch (interface) {
2228 	case PHY_INTERFACE_MODE_5GBASER:
2229 	case PHY_INTERFACE_MODE_10GBASER:
2230 		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
2231 		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
2232 		       MAC_CLK_RESET_SD_TX;
2233 		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
2234 		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
2235 		break;
2236 	case PHY_INTERFACE_MODE_XAUI:
2237 	case PHY_INTERFACE_MODE_RXAUI:
2238 		val = readl(xpcs + MVPP22_XPCS_CFG0);
2239 		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
2240 		break;
2241 	default:
2242 		break;
2243 	}
2244 }
2245 
2246 /* Change maximum receive size of the port */
2247 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2248 {
2249 	u32 val;
2250 
2251 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2252 	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2253 	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2254 		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2255 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2256 }
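
/* Encoding example (illustrative numbers): the register field holds the
 * limit in 16-bit units, so, assuming MVPP2_MH_SIZE is 2 bytes,
 * pkt_size = 1518 is written as (1518 - 2) / 2 = 758.
 */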
2257 
2258 /* Change maximum receive size of the port */
2259 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
2260 {
2261 	u32 val;
2262 
2263 	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
2264 	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
2265 	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2266 	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
2267 	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
2268 }
2269 
2270 /* Set defaults for the MVPP2 port */
2271 static void mvpp2_defaults_set(struct mvpp2_port *port)
2272 {
2273 	int tx_port_num, val, queue, lrxq;
2274 
2275 	if (port->priv->hw_version == MVPP21) {
2276 		/* Update TX FIFO MIN Threshold */
2277 		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2278 		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2279 		/* Min. TX threshold must be less than minimal packet length */
2280 		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2281 		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2282 	}
2283 
2284 	/* Disable Legacy WRR, Disable EJP, Release from reset */
2285 	tx_port_num = mvpp2_egress_port(port);
2286 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2287 		    tx_port_num);
2288 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2289 
2290 	/* Set TXQ scheduling to Round-Robin */
2291 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
2292 
2293 	/* Close bandwidth for all queues */
2294 	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
2295 		mvpp2_write(port->priv,
2296 			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
2297 
2298 	/* Set refill period to 1 usec, refill tokens
2299 	 * and bucket size to maximum
2300 	 */
2301 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
2302 		    port->priv->tclk / USEC_PER_SEC);
2303 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2304 	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2305 	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2306 	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2307 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2308 	val = MVPP2_TXP_TOKEN_SIZE_MAX;
2309 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2310 
2311 	/* Set MaximumLowLatencyPacketSize value to 256 */
2312 	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2313 		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2314 		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2315 
2316 	/* Enable Rx cache snoop */
2317 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2318 		queue = port->rxqs[lrxq]->id;
2319 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2320 		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2321 		       MVPP2_SNOOP_BUF_HDR_MASK;
2322 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2323 	}
2324 
2325 	/* By default, mask all interrupts for all present CPUs */
2326 	mvpp2_interrupts_disable(port);
2327 }
2328 
2329 /* Enable/disable receiving packets */
2330 static void mvpp2_ingress_enable(struct mvpp2_port *port)
2331 {
2332 	u32 val;
2333 	int lrxq, queue;
2334 
2335 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2336 		queue = port->rxqs[lrxq]->id;
2337 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2338 		val &= ~MVPP2_RXQ_DISABLE_MASK;
2339 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2340 	}
2341 }
2342 
2343 static void mvpp2_ingress_disable(struct mvpp2_port *port)
2344 {
2345 	u32 val;
2346 	int lrxq, queue;
2347 
2348 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2349 		queue = port->rxqs[lrxq]->id;
2350 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2351 		val |= MVPP2_RXQ_DISABLE_MASK;
2352 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2353 	}
2354 }
2355 
2356 /* Enable transmit via physical egress queue
2357  * - HW starts to take descriptors from DRAM
2358  */
2359 static void mvpp2_egress_enable(struct mvpp2_port *port)
2360 {
2361 	u32 qmap;
2362 	int queue;
2363 	int tx_port_num = mvpp2_egress_port(port);
2364 
2365 	/* Enable all initialized TXQs. */
2366 	qmap = 0;
2367 	for (queue = 0; queue < port->ntxqs; queue++) {
2368 		struct mvpp2_tx_queue *txq = port->txqs[queue];
2369 
2370 		if (txq->descs)
2371 			qmap |= (1 << queue);
2372 	}
2373 
2374 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2375 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2376 }
2377 
2378 /* Disable transmit via physical egress queue
2379  * - HW doesn't take descriptors from DRAM
2380  */
2381 static void mvpp2_egress_disable(struct mvpp2_port *port)
2382 {
2383 	u32 reg_data;
2384 	int delay;
2385 	int tx_port_num = mvpp2_egress_port(port);
2386 
2387 	/* Issue stop command for active channels only */
2388 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2389 	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2390 		    MVPP2_TXP_SCHED_ENQ_MASK;
2391 	if (reg_data != 0)
2392 		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2393 			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2394 
2395 	/* Wait for all Tx activity to terminate. */
2396 	delay = 0;
2397 	do {
2398 		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2399 			netdev_warn(port->dev,
2400 				    "Tx stop timed out, status=0x%08x\n",
2401 				    reg_data);
2402 			break;
2403 		}
2404 		mdelay(1);
2405 		delay++;
2406 
2407 		/* Check the port TX Command register to verify that
2408 		 * all Tx queues are stopped
2409 		 */
2410 		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2411 	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2412 }
2413 
2414 /* Rx descriptors helper methods */
2415 
2416 /* Get number of Rx descriptors occupied by received packets */
2417 static inline int
2418 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2419 {
2420 	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2421 
2422 	return val & MVPP2_RXQ_OCCUPIED_MASK;
2423 }
2424 
2425 /* Update Rx queue status with the number of occupied and available
2426  * Rx descriptor slots.
2427  */
2428 static inline void
2429 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2430 			int used_count, int free_count)
2431 {
2432 	/* Decrement the number of used descriptors and increment the
2433 	 * number of free descriptors.
2434 	 */
2435 	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2436 
2437 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2438 }
2439 
2440 /* Get pointer to next RX descriptor to be processed by SW */
2441 static inline struct mvpp2_rx_desc *
2442 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2443 {
2444 	int rx_desc = rxq->next_desc_to_proc;
2445 
2446 	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2447 	prefetch(rxq->descs + rxq->next_desc_to_proc);
2448 	return rxq->descs + rx_desc;
2449 }
2450 
2451 /* Set rx queue offset */
2452 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2453 				 int prxq, int offset)
2454 {
2455 	u32 val;
2456 
2457 	/* Convert offset from bytes to units of 32 bytes */
2458 	offset = offset >> 5;
2459 
2460 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2461 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2462 
2463 	/* Offset is in units of 32 bytes */
2464 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2465 		    MVPP2_RXQ_PACKET_OFFSET_MASK);
2466 
2467 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2468 }
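
/* Example: an offset of 64 bytes is programmed as 64 >> 5 = 2 units of
 * 32 bytes. The MVPP2_SKB_HEADROOM value passed in by mvpp2_rxq_init()
 * therefore needs to be a multiple of 32 to be represented exactly.
 */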
2469 
2470 /* Tx descriptors helper methods */
2471 
2472 /* Get pointer to next Tx descriptor to be processed (send) by HW */
2473 static struct mvpp2_tx_desc *
2474 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2475 {
2476 	int tx_desc = txq->next_desc_to_proc;
2477 
2478 	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2479 	return txq->descs + tx_desc;
2480 }
2481 
2482 /* Update HW with number of aggregated Tx descriptors to be sent
2483  *
2484  * Called only from mvpp2_tx(), so migration is disabled, using
2485  * smp_processor_id() is OK.
2486  */
2487 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2488 {
2489 	/* aggregated access - relevant TXQ number is written in TX desc */
2490 	mvpp2_thread_write(port->priv,
2491 			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2492 			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2493 }
2494 
2495 /* Check if there are enough free descriptors in aggregated txq.
2496  * If not, update the number of occupied descriptors and repeat the check.
2497  *
2498  * Called only from mvpp2_tx(), so migration is disabled, using
2499  * smp_processor_id() is OK.
2500  */
2501 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
2502 				     struct mvpp2_tx_queue *aggr_txq, int num)
2503 {
2504 	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
2505 		/* Update number of occupied aggregated Tx descriptors */
2506 		unsigned int thread =
2507 			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2508 		u32 val = mvpp2_read_relaxed(port->priv,
2509 					     MVPP2_AGGR_TXQ_STATUS_REG(thread));
2510 
2511 		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
2512 
2513 		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
2514 			return -ENOMEM;
2515 	}
2516 	return 0;
2517 }
2518 
2519 /* Reserved Tx descriptors allocation request
2520  *
2521  * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
2522  * only by mvpp2_tx(), so migration is disabled, using
2523  * smp_processor_id() is OK.
2524  */
2525 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
2526 					 struct mvpp2_tx_queue *txq, int num)
2527 {
2528 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2529 	struct mvpp2 *priv = port->priv;
2530 	u32 val;
2531 
2532 	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
2533 	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
2534 
2535 	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
2536 
2537 	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
2538 }
2539 
2540 /* Check if there are enough reserved descriptors for transmission.
2541  * If not, request chunk of reserved descriptors and check again.
2542  */
2543 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
2544 					    struct mvpp2_tx_queue *txq,
2545 					    struct mvpp2_txq_pcpu *txq_pcpu,
2546 					    int num)
2547 {
2548 	int req, desc_count;
2549 	unsigned int thread;
2550 
2551 	if (txq_pcpu->reserved_num >= num)
2552 		return 0;
2553 
2554 	/* Not enough descriptors reserved! Update the reserved descriptor
2555 	 * count and check again.
2556 	 */
2557 
2558 	desc_count = 0;
2559 	/* Compute total of used descriptors */
2560 	for (thread = 0; thread < port->priv->nthreads; thread++) {
2561 		struct mvpp2_txq_pcpu *txq_pcpu_aux;
2562 
2563 		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
2564 		desc_count += txq_pcpu_aux->count;
2565 		desc_count += txq_pcpu_aux->reserved_num;
2566 	}
2567 
2568 	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
2569 	desc_count += req;
2570 
2571 	if (desc_count >
2572 	   (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
2573 		return -ENOMEM;
2574 
2575 	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
2576 
2577 	/* OK, the reserved count may have been updated: check again. */
2578 	if (txq_pcpu->reserved_num < num)
2579 		return -ENOMEM;
2580 	return 0;
2581 }
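
/* Walk-through with illustrative numbers: for num = 4 and
 * reserved_num = 1, req = max(MVPP2_CPU_DESC_CHUNK, 3) descriptors are
 * requested from the HW. The request is only made if the sum of every
 * thread's used + reserved descriptors plus req stays below txq->size
 * minus a MVPP2_CPU_DESC_CHUNK headroom per thread; otherwise -ENOMEM
 * is returned and the caller must back off.
 */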
2582 
2583 /* Release the last allocated Tx descriptor. Useful to handle DMA
2584  * mapping failures in the Tx path.
2585  */
2586 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2587 {
2588 	if (txq->next_desc_to_proc == 0)
2589 		txq->next_desc_to_proc = txq->last_desc - 1;
2590 	else
2591 		txq->next_desc_to_proc--;
2592 }
2593 
2594 /* Set Tx descriptors fields relevant for CSUM calculation */
2595 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2596 			       int ip_hdr_len, int l4_proto)
2597 {
2598 	u32 command;
2599 
2600 	/* The fields L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
2601 	 * G_L4_chk and L4_type are required only for checksum calculation
2602 	 */
2603 	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
2604 	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
2605 	command |= MVPP2_TXD_IP_CSUM_DISABLE;
2606 
2607 	if (l3_proto == htons(ETH_P_IP)) {
2608 		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
2609 		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
2610 	} else {
2611 		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
2612 	}
2613 
2614 	if (l4_proto == IPPROTO_TCP) {
2615 		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
2616 		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
2617 	} else if (l4_proto == IPPROTO_UDP) {
2618 		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
2619 		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
2620 	} else {
2621 		command |= MVPP2_TXD_L4_CSUM_NOT;
2622 	}
2623 
2624 	return command;
2625 }
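
#if 0
/* Plausible call for a TCP-over-IPv4 frame (illustration only): L3
 * starts right after the Ethernet header, and ip_hdr_len is assumed to
 * be expressed like the IPv4 IHL field, i.e. 5 32-bit words for a
 * 20-byte header without options.
 */
u32 cmd = mvpp2_txq_desc_csum(ETH_HLEN, htons(ETH_P_IP), 5, IPPROTO_TCP);
#endif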
2626 
2627 /* Get the number of sent descriptors and decrement the counter.
2628  * The number of sent descriptors is returned.
2629  * Per-thread access.
2630  *
2631  * Called only from mvpp2_txq_done(), called from mvpp2_tx()
2632  * (migration disabled) and from the TX completion tasklet (migration
2633  * disabled) so using smp_processor_id() is OK.
2634  */
2635 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2636 					   struct mvpp2_tx_queue *txq)
2637 {
2638 	u32 val;
2639 
2640 	/* Reading status reg resets transmitted descriptor counter */
2641 	val = mvpp2_thread_read_relaxed(port->priv,
2642 					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2643 					MVPP2_TXQ_SENT_REG(txq->id));
2644 
2645 	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2646 		MVPP2_TRANSMITTED_COUNT_OFFSET;
2647 }
2648 
2649 /* Called through on_each_cpu(), so runs on all CPUs, with migration
2650  * disabled, therefore using smp_processor_id() is OK.
2651  */
2652 static void mvpp2_txq_sent_counter_clear(void *arg)
2653 {
2654 	struct mvpp2_port *port = arg;
2655 	int queue;
2656 
2657 	/* If the thread isn't used, don't do anything */
2658 	if (smp_processor_id() >= port->priv->nthreads)
2659 		return;
2660 
2661 	for (queue = 0; queue < port->ntxqs; queue++) {
2662 		int id = port->txqs[queue]->id;
2663 
2664 		mvpp2_thread_read(port->priv,
2665 				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2666 				  MVPP2_TXQ_SENT_REG(id));
2667 	}
2668 }
2669 
2670 /* Set max sizes for Tx queues */
2671 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2672 {
2673 	u32	val, size, mtu;
2674 	int	txq, tx_port_num;
2675 
2676 	mtu = port->pkt_size * 8;
2677 	if (mtu > MVPP2_TXP_MTU_MAX)
2678 		mtu = MVPP2_TXP_MTU_MAX;
2679 
2680 	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
2681 	mtu = 3 * mtu;
2682 
2683 	/* Indirect access to registers */
2684 	tx_port_num = mvpp2_egress_port(port);
2685 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2686 
2687 	/* Set MTU */
2688 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2689 	val &= ~MVPP2_TXP_MTU_MAX;
2690 	val |= mtu;
2691 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2692 
2693 	/* TXP token size and all TXQ token sizes must be larger than the MTU */
2694 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2695 	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2696 	if (size < mtu) {
2697 		size = mtu;
2698 		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2699 		val |= size;
2700 		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2701 	}
2702 
2703 	for (txq = 0; txq < port->ntxqs; txq++) {
2704 		val = mvpp2_read(port->priv,
2705 				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2706 		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2707 
2708 		if (size < mtu) {
2709 			size = mtu;
2710 			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2711 			val |= size;
2712 			mvpp2_write(port->priv,
2713 				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2714 				    val);
2715 		}
2716 	}
2717 }
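
/* Worked example (illustrative): pkt_size = 1518 gives mtu = 12144,
 * well below MVPP2_TXP_MTU_MAX, which the token-bucket workaround then
 * triples to 36432 before programming it and using it as the floor for
 * the port and per-queue token sizes.
 */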
2718 
2719 /* Set the number of non-occupied descriptors threshold */
2720 static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
2721 				     struct mvpp2_rx_queue *rxq)
2722 {
2723 	u32 val;
2724 
2725 	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
2726 
2727 	val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
2728 	val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
2729 	val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
2730 	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
2731 }
2732 
2733 /* Set the number of packets that will be received before an Rx
2734  * interrupt is generated by the HW.
2735  */
2736 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2737 				   struct mvpp2_rx_queue *rxq)
2738 {
2739 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2740 
2741 	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2742 		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2743 
2744 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2745 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2746 			   rxq->pkts_coal);
2747 
2748 	put_cpu();
2749 }
2750 
2751 /* For some reason in the LSP this is done on each CPU. Why? */
2752 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2753 				   struct mvpp2_tx_queue *txq)
2754 {
2755 	unsigned int thread;
2756 	u32 val;
2757 
2758 	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2759 		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2760 
2761 	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2762 	/* PKT-coalescing registers are per-queue + per-thread */
2763 	for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
2764 		mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2765 		mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2766 	}
2767 }
2768 
2769 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2770 {
2771 	u64 tmp = (u64)clk_hz * usec;
2772 
2773 	do_div(tmp, USEC_PER_SEC);
2774 
2775 	return tmp > U32_MAX ? U32_MAX : tmp;
2776 }
2777 
2778 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2779 {
2780 	u64 tmp = (u64)cycles * USEC_PER_SEC;
2781 
2782 	do_div(tmp, clk_hz);
2783 
2784 	return tmp > U32_MAX ? U32_MAX : tmp;
2785 }
2786 
2787 /* Set the time delay in usec before Rx interrupt */
2788 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2789 				   struct mvpp2_rx_queue *rxq)
2790 {
2791 	unsigned long freq = port->priv->tclk;
2792 	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2793 
2794 	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2795 		rxq->time_coal =
2796 			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2797 
2798 		/* re-evaluate to get actual register value */
2799 		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2800 	}
2801 
2802 	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2803 }
2804 
2805 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2806 {
2807 	unsigned long freq = port->priv->tclk;
2808 	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2809 
2810 	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2811 		port->tx_time_coal =
2812 			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2813 
2814 		/* re-evaluate to get actual register value */
2815 		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2816 	}
2817 
2818 	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
2819 }
2820 
2821 /* Free Tx queue skbuffs */
2822 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2823 				struct mvpp2_tx_queue *txq,
2824 				struct mvpp2_txq_pcpu *txq_pcpu, int num)
2825 {
2826 	struct xdp_frame_bulk bq;
2827 	int i;
2828 
2829 	xdp_frame_bulk_init(&bq);
2830 
2831 	rcu_read_lock(); /* needed for xdp_return_frame_bulk */
2832 
2833 	for (i = 0; i < num; i++) {
2834 		struct mvpp2_txq_pcpu_buf *tx_buf =
2835 			txq_pcpu->buffs + txq_pcpu->txq_get_index;
2836 
2837 		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
2838 		    tx_buf->type != MVPP2_TYPE_XDP_TX)
2839 			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2840 					 tx_buf->size, DMA_TO_DEVICE);
2841 		if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
2842 			dev_kfree_skb_any(tx_buf->skb);
2843 		else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
2844 			 tx_buf->type == MVPP2_TYPE_XDP_NDO)
2845 			xdp_return_frame_bulk(tx_buf->xdpf, &bq);
2846 
2847 		mvpp2_txq_inc_get(txq_pcpu);
2848 	}
2849 	xdp_flush_frame_bulk(&bq);
2850 
2851 	rcu_read_unlock();
2852 }
2853 
2854 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2855 							u32 cause)
2856 {
2857 	int queue = fls(cause) - 1;
2858 
2859 	return port->rxqs[queue];
2860 }
2861 
2862 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2863 							u32 cause)
2864 {
2865 	int queue = fls(cause) - 1;
2866 
2867 	return port->txqs[queue];
2868 }
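
/* Example: cause = 0b0110 makes fls(cause) - 1 = 2, so the highest
 * pending queue is looked up first; callers then clear that bit and
 * loop until cause is empty (see mvpp2_tx_done() below).
 */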
2869 
2870 /* Handle end of transmission */
2871 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2872 			   struct mvpp2_txq_pcpu *txq_pcpu)
2873 {
2874 	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2875 	int tx_done;
2876 
2877 	if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
2878 		netdev_err(port->dev, "wrong cpu at the end of Tx processing\n");
2879 
2880 	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2881 	if (!tx_done)
2882 		return;
2883 	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2884 
2885 	txq_pcpu->count -= tx_done;
2886 
2887 	if (netif_tx_queue_stopped(nq))
2888 		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2889 			netif_tx_wake_queue(nq);
2890 }
2891 
2892 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2893 				  unsigned int thread)
2894 {
2895 	struct mvpp2_tx_queue *txq;
2896 	struct mvpp2_txq_pcpu *txq_pcpu;
2897 	unsigned int tx_todo = 0;
2898 
2899 	while (cause) {
2900 		txq = mvpp2_get_tx_queue(port, cause);
2901 		if (!txq)
2902 			break;
2903 
2904 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2905 
2906 		if (txq_pcpu->count) {
2907 			mvpp2_txq_done(port, txq, txq_pcpu);
2908 			tx_todo += txq_pcpu->count;
2909 		}
2910 
2911 		cause &= ~(1 << txq->log_id);
2912 	}
2913 	return tx_todo;
2914 }
2915 
2916 /* Rx/Tx queue initialization/cleanup methods */
2917 
2918 /* Allocate and initialize descriptors for aggr TXQ */
2919 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2920 			       struct mvpp2_tx_queue *aggr_txq,
2921 			       unsigned int thread, struct mvpp2 *priv)
2922 {
2923 	u32 txq_dma;
2924 
2925 	/* Allocate memory for TX descriptors */
2926 	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2927 					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2928 					     &aggr_txq->descs_dma, GFP_KERNEL);
2929 	if (!aggr_txq->descs)
2930 		return -ENOMEM;
2931 
2932 	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2933 
2934 	/* WA: the aggregated TXQ is not reset, so resync with the HW index */
2935 	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2936 						 MVPP2_AGGR_TXQ_INDEX_REG(thread));
2937 
2938 	/* Set Tx descriptors queue starting address - indirect
2939 	 * access
2940 	 */
2941 	if (priv->hw_version == MVPP21)
2942 		txq_dma = aggr_txq->descs_dma;
2943 	else
2944 		txq_dma = aggr_txq->descs_dma >>
2945 			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2946 
2947 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2948 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2949 		    MVPP2_AGGR_TXQ_SIZE);
2950 
2951 	return 0;
2952 }
2953 
2954 /* Create a specified Rx queue */
2955 static int mvpp2_rxq_init(struct mvpp2_port *port,
2956 			  struct mvpp2_rx_queue *rxq)
2957 {
2958 	struct mvpp2 *priv = port->priv;
2959 	unsigned int thread;
2960 	u32 rxq_dma;
2961 	int err;
2962 
2963 	rxq->size = port->rx_ring_size;
2964 
2965 	/* Allocate memory for RX descriptors */
2966 	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2967 					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2968 					&rxq->descs_dma, GFP_KERNEL);
2969 	if (!rxq->descs)
2970 		return -ENOMEM;
2971 
2972 	rxq->last_desc = rxq->size - 1;
2973 
2974 	/* Zero occupied and non-occupied counters - direct access */
2975 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2976 
2977 	/* Set Rx descriptors queue starting address - indirect access */
2978 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2979 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2980 	if (port->priv->hw_version == MVPP21)
2981 		rxq_dma = rxq->descs_dma;
2982 	else
2983 		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2984 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2985 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2986 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2987 	put_cpu();
2988 
2989 	/* Set Offset */
2990 	mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
2991 
2992 	/* Set coalescing pkts and time */
2993 	mvpp2_rx_pkts_coal_set(port, rxq);
2994 	mvpp2_rx_time_coal_set(port, rxq);
2995 
2996 	/* Set the number of non-occupied descriptors threshold */
2997 	mvpp2_set_rxq_free_thresh(port, rxq);
2998 
2999 	/* Add number of descriptors ready for receiving packets */
3000 	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
3001 
3002 	if (priv->percpu_pools) {
3003 		err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
3004 		if (err < 0)
3005 			goto err_free_dma;
3006 
3007 		err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
3008 		if (err < 0)
3009 			goto err_unregister_rxq_short;
3010 
3011 		/* Every RXQ has a pool for short and another for long packets */
3012 		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
3013 						 MEM_TYPE_PAGE_POOL,
3014 						 priv->page_pool[rxq->logic_rxq]);
3015 		if (err < 0)
3016 			goto err_unregister_rxq_long;
3017 
3018 		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
3019 						 MEM_TYPE_PAGE_POOL,
3020 						 priv->page_pool[rxq->logic_rxq +
3021 								 port->nrxqs]);
3022 		if (err < 0)
3023 			goto err_unregister_mem_rxq_short;
3024 	}
3025 
3026 	return 0;
3027 
3028 err_unregister_mem_rxq_short:
3029 	xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
3030 err_unregister_rxq_long:
3031 	xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
3032 err_unregister_rxq_short:
3033 	xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
3034 err_free_dma:
3035 	dma_free_coherent(port->dev->dev.parent,
3036 			  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3037 			  rxq->descs, rxq->descs_dma);
3038 	return err;
3039 }
3040 
3041 /* Push packets received by the RXQ to BM pool */
3042 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3043 				struct mvpp2_rx_queue *rxq)
3044 {
3045 	int rx_received, i;
3046 
3047 	rx_received = mvpp2_rxq_received(port, rxq->id);
3048 	if (!rx_received)
3049 		return;
3050 
3051 	for (i = 0; i < rx_received; i++) {
3052 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3053 		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3054 		int pool;
3055 
3056 		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3057 			MVPP2_RXD_BM_POOL_ID_OFFS;
3058 
3059 		mvpp2_bm_pool_put(port, pool,
3060 				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3061 				  mvpp2_rxdesc_cookie_get(port, rx_desc));
3062 	}
3063 	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3064 }
3065 
3066 /* Cleanup Rx queue */
3067 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3068 			     struct mvpp2_rx_queue *rxq)
3069 {
3070 	unsigned int thread;
3071 
3072 	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
3073 		xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
3074 
3075 	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
3076 		xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
3077 
3078 	mvpp2_rxq_drop_pkts(port, rxq);
3079 
3080 	if (rxq->descs)
3081 		dma_free_coherent(port->dev->dev.parent,
3082 				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3083 				  rxq->descs,
3084 				  rxq->descs_dma);
3085 
3086 	rxq->descs             = NULL;
3087 	rxq->last_desc         = 0;
3088 	rxq->next_desc_to_proc = 0;
3089 	rxq->descs_dma         = 0;
3090 
3091 	/* Clear Rx descriptors queue starting address and size, and the
3092 	 * number of free descriptors
3093 	 */
3094 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3095 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3096 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
3097 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
3098 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
3099 	put_cpu();
3100 }
3101 
3102 /* Create and initialize a Tx queue */
3103 static int mvpp2_txq_init(struct mvpp2_port *port,
3104 			  struct mvpp2_tx_queue *txq)
3105 {
3106 	u32 val;
3107 	unsigned int thread;
3108 	int desc, desc_per_txq, tx_port_num;
3109 	struct mvpp2_txq_pcpu *txq_pcpu;
3110 
3111 	txq->size = port->tx_ring_size;
3112 
3113 	/* Allocate memory for Tx descriptors */
3114 	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
3115 				txq->size * MVPP2_DESC_ALIGNED_SIZE,
3116 				&txq->descs_dma, GFP_KERNEL);
3117 	if (!txq->descs)
3118 		return -ENOMEM;
3119 
3120 	txq->last_desc = txq->size - 1;
3121 
3122 	/* Set Tx descriptors queue starting address - indirect access */
3123 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3124 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3125 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
3126 			   txq->descs_dma);
3127 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
3128 			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
3129 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
3130 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
3131 			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3132 	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
3133 	val &= ~MVPP2_TXQ_PENDING_MASK;
3134 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
3135 
3136 	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
3137 	 * for each existing TXQ.
3138 	 * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT;
3139 	 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
3140 	 */
3141 	desc_per_txq = 16;
3142 	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3143 	       (txq->log_id * desc_per_txq);
3144 
3145 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
3146 			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3147 			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
3148 	put_cpu();
3149 
3150 	/* WRR / EJP configuration - indirect access */
3151 	tx_port_num = mvpp2_egress_port(port);
3152 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3153 
3154 	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3155 	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3156 	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3157 	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3158 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3159 
3160 	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3161 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3162 		    val);
3163 
3164 	for (thread = 0; thread < port->priv->nthreads; thread++) {
3165 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3166 		txq_pcpu->size = txq->size;
3167 		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
3168 						sizeof(*txq_pcpu->buffs),
3169 						GFP_KERNEL);
3170 		if (!txq_pcpu->buffs)
3171 			return -ENOMEM;
3172 
3173 		txq_pcpu->count = 0;
3174 		txq_pcpu->reserved_num = 0;
3175 		txq_pcpu->txq_put_index = 0;
3176 		txq_pcpu->txq_get_index = 0;
3177 		txq_pcpu->tso_headers = NULL;
3178 
3179 		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
3180 		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
3181 
3182 		txq_pcpu->tso_headers =
3183 			dma_alloc_coherent(port->dev->dev.parent,
3184 					   txq_pcpu->size * TSO_HEADER_SIZE,
3185 					   &txq_pcpu->tso_headers_dma,
3186 					   GFP_KERNEL);
3187 		if (!txq_pcpu->tso_headers)
3188 			return -ENOMEM;
3189 	}
3190 
3191 	return 0;
3192 }
3193 
3194 /* Free allocated TXQ resources */
3195 static void mvpp2_txq_deinit(struct mvpp2_port *port,
3196 			     struct mvpp2_tx_queue *txq)
3197 {
3198 	struct mvpp2_txq_pcpu *txq_pcpu;
3199 	unsigned int thread;
3200 
3201 	for (thread = 0; thread < port->priv->nthreads; thread++) {
3202 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3203 		kfree(txq_pcpu->buffs);
3204 
3205 		if (txq_pcpu->tso_headers)
3206 			dma_free_coherent(port->dev->dev.parent,
3207 					  txq_pcpu->size * TSO_HEADER_SIZE,
3208 					  txq_pcpu->tso_headers,
3209 					  txq_pcpu->tso_headers_dma);
3210 
3211 		txq_pcpu->tso_headers = NULL;
3212 	}
3213 
3214 	if (txq->descs)
3215 		dma_free_coherent(port->dev->dev.parent,
3216 				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
3217 				  txq->descs, txq->descs_dma);
3218 
3219 	txq->descs             = NULL;
3220 	txq->last_desc         = 0;
3221 	txq->next_desc_to_proc = 0;
3222 	txq->descs_dma         = 0;
3223 
3224 	/* Set minimum bandwidth for disabled TXQs */
3225 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
3226 
3227 	/* Set Tx descriptors queue starting address and size */
3228 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3229 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3230 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
3231 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
3232 	put_cpu();
3233 }
3234 
3235 /* Clean up a Tx queue: drain pending descriptors and free buffers */
3236 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3237 {
3238 	struct mvpp2_txq_pcpu *txq_pcpu;
3239 	int delay, pending;
3240 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3241 	u32 val;
3242 
3243 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3244 	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
3245 	val |= MVPP2_TXQ_DRAIN_EN_MASK;
3246 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3247 
3248 	/* The NAPI queue has been stopped, so wait for all packets
3249 	 * to be transmitted.
3250 	 */
3251 	delay = 0;
3252 	do {
3253 		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3254 			netdev_warn(port->dev,
3255 				    "port %d: cleaning queue %d timed out\n",
3256 				    port->id, txq->log_id);
3257 			break;
3258 		}
3259 		mdelay(1);
3260 		delay++;
3261 
3262 		pending = mvpp2_thread_read(port->priv, thread,
3263 					    MVPP2_TXQ_PENDING_REG);
3264 		pending &= MVPP2_TXQ_PENDING_MASK;
3265 	} while (pending);
3266 
3267 	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3268 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3269 	put_cpu();
3270 
3271 	for (thread = 0; thread < port->priv->nthreads; thread++) {
3272 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3273 
3274 		/* Release all packets */
3275 		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3276 
3277 		/* Reset queue */
3278 		txq_pcpu->count = 0;
3279 		txq_pcpu->txq_put_index = 0;
3280 		txq_pcpu->txq_get_index = 0;
3281 	}
3282 }
3283 
3284 /* Cleanup all Tx queues */
3285 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3286 {
3287 	struct mvpp2_tx_queue *txq;
3288 	int queue;
3289 	u32 val;
3290 
3291 	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3292 
3293 	/* Reset Tx ports and delete Tx queues */
3294 	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3295 	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3296 
3297 	for (queue = 0; queue < port->ntxqs; queue++) {
3298 		txq = port->txqs[queue];
3299 		mvpp2_txq_clean(port, txq);
3300 		mvpp2_txq_deinit(port, txq);
3301 	}
3302 
3303 	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3304 
3305 	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3306 	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3307 }
3308 
3309 /* Cleanup all Rx queues */
3310 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3311 {
3312 	int queue;
3313 
3314 	for (queue = 0; queue < port->nrxqs; queue++)
3315 		mvpp2_rxq_deinit(port, port->rxqs[queue]);
3316 
3317 	if (port->tx_fc)
3318 		mvpp2_rxq_disable_fc(port);
3319 }
3320 
3321 /* Init all Rx queues for port */
3322 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3323 {
3324 	int queue, err;
3325 
3326 	for (queue = 0; queue < port->nrxqs; queue++) {
3327 		err = mvpp2_rxq_init(port, port->rxqs[queue]);
3328 		if (err)
3329 			goto err_cleanup;
3330 	}
3331 
3332 	if (port->tx_fc)
3333 		mvpp2_rxq_enable_fc(port);
3334 
3335 	return 0;
3336 
3337 err_cleanup:
3338 	mvpp2_cleanup_rxqs(port);
3339 	return err;
3340 }
3341 
3342 /* Init all tx queues for port */
3343 static int mvpp2_setup_txqs(struct mvpp2_port *port)
3344 {
3345 	struct mvpp2_tx_queue *txq;
3346 	int queue, err;
3347 
3348 	for (queue = 0; queue < port->ntxqs; queue++) {
3349 		txq = port->txqs[queue];
3350 		err = mvpp2_txq_init(port, txq);
3351 		if (err)
3352 			goto err_cleanup;
3353 
3354 		/* Assign this queue to a CPU */
3355 		if (queue < num_possible_cpus())
3356 			netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
3357 	}
3358 
3359 	if (port->has_tx_irqs) {
3360 		mvpp2_tx_time_coal_set(port);
3361 		for (queue = 0; queue < port->ntxqs; queue++) {
3362 			txq = port->txqs[queue];
3363 			mvpp2_tx_pkts_coal_set(port, txq);
3364 		}
3365 	}
3366 
3367 	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3368 	return 0;
3369 
3370 err_cleanup:
3371 	mvpp2_cleanup_txqs(port);
3372 	return err;
3373 }
3374 
3375 /* The callback for per-port interrupt */
3376 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
3377 {
3378 	struct mvpp2_queue_vector *qv = dev_id;
3379 
3380 	mvpp2_qvec_interrupt_disable(qv);
3381 
3382 	napi_schedule(&qv->napi);
3383 
3384 	return IRQ_HANDLED;
3385 }
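
#if 0
/* Illustrative NAPI pairing (a sketch only, not the driver's actual
 * poll routine): the queue vector's interrupt stays masked from the ISR
 * above until the poll function finishes under budget, at which point
 * it completes NAPI and re-enables the interrupt.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct mvpp2_queue_vector *qv =
		container_of(napi, struct mvpp2_queue_vector, napi);
	int done = 0;	/* packets processed; RX handling elided */

	if (done < budget && napi_complete_done(napi, done))
		mvpp2_qvec_interrupt_enable(qv);

	return done;
}
#endif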
3386 
3387 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
3388 {
3389 	struct skb_shared_hwtstamps shhwtstamps;
3390 	struct mvpp2_hwtstamp_queue *queue;
3391 	struct sk_buff *skb;
3392 	void __iomem *ptp_q;
3393 	unsigned int id;
3394 	u32 r0, r1, r2;
3395 
3396 	ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3397 	if (nq)
3398 		ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
3399 
3400 	queue = &port->tx_hwtstamp_queue[nq];
3401 
3402 	while (1) {
3403 		r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
3404 		if (!r0)
3405 			break;
3406 
3407 		r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
3408 		r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
3409 
3410 		id = (r0 >> 1) & 31;
3411 
3412 		skb = queue->skb[id];
3413 		queue->skb[id] = NULL;
3414 		if (skb) {
3415 			u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
3416 
3417 			mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
3418 			skb_tstamp_tx(skb, &shhwtstamps);
3419 			dev_kfree_skb_any(skb);
3420 		}
3421 	}
3422 }
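
/* Timestamp reassembly in the loop above: r0, r1 and r2 each carry 16
 * valid bits, and the 32-bit timestamp is stitched together as
 * ts = r2 << 19 | r1 << 3 | r0 >> 13, i.e. bits [15:13] of r0 form the
 * low bits, r1 fills bits [18:3], and the low 13 bits of r2 land in
 * [31:19] (the rest of r2 is shifted out of the u32).
 */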
3423 
3424 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
3425 {
3426 	void __iomem *ptp;
3427 	u32 val;
3428 
3429 	ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3430 	val = readl(ptp + MVPP22_PTP_INT_CAUSE);
3431 	if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
3432 		mvpp2_isr_handle_ptp_queue(port, 0);
3433 	if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
3434 		mvpp2_isr_handle_ptp_queue(port, 1);
3435 }
3436 
3437 static void mvpp2_isr_handle_link(struct mvpp2_port *port,
3438 				  struct phylink_pcs *pcs, bool link)
3439 {
3440 	struct net_device *dev = port->dev;
3441 
3442 	if (port->phylink) {
3443 		phylink_pcs_change(pcs, link);
3444 		return;
3445 	}
3446 
3447 	if (!netif_running(dev))
3448 		return;
3449 
3450 	if (link) {
3451 		mvpp2_interrupts_enable(port);
3452 
3453 		mvpp2_egress_enable(port);
3454 		mvpp2_ingress_enable(port);
3455 		netif_carrier_on(dev);
3456 		netif_tx_wake_all_queues(dev);
3457 	} else {
3458 		netif_tx_stop_all_queues(dev);
3459 		netif_carrier_off(dev);
3460 		mvpp2_ingress_disable(port);
3461 		mvpp2_egress_disable(port);
3462 
3463 		mvpp2_interrupts_disable(port);
3464 	}
3465 }
3466 
3467 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
3468 {
3469 	bool link;
3470 	u32 val;
3471 
3472 	val = readl(port->base + MVPP22_XLG_INT_STAT);
3473 	if (val & MVPP22_XLG_INT_STAT_LINK) {
3474 		val = readl(port->base + MVPP22_XLG_STATUS);
3475 		link = (val & MVPP22_XLG_STATUS_LINK_UP);
3476 		mvpp2_isr_handle_link(port, &port->pcs_xlg, link);
3477 	}
3478 }
3479 
3480 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
3481 {
3482 	bool link;
3483 	u32 val;
3484 
3485 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
3486 	    phy_interface_mode_is_8023z(port->phy_interface) ||
3487 	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
3488 		val = readl(port->base + MVPP22_GMAC_INT_STAT);
3489 		if (val & MVPP22_GMAC_INT_STAT_LINK) {
3490 			val = readl(port->base + MVPP2_GMAC_STATUS0);
3491 			link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
3492 			mvpp2_isr_handle_link(port, &port->pcs_gmac, link);
3493 		}
3494 	}
3495 }
3496 
3497 /* Per-port interrupt for link status changes */
3498 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
3499 {
3500 	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
3501 	u32 val;
3502 
3503 	mvpp22_gop_mask_irq(port);
3504 
3505 	if (mvpp2_port_supports_xlg(port) &&
3506 	    mvpp2_is_xlg(port->phy_interface)) {
3507 		/* Check the external status register */
3508 		val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
3509 		if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
3510 			mvpp2_isr_handle_xlg(port);
3511 		if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
3512 			mvpp2_isr_handle_ptp(port);
3513 	} else {
3514 		/* If it's not the XLG, we must be using the GMAC.
3515 		 * Check the summary status.
3516 		 */
3517 		val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
3518 		if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
3519 			mvpp2_isr_handle_gmac_internal(port);
3520 		if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
3521 			mvpp2_isr_handle_ptp(port);
3522 	}
3523 
3524 	mvpp22_gop_unmask_irq(port);
3525 	return IRQ_HANDLED;
3526 }
3527 
3528 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
3529 {
3530 	struct net_device *dev;
3531 	struct mvpp2_port *port;
3532 	struct mvpp2_port_pcpu *port_pcpu;
3533 	unsigned int tx_todo, cause;
3534 
3535 	port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
3536 	dev = port_pcpu->dev;
3537 
3538 	if (!netif_running(dev))
3539 		return HRTIMER_NORESTART;
3540 
3541 	port_pcpu->timer_scheduled = false;
3542 	port = netdev_priv(dev);
3543 
3544 	/* Process all the Tx queues */
3545 	cause = (1 << port->ntxqs) - 1;
3546 	tx_todo = mvpp2_tx_done(port, cause,
3547 				mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
3548 
3549 	/* Set the timer in case not all the packets were processed */
3550 	if (tx_todo && !port_pcpu->timer_scheduled) {
3551 		port_pcpu->timer_scheduled = true;
3552 		hrtimer_forward_now(&port_pcpu->tx_done_timer,
3553 				    MVPP2_TXDONE_HRTIMER_PERIOD_NS);
3554 
3555 		return HRTIMER_RESTART;
3556 	}
3557 	return HRTIMER_NORESTART;
3558 }
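
/* "cause" in the timer callback is just a bitmap with one bit per Tx queue,
 * so the deferred path asks mvpp2_tx_done() to reap every queue at once.
 * For example, with ntxqs = 4:
 *
 *	cause = (1 << 4) - 1 = 0xf	-> queues 0..3 all selected
 */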
3559 
3560 /* Main RX/TX processing routines */
3561 
3562 /* Display more error info */
3563 static void mvpp2_rx_error(struct mvpp2_port *port,
3564 			   struct mvpp2_rx_desc *rx_desc)
3565 {
3566 	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3567 	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3568 	char *err_str = NULL;
3569 
3570 	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3571 	case MVPP2_RXD_ERR_CRC:
3572 		err_str = "crc";
3573 		break;
3574 	case MVPP2_RXD_ERR_OVERRUN:
3575 		err_str = "overrun";
3576 		break;
3577 	case MVPP2_RXD_ERR_RESOURCE:
3578 		err_str = "resource";
3579 		break;
3580 	}
3581 	if (err_str && net_ratelimit())
3582 		netdev_err(port->dev,
3583 			   "bad rx status %08x (%s error), size=%zu\n",
3584 			   status, err_str, sz);
3585 }
3586 
3587 /* Handle RX checksum offload */
3588 static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status)
3589 {
3590 	if (((status & MVPP2_RXD_L3_IP4) &&
3591 	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
3592 	    (status & MVPP2_RXD_L3_IP6))
3593 		if (((status & MVPP2_RXD_L4_UDP) ||
3594 		     (status & MVPP2_RXD_L4_TCP)) &&
3595 		     (status & MVPP2_RXD_L4_CSUM_OK))
3596 			return CHECKSUM_UNNECESSARY;
3597 
3598 	return CHECKSUM_NONE;
3599 }
3600 
3601 /* Allocate a new buffer and add it to the BM pool */
3602 static int mvpp2_rx_refill(struct mvpp2_port *port,
3603 			   struct mvpp2_bm_pool *bm_pool,
3604 			   struct page_pool *page_pool, int pool)
3605 {
3606 	dma_addr_t dma_addr;
3607 	phys_addr_t phys_addr;
3608 	void *buf;
3609 
3610 	buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
3611 			      &dma_addr, &phys_addr, GFP_ATOMIC);
3612 	if (!buf)
3613 		return -ENOMEM;
3614 
3615 	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3616 
3617 	return 0;
3618 }
3619 
3620 /* Handle tx checksum */
3621 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
3622 {
3623 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
3624 		int ip_hdr_len = 0;
3625 		u8 l4_proto;
3626 		__be16 l3_proto = vlan_get_protocol(skb);
3627 
3628 		if (l3_proto == htons(ETH_P_IP)) {
3629 			struct iphdr *ip4h = ip_hdr(skb);
3630 
3631 			/* Calculate IPv4 checksum and L4 checksum */
3632 			ip_hdr_len = ip4h->ihl;
3633 			l4_proto = ip4h->protocol;
3634 		} else if (l3_proto == htons(ETH_P_IPV6)) {
3635 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
3636 
3637 			/* Read the L4 protocol from one of the IPv6 extension headers */
3638 			if (skb_network_header_len(skb) > 0)
3639 				ip_hdr_len = (skb_network_header_len(skb) >> 2);
3640 			l4_proto = ip6h->nexthdr;
3641 		} else {
3642 			return MVPP2_TXD_L4_CSUM_NOT;
3643 		}
3644 
3645 		return mvpp2_txq_desc_csum(skb_network_offset(skb),
3646 					   l3_proto, ip_hdr_len, l4_proto);
3647 	}
3648 
3649 	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
3650 }
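
/* A worked example of what mvpp2_skb_tx_csum() hands to
 * mvpp2_txq_desc_csum() for a plain IPv4/TCP frame (a sketch; the actual
 * descriptor bit encoding lives in mvpp2_txq_desc_csum()):
 *
 *	l3_proto   = htons(ETH_P_IP);
 *	ip_hdr_len = ip4h->ihl;		e.g. 5, counted in 32-bit words
 *	l4_proto   = IPPROTO_TCP;
 */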
3651 
3652 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
3653 {
3654 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3655 	struct mvpp2_tx_queue *aggr_txq;
3656 	struct mvpp2_txq_pcpu *txq_pcpu;
3657 	struct mvpp2_tx_queue *txq;
3658 	struct netdev_queue *nq;
3659 
3660 	txq = port->txqs[txq_id];
3661 	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3662 	nq = netdev_get_tx_queue(port->dev, txq_id);
3663 	aggr_txq = &port->priv->aggr_txqs[thread];
3664 
3665 	txq_pcpu->reserved_num -= nxmit;
3666 	txq_pcpu->count += nxmit;
3667 	aggr_txq->count += nxmit;
3668 
3669 	/* Enable transmit */
3670 	wmb();
3671 	mvpp2_aggr_txq_pend_desc_add(port, nxmit);
3672 
3673 	if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3674 		netif_tx_stop_queue(nq);
3675 
3676 	/* Finalize TX processing */
3677 	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3678 		mvpp2_txq_done(port, txq, txq_pcpu);
3679 }
3680 
3681 static int
3682 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
3683 		       struct xdp_frame *xdpf, bool dma_map)
3684 {
3685 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3686 	u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
3687 		     MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3688 	enum mvpp2_tx_buf_type buf_type;
3689 	struct mvpp2_txq_pcpu *txq_pcpu;
3690 	struct mvpp2_tx_queue *aggr_txq;
3691 	struct mvpp2_tx_desc *tx_desc;
3692 	struct mvpp2_tx_queue *txq;
3693 	int ret = MVPP2_XDP_TX;
3694 	dma_addr_t dma_addr;
3695 
3696 	txq = port->txqs[txq_id];
3697 	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3698 	aggr_txq = &port->priv->aggr_txqs[thread];
3699 
3700 	/* Check number of available descriptors */
3701 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
3702 	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
3703 		ret = MVPP2_XDP_DROPPED;
3704 		goto out;
3705 	}
3706 
3707 	/* Get a descriptor for the first part of the packet */
3708 	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3709 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3710 	mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
3711 
3712 	if (dma_map) {
3713 		/* XDP_REDIRECT or AF_XDP */
3714 		dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
3715 					  xdpf->len, DMA_TO_DEVICE);
3716 
3717 		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3718 			mvpp2_txq_desc_put(txq);
3719 			ret = MVPP2_XDP_DROPPED;
3720 			goto out;
3721 		}
3722 
3723 		buf_type = MVPP2_TYPE_XDP_NDO;
3724 	} else {
3725 		/* XDP_TX */
3726 		struct page *page = virt_to_page(xdpf->data);
3727 
3728 		dma_addr = page_pool_get_dma_addr(page) +
3729 			   sizeof(*xdpf) + xdpf->headroom;
3730 		dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
3731 					   xdpf->len, DMA_BIDIRECTIONAL);
3732 
3733 		buf_type = MVPP2_TYPE_XDP_TX;
3734 	}
3735 
3736 	mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
3737 
3738 	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3739 	mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
3740 
3741 out:
3742 	return ret;
3743 }
3744 
3745 static int
3746 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
3747 {
3748 	struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3749 	struct xdp_frame *xdpf;
3750 	u16 txq_id;
3751 	int ret;
3752 
3753 	xdpf = xdp_convert_buff_to_frame(xdp);
3754 	if (unlikely(!xdpf))
3755 		return MVPP2_XDP_DROPPED;
3756 
3757 	/* The first half of the TX queues is used for XPS,
3758 	 * the second half for XDP_TX.
3759 	 */
3760 	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3761 
3762 	ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
3763 	if (ret == MVPP2_XDP_TX) {
3764 		u64_stats_update_begin(&stats->syncp);
3765 		stats->tx_bytes += xdpf->len;
3766 		stats->tx_packets++;
3767 		stats->xdp_tx++;
3768 		u64_stats_update_end(&stats->syncp);
3769 
3770 		mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
3771 	} else {
3772 		u64_stats_update_begin(&stats->syncp);
3773 		stats->xdp_tx_err++;
3774 		u64_stats_update_end(&stats->syncp);
3775 	}
3776 
3777 	return ret;
3778 }
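
/* Example of the XDP_TX queue selection above, assuming 4 threads and
 * ntxqs = 8: thread 0 transmits on queue 0 + 8/2 = 4, thread 1 on queue 5,
 * and so on, so XDP_TX traffic never contends with the XPS queues 0..3
 * that mvpp2_setup_txqs() mapped to the CPUs.
 */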
3779 
3780 static int
3781 mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
3782 	       struct xdp_frame **frames, u32 flags)
3783 {
3784 	struct mvpp2_port *port = netdev_priv(dev);
3785 	int i, nxmit_byte = 0, nxmit = 0;
3786 	struct mvpp2_pcpu_stats *stats;
3787 	u16 txq_id;
3788 	u32 ret;
3789 
3790 	if (unlikely(test_bit(0, &port->state)))
3791 		return -ENETDOWN;
3792 
3793 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3794 		return -EINVAL;
3795 
3796 	/* The first half of the TX queues is used for XPS,
3797 	 * the second half for XDP_TX.
3798 	 */
3799 	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3800 
3801 	for (i = 0; i < num_frame; i++) {
3802 		ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
3803 		if (ret != MVPP2_XDP_TX)
3804 			break;
3805 
3806 		nxmit_byte += frames[i]->len;
3807 		nxmit++;
3808 	}
3809 
3810 	if (likely(nxmit > 0))
3811 		mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
3812 
3813 	stats = this_cpu_ptr(port->stats);
3814 	u64_stats_update_begin(&stats->syncp);
3815 	stats->tx_bytes += nxmit_byte;
3816 	stats->tx_packets += nxmit;
3817 	stats->xdp_xmit += nxmit;
3818 	stats->xdp_xmit_err += num_frame - nxmit;
3819 	u64_stats_update_end(&stats->syncp);
3820 
3821 	return nxmit;
3822 }
3823 
3824 static int
3825 mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
3826 	      struct xdp_buff *xdp, struct page_pool *pp,
3827 	      struct mvpp2_pcpu_stats *stats)
3828 {
3829 	unsigned int len, sync, err;
3830 	struct page *page;
3831 	u32 ret, act;
3832 
3833 	len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3834 	act = bpf_prog_run_xdp(prog, xdp);
3835 
3836 	/* Due to xdp_adjust_tail: sync for_device must cover the max len the CPU touched */
3837 	sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3838 	sync = max(sync, len);
3839 
3840 	switch (act) {
3841 	case XDP_PASS:
3842 		stats->xdp_pass++;
3843 		ret = MVPP2_XDP_PASS;
3844 		break;
3845 	case XDP_REDIRECT:
3846 		err = xdp_do_redirect(port->dev, xdp, prog);
3847 		if (unlikely(err)) {
3848 			ret = MVPP2_XDP_DROPPED;
3849 			page = virt_to_head_page(xdp->data);
3850 			page_pool_put_page(pp, page, sync, true);
3851 		} else {
3852 			ret = MVPP2_XDP_REDIR;
3853 			stats->xdp_redirect++;
3854 		}
3855 		break;
3856 	case XDP_TX:
3857 		ret = mvpp2_xdp_xmit_back(port, xdp);
3858 		if (ret != MVPP2_XDP_TX) {
3859 			page = virt_to_head_page(xdp->data);
3860 			page_pool_put_page(pp, page, sync, true);
3861 		}
3862 		break;
3863 	default:
3864 		bpf_warn_invalid_xdp_action(port->dev, prog, act);
3865 		fallthrough;
3866 	case XDP_ABORTED:
3867 		trace_xdp_exception(port->dev, prog, act);
3868 		fallthrough;
3869 	case XDP_DROP:
3870 		page = virt_to_head_page(xdp->data);
3871 		page_pool_put_page(pp, page, sync, true);
3872 		ret = MVPP2_XDP_DROPPED;
3873 		stats->xdp_drop++;
3874 		break;
3875 	}
3876 
3877 	return ret;
3878 }
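
/* Why "sync" is the max of the pre- and post-program lengths: the BPF
 * program may shrink the frame with bpf_xdp_adjust_tail(), but a recycled
 * page must still be synced for the device over everything the CPU may have
 * written. A sketch with illustrative numbers:
 *
 *	len = 1500			length before the program runs
 *	...program trims the frame to 64 bytes...
 *	sync = max(64, 1500) = 1500	sync the full touched region
 */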
3879 
3880 static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
3881 				    int pool, u32 rx_status)
3882 {
3883 	phys_addr_t phys_addr, phys_addr_next;
3884 	dma_addr_t dma_addr, dma_addr_next;
3885 	struct mvpp2_buff_hdr *buff_hdr;
3886 
3887 	phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3888 	dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3889 
3890 	do {
3891 		buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
3892 
3893 		phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
3894 		dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
3895 
3896 		if (port->priv->hw_version >= MVPP22) {
3897 			phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
3898 			dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
3899 		}
3900 
3901 		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3902 
3903 		phys_addr = phys_addr_next;
3904 		dma_addr = dma_addr_next;
3905 
3906 	} while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
3907 }
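
/* A multi-buffer frame arrives as a chain: each buffer starts with a
 * struct mvpp2_buff_hdr whose next_phys_addr/next_dma_addr point at the
 * following buffer. The loop above returns every link to the BM pool until
 * the "last" flag in the header's info word is set; on PPv22 and later the
 * next pointers are split into a 32-bit low word plus a separate high byte,
 * hence the "|= (u64)... << 32" reassembly.
 */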
3908 
3909 /* Main rx processing */
3910 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
3911 		    int rx_todo, struct mvpp2_rx_queue *rxq)
3912 {
3913 	struct net_device *dev = port->dev;
3914 	struct mvpp2_pcpu_stats ps = {};
3915 	enum dma_data_direction dma_dir;
3916 	struct bpf_prog *xdp_prog;
3917 	struct xdp_buff xdp;
3918 	int rx_received;
3919 	int rx_done = 0;
3920 	u32 xdp_ret = 0;
3921 
3922 	xdp_prog = READ_ONCE(port->xdp_prog);
3923 
3924 	/* Get number of received packets and clamp the to-do */
3925 	rx_received = mvpp2_rxq_received(port, rxq->id);
3926 	if (rx_todo > rx_received)
3927 		rx_todo = rx_received;
3928 
3929 	while (rx_done < rx_todo) {
3930 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3931 		struct mvpp2_bm_pool *bm_pool;
3932 		struct page_pool *pp = NULL;
3933 		struct sk_buff *skb;
3934 		unsigned int frag_size;
3935 		dma_addr_t dma_addr;
3936 		phys_addr_t phys_addr;
3937 		u32 rx_status, timestamp;
3938 		int pool, rx_bytes, err, ret;
3939 		struct page *page;
3940 		void *data;
3941 
3942 		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3943 		data = (void *)phys_to_virt(phys_addr);
3944 		page = virt_to_page(data);
3945 		prefetch(page);
3946 
3947 		rx_done++;
3948 		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3949 		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3950 		rx_bytes -= MVPP2_MH_SIZE;
3951 		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3952 
3953 		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3954 			MVPP2_RXD_BM_POOL_ID_OFFS;
3955 		bm_pool = &port->priv->bm_pools[pool];
3956 
3957 		if (port->priv->percpu_pools) {
3958 			pp = port->priv->page_pool[pool];
3959 			dma_dir = page_pool_get_dma_dir(pp);
3960 		} else {
3961 			dma_dir = DMA_FROM_DEVICE;
3962 		}
3963 
3964 		dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3965 					rx_bytes + MVPP2_MH_SIZE,
3966 					dma_dir);
3967 
3968 		/* Buffer header not supported */
3969 		if (rx_status & MVPP2_RXD_BUF_HDR)
3970 			goto err_drop_frame;
3971 
3972 		/* In case of an error, release the requested buffer pointer
3973 		 * to the Buffer Manager. This release is controlled by the
3974 		 * hardware, and the information about the buffer is carried
3975 		 * in the RX descriptor.
3976 		 */
3977 		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
3978 			goto err_drop_frame;
3979 
3980 		/* Prefetch header */
3981 		prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
3982 
3983 		if (bm_pool->frag_size > PAGE_SIZE)
3984 			frag_size = 0;
3985 		else
3986 			frag_size = bm_pool->frag_size;
3987 
3988 		if (xdp_prog) {
3989 			struct xdp_rxq_info *xdp_rxq;
3990 
3991 			if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
3992 				xdp_rxq = &rxq->xdp_rxq_short;
3993 			else
3994 				xdp_rxq = &rxq->xdp_rxq_long;
3995 
3996 			xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
3997 			xdp_prepare_buff(&xdp, data,
3998 					 MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
3999 					 rx_bytes, false);
4000 
4001 			ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
4002 
4003 			if (ret) {
4004 				xdp_ret |= ret;
4005 				err = mvpp2_rx_refill(port, bm_pool, pp, pool);
4006 				if (err) {
4007 					netdev_err(port->dev, "failed to refill BM pools\n");
4008 					goto err_drop_frame;
4009 				}
4010 
4011 				ps.rx_packets++;
4012 				ps.rx_bytes += rx_bytes;
4013 				continue;
4014 			}
4015 		}
4016 
4017 		skb = build_skb(data, frag_size);
4018 		if (!skb) {
4019 			netdev_warn(port->dev, "skb build failed\n");
4020 			goto err_drop_frame;
4021 		}
4022 
4023 		/* If we have RX hardware timestamping enabled, grab the
4024 		 * timestamp from the queue and convert.
4025 		 */
4026 		if (mvpp22_rx_hwtstamping(port)) {
4027 			timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
4028 			mvpp22_tai_tstamp(port->priv->tai, timestamp,
4029 					 skb_hwtstamps(skb));
4030 		}
4031 
4032 		err = mvpp2_rx_refill(port, bm_pool, pp, pool);
4033 		if (err) {
4034 			netdev_err(port->dev, "failed to refill BM pools\n");
4035 			dev_kfree_skb_any(skb);
4036 			goto err_drop_frame;
4037 		}
4038 
4039 		if (pp)
4040 			skb_mark_for_recycle(skb);
4041 		else
4042 			dma_unmap_single_attrs(dev->dev.parent, dma_addr,
4043 					       bm_pool->buf_size, DMA_FROM_DEVICE,
4044 					       DMA_ATTR_SKIP_CPU_SYNC);
4045 
4046 		ps.rx_packets++;
4047 		ps.rx_bytes += rx_bytes;
4048 
4049 		skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
4050 		skb_put(skb, rx_bytes);
4051 		skb->ip_summed = mvpp2_rx_csum(port, rx_status);
4052 		skb->protocol = eth_type_trans(skb, dev);
4053 
4054 		napi_gro_receive(napi, skb);
4055 		continue;
4056 
4057 err_drop_frame:
4058 		dev->stats.rx_errors++;
4059 		mvpp2_rx_error(port, rx_desc);
4060 		/* Return the buffer to the pool */
4061 		if (rx_status & MVPP2_RXD_BUF_HDR)
4062 			mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
4063 		else
4064 			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
4065 	}
4066 
4067 	if (xdp_ret & MVPP2_XDP_REDIR)
4068 		xdp_do_flush();
4069 
4070 	if (ps.rx_packets) {
4071 		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
4072 
4073 		u64_stats_update_begin(&stats->syncp);
4074 		stats->rx_packets += ps.rx_packets;
4075 		stats->rx_bytes   += ps.rx_bytes;
4076 		/* xdp */
4077 		stats->xdp_redirect += ps.xdp_redirect;
4078 		stats->xdp_pass += ps.xdp_pass;
4079 		stats->xdp_drop += ps.xdp_drop;
4080 		u64_stats_update_end(&stats->syncp);
4081 	}
4082 
4083 	/* Update Rx queue management counters */
4084 	wmb();
4085 	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
4086 
4087 	return rx_todo;
4088 }
4089 
4090 static inline void
4091 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4092 		  struct mvpp2_tx_desc *desc)
4093 {
4094 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4095 	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4096 
4097 	dma_addr_t buf_dma_addr =
4098 		mvpp2_txdesc_dma_addr_get(port, desc);
4099 	size_t buf_sz =
4100 		mvpp2_txdesc_size_get(port, desc);
4101 	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
4102 		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
4103 				 buf_sz, DMA_TO_DEVICE);
4104 	mvpp2_txq_desc_put(txq);
4105 }
4106 
4107 static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
4108 				   struct mvpp2_tx_desc *desc)
4109 {
4110 	/* We only need to clear the low bits */
4111 	if (port->priv->hw_version >= MVPP22)
4112 		desc->pp22.ptp_descriptor &=
4113 			cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4114 }
4115 
4116 static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
4117 			       struct mvpp2_tx_desc *tx_desc,
4118 			       struct sk_buff *skb)
4119 {
4120 	struct mvpp2_hwtstamp_queue *queue;
4121 	unsigned int mtype, type, i;
4122 	struct ptp_header *hdr;
4123 	u64 ptpdesc;
4124 
4125 	if (port->priv->hw_version == MVPP21 ||
4126 	    port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
4127 		return false;
4128 
4129 	type = ptp_classify_raw(skb);
4130 	if (!type)
4131 		return false;
4132 
4133 	hdr = ptp_parse_header(skb, type);
4134 	if (!hdr)
4135 		return false;
4136 
4137 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4138 
4139 	ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
4140 		  MVPP22_PTP_ACTION_CAPTURE;
4141 	queue = &port->tx_hwtstamp_queue[0];
4142 
4143 	switch (type & PTP_CLASS_VMASK) {
4144 	case PTP_CLASS_V1:
4145 		ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
4146 		break;
4147 
4148 	case PTP_CLASS_V2:
4149 		ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
4150 		mtype = hdr->tsmt & 15;
4151 		/* Direct PTP Sync messages to queue 1 */
4152 		if (mtype == 0) {
4153 			ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
4154 			queue = &port->tx_hwtstamp_queue[1];
4155 		}
4156 		break;
4157 	}
4158 
4159 	/* Take a reference on the skb and insert into our queue */
4160 	i = queue->next;
4161 	queue->next = (i + 1) & 31;
4162 	if (queue->skb[i])
4163 		dev_kfree_skb_any(queue->skb[i]);
4164 	queue->skb[i] = skb_get(skb);
4165 
4166 	ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
4167 
4168 	/*
4169 	 * 3:0		- PTPAction
4170 	 * 6:4		- PTPPacketFormat
4171 	 * 7		- PTP_CF_WraparoundCheckEn
4172 	 * 9:8		- IngressTimestampSeconds[1:0]
4173 	 * 10		- Reserved
4174 	 * 11		- MACTimestampingEn
4175 	 * 17:12	- PTP_TimestampQueueEntryID[5:0]
4176 	 * 18		- PTPTimestampQueueSelect
4177 	 * 19		- UDPChecksumUpdateEn
4178 	 * 27:20	- TimestampOffset
4179 	 *			PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
4180 	 *			NTPTs, Y.1731 - L3 to timestamp entry
4181 	 * 35:28	- UDP Checksum Offset
4182 	 *
4183 	 * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
4184 	 */
4185 	tx_desc->pp22.ptp_descriptor &=
4186 		cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4187 	tx_desc->pp22.ptp_descriptor |=
4188 		cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
4189 	tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
4190 	tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
4191 
4192 	return true;
4193 }
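
/* A worked example of the descriptor packing at the end of
 * mvpp2_tx_hw_tstamp(), following the bit map in the comment above (my
 * reading of the shifts, not an authoritative register description):
 * ptpdesc bits 11:0 land in ptp_descriptor, and bits 35:12 are shifted
 * into bits 63:40 of buf_dma_addr_ptp:
 *
 *	low  = ptpdesc & MVPP22_PTP_DESC_MASK_LOW	bits 11:0
 *	high = (ptpdesc >> 12) << 40			bits 35:12 -> 63:40
 */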
4194 
4195 /* Handle Tx fragment processing */
4196 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
4197 				 struct mvpp2_tx_queue *aggr_txq,
4198 				 struct mvpp2_tx_queue *txq)
4199 {
4200 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4201 	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4202 	struct mvpp2_tx_desc *tx_desc;
4203 	int i;
4204 	dma_addr_t buf_dma_addr;
4205 
4206 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4207 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4208 		void *addr = skb_frag_address(frag);
4209 
4210 		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4211 		mvpp2_txdesc_clear_ptp(port, tx_desc);
4212 		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4213 		mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
4214 
4215 		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
4216 					      skb_frag_size(frag),
4217 					      DMA_TO_DEVICE);
4218 		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
4219 			mvpp2_txq_desc_put(txq);
4220 			goto cleanup;
4221 		}
4222 
4223 		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4224 
4225 		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
4226 			/* Last descriptor */
4227 			mvpp2_txdesc_cmd_set(port, tx_desc,
4228 					     MVPP2_TXD_L_DESC);
4229 			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4230 		} else {
4231 			/* Descriptor in the middle: Not First, Not Last */
4232 			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
4233 			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4234 		}
4235 	}
4236 
4237 	return 0;
4238 cleanup:
4239 	/* Release all descriptors that were used to map fragments of
4240 	 * this packet, as well as the corresponding DMA mappings
4241 	 */
4242 	for (i = i - 1; i >= 0; i--) {
4243 		tx_desc = txq->descs + i;
4244 		tx_desc_unmap_put(port, txq, tx_desc);
4245 	}
4246 
4247 	return -ENOMEM;
4248 }
4249 
4250 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
4251 				     struct net_device *dev,
4252 				     struct mvpp2_tx_queue *txq,
4253 				     struct mvpp2_tx_queue *aggr_txq,
4254 				     struct mvpp2_txq_pcpu *txq_pcpu,
4255 				     int hdr_sz)
4256 {
4257 	struct mvpp2_port *port = netdev_priv(dev);
4258 	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4259 	dma_addr_t addr;
4260 
4261 	mvpp2_txdesc_clear_ptp(port, tx_desc);
4262 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4263 	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
4264 
4265 	addr = txq_pcpu->tso_headers_dma +
4266 	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
4267 	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
4268 
4269 	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
4270 					    MVPP2_TXD_F_DESC |
4271 					    MVPP2_TXD_PADDING_DISABLE);
4272 	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4273 }
4274 
4275 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
4276 				     struct net_device *dev, struct tso_t *tso,
4277 				     struct mvpp2_tx_queue *txq,
4278 				     struct mvpp2_tx_queue *aggr_txq,
4279 				     struct mvpp2_txq_pcpu *txq_pcpu,
4280 				     int sz, bool left, bool last)
4281 {
4282 	struct mvpp2_port *port = netdev_priv(dev);
4283 	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4284 	dma_addr_t buf_dma_addr;
4285 
4286 	mvpp2_txdesc_clear_ptp(port, tx_desc);
4287 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4288 	mvpp2_txdesc_size_set(port, tx_desc, sz);
4289 
4290 	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
4291 				      DMA_TO_DEVICE);
4292 	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
4293 		mvpp2_txq_desc_put(txq);
4294 		return -ENOMEM;
4295 	}
4296 
4297 	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4298 
4299 	if (!left) {
4300 		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
4301 		if (last) {
4302 			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4303 			return 0;
4304 		}
4305 	} else {
4306 		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
4307 	}
4308 
4309 	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4310 	return 0;
4311 }
4312 
4313 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
4314 			struct mvpp2_tx_queue *txq,
4315 			struct mvpp2_tx_queue *aggr_txq,
4316 			struct mvpp2_txq_pcpu *txq_pcpu)
4317 {
4318 	struct mvpp2_port *port = netdev_priv(dev);
4319 	int hdr_sz, i, len, descs = 0;
4320 	struct tso_t tso;
4321 
4322 	/* Check number of available descriptors */
4323 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
4324 	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
4325 					     tso_count_descs(skb)))
4326 		return 0;
4327 
4328 	hdr_sz = tso_start(skb, &tso);
4329 
4330 	len = skb->len - hdr_sz;
4331 	while (len > 0) {
4332 		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
4333 		char *hdr = txq_pcpu->tso_headers +
4334 			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
4335 
4336 		len -= left;
4337 		descs++;
4338 
4339 		tso_build_hdr(skb, hdr, &tso, left, len == 0);
4340 		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
4341 
4342 		while (left > 0) {
4343 			int sz = min_t(int, tso.size, left);
4344 			left -= sz;
4345 			descs++;
4346 
4347 			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
4348 					       txq_pcpu, sz, left, len == 0))
4349 				goto release;
4350 			tso_build_data(skb, &tso, sz);
4351 		}
4352 	}
4353 
4354 	return descs;
4355 
4356 release:
4357 	for (i = descs - 1; i >= 0; i--) {
4358 		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
4359 		tx_desc_unmap_put(port, txq, tx_desc);
4360 	}
4361 	return 0;
4362 }
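
/* Descriptor accounting in mvpp2_tx_tso(), by example: a 9000-byte payload
 * with gso_size = 1460 becomes 7 segments, and each segment costs one header
 * descriptor plus at least one data descriptor, so "descs" ends up at 14 or
 * more. This is why tso_count_descs(skb) descriptors are reserved up front
 * and the function returns 0 (frame dropped by the caller) when the
 * reservation fails.
 */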
4363 
4364 /* Main tx processing */
4365 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
4366 {
4367 	struct mvpp2_port *port = netdev_priv(dev);
4368 	struct mvpp2_tx_queue *txq, *aggr_txq;
4369 	struct mvpp2_txq_pcpu *txq_pcpu;
4370 	struct mvpp2_tx_desc *tx_desc;
4371 	dma_addr_t buf_dma_addr;
4372 	unsigned long flags = 0;
4373 	unsigned int thread;
4374 	int frags = 0;
4375 	u16 txq_id;
4376 	u32 tx_cmd;
4377 
4378 	thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4379 
4380 	txq_id = skb_get_queue_mapping(skb);
4381 	txq = port->txqs[txq_id];
4382 	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4383 	aggr_txq = &port->priv->aggr_txqs[thread];
4384 
4385 	if (test_bit(thread, &port->priv->lock_map))
4386 		spin_lock_irqsave(&port->tx_lock[thread], flags);
4387 
4388 	if (skb_is_gso(skb)) {
4389 		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
4390 		goto out;
4391 	}
4392 	frags = skb_shinfo(skb)->nr_frags + 1;
4393 
4394 	/* Check number of available descriptors */
4395 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
4396 	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
4397 		frags = 0;
4398 		goto out;
4399 	}
4400 
4401 	/* Get a descriptor for the first part of the packet */
4402 	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4403 	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
4404 	    !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
4405 		mvpp2_txdesc_clear_ptp(port, tx_desc);
4406 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4407 	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
4408 
4409 	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
4410 				      skb_headlen(skb), DMA_TO_DEVICE);
4411 	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
4412 		mvpp2_txq_desc_put(txq);
4413 		frags = 0;
4414 		goto out;
4415 	}
4416 
4417 	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4418 
4419 	tx_cmd = mvpp2_skb_tx_csum(port, skb);
4420 
4421 	if (frags == 1) {
4422 		/* First and Last descriptor */
4423 		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
4424 		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4425 		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4426 	} else {
4427 		/* First but not Last */
4428 		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
4429 		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4430 		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4431 
4432 		/* Continue with other skb fragments */
4433 		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
4434 			tx_desc_unmap_put(port, txq, tx_desc);
4435 			frags = 0;
4436 		}
4437 	}
4438 
4439 out:
4440 	if (frags > 0) {
4441 		struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
4442 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
4443 
4444 		txq_pcpu->reserved_num -= frags;
4445 		txq_pcpu->count += frags;
4446 		aggr_txq->count += frags;
4447 
4448 		/* Enable transmit */
4449 		wmb();
4450 		mvpp2_aggr_txq_pend_desc_add(port, frags);
4451 
4452 		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
4453 			netif_tx_stop_queue(nq);
4454 
4455 		u64_stats_update_begin(&stats->syncp);
4456 		stats->tx_packets++;
4457 		stats->tx_bytes += skb->len;
4458 		u64_stats_update_end(&stats->syncp);
4459 	} else {
4460 		dev->stats.tx_dropped++;
4461 		dev_kfree_skb_any(skb);
4462 	}
4463 
4464 	/* Finalize TX processing */
4465 	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
4466 		mvpp2_txq_done(port, txq, txq_pcpu);
4467 
4468 	/* Set the timer in case not all frags were processed */
4469 	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
4470 	    txq_pcpu->count > 0) {
4471 		struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
4472 
4473 		if (!port_pcpu->timer_scheduled) {
4474 			port_pcpu->timer_scheduled = true;
4475 			hrtimer_start(&port_pcpu->tx_done_timer,
4476 				      MVPP2_TXDONE_HRTIMER_PERIOD_NS,
4477 				      HRTIMER_MODE_REL_PINNED_SOFT);
4478 		}
4479 	}
4480 
4481 	if (test_bit(thread, &port->priv->lock_map))
4482 		spin_unlock_irqrestore(&port->tx_lock[thread], flags);
4483 
4484 	return NETDEV_TX_OK;
4485 }
4486 
4487 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
4488 {
4489 	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
4490 		netdev_err(dev, "FCS error\n");
4491 	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
4492 		netdev_err(dev, "rx fifo overrun error\n");
4493 	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
4494 		netdev_err(dev, "tx fifo underrun error\n");
4495 }
4496 
4497 static int mvpp2_poll(struct napi_struct *napi, int budget)
4498 {
4499 	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
4500 	int rx_done = 0;
4501 	struct mvpp2_port *port = netdev_priv(napi->dev);
4502 	struct mvpp2_queue_vector *qv;
4503 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4504 
4505 	qv = container_of(napi, struct mvpp2_queue_vector, napi);
4506 
4507 	/* Rx/Tx cause register
4508 	 *
4509 	 * Bits 0-15: each bit indicates received packets on the Rx queue
4510 	 * (bit 0 is for Rx queue 0).
4511 	 *
4512 	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
4513 	 * (bit 16 is for Tx queue 0).
4514 	 *
4515 	 * Each CPU has its own Rx/Tx cause register
4516 	 */
4517 	cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
4518 						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
4519 
4520 	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
4521 	if (cause_misc) {
4522 		mvpp2_cause_error(port->dev, cause_misc);
4523 
4524 		/* Clear the cause register */
4525 		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
4526 		mvpp2_thread_write(port->priv, thread,
4527 				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
4528 				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
4529 	}
4530 
4531 	if (port->has_tx_irqs) {
4532 		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4533 		if (cause_tx) {
4534 			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
4535 			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
4536 		}
4537 	}
4538 
4539 	/* Process RX packets */
4540 	cause_rx = cause_rx_tx &
4541 		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
4542 	cause_rx <<= qv->first_rxq;
4543 	cause_rx |= qv->pending_cause_rx;
4544 	while (cause_rx && budget > 0) {
4545 		int count;
4546 		struct mvpp2_rx_queue *rxq;
4547 
4548 		rxq = mvpp2_get_rx_queue(port, cause_rx);
4549 		if (!rxq)
4550 			break;
4551 
4552 		count = mvpp2_rx(port, napi, budget, rxq);
4553 		rx_done += count;
4554 		budget -= count;
4555 		if (budget > 0) {
4556 			/* Clear the bit associated with this Rx queue
4557 			 * so that the next iteration continues from
4558 			 * the next Rx queue.
4559 			 */
4560 			cause_rx &= ~(1 << rxq->logic_rxq);
4561 		}
4562 	}
4563 
4564 	if (budget > 0) {
4565 		cause_rx = 0;
4566 		napi_complete_done(napi, rx_done);
4567 
4568 		mvpp2_qvec_interrupt_enable(qv);
4569 	}
4570 	qv->pending_cause_rx = cause_rx;
4571 	return rx_done;
4572 }
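
/* Decoding the cause register, by example (bit layout per the comment at
 * the top of mvpp2_poll(); the mask values here are illustrative):
 * cause_rx_tx = 0x00030005 means Rx queues 0 and 2 plus Tx queues 0 and 1
 * have work pending:
 *
 *	cause_rx = 0x00030005 & 0xffff       = 0x0005	Rx queues 0 and 2
 *	cause_tx = (0x00030005 >> 16) & 0xff = 0x03	Tx queues 0 and 1
 */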
4573 
4574 static void mvpp22_mode_reconfigure(struct mvpp2_port *port,
4575 				    phy_interface_t interface)
4576 {
4577 	u32 ctrl3;
4578 
4579 	/* Set the GMAC & XLG MAC in reset */
4580 	mvpp2_mac_reset_assert(port);
4581 
4582 	/* Set the MPCS and XPCS in reset */
4583 	mvpp22_pcs_reset_assert(port);
4584 
4585 	/* comphy reconfiguration */
4586 	mvpp22_comphy_init(port, interface);
4587 
4588 	/* gop reconfiguration */
4589 	mvpp22_gop_init(port, interface);
4590 
4591 	mvpp22_pcs_reset_deassert(port, interface);
4592 
4593 	if (mvpp2_port_supports_xlg(port)) {
4594 		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
4595 		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4596 
4597 		if (mvpp2_is_xlg(interface))
4598 			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4599 		else
4600 			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4601 
4602 		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
4603 	}
4604 
4605 	if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface))
4606 		mvpp2_xlg_max_rx_size_set(port);
4607 	else
4608 		mvpp2_gmac_max_rx_size_set(port);
4609 }
4610 
4611 /* Set hw internals when starting port */
4612 static void mvpp2_start_dev(struct mvpp2_port *port)
4613 {
4614 	int i;
4615 
4616 	mvpp2_txp_max_tx_size_set(port);
4617 
4618 	for (i = 0; i < port->nqvecs; i++)
4619 		napi_enable(&port->qvecs[i].napi);
4620 
4621 	/* Enable interrupts on all threads */
4622 	mvpp2_interrupts_enable(port);
4623 
4624 	if (port->priv->hw_version >= MVPP22)
4625 		mvpp22_mode_reconfigure(port, port->phy_interface);
4626 
4627 	if (port->phylink) {
4628 		phylink_start(port->phylink);
4629 	} else {
4630 		mvpp2_acpi_start(port);
4631 	}
4632 
4633 	netif_tx_start_all_queues(port->dev);
4634 
4635 	clear_bit(0, &port->state);
4636 }
4637 
4638 /* Set hw internals when stopping port */
4639 static void mvpp2_stop_dev(struct mvpp2_port *port)
4640 {
4641 	int i;
4642 
4643 	set_bit(0, &port->state);
4644 
4645 	/* Disable interrupts on all threads */
4646 	mvpp2_interrupts_disable(port);
4647 
4648 	for (i = 0; i < port->nqvecs; i++)
4649 		napi_disable(&port->qvecs[i].napi);
4650 
4651 	if (port->phylink)
4652 		phylink_stop(port->phylink);
4653 	phy_power_off(port->comphy);
4654 }
4655 
4656 static int mvpp2_check_ringparam_valid(struct net_device *dev,
4657 				       struct ethtool_ringparam *ring)
4658 {
4659 	u16 new_rx_pending = ring->rx_pending;
4660 	u16 new_tx_pending = ring->tx_pending;
4661 
4662 	if (ring->rx_pending == 0 || ring->tx_pending == 0)
4663 		return -EINVAL;
4664 
4665 	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
4666 		new_rx_pending = MVPP2_MAX_RXD_MAX;
4667 	else if (ring->rx_pending < MSS_THRESHOLD_START)
4668 		new_rx_pending = MSS_THRESHOLD_START;
4669 	else if (!IS_ALIGNED(ring->rx_pending, 16))
4670 		new_rx_pending = ALIGN(ring->rx_pending, 16);
4671 
4672 	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
4673 		new_tx_pending = MVPP2_MAX_TXD_MAX;
4674 	else if (!IS_ALIGNED(ring->tx_pending, 32))
4675 		new_tx_pending = ALIGN(ring->tx_pending, 32);
4676 
4677 	/* The Tx ring size cannot be smaller than the minimum number of
4678 	 * descriptors needed for TSO.
4679 	 */
4680 	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
4681 		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
4682 
4683 	if (ring->rx_pending != new_rx_pending) {
4684 		netdev_info(dev, "illegal Rx ring size value %d, rounding to %d\n",
4685 			    ring->rx_pending, new_rx_pending);
4686 		ring->rx_pending = new_rx_pending;
4687 	}
4688 
4689 	if (ring->tx_pending != new_tx_pending) {
4690 		netdev_info(dev, "illegal Tx ring size value %d, rounding to %d\n",
4691 			    ring->tx_pending, new_tx_pending);
4692 		ring->tx_pending = new_tx_pending;
4693 	}
4694 
4695 	return 0;
4696 }
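
/* Examples of the rounding performed above: rx_pending = 1000 is not
 * 16-aligned and becomes ALIGN(1000, 16) = 1008; tx_pending = 100 becomes
 * ALIGN(100, 32) = 128, and would then be raised further if that is still
 * below MVPP2_MAX_SKB_DESCS, keeping the ring large enough for a
 * worst-case TSO frame.
 */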
4697 
4698 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
4699 {
4700 	u32 mac_addr_l, mac_addr_m, mac_addr_h;
4701 
4702 	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4703 	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
4704 	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
4705 	addr[0] = (mac_addr_h >> 24) & 0xFF;
4706 	addr[1] = (mac_addr_h >> 16) & 0xFF;
4707 	addr[2] = (mac_addr_h >> 8) & 0xFF;
4708 	addr[3] = mac_addr_h & 0xFF;
4709 	addr[4] = mac_addr_m & 0xFF;
4710 	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
4711 }
4712 
4713 static int mvpp2_irqs_init(struct mvpp2_port *port)
4714 {
4715 	int err, i;
4716 
4717 	for (i = 0; i < port->nqvecs; i++) {
4718 		struct mvpp2_queue_vector *qv = port->qvecs + i;
4719 
4720 		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4721 			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
4722 			if (!qv->mask) {
4723 				err = -ENOMEM;
4724 				goto err;
4725 			}
4726 
4727 			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
4728 		}
4729 
4730 		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
4731 		if (err)
4732 			goto err;
4733 
4734 		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4735 			unsigned int cpu;
4736 
4737 			for_each_present_cpu(cpu) {
4738 				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
4739 				    qv->sw_thread_id)
4740 					cpumask_set_cpu(cpu, qv->mask);
4741 			}
4742 
4743 			irq_set_affinity_hint(qv->irq, qv->mask);
4744 		}
4745 	}
4746 
4747 	return 0;
4748 err:
4749 	for (i = 0; i < port->nqvecs; i++) {
4750 		struct mvpp2_queue_vector *qv = port->qvecs + i;
4751 
4752 		irq_set_affinity_hint(qv->irq, NULL);
4753 		kfree(qv->mask);
4754 		qv->mask = NULL;
4755 		free_irq(qv->irq, qv);
4756 	}
4757 
4758 	return err;
4759 }
4760 
4761 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
4762 {
4763 	int i;
4764 
4765 	for (i = 0; i < port->nqvecs; i++) {
4766 		struct mvpp2_queue_vector *qv = port->qvecs + i;
4767 
4768 		irq_set_affinity_hint(qv->irq, NULL);
4769 		kfree(qv->mask);
4770 		qv->mask = NULL;
4771 		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
4772 		free_irq(qv->irq, qv);
4773 	}
4774 }
4775 
4776 static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
4777 {
4778 	return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
4779 		!(port->flags & MVPP2_F_LOOPBACK);
4780 }
4781 
4782 static int mvpp2_open(struct net_device *dev)
4783 {
4784 	struct mvpp2_port *port = netdev_priv(dev);
4785 	struct mvpp2 *priv = port->priv;
4786 	unsigned char mac_bcast[ETH_ALEN] = {
4787 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4788 	bool valid = false;
4789 	int err;
4790 
4791 	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
4792 	if (err) {
4793 		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
4794 		return err;
4795 	}
4796 	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
4797 	if (err) {
4798 		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
4799 		return err;
4800 	}
4801 	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
4802 	if (err) {
4803 		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
4804 		return err;
4805 	}
4806 	err = mvpp2_prs_def_flow(port);
4807 	if (err) {
4808 		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
4809 		return err;
4810 	}
4811 
4812 	/* Allocate the Rx/Tx queues */
4813 	err = mvpp2_setup_rxqs(port);
4814 	if (err) {
4815 		netdev_err(port->dev, "cannot allocate Rx queues\n");
4816 		return err;
4817 	}
4818 
4819 	err = mvpp2_setup_txqs(port);
4820 	if (err) {
4821 		netdev_err(port->dev, "cannot allocate Tx queues\n");
4822 		goto err_cleanup_rxqs;
4823 	}
4824 
4825 	err = mvpp2_irqs_init(port);
4826 	if (err) {
4827 		netdev_err(port->dev, "cannot init IRQs\n");
4828 		goto err_cleanup_txqs;
4829 	}
4830 
4831 	if (port->phylink) {
4832 		err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
4833 		if (err) {
4834 			netdev_err(port->dev, "could not attach PHY (%d)\n",
4835 				   err);
4836 			goto err_free_irq;
4837 		}
4838 
4839 		valid = true;
4840 	}
4841 
4842 	if (priv->hw_version >= MVPP22 && port->port_irq) {
4843 		err = request_irq(port->port_irq, mvpp2_port_isr, 0,
4844 				  dev->name, port);
4845 		if (err) {
4846 			netdev_err(port->dev,
4847 				   "cannot request port link/ptp IRQ %d\n",
4848 				   port->port_irq);
4849 			goto err_free_irq;
4850 		}
4851 
4852 		mvpp22_gop_setup_irq(port);
4853 
4854 		/* By default, the link is down */
4855 		netif_carrier_off(port->dev);
4856 
4857 		valid = true;
4858 	} else {
4859 		port->port_irq = 0;
4860 	}
4861 
4862 	if (!valid) {
4863 		netdev_err(port->dev,
4864 			   "invalid configuration: no dt or link IRQ\n");
4865 		err = -ENOENT;
4866 		goto err_free_irq;
4867 	}
4868 
4869 	/* Unmask interrupts on all CPUs */
4870 	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
4871 	mvpp2_shared_interrupt_mask_unmask(port, false);
4872 
4873 	mvpp2_start_dev(port);
4874 
4875 	/* Start hardware statistics gathering */
4876 	queue_delayed_work(priv->stats_queue, &port->stats_work,
4877 			   MVPP2_MIB_COUNTERS_STATS_DELAY);
4878 
4879 	return 0;
4880 
4881 err_free_irq:
4882 	mvpp2_irqs_deinit(port);
4883 err_cleanup_txqs:
4884 	mvpp2_cleanup_txqs(port);
4885 err_cleanup_rxqs:
4886 	mvpp2_cleanup_rxqs(port);
4887 	return err;
4888 }
4889 
4890 static int mvpp2_stop(struct net_device *dev)
4891 {
4892 	struct mvpp2_port *port = netdev_priv(dev);
4893 	struct mvpp2_port_pcpu *port_pcpu;
4894 	unsigned int thread;
4895 
4896 	mvpp2_stop_dev(port);
4897 
4898 	/* Mask interrupts on all threads */
4899 	on_each_cpu(mvpp2_interrupts_mask, port, 1);
4900 	mvpp2_shared_interrupt_mask_unmask(port, true);
4901 
4902 	if (port->phylink)
4903 		phylink_disconnect_phy(port->phylink);
4904 	if (port->port_irq)
4905 		free_irq(port->port_irq, port);
4906 
4907 	mvpp2_irqs_deinit(port);
4908 	if (!port->has_tx_irqs) {
4909 		for (thread = 0; thread < port->priv->nthreads; thread++) {
4910 			port_pcpu = per_cpu_ptr(port->pcpu, thread);
4911 
4912 			hrtimer_cancel(&port_pcpu->tx_done_timer);
4913 			port_pcpu->timer_scheduled = false;
4914 		}
4915 	}
4916 	mvpp2_cleanup_rxqs(port);
4917 	mvpp2_cleanup_txqs(port);
4918 
4919 	cancel_delayed_work_sync(&port->stats_work);
4920 
4921 	mvpp2_mac_reset_assert(port);
4922 	mvpp22_pcs_reset_assert(port);
4923 
4924 	return 0;
4925 }
4926 
4927 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
4928 					struct netdev_hw_addr_list *list)
4929 {
4930 	struct netdev_hw_addr *ha;
4931 	int ret;
4932 
4933 	netdev_hw_addr_list_for_each(ha, list) {
4934 		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
4935 		if (ret)
4936 			return ret;
4937 	}
4938 
4939 	return 0;
4940 }
4941 
4942 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
4943 {
4944 	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
4945 		mvpp2_prs_vid_enable_filtering(port);
4946 	else
4947 		mvpp2_prs_vid_disable_filtering(port);
4948 
4949 	mvpp2_prs_mac_promisc_set(port->priv, port->id,
4950 				  MVPP2_PRS_L2_UNI_CAST, enable);
4951 
4952 	mvpp2_prs_mac_promisc_set(port->priv, port->id,
4953 				  MVPP2_PRS_L2_MULTI_CAST, enable);
4954 }
4955 
4956 static void mvpp2_set_rx_mode(struct net_device *dev)
4957 {
4958 	struct mvpp2_port *port = netdev_priv(dev);
4959 
4960 	/* Clear the whole UC and MC list */
4961 	mvpp2_prs_mac_del_all(port);
4962 
4963 	if (dev->flags & IFF_PROMISC) {
4964 		mvpp2_set_rx_promisc(port, true);
4965 		return;
4966 	}
4967 
4968 	mvpp2_set_rx_promisc(port, false);
4969 
4970 	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
4971 	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
4972 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4973 					  MVPP2_PRS_L2_UNI_CAST, true);
4974 
4975 	if (dev->flags & IFF_ALLMULTI) {
4976 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4977 					  MVPP2_PRS_L2_MULTI_CAST, true);
4978 		return;
4979 	}
4980 
4981 	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
4982 	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
4983 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4984 					  MVPP2_PRS_L2_MULTI_CAST, true);
4985 }
4986 
4987 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
4988 {
4989 	const struct sockaddr *addr = p;
4990 	int err;
4991 
4992 	if (!is_valid_ether_addr(addr->sa_data))
4993 		return -EADDRNOTAVAIL;
4994 
4995 	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
4996 	if (err) {
4997 		/* Reconfigure the parser to accept the original MAC address */
4998 		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
4999 		netdev_err(dev, "failed to change MAC address\n");
5000 	}
5001 	return err;
5002 }
5003 
5004 /* Shut down all the ports, reconfigure the pools as percpu or shared,
5005  * then bring all ports up again.
5006  */
5007 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
5008 {
5009 	bool change_percpu = (percpu != priv->percpu_pools);
5010 	int numbufs = MVPP2_BM_POOLS_NUM, i;
5011 	struct mvpp2_port *port = NULL;
5012 	bool status[MVPP2_MAX_PORTS];
5013 
5014 	for (i = 0; i < priv->port_count; i++) {
5015 		port = priv->port_list[i];
5016 		status[i] = netif_running(port->dev);
5017 		if (status[i])
5018 			mvpp2_stop(port->dev);
5019 	}
5020 
5021 	/* nrxqs is the same for all ports */
5022 	if (priv->percpu_pools)
5023 		numbufs = port->nrxqs * 2;
5024 
5025 	if (change_percpu)
5026 		mvpp2_bm_pool_update_priv_fc(priv, false);
5027 
5028 	for (i = 0; i < numbufs; i++)
5029 		mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
5030 
5031 	devm_kfree(port->dev->dev.parent, priv->bm_pools);
5032 	priv->percpu_pools = percpu;
5033 	mvpp2_bm_init(port->dev->dev.parent, priv);
5034 
5035 	for (i = 0; i < priv->port_count; i++) {
5036 		port = priv->port_list[i];
5037 		if (percpu && port->ntxqs >= num_possible_cpus() * 2)
5038 			xdp_set_features_flag(port->dev,
5039 					      NETDEV_XDP_ACT_BASIC |
5040 					      NETDEV_XDP_ACT_REDIRECT |
5041 					      NETDEV_XDP_ACT_NDO_XMIT);
5042 		else
5043 			xdp_clear_features_flag(port->dev);
5044 
5045 		mvpp2_swf_bm_pool_init(port);
5046 		if (status[i])
5047 			mvpp2_open(port->dev);
5048 	}
5049 
5050 	if (change_percpu)
5051 		mvpp2_bm_pool_update_priv_fc(priv, true);
5052 
5053 	return 0;
5054 }
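
/* Pool accounting in mvpp2_bm_switch_buffers(), by example: in shared mode
 * there are exactly MVPP2_BM_POOLS_NUM pools to destroy, while in per-CPU
 * mode there is one short and one long pool per Rx queue, hence
 * "numbufs = port->nrxqs * 2" (nrxqs being the same on every port, as the
 * comment above notes).
 */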
5055 
5056 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5057 {
5058 	struct mvpp2_port *port = netdev_priv(dev);
5059 	bool running = netif_running(dev);
5060 	struct mvpp2 *priv = port->priv;
5061 	int err;
5062 
5063 	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5064 		netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
5065 			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5066 		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5067 	}
5068 
5069 	if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
5070 		netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
5071 			   mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
5072 		return -EINVAL;
5073 	}
5074 
5075 	if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
5076 		if (priv->percpu_pools) {
5077 			netdev_warn(dev, "mtu %d too high, switching to shared buffers\n", mtu);
5078 			mvpp2_bm_switch_buffers(priv, false);
5079 		}
5080 	} else {
5081 		bool jumbo = false;
5082 		int i;
5083 
5084 		for (i = 0; i < priv->port_count; i++)
5085 			if (priv->port_list[i] != port &&
5086 			    MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
5087 			    MVPP2_BM_LONG_PKT_SIZE) {
5088 				jumbo = true;
5089 				break;
5090 			}
5091 
5092 		/* No port is using jumbo frames */
5093 		if (!jumbo) {
5094 			dev_info(port->dev->dev.parent,
5095 				 "all ports have a low MTU, switching to per-cpu buffers\n");
5096 			mvpp2_bm_switch_buffers(priv, true);
5097 		}
5098 	}
5099 
5100 	if (running)
5101 		mvpp2_stop_dev(port);
5102 
5103 	err = mvpp2_bm_update_mtu(dev, mtu);
5104 	if (err) {
5105 		netdev_err(dev, "failed to change MTU\n");
5106 		/* Reconfigure BM to the original MTU */
5107 		mvpp2_bm_update_mtu(dev, dev->mtu);
5108 	} else {
5109 		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5110 	}
5111 
5112 	if (running) {
5113 		mvpp2_start_dev(port);
5114 		mvpp2_egress_enable(port);
5115 		mvpp2_ingress_enable(port);
5116 	}
5117 
5118 	return err;
5119 }
5120 
5121 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
5122 {
5123 	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
5124 	struct mvpp2 *priv = port->priv;
5125 	int err = -1, i;
5126 
5127 	if (!priv->percpu_pools)
5128 		return err;
5129 
5130 	if (!priv->page_pool[0])
5131 		return -ENOMEM;
5132 
5133 	for (i = 0; i < priv->port_count; i++) {
5134 		port = priv->port_list[i];
5135 		if (port->xdp_prog) {
5136 			dma_dir = DMA_BIDIRECTIONAL;
5137 			break;
5138 		}
5139 	}
5140 
5141 	/* All pools are equal in terms of DMA direction */
5142 	if (priv->page_pool[0]->p.dma_dir != dma_dir)
5143 		err = mvpp2_bm_switch_buffers(priv, true);
5144 
5145 	return err;
5146 }
5147 
5148 static void
5149 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5150 {
5151 	struct mvpp2_port *port = netdev_priv(dev);
5152 	unsigned int start;
5153 	unsigned int cpu;
5154 
5155 	for_each_possible_cpu(cpu) {
5156 		struct mvpp2_pcpu_stats *cpu_stats;
5157 		u64 rx_packets;
5158 		u64 rx_bytes;
5159 		u64 tx_packets;
5160 		u64 tx_bytes;
5161 
5162 		cpu_stats = per_cpu_ptr(port->stats, cpu);
5163 		do {
5164 			start = u64_stats_fetch_begin(&cpu_stats->syncp);
5165 			rx_packets = cpu_stats->rx_packets;
5166 			rx_bytes   = cpu_stats->rx_bytes;
5167 			tx_packets = cpu_stats->tx_packets;
5168 			tx_bytes   = cpu_stats->tx_bytes;
5169 		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
5170 
5171 		stats->rx_packets += rx_packets;
5172 		stats->rx_bytes   += rx_bytes;
5173 		stats->tx_packets += tx_packets;
5174 		stats->tx_bytes   += tx_bytes;
5175 	}
5176 
5177 	stats->rx_errors	= dev->stats.rx_errors;
5178 	stats->rx_dropped	= dev->stats.rx_dropped;
5179 	stats->tx_dropped	= dev->stats.tx_dropped;
5180 }
5181 
5182 static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
5183 {
5184 	struct hwtstamp_config config;
5185 	void __iomem *ptp;
5186 	u32 gcr, int_mask;
5187 
5188 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5189 		return -EFAULT;
5190 
5191 	if (config.tx_type != HWTSTAMP_TX_OFF &&
5192 	    config.tx_type != HWTSTAMP_TX_ON)
5193 		return -ERANGE;
5194 
5195 	ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
5196 
5197 	int_mask = gcr = 0;
5198 	if (config.tx_type != HWTSTAMP_TX_OFF) {
5199 		gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
5200 		int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
5201 			    MVPP22_PTP_INT_MASK_QUEUE0;
5202 	}
5203 
5204 	/* It seems we must also release the TX reset when enabling the TSU */
5205 	if (config.rx_filter != HWTSTAMP_FILTER_NONE)
5206 		gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
5207 		       MVPP22_PTP_GCR_TX_RESET;
5208 
5209 	if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
5210 		mvpp22_tai_start(port->priv->tai);
5211 
5212 	if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
5213 		config.rx_filter = HWTSTAMP_FILTER_ALL;
5214 		mvpp2_modify(ptp + MVPP22_PTP_GCR,
5215 			     MVPP22_PTP_GCR_RX_RESET |
5216 			     MVPP22_PTP_GCR_TX_RESET |
5217 			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
5218 		port->rx_hwtstamp = true;
5219 	} else {
5220 		port->rx_hwtstamp = false;
5221 		mvpp2_modify(ptp + MVPP22_PTP_GCR,
5222 			     MVPP22_PTP_GCR_RX_RESET |
5223 			     MVPP22_PTP_GCR_TX_RESET |
5224 			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
5225 	}
5226 
5227 	mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
5228 		     MVPP22_PTP_INT_MASK_QUEUE1 |
5229 		     MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
5230 
5231 	if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
5232 		mvpp22_tai_stop(port->priv->tai);
5233 
5234 	port->tx_hwtstamp_type = config.tx_type;
5235 
5236 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
5237 		return -EFAULT;
5238 
5239 	return 0;
5240 }
5241 
5242 static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
5243 {
5244 	struct hwtstamp_config config;
5245 
5246 	memset(&config, 0, sizeof(config));
5247 
5248 	config.tx_type = port->tx_hwtstamp_type;
5249 	config.rx_filter = port->rx_hwtstamp ?
5250 		HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
5251 
5252 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
5253 		return -EFAULT;
5254 
5255 	return 0;
5256 }
5257 
5258 static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
5259 				     struct ethtool_ts_info *info)
5260 {
5261 	struct mvpp2_port *port = netdev_priv(dev);
5262 
5263 	if (!port->hwtstamp)
5264 		return -EOPNOTSUPP;
5265 
5266 	info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
5267 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5268 				SOF_TIMESTAMPING_RX_SOFTWARE |
5269 				SOF_TIMESTAMPING_SOFTWARE |
5270 				SOF_TIMESTAMPING_TX_HARDWARE |
5271 				SOF_TIMESTAMPING_RX_HARDWARE |
5272 				SOF_TIMESTAMPING_RAW_HARDWARE;
5273 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
5274 			 BIT(HWTSTAMP_TX_ON);
5275 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
5276 			   BIT(HWTSTAMP_FILTER_ALL);
5277 
5278 	return 0;
5279 }
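
/* Editorial note (usage sketch): the capabilities reported above are what
 * "ethtool -T <iface>" prints; on a port with hwtstamp support this shows
 * the PHC index taken from the TAI block plus the TX_OFF/TX_ON tx types
 * and the NONE/ALL rx filter set.
 */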
5280 
5281 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5282 {
5283 	struct mvpp2_port *port = netdev_priv(dev);
5284 
5285 	switch (cmd) {
5286 	case SIOCSHWTSTAMP:
5287 		if (port->hwtstamp)
5288 			return mvpp2_set_ts_config(port, ifr);
5289 		break;
5290 
5291 	case SIOCGHWTSTAMP:
5292 		if (port->hwtstamp)
5293 			return mvpp2_get_ts_config(port, ifr);
5294 		break;
5295 	}
5296 
5297 	if (!port->phylink)
5298 		return -ENOTSUPP;
5299 
5300 	return phylink_mii_ioctl(port->phylink, ifr, cmd);
5301 }
5302 
5303 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
5304 {
5305 	struct mvpp2_port *port = netdev_priv(dev);
5306 	int ret;
5307 
5308 	ret = mvpp2_prs_vid_entry_add(port, vid);
5309 	if (ret)
5310 		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
5311 			   MVPP2_PRS_VLAN_FILT_MAX - 1);
5312 	return ret;
5313 }
5314 
5315 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
5316 {
5317 	struct mvpp2_port *port = netdev_priv(dev);
5318 
5319 	mvpp2_prs_vid_entry_remove(port, vid);
5320 	return 0;
5321 }
5322 
5323 static int mvpp2_set_features(struct net_device *dev,
5324 			      netdev_features_t features)
5325 {
5326 	netdev_features_t changed = dev->features ^ features;
5327 	struct mvpp2_port *port = netdev_priv(dev);
5328 
5329 	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
5330 		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
5331 			mvpp2_prs_vid_enable_filtering(port);
5332 		} else {
5333 			/* Invalidate all registered VID filters for this
5334 			 * port
5335 			 */
5336 			mvpp2_prs_vid_remove_all(port);
5337 
5338 			mvpp2_prs_vid_disable_filtering(port);
5339 		}
5340 	}
5341 
5342 	if (changed & NETIF_F_RXHASH) {
5343 		if (features & NETIF_F_RXHASH)
5344 			mvpp22_port_rss_enable(port);
5345 		else
5346 			mvpp22_port_rss_disable(port);
5347 	}
5348 
5349 	return 0;
5350 }
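
/* Editorial note (usage sketch): these feature toggles arrive here via
 * ethtool, e.g. "ethtool -K eth0 rx-vlan-filter off" or
 * "ethtool -K eth0 rxhash on"; the interface name is an example.
 */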
5351 
5352 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
5353 {
5354 	struct bpf_prog *prog = bpf->prog, *old_prog;
5355 	bool running = netif_running(port->dev);
5356 	bool reset = !prog != !port->xdp_prog;
5357 
5358 	if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
5359 		NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
5360 		return -EOPNOTSUPP;
5361 	}
5362 
5363 	if (!port->priv->percpu_pools) {
5364 		NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
5365 		return -EOPNOTSUPP;
5366 	}
5367 
5368 	if (port->ntxqs < num_possible_cpus() * 2) {
5369 		NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
5370 		return -EOPNOTSUPP;
5371 	}
5372 
5373 	/* Device is up and bpf is added/removed, must set up the RX queues */
5374 	if (running && reset)
5375 		mvpp2_stop(port->dev);
5376 
5377 	old_prog = xchg(&port->xdp_prog, prog);
5378 	if (old_prog)
5379 		bpf_prog_put(old_prog);
5380 
5381 	/* bpf is just replaced, RXQ and MTU are already set up */
5382 	if (!reset)
5383 		return 0;
5384 
5385 	/* device was up, restore the link */
5386 	if (running)
5387 		mvpp2_open(port->dev);
5388 
5389 	/* Check Page Pool DMA Direction */
5390 	mvpp2_check_pagepool_dma(port);
5391 
5392 	return 0;
5393 }
5394 
5395 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5396 {
5397 	struct mvpp2_port *port = netdev_priv(dev);
5398 
5399 	switch (xdp->command) {
5400 	case XDP_SETUP_PROG:
5401 		return mvpp2_xdp_setup(port, xdp);
5402 	default:
5403 		return -EINVAL;
5404 	}
5405 }
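
/* Editorial sketch (userspace, illustrative): a program reaches
 * mvpp2_xdp_setup() via the netlink XDP attach path, e.g. with libbpf.
 * The object path "prog.bpf.o" and interface "eth0" are assumptions.
 *
 *	#include <bpf/libbpf.h>
 *	#include <net/if.h>
 *
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	struct bpf_program *prog;
 *
 *	bpf_object__load(obj);
 *	prog = bpf_object__next_program(obj, NULL);
 *	bpf_xdp_attach(if_nametoindex("eth0"),
 *		       bpf_program__fd(prog), 0, NULL);
 *
 * Note the constraints checked above: per-CPU pools, an MTU within
 * MVPP2_MAX_RX_BUF_SIZE, and two TX queues per CPU for XDP_TX.
 */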
5406 
5407 /* Ethtool methods */
5408 
5409 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
5410 {
5411 	struct mvpp2_port *port = netdev_priv(dev);
5412 
5413 	if (!port->phylink)
5414 		return -ENOTSUPP;
5415 
5416 	return phylink_ethtool_nway_reset(port->phylink);
5417 }
5418 
5419 /* Set interrupt coalescing for ethtool */
5420 static int
5421 mvpp2_ethtool_set_coalesce(struct net_device *dev,
5422 			   struct ethtool_coalesce *c,
5423 			   struct kernel_ethtool_coalesce *kernel_coal,
5424 			   struct netlink_ext_ack *extack)
5425 {
5426 	struct mvpp2_port *port = netdev_priv(dev);
5427 	int queue;
5428 
5429 	for (queue = 0; queue < port->nrxqs; queue++) {
5430 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5431 
5432 		rxq->time_coal = c->rx_coalesce_usecs;
5433 		rxq->pkts_coal = c->rx_max_coalesced_frames;
5434 		mvpp2_rx_pkts_coal_set(port, rxq);
5435 		mvpp2_rx_time_coal_set(port, rxq);
5436 	}
5437 
5438 	if (port->has_tx_irqs) {
5439 		port->tx_time_coal = c->tx_coalesce_usecs;
5440 		mvpp2_tx_time_coal_set(port);
5441 	}
5442 
5443 	for (queue = 0; queue < port->ntxqs; queue++) {
5444 		struct mvpp2_tx_queue *txq = port->txqs[queue];
5445 
5446 		txq->done_pkts_coal = c->tx_max_coalesced_frames;
5447 
5448 		if (port->has_tx_irqs)
5449 			mvpp2_tx_pkts_coal_set(port, txq);
5450 	}
5451 
5452 	return 0;
5453 }
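
/* Editorial note (usage sketch): the knobs above map onto
 * "ethtool -C eth0 rx-usecs N rx-frames N tx-usecs N tx-frames N"
 * (the interface name is an example); tx-usecs only takes effect when
 * the port has TX IRQs.
 */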
5454 
5455 /* Get interrupt coalescing for ethtool */
5456 static int
5457 mvpp2_ethtool_get_coalesce(struct net_device *dev,
5458 			   struct ethtool_coalesce *c,
5459 			   struct kernel_ethtool_coalesce *kernel_coal,
5460 			   struct netlink_ext_ack *extack)
5461 {
5462 	struct mvpp2_port *port = netdev_priv(dev);
5463 
5464 	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
5465 	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5466 	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5467 	c->tx_coalesce_usecs       = port->tx_time_coal;
5468 	return 0;
5469 }
5470 
5471 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5472 				      struct ethtool_drvinfo *drvinfo)
5473 {
5474 	strscpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5475 		sizeof(drvinfo->driver));
5476 	strscpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5477 		sizeof(drvinfo->version));
5478 	strscpy(drvinfo->bus_info, dev_name(&dev->dev),
5479 		sizeof(drvinfo->bus_info));
5480 }
5481 
5482 static void
5483 mvpp2_ethtool_get_ringparam(struct net_device *dev,
5484 			    struct ethtool_ringparam *ring,
5485 			    struct kernel_ethtool_ringparam *kernel_ring,
5486 			    struct netlink_ext_ack *extack)
5487 {
5488 	struct mvpp2_port *port = netdev_priv(dev);
5489 
5490 	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
5491 	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
5492 	ring->rx_pending = port->rx_ring_size;
5493 	ring->tx_pending = port->tx_ring_size;
5494 }
5495 
5496 static int
5497 mvpp2_ethtool_set_ringparam(struct net_device *dev,
5498 			    struct ethtool_ringparam *ring,
5499 			    struct kernel_ethtool_ringparam *kernel_ring,
5500 			    struct netlink_ext_ack *extack)
5501 {
5502 	struct mvpp2_port *port = netdev_priv(dev);
5503 	u16 prev_rx_ring_size = port->rx_ring_size;
5504 	u16 prev_tx_ring_size = port->tx_ring_size;
5505 	int err;
5506 
5507 	err = mvpp2_check_ringparam_valid(dev, ring);
5508 	if (err)
5509 		return err;
5510 
5511 	if (!netif_running(dev)) {
5512 		port->rx_ring_size = ring->rx_pending;
5513 		port->tx_ring_size = ring->tx_pending;
5514 		return 0;
5515 	}
5516 
5517 	/* The interface is running, so we have to force a
5518 	 * reallocation of the queues
5519 	 */
5520 	mvpp2_stop_dev(port);
5521 	mvpp2_cleanup_rxqs(port);
5522 	mvpp2_cleanup_txqs(port);
5523 
5524 	port->rx_ring_size = ring->rx_pending;
5525 	port->tx_ring_size = ring->tx_pending;
5526 
5527 	err = mvpp2_setup_rxqs(port);
5528 	if (err) {
5529 		/* Reallocate Rx queues with the original ring size */
5530 		port->rx_ring_size = prev_rx_ring_size;
5531 		ring->rx_pending = prev_rx_ring_size;
5532 		err = mvpp2_setup_rxqs(port);
5533 		if (err)
5534 			goto err_out;
5535 	}
5536 	err = mvpp2_setup_txqs(port);
5537 	if (err) {
5538 		/* Reallocate Tx queues with the original ring size */
5539 		port->tx_ring_size = prev_tx_ring_size;
5540 		ring->tx_pending = prev_tx_ring_size;
5541 		err = mvpp2_setup_txqs(port);
5542 		if (err)
5543 			goto err_clean_rxqs;
5544 	}
5545 
5546 	mvpp2_start_dev(port);
5547 	mvpp2_egress_enable(port);
5548 	mvpp2_ingress_enable(port);
5549 
5550 	return 0;
5551 
5552 err_clean_rxqs:
5553 	mvpp2_cleanup_rxqs(port);
5554 err_out:
5555 	netdev_err(dev, "failed to change ring parameters");
5556 	return err;
5557 }
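
/* Editorial note (usage sketch): ring resizing is driven by
 * "ethtool -G eth0 rx N tx N" (name is an example). On a running
 * interface this path tears the queues down and rebuilds them, falling
 * back to the previous sizes if reallocation fails.
 */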
5558 
5559 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
5560 					  struct ethtool_pauseparam *pause)
5561 {
5562 	struct mvpp2_port *port = netdev_priv(dev);
5563 
5564 	if (!port->phylink)
5565 		return;
5566 
5567 	phylink_ethtool_get_pauseparam(port->phylink, pause);
5568 }
5569 
5570 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
5571 					 struct ethtool_pauseparam *pause)
5572 {
5573 	struct mvpp2_port *port = netdev_priv(dev);
5574 
5575 	if (!port->phylink)
5576 		return -ENOTSUPP;
5577 
5578 	return phylink_ethtool_set_pauseparam(port->phylink, pause);
5579 }
5580 
5581 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
5582 					    struct ethtool_link_ksettings *cmd)
5583 {
5584 	struct mvpp2_port *port = netdev_priv(dev);
5585 
5586 	if (!port->phylink)
5587 		return -ENOTSUPP;
5588 
5589 	return phylink_ethtool_ksettings_get(port->phylink, cmd);
5590 }
5591 
5592 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
5593 					    const struct ethtool_link_ksettings *cmd)
5594 {
5595 	struct mvpp2_port *port = netdev_priv(dev);
5596 
5597 	if (!port->phylink)
5598 		return -ENOTSUPP;
5599 
5600 	return phylink_ethtool_ksettings_set(port->phylink, cmd);
5601 }
5602 
5603 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
5604 				   struct ethtool_rxnfc *info, u32 *rules)
5605 {
5606 	struct mvpp2_port *port = netdev_priv(dev);
5607 	int ret = 0, i, loc = 0;
5608 
5609 	if (!mvpp22_rss_is_supported(port))
5610 		return -EOPNOTSUPP;
5611 
5612 	switch (info->cmd) {
5613 	case ETHTOOL_GRXFH:
5614 		ret = mvpp2_ethtool_rxfh_get(port, info);
5615 		break;
5616 	case ETHTOOL_GRXRINGS:
5617 		info->data = port->nrxqs;
5618 		break;
5619 	case ETHTOOL_GRXCLSRLCNT:
5620 		info->rule_cnt = port->n_rfs_rules;
5621 		break;
5622 	case ETHTOOL_GRXCLSRULE:
5623 		ret = mvpp2_ethtool_cls_rule_get(port, info);
5624 		break;
5625 	case ETHTOOL_GRXCLSRLALL:
5626 		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
5627 			if (loc == info->rule_cnt) {
5628 				ret = -EMSGSIZE;
5629 				break;
5630 			}
5631 
5632 			if (port->rfs_rules[i])
5633 				rules[loc++] = i;
5634 		}
5635 		break;
5636 	default:
5637 		return -ENOTSUPP;
5638 	}
5639 
5640 	return ret;
5641 }
5642 
5643 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
5644 				   struct ethtool_rxnfc *info)
5645 {
5646 	struct mvpp2_port *port = netdev_priv(dev);
5647 	int ret = 0;
5648 
5649 	if (!mvpp22_rss_is_supported(port))
5650 		return -EOPNOTSUPP;
5651 
5652 	switch (info->cmd) {
5653 	case ETHTOOL_SRXFH:
5654 		ret = mvpp2_ethtool_rxfh_set(port, info);
5655 		break;
5656 	case ETHTOOL_SRXCLSRLINS:
5657 		ret = mvpp2_ethtool_cls_rule_ins(port, info);
5658 		break;
5659 	case ETHTOOL_SRXCLSRLDEL:
5660 		ret = mvpp2_ethtool_cls_rule_del(port, info);
5661 		break;
5662 	default:
5663 		return -EOPNOTSUPP;
5664 	}
5665 	return ret;
5666 }
5667 
5668 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
5669 {
5670 	struct mvpp2_port *port = netdev_priv(dev);
5671 
5672 	return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
5673 }
5674 
5675 static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
5676 				  struct ethtool_rxfh_param *rxfh)
5677 {
5678 	struct mvpp2_port *port = netdev_priv(dev);
5679 	u32 rss_context = rxfh->rss_context;
5680 	int ret = 0;
5681 
5682 	if (!mvpp22_rss_is_supported(port))
5683 		return -EOPNOTSUPP;
5684 	if (rss_context >= MVPP22_N_RSS_TABLES)
5685 		return -EINVAL;
5686 
5687 	rxfh->hfunc = ETH_RSS_HASH_CRC32;
5688 
5689 	if (rxfh->indir)
5690 		ret = mvpp22_port_rss_ctx_indir_get(port, rss_context,
5691 						    rxfh->indir);
5692 
5693 	return ret;
5694 }
5695 
5696 static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
5697 				  struct ethtool_rxfh_param *rxfh,
5698 				  struct netlink_ext_ack *extack)
5699 {
5700 	struct mvpp2_port *port = netdev_priv(dev);
5701 	u32 *rss_context = &rxfh->rss_context;
5702 	int ret = 0;
5703 
5704 	if (!mvpp22_rss_is_supported(port))
5705 		return -EOPNOTSUPP;
5706 
5707 	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
5708 	    rxfh->hfunc != ETH_RSS_HASH_CRC32)
5709 		return -EOPNOTSUPP;
5710 
5711 	if (rxfh->key)
5712 		return -EOPNOTSUPP;
5713 
5714 	if (*rss_context && rxfh->rss_delete)
5715 		return mvpp22_port_rss_ctx_delete(port, *rss_context);
5716 
5717 	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
5718 		ret = mvpp22_port_rss_ctx_create(port, rss_context);
5719 		if (ret)
5720 			return ret;
5721 	}
5722 
5723 	if (rxfh->indir)
5724 		ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context,
5725 						    rxfh->indir);
5726 
5727 	return ret;
5728 }
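
/* Editorial note (usage sketch): RSS contexts are managed with ethtool,
 * e.g. "ethtool -X eth0 context new" to allocate one and
 * "ethtool -X eth0 context 1 delete" to free it. Only the CRC32 hash
 * function and indirection-table updates are supported here; hash keys
 * are rejected.
 */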
5729 
5730 /* Device ops */
5731 
5732 static const struct net_device_ops mvpp2_netdev_ops = {
5733 	.ndo_open		= mvpp2_open,
5734 	.ndo_stop		= mvpp2_stop,
5735 	.ndo_start_xmit		= mvpp2_tx,
5736 	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
5737 	.ndo_set_mac_address	= mvpp2_set_mac_address,
5738 	.ndo_change_mtu		= mvpp2_change_mtu,
5739 	.ndo_get_stats64	= mvpp2_get_stats64,
5740 	.ndo_eth_ioctl		= mvpp2_ioctl,
5741 	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
5742 	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
5743 	.ndo_set_features	= mvpp2_set_features,
5744 	.ndo_bpf		= mvpp2_xdp,
5745 	.ndo_xdp_xmit		= mvpp2_xdp_xmit,
5746 };
5747 
5748 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5749 	.cap_rss_ctx_supported	= true,
5750 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5751 				     ETHTOOL_COALESCE_MAX_FRAMES,
5752 	.nway_reset		= mvpp2_ethtool_nway_reset,
5753 	.get_link		= ethtool_op_get_link,
5754 	.get_ts_info		= mvpp2_ethtool_get_ts_info,
5755 	.set_coalesce		= mvpp2_ethtool_set_coalesce,
5756 	.get_coalesce		= mvpp2_ethtool_get_coalesce,
5757 	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
5758 	.get_ringparam		= mvpp2_ethtool_get_ringparam,
5759 	.set_ringparam		= mvpp2_ethtool_set_ringparam,
5760 	.get_strings		= mvpp2_ethtool_get_strings,
5761 	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
5762 	.get_sset_count		= mvpp2_ethtool_get_sset_count,
5763 	.get_pauseparam		= mvpp2_ethtool_get_pause_param,
5764 	.set_pauseparam		= mvpp2_ethtool_set_pause_param,
5765 	.get_link_ksettings	= mvpp2_ethtool_get_link_ksettings,
5766 	.set_link_ksettings	= mvpp2_ethtool_set_link_ksettings,
5767 	.get_rxnfc		= mvpp2_ethtool_get_rxnfc,
5768 	.set_rxnfc		= mvpp2_ethtool_set_rxnfc,
5769 	.get_rxfh_indir_size	= mvpp2_ethtool_get_rxfh_indir_size,
5770 	.get_rxfh		= mvpp2_ethtool_get_rxfh,
5771 	.set_rxfh		= mvpp2_ethtool_set_rxfh,
5772 };
5773 
5774 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
5775  * had a single IRQ defined per port.
5776  */
5777 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
5778 					   struct device_node *port_node)
5779 {
5780 	struct mvpp2_queue_vector *v = &port->qvecs[0];
5781 
5782 	v->first_rxq = 0;
5783 	v->nrxqs = port->nrxqs;
5784 	v->type = MVPP2_QUEUE_VECTOR_SHARED;
5785 	v->sw_thread_id = 0;
5786 	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
5787 	v->port = port;
5788 	v->irq = irq_of_parse_and_map(port_node, 0);
5789 	if (v->irq <= 0)
5790 		return -EINVAL;
5791 	netif_napi_add(port->dev, &v->napi, mvpp2_poll);
5792 
5793 	port->nqvecs = 1;
5794 
5795 	return 0;
5796 }
5797 
5798 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
5799 					  struct device_node *port_node)
5800 {
5801 	struct mvpp2 *priv = port->priv;
5802 	struct mvpp2_queue_vector *v;
5803 	int i, ret;
5804 
5805 	switch (queue_mode) {
5806 	case MVPP2_QDIST_SINGLE_MODE:
5807 		port->nqvecs = priv->nthreads + 1;
5808 		break;
5809 	case MVPP2_QDIST_MULTI_MODE:
5810 		port->nqvecs = priv->nthreads;
5811 		break;
5812 	}
5813 
5814 	for (i = 0; i < port->nqvecs; i++) {
5815 		char irqname[16];
5816 
5817 		v = port->qvecs + i;
5818 
5819 		v->port = port;
5820 		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
5821 		v->sw_thread_id = i;
5822 		v->sw_thread_mask = BIT(i);
5823 
5824 		if (port->flags & MVPP2_F_DT_COMPAT)
5825 			snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5826 		else
5827 			snprintf(irqname, sizeof(irqname), "hif%d", i);
5828 
5829 		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
5830 			v->first_rxq = i;
5831 			v->nrxqs = 1;
5832 		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
5833 			   i == (port->nqvecs - 1)) {
5834 			v->first_rxq = 0;
5835 			v->nrxqs = port->nrxqs;
5836 			v->type = MVPP2_QUEUE_VECTOR_SHARED;
5837 
5838 			if (port->flags & MVPP2_F_DT_COMPAT)
5839 				strscpy(irqname, "rx-shared", sizeof(irqname));
5840 		}
5841 
5842 		if (port_node)
5843 			v->irq = of_irq_get_byname(port_node, irqname);
5844 		else
5845 			v->irq = fwnode_irq_get(port->fwnode, i);
5846 		if (v->irq <= 0) {
5847 			ret = -EINVAL;
5848 			goto err;
5849 		}
5850 
5851 		netif_napi_add(port->dev, &v->napi, mvpp2_poll);
5852 	}
5853 
5854 	return 0;
5855 
5856 err:
5857 	for (i = 0; i < port->nqvecs; i++)
5858 		irq_dispose_mapping(port->qvecs[i].irq);
5859 	return ret;
5860 }
5861 
5862 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
5863 				    struct device_node *port_node)
5864 {
5865 	if (port->has_tx_irqs)
5866 		return mvpp2_multi_queue_vectors_init(port, port_node);
5867 	else
5868 		return mvpp2_simple_queue_vectors_init(port, port_node);
5869 }
5870 
5871 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
5872 {
5873 	int i;
5874 
5875 	for (i = 0; i < port->nqvecs; i++)
5876 		irq_dispose_mapping(port->qvecs[i].irq);
5877 }
5878 
5879 /* Configure Rx queue group interrupt for this port */
5880 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
5881 {
5882 	struct mvpp2 *priv = port->priv;
5883 	u32 val;
5884 	int i;
5885 
5886 	if (priv->hw_version == MVPP21) {
5887 		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
5888 			    port->nrxqs);
5889 		return;
5890 	}
5891 
5892 	/* Handle the more complicated PPv2.2 and PPv2.3 case */
5893 	for (i = 0; i < port->nqvecs; i++) {
5894 		struct mvpp2_queue_vector *qv = port->qvecs + i;
5895 
5896 		if (!qv->nrxqs)
5897 			continue;
5898 
5899 		val = qv->sw_thread_id;
5900 		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
5901 		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
5902 
5903 		val = qv->first_rxq;
5904 		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
5905 		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
5906 	}
5907 }
5908 
5909 /* Initialize port HW */
5910 static int mvpp2_port_init(struct mvpp2_port *port)
5911 {
5912 	struct device *dev = port->dev->dev.parent;
5913 	struct mvpp2 *priv = port->priv;
5914 	struct mvpp2_txq_pcpu *txq_pcpu;
5915 	unsigned int thread;
5916 	int queue, err, val;
5917 
5918 	/* Checks for hardware constraints */
5919 	if (port->first_rxq + port->nrxqs >
5920 	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
5921 		return -EINVAL;
5922 
5923 	if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
5924 		return -EINVAL;
5925 
5926 	/* Disable port */
5927 	mvpp2_egress_disable(port);
5928 	mvpp2_port_disable(port);
5929 
5930 	if (mvpp2_is_xlg(port->phy_interface)) {
5931 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5932 		val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5933 		val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5934 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5935 	} else {
5936 		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5937 		val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5938 		val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5939 		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5940 	}
5941 
5942 	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
5943 
5944 	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
5945 				  GFP_KERNEL);
5946 	if (!port->txqs)
5947 		return -ENOMEM;
5948 
5949 	/* Associate physical Tx queues with this port and initialize them.
5950 	 * The mapping is predefined.
5951 	 */
5952 	for (queue = 0; queue < port->ntxqs; queue++) {
5953 		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5954 		struct mvpp2_tx_queue *txq;
5955 
5956 		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5957 		if (!txq) {
5958 			err = -ENOMEM;
5959 			goto err_free_percpu;
5960 		}
5961 
5962 		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5963 		if (!txq->pcpu) {
5964 			err = -ENOMEM;
5965 			goto err_free_percpu;
5966 		}
5967 
5968 		txq->id = queue_phy_id;
5969 		txq->log_id = queue;
5970 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5971 		for (thread = 0; thread < priv->nthreads; thread++) {
5972 			txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
5973 			txq_pcpu->thread = thread;
5974 		}
5975 
5976 		port->txqs[queue] = txq;
5977 	}
5978 
5979 	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
5980 				  GFP_KERNEL);
5981 	if (!port->rxqs) {
5982 		err = -ENOMEM;
5983 		goto err_free_percpu;
5984 	}
5985 
5986 	/* Allocate and initialize Rx queue for this port */
5987 	for (queue = 0; queue < port->nrxqs; queue++) {
5988 		struct mvpp2_rx_queue *rxq;
5989 
5990 		/* Map physical Rx queue to port's logical Rx queue */
5991 		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5992 		if (!rxq) {
5993 			err = -ENOMEM;
5994 			goto err_free_percpu;
5995 		}
5996 		/* Map this Rx queue to a physical queue */
5997 		rxq->id = port->first_rxq + queue;
5998 		rxq->port = port->id;
5999 		rxq->logic_rxq = queue;
6000 
6001 		port->rxqs[queue] = rxq;
6002 	}
6003 
6004 	mvpp2_rx_irqs_setup(port);
6005 
6006 	/* Create Rx descriptor rings */
6007 	for (queue = 0; queue < port->nrxqs; queue++) {
6008 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6009 
6010 		rxq->size = port->rx_ring_size;
6011 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6012 		rxq->time_coal = MVPP2_RX_COAL_USEC;
6013 	}
6014 
6015 	mvpp2_ingress_disable(port);
6016 
6017 	/* Port default configuration */
6018 	mvpp2_defaults_set(port);
6019 
6020 	/* Port's classifier configuration */
6021 	mvpp2_cls_oversize_rxq_set(port);
6022 	mvpp2_cls_port_config(port);
6023 
6024 	if (mvpp22_rss_is_supported(port))
6025 		mvpp22_port_rss_init(port);
6026 
6027 	/* Provide an initial Rx packet size */
6028 	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6029 
6030 	/* Initialize BM pools for software forwarding (swf) */
6031 	err = mvpp2_swf_bm_pool_init(port);
6032 	if (err)
6033 		goto err_free_percpu;
6034 
6035 	/* Clear all port stats */
6036 	mvpp2_read_stats(port);
6037 	memset(port->ethtool_stats, 0,
6038 	       MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
6039 
6040 	return 0;
6041 
6042 err_free_percpu:
6043 	for (queue = 0; queue < port->ntxqs; queue++) {
6044 		if (!port->txqs[queue])
6045 			continue;
6046 		free_percpu(port->txqs[queue]->pcpu);
6047 	}
6048 	return err;
6049 }
6050 
6051 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
6052 					   unsigned long *flags)
6053 {
6054 	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
6055 			  "tx-cpu3" };
6056 	int i;
6057 
6058 	for (i = 0; i < 5; i++)
6059 		if (of_property_match_string(port_node, "interrupt-names",
6060 					     irqs[i]) < 0)
6061 			return false;
6062 
6063 	*flags |= MVPP2_F_DT_COMPAT;
6064 	return true;
6065 }
6066 
6067 /* Check if the port DT description has the required Tx interrupts:
6068  * - PPv2.1: there are no such interrupts.
6069  * - PPv2.2 and PPv2.3:
6070  *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
6071  *   - The new ones have: "hifX" with X in [0..8]
6072  *
6073  * All those variants are supported to keep backward compatibility.
6074  */
6075 static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
6076 				struct device_node *port_node,
6077 				unsigned long *flags)
6078 {
6079 	char name[5];
6080 	int i;
6081 
6082 	/* ACPI */
6083 	if (!port_node)
6084 		return true;
6085 
6086 	if (priv->hw_version == MVPP21)
6087 		return false;
6088 
6089 	if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
6090 		return true;
6091 
6092 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
6093 		snprintf(name, 5, "hif%d", i);
6094 		if (of_property_match_string(port_node, "interrupt-names",
6095 					     name) < 0)
6096 			return false;
6097 	}
6098 
6099 	return true;
6100 }
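
/* Editorial sketch (illustrative DT fragment, not a binding excerpt):
 * a PPv2.2 port using the current convention would carry something like
 *
 *	interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4",
 *			  "hif5", "hif6", "hif7", "hif8", "link";
 *
 * while the legacy DTs use "rx-shared" plus "tx-cpu0".."tx-cpu3". The
 * optional "link" interrupt is fetched separately at probe time.
 */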
6101 
6102 static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
6103 				    struct fwnode_handle *fwnode,
6104 				    char **mac_from)
6105 {
6106 	struct mvpp2_port *port = netdev_priv(dev);
6107 	char hw_mac_addr[ETH_ALEN] = {0};
6108 	char fw_mac_addr[ETH_ALEN];
6109 	int ret;
6110 
6111 	if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
6112 		*mac_from = "firmware node";
6113 		eth_hw_addr_set(dev, fw_mac_addr);
6114 		return 0;
6115 	}
6116 
6117 	if (priv->hw_version == MVPP21) {
6118 		mvpp21_get_mac_address(port, hw_mac_addr);
6119 		if (is_valid_ether_addr(hw_mac_addr)) {
6120 			*mac_from = "hardware";
6121 			eth_hw_addr_set(dev, hw_mac_addr);
6122 			return 0;
6123 		}
6124 	}
6125 
6126 	/* Only valid on OF enabled platforms */
6127 	ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr);
6128 	if (ret == -EPROBE_DEFER)
6129 		return ret;
6130 	if (!ret) {
6131 		*mac_from = "nvmem cell";
6132 		eth_hw_addr_set(dev, fw_mac_addr);
6133 		return 0;
6134 	}
6135 
6136 	*mac_from = "random";
6137 	eth_hw_addr_random(dev);
6138 
6139 	return 0;
6140 }
6141 
6142 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
6143 {
6144 	return container_of(config, struct mvpp2_port, phylink_config);
6145 }
6146 
6147 static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs)
6148 {
6149 	return container_of(pcs, struct mvpp2_port, pcs_xlg);
6150 }
6151 
6152 static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
6153 {
6154 	return container_of(pcs, struct mvpp2_port, pcs_gmac);
6155 }
6156 
6157 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
6158 				    struct phylink_link_state *state)
6159 {
6160 	struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
6161 	u32 val;
6162 
6163 	if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER)
6164 		state->speed = SPEED_5000;
6165 	else
6166 		state->speed = SPEED_10000;
6167 	state->duplex = 1;
6168 	state->an_complete = 1;
6169 
6170 	val = readl(port->base + MVPP22_XLG_STATUS);
6171 	state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
6172 
6173 	state->pause = 0;
6174 	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6175 	if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
6176 		state->pause |= MLO_PAUSE_TX;
6177 	if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
6178 		state->pause |= MLO_PAUSE_RX;
6179 }
6180 
6181 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
6182 				phy_interface_t interface,
6183 				const unsigned long *advertising,
6184 				bool permit_pause_to_mac)
6185 {
6186 	return 0;
6187 }
6188 
6189 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
6190 	.pcs_get_state = mvpp2_xlg_pcs_get_state,
6191 	.pcs_config = mvpp2_xlg_pcs_config,
6192 };
6193 
6194 static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
6195 				   unsigned long *supported,
6196 				   const struct phylink_link_state *state)
6197 {
6198 	/* When in 802.3z mode, we must have AN enabled:
6199 	 * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
6200 	 * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
6201 	 */
6202 	if (phy_interface_mode_is_8023z(state->interface) &&
6203 	    !phylink_test(state->advertising, Autoneg))
6204 		return -EINVAL;
6205 
6206 	return 0;
6207 }
6208 
6209 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
6210 				     struct phylink_link_state *state)
6211 {
6212 	struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6213 	u32 val;
6214 
6215 	val = readl(port->base + MVPP2_GMAC_STATUS0);
6216 
6217 	state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
6218 	state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
6219 	state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
6220 
6221 	switch (port->phy_interface) {
6222 	case PHY_INTERFACE_MODE_1000BASEX:
6223 		state->speed = SPEED_1000;
6224 		break;
6225 	case PHY_INTERFACE_MODE_2500BASEX:
6226 		state->speed = SPEED_2500;
6227 		break;
6228 	default:
6229 		if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
6230 			state->speed = SPEED_1000;
6231 		else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
6232 			state->speed = SPEED_100;
6233 		else
6234 			state->speed = SPEED_10;
6235 	}
6236 
6237 	state->pause = 0;
6238 	if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
6239 		state->pause |= MLO_PAUSE_RX;
6240 	if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
6241 		state->pause |= MLO_PAUSE_TX;
6242 }
6243 
6244 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
6245 				 phy_interface_t interface,
6246 				 const unsigned long *advertising,
6247 				 bool permit_pause_to_mac)
6248 {
6249 	struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6250 	u32 mask, val, an, old_an, changed;
6251 
6252 	mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
6253 	       MVPP2_GMAC_IN_BAND_AUTONEG |
6254 	       MVPP2_GMAC_AN_SPEED_EN |
6255 	       MVPP2_GMAC_FLOW_CTRL_AUTONEG |
6256 	       MVPP2_GMAC_AN_DUPLEX_EN;
6257 
6258 	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
6259 		mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
6260 			MVPP2_GMAC_CONFIG_GMII_SPEED |
6261 			MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6262 		val = MVPP2_GMAC_IN_BAND_AUTONEG;
6263 
6264 		if (interface == PHY_INTERFACE_MODE_SGMII) {
6265 			/* SGMII mode receives the speed and duplex from PHY */
6266 			val |= MVPP2_GMAC_AN_SPEED_EN |
6267 			       MVPP2_GMAC_AN_DUPLEX_EN;
6268 		} else {
6269 			/* 802.3z mode has fixed speed and duplex */
6270 			val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
6271 			       MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6272 
6273 			/* The FLOW_CTRL_AUTONEG bit selects whether the GMAC
6274 			 * pause modes are controlled automatically by the
6275 			 * hardware, or manually via MVPP22_GMAC_CTRL_4_REG.
6276 			 */
6277 			if (permit_pause_to_mac)
6278 				val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
6279 
6280 			/* Configure advertisement bits */
6281 			mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
6282 			if (phylink_test(advertising, Pause))
6283 				val |= MVPP2_GMAC_FC_ADV_EN;
6284 			if (phylink_test(advertising, Asym_Pause))
6285 				val |= MVPP2_GMAC_FC_ADV_ASM_EN;
6286 		}
6287 	} else {
6288 		val = 0;
6289 	}
6290 
6291 	old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6292 	an = (an & ~mask) | val;
6293 	changed = an ^ old_an;
6294 	if (changed)
6295 		writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6296 
6297 	/* We are only interested in the advertisement bits changing; a
	 * positive return tells phylink that the advertisement changed and
	 * that in-band AN needs to be restarted.
	 */
6298 	return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
6299 }
6300 
6301 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
6302 {
6303 	struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6304 	u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6305 
6306 	writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
6307 	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6308 	writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
6309 	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6310 }
6311 
6312 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
6313 	.pcs_validate = mvpp2_gmac_pcs_validate,
6314 	.pcs_get_state = mvpp2_gmac_pcs_get_state,
6315 	.pcs_config = mvpp2_gmac_pcs_config,
6316 	.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
6317 };
6318 
6319 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
6320 			     const struct phylink_link_state *state)
6321 {
6322 	u32 val;
6323 
6324 	mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6325 		     MVPP22_XLG_CTRL0_MAC_RESET_DIS,
6326 		     MVPP22_XLG_CTRL0_MAC_RESET_DIS);
6327 	mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
6328 		     MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
6329 		     MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
6330 		     MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
6331 		     MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
6332 
6333 	/* Wait for reset to deassert */
6334 	do {
6335 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6336 	} while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
6337 }
6338 
6339 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
6340 			      const struct phylink_link_state *state)
6341 {
6342 	u32 old_ctrl0, ctrl0;
6343 	u32 old_ctrl2, ctrl2;
6344 	u32 old_ctrl4, ctrl4;
6345 
6346 	old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
6347 	old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
6348 	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
6349 
6350 	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
6351 	ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK);
6352 
6353 	/* Configure port type */
6354 	if (phy_interface_mode_is_8023z(state->interface)) {
6355 		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
6356 		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6357 		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6358 			 MVPP22_CTRL4_DP_CLK_SEL |
6359 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6360 	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6361 		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
6362 		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6363 		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6364 			 MVPP22_CTRL4_DP_CLK_SEL |
6365 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6366 	} else if (phy_interface_mode_is_rgmii(state->interface)) {
6367 		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
6368 		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
6369 			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
6370 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6371 	}
6372 
6373 	/* Configure negotiation style */
6374 	if (!phylink_autoneg_inband(mode)) {
6375 		/* Phy or fixed speed - no in-band AN, nothing to do, leave the
6376 		 * configured speed, duplex and flow control as-is.
6377 		 */
6378 	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6379 		/* SGMII in-band mode receives the speed and duplex from
6380 		 * the PHY. Flow control information is not received. */
6381 	} else if (phy_interface_mode_is_8023z(state->interface)) {
6382 		/* 1000BASE-X and 2500BASE-X ports cannot negotiate speed,
6383 		 * nor can they negotiate duplex: they always operate at a
6384 		 * fixed speed of 1000/2500Mbps in full duplex, so force
6385 		 * 1000/2500 speed and full duplex here.
6386 		 */
6387 		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
6388 	}
6389 
6390 	if (old_ctrl0 != ctrl0)
6391 		writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
6392 	if (old_ctrl2 != ctrl2)
6393 		writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
6394 	if (old_ctrl4 != ctrl4)
6395 		writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
6396 }
6397 
6398 static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config,
6399 					    phy_interface_t interface)
6400 {
6401 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6402 
6403 	/* Select the appropriate PCS operations depending on the
6404 	 * configured interface mode. We will only switch to a mode
6405 	 * that has already passed the validate() checks.
6406 	 */
6407 	if (mvpp2_is_xlg(interface))
6408 		return &port->pcs_xlg;
6409 	else
6410 		return &port->pcs_gmac;
6411 }
6412 
6413 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
6414 			     phy_interface_t interface)
6415 {
6416 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6417 
6418 	/* Check for invalid configuration */
6419 	if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
6420 		netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
6421 		return -EINVAL;
6422 	}
6423 
6424 	if (port->phy_interface != interface ||
6425 	    phylink_autoneg_inband(mode)) {
6426 		/* Force the link down when changing the interface or if in
6427 		 * in-band mode to ensure we do not change the configuration
6428 		 * while the hardware is indicating link is up. We force both
6429 		 * XLG and GMAC down to ensure that they're both in a known
6430 		 * state.
6431 		 */
6432 		mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6433 			     MVPP2_GMAC_FORCE_LINK_PASS |
6434 			     MVPP2_GMAC_FORCE_LINK_DOWN,
6435 			     MVPP2_GMAC_FORCE_LINK_DOWN);
6436 
6437 		if (mvpp2_port_supports_xlg(port))
6438 			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6439 				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6440 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
6441 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
6442 	}
6443 
6444 	/* Make sure the port is disabled when reconfiguring the mode */
6445 	mvpp2_port_disable(port);
6446 
6447 	if (port->phy_interface != interface) {
6448 		/* Place GMAC into reset */
6449 		mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6450 			     MVPP2_GMAC_PORT_RESET_MASK,
6451 			     MVPP2_GMAC_PORT_RESET_MASK);
6452 
6453 		if (port->priv->hw_version >= MVPP22) {
6454 			mvpp22_gop_mask_irq(port);
6455 
6456 			phy_power_off(port->comphy);
6457 
6458 			/* Reconfigure the serdes lanes */
6459 			mvpp22_mode_reconfigure(port, interface);
6460 		}
6461 	}
6462 
6463 	return 0;
6464 }
6465 
6466 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6467 			     const struct phylink_link_state *state)
6468 {
6469 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6470 
6471 	/* mac (re)configuration */
6472 	if (mvpp2_is_xlg(state->interface))
6473 		mvpp2_xlg_config(port, mode, state);
6474 	else if (phy_interface_mode_is_rgmii(state->interface) ||
6475 		 phy_interface_mode_is_8023z(state->interface) ||
6476 		 state->interface == PHY_INTERFACE_MODE_SGMII)
6477 		mvpp2_gmac_config(port, mode, state);
6478 
6479 	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
6480 		mvpp2_port_loopback_set(port, state);
6481 }
6482 
6483 static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
6484 			    phy_interface_t interface)
6485 {
6486 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6487 
6488 	if (port->priv->hw_version >= MVPP22 &&
6489 	    port->phy_interface != interface) {
6490 		port->phy_interface = interface;
6491 
6492 		/* Unmask interrupts */
6493 		mvpp22_gop_unmask_irq(port);
6494 	}
6495 
6496 	if (!mvpp2_is_xlg(interface)) {
6497 		/* Release GMAC reset and wait */
6498 		mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6499 			     MVPP2_GMAC_PORT_RESET_MASK, 0);
6500 
6501 		while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
6502 		       MVPP2_GMAC_PORT_RESET_MASK)
6503 			continue;
6504 	}
6505 
6506 	mvpp2_port_enable(port);
6507 
6508 	/* Allow the link to come up if in in-band mode, otherwise the
6509 	 * link is forced via mac_link_down()/mac_link_up()
6510 	 */
6511 	if (phylink_autoneg_inband(mode)) {
6512 		if (mvpp2_is_xlg(interface))
6513 			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6514 				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6515 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
6516 		else
6517 			mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6518 				     MVPP2_GMAC_FORCE_LINK_PASS |
6519 				     MVPP2_GMAC_FORCE_LINK_DOWN, 0);
6520 	}
6521 
6522 	return 0;
6523 }
6524 
6525 static void mvpp2_mac_link_up(struct phylink_config *config,
6526 			      struct phy_device *phy,
6527 			      unsigned int mode, phy_interface_t interface,
6528 			      int speed, int duplex,
6529 			      bool tx_pause, bool rx_pause)
6530 {
6531 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6532 	u32 val;
6533 	int i;
6534 
6535 	if (mvpp2_is_xlg(interface)) {
6536 		if (!phylink_autoneg_inband(mode)) {
6537 			val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6538 			if (tx_pause)
6539 				val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
6540 			if (rx_pause)
6541 				val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
6542 
6543 			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6544 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
6545 				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6546 				     MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
6547 				     MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
6548 		}
6549 	} else {
6550 		if (!phylink_autoneg_inband(mode)) {
6551 			val = MVPP2_GMAC_FORCE_LINK_PASS;
6552 
6553 			if (speed == SPEED_1000 || speed == SPEED_2500)
6554 				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6555 			else if (speed == SPEED_100)
6556 				val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6557 
6558 			if (duplex == DUPLEX_FULL)
6559 				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6560 
6561 			mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6562 				     MVPP2_GMAC_FORCE_LINK_DOWN |
6563 				     MVPP2_GMAC_FORCE_LINK_PASS |
6564 				     MVPP2_GMAC_CONFIG_MII_SPEED |
6565 				     MVPP2_GMAC_CONFIG_GMII_SPEED |
6566 				     MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
6567 		}
6568 
6569 		/* We can always update the flow control enable bits;
6570 		 * these will only be effective if flow control AN
6571 		 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
6572 		 */
6573 		val = 0;
6574 		if (tx_pause)
6575 			val |= MVPP22_CTRL4_TX_FC_EN;
6576 		if (rx_pause)
6577 			val |= MVPP22_CTRL4_RX_FC_EN;
6578 
6579 		mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
6580 			     MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
6581 			     val);
6582 	}
6583 
6584 	if (port->priv->global_tx_fc) {
6585 		port->tx_fc = tx_pause;
6586 		if (tx_pause)
6587 			mvpp2_rxq_enable_fc(port);
6588 		else
6589 			mvpp2_rxq_disable_fc(port);
6590 		if (port->priv->percpu_pools) {
6591 			for (i = 0; i < port->nrxqs; i++)
6592 				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause);
6593 		} else {
6594 			mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause);
6595 			mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause);
6596 		}
6597 		if (port->priv->hw_version == MVPP23)
6598 			mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause);
6599 	}
6600 
6601 	mvpp2_port_enable(port);
6602 
6603 	mvpp2_egress_enable(port);
6604 	mvpp2_ingress_enable(port);
6605 	netif_tx_wake_all_queues(port->dev);
6606 }
6607 
6608 static void mvpp2_mac_link_down(struct phylink_config *config,
6609 				unsigned int mode, phy_interface_t interface)
6610 {
6611 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6612 	u32 val;
6613 
6614 	if (!phylink_autoneg_inband(mode)) {
6615 		if (mvpp2_is_xlg(interface)) {
6616 			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6617 			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6618 			val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
6619 			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
6620 		} else {
6621 			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6622 			val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
6623 			val |= MVPP2_GMAC_FORCE_LINK_DOWN;
6624 			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6625 		}
6626 	}
6627 
6628 	netif_tx_stop_all_queues(port->dev);
6629 	mvpp2_egress_disable(port);
6630 	mvpp2_ingress_disable(port);
6631 
6632 	mvpp2_port_disable(port);
6633 }
6634 
6635 static const struct phylink_mac_ops mvpp2_phylink_ops = {
6636 	.mac_select_pcs = mvpp2_select_pcs,
6637 	.mac_prepare = mvpp2_mac_prepare,
6638 	.mac_config = mvpp2_mac_config,
6639 	.mac_finish = mvpp2_mac_finish,
6640 	.mac_link_up = mvpp2_mac_link_up,
6641 	.mac_link_down = mvpp2_mac_link_down,
6642 };
6643 
6644 /* Work-around for ACPI */
6645 static void mvpp2_acpi_start(struct mvpp2_port *port)
6646 {
6647 	/* Phylink isn't used for ACPI yet, so the MAC has to be
6648 	 * configured manually when the interface is started. This will
6649 	 * be removed as soon as phylink ACPI support lands.
6650 	 */
6651 	struct phylink_link_state state = {
6652 		.interface = port->phy_interface,
6653 	};
6654 	struct phylink_pcs *pcs;
6655 
6656 	pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface);
6657 
6658 	mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND,
6659 			  port->phy_interface);
6660 	mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
6661 	pcs->ops->pcs_config(pcs, PHYLINK_PCS_NEG_INBAND_ENABLED,
6662 			     port->phy_interface, state.advertising,
6663 			     false);
6664 	mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
6665 			 port->phy_interface);
6666 	mvpp2_mac_link_up(&port->phylink_config, NULL,
6667 			  MLO_AN_INBAND, port->phy_interface,
6668 			  SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
6669 }
6670 
6671 /* In order to ensure backward compatibility for ACPI, check whether the
6672  * port firmware node contains the description necessary to use phylink.
6673  */
6674 static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode)
6675 {
6676 	if (!is_acpi_node(port_fwnode))
6677 		return false;
6678 
6679 	return (!fwnode_property_present(port_fwnode, "phy-handle") &&
6680 		!fwnode_property_present(port_fwnode, "managed") &&
6681 		!fwnode_get_named_child_node(port_fwnode, "fixed-link"));
6682 }
6683 
6684 /* Ports initialization */
6685 static int mvpp2_port_probe(struct platform_device *pdev,
6686 			    struct fwnode_handle *port_fwnode,
6687 			    struct mvpp2 *priv)
6688 {
6689 	struct phy *comphy = NULL;
6690 	struct mvpp2_port *port;
6691 	struct mvpp2_port_pcpu *port_pcpu;
6692 	struct device_node *port_node = to_of_node(port_fwnode);
6693 	netdev_features_t features;
6694 	struct net_device *dev;
6695 	struct phylink *phylink;
6696 	char *mac_from = "";
6697 	unsigned int ntxqs, nrxqs, thread;
6698 	unsigned long flags = 0;
6699 	bool has_tx_irqs;
6700 	u32 id;
6701 	int phy_mode;
6702 	int err, i;
6703 
6704 	has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
6705 	if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
6706 		dev_err(&pdev->dev,
6707 			"not enough IRQs to support multi queue mode\n");
6708 		return -EINVAL;
6709 	}
6710 
6711 	ntxqs = MVPP2_MAX_TXQ;
6712 	nrxqs = mvpp2_get_nrxqs(priv);
6713 
6714 	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
6715 	if (!dev)
6716 		return -ENOMEM;
6717 
6718 	phy_mode = fwnode_get_phy_mode(port_fwnode);
6719 	if (phy_mode < 0) {
6720 		dev_err(&pdev->dev, "incorrect phy mode\n");
6721 		err = phy_mode;
6722 		goto err_free_netdev;
6723 	}
6724 
6725 	/*
6726 	 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
6727 	 * Existing usage of 10GBASE-KR is not correct; no backplane
6728 	 * negotiation is done, and this driver does not actually support
6729 	 * 10GBASE-KR.
6730 	 */
6731 	if (phy_mode == PHY_INTERFACE_MODE_10GKR)
6732 		phy_mode = PHY_INTERFACE_MODE_10GBASER;
6733 
6734 	if (port_node) {
6735 		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
6736 		if (IS_ERR(comphy)) {
6737 			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
6738 				err = -EPROBE_DEFER;
6739 				goto err_free_netdev;
6740 			}
6741 			comphy = NULL;
6742 		}
6743 	}
6744 
6745 	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
6746 		err = -EINVAL;
6747 		dev_err(&pdev->dev, "missing port-id value\n");
6748 		goto err_free_netdev;
6749 	}
6750 
6751 	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
6752 	dev->watchdog_timeo = 5 * HZ;
6753 	dev->netdev_ops = &mvpp2_netdev_ops;
6754 	dev->ethtool_ops = &mvpp2_eth_tool_ops;
6755 
6756 	port = netdev_priv(dev);
6757 	port->dev = dev;
6758 	port->fwnode = port_fwnode;
6759 	port->ntxqs = ntxqs;
6760 	port->nrxqs = nrxqs;
6761 	port->priv = priv;
6762 	port->has_tx_irqs = has_tx_irqs;
6763 	port->flags = flags;
6764 
6765 	err = mvpp2_queue_vectors_init(port, port_node);
6766 	if (err)
6767 		goto err_free_netdev;
6768 
6769 	if (port_node)
6770 		port->port_irq = of_irq_get_byname(port_node, "link");
6771 	else
6772 		port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
6773 	if (port->port_irq == -EPROBE_DEFER) {
6774 		err = -EPROBE_DEFER;
6775 		goto err_deinit_qvecs;
6776 	}
6777 	if (port->port_irq <= 0)
6778 		/* the link irq is optional */
6779 		port->port_irq = 0;
6780 
6781 	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
6782 		port->flags |= MVPP2_F_LOOPBACK;
6783 
6784 	port->id = id;
6785 	if (priv->hw_version == MVPP21)
6786 		port->first_rxq = port->id * port->nrxqs;
6787 	else
6788 		port->first_rxq = port->id * priv->max_port_rxqs;
6789 
6790 	port->of_node = port_node;
6791 	port->phy_interface = phy_mode;
6792 	port->comphy = comphy;
6793 
6794 	if (priv->hw_version == MVPP21) {
6795 		port->base = devm_platform_ioremap_resource(pdev, 2 + id);
6796 		if (IS_ERR(port->base)) {
6797 			err = PTR_ERR(port->base);
6798 			goto err_free_irq;
6799 		}
6800 
6801 		port->stats_base = port->priv->lms_base +
6802 				   MVPP21_MIB_COUNTERS_OFFSET +
6803 				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
6804 	} else {
6805 		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
6806 					     &port->gop_id)) {
6807 			err = -EINVAL;
6808 			dev_err(&pdev->dev, "missing gop-port-id value\n");
6809 			goto err_deinit_qvecs;
6810 		}
6811 
6812 		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
6813 		port->stats_base = port->priv->iface_base +
6814 				   MVPP22_MIB_COUNTERS_OFFSET +
6815 				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
6816 
6817 		/* We may want a property to describe whether we should use
6818 		 * MAC hardware timestamping.
6819 		 */
6820 		if (priv->tai)
6821 			port->hwtstamp = true;
6822 	}
6823 
6824 	/* Alloc per-cpu and ethtool stats */
6825 	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6826 	if (!port->stats) {
6827 		err = -ENOMEM;
6828 		goto err_free_irq;
6829 	}
6830 
6831 	port->ethtool_stats = devm_kcalloc(&pdev->dev,
6832 					   MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
6833 					   sizeof(u64), GFP_KERNEL);
6834 	if (!port->ethtool_stats) {
6835 		err = -ENOMEM;
6836 		goto err_free_stats;
6837 	}
6838 
6839 	mutex_init(&port->gather_stats_lock);
6840 	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
6841 
6842 	err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
6843 	if (err < 0)
6844 		goto err_free_stats;
6845 
6846 	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
6847 	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
6848 	SET_NETDEV_DEV(dev, &pdev->dev);
6849 
6850 	err = mvpp2_port_init(port);
6851 	if (err < 0) {
6852 		dev_err(&pdev->dev, "failed to init port %d\n", id);
6853 		goto err_free_stats;
6854 	}
6855 
6856 	mvpp2_port_periodic_xon_disable(port);
6857 
6858 	mvpp2_mac_reset_assert(port);
6859 	mvpp22_pcs_reset_assert(port);
6860 
6861 	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6862 	if (!port->pcpu) {
6863 		err = -ENOMEM;
6864 		goto err_free_txq_pcpu;
6865 	}
6866 
6867 	if (!port->has_tx_irqs) {
6868 		for (thread = 0; thread < priv->nthreads; thread++) {
6869 			port_pcpu = per_cpu_ptr(port->pcpu, thread);
6870 
6871 			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6872 				     HRTIMER_MODE_REL_PINNED_SOFT);
6873 			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6874 			port_pcpu->timer_scheduled = false;
6875 			port_pcpu->dev = dev;
6876 		}
6877 	}
6878 
6879 	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6880 		   NETIF_F_TSO;
6881 	dev->features = features | NETIF_F_RXCSUM;
6882 	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
6883 			    NETIF_F_HW_VLAN_CTAG_FILTER;
6884 
6885 	if (mvpp22_rss_is_supported(port)) {
6886 		dev->hw_features |= NETIF_F_RXHASH;
6887 		dev->features |= NETIF_F_NTUPLE;
6888 	}
6889 
6890 	if (!port->priv->percpu_pools)
6891 		mvpp2_set_hw_csum(port, port->pool_long->id);
6892 	else if (port->ntxqs >= num_possible_cpus() * 2)
6893 		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
6894 				    NETDEV_XDP_ACT_REDIRECT |
6895 				    NETDEV_XDP_ACT_NDO_XMIT;
6896 
6897 	dev->vlan_features |= features;
6898 	netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
6899 
6900 	dev->priv_flags |= IFF_UNICAST_FLT;
6901 
6902 	/* MTU range: 68 - 9704 */
6903 	dev->min_mtu = ETH_MIN_MTU;
6904 	/* 9704 == 9728 - 20 and rounding to 8 */
6905 	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
6906 	device_set_node(&dev->dev, port_fwnode);
6907 
6908 	port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
6909 	port->pcs_gmac.neg_mode = true;
6910 	port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
6911 	port->pcs_xlg.neg_mode = true;
6912 
6913 	if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
6914 		port->phylink_config.dev = &dev->dev;
6915 		port->phylink_config.type = PHYLINK_NETDEV;
6916 		port->phylink_config.mac_capabilities =
6917 			MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
6918 
6919 		if (port->priv->global_tx_fc)
6920 			port->phylink_config.mac_capabilities |=
6921 				MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
6922 
6923 		if (mvpp2_port_supports_xlg(port)) {
6924 			/* If a COMPHY is present, we can support any of
6925 			 * the serdes modes and switch between them.
6926 			 */
6927 			if (comphy) {
6928 				__set_bit(PHY_INTERFACE_MODE_5GBASER,
6929 					  port->phylink_config.supported_interfaces);
6930 				__set_bit(PHY_INTERFACE_MODE_10GBASER,
6931 					  port->phylink_config.supported_interfaces);
6932 				__set_bit(PHY_INTERFACE_MODE_XAUI,
6933 					  port->phylink_config.supported_interfaces);
6934 			} else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) {
6935 				__set_bit(PHY_INTERFACE_MODE_5GBASER,
6936 					  port->phylink_config.supported_interfaces);
6937 			} else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) {
6938 				__set_bit(PHY_INTERFACE_MODE_10GBASER,
6939 					  port->phylink_config.supported_interfaces);
6940 			} else if (phy_mode == PHY_INTERFACE_MODE_XAUI) {
6941 				__set_bit(PHY_INTERFACE_MODE_XAUI,
6942 					  port->phylink_config.supported_interfaces);
6943 			}
6944 
6945 			if (comphy)
6946 				port->phylink_config.mac_capabilities |=
6947 					MAC_10000FD | MAC_5000FD;
6948 			else if (phy_mode == PHY_INTERFACE_MODE_5GBASER)
6949 				port->phylink_config.mac_capabilities |=
6950 					MAC_5000FD;
6951 			else
6952 				port->phylink_config.mac_capabilities |=
6953 					MAC_10000FD;
6954 		}
6955 
6956 		if (mvpp2_port_supports_rgmii(port)) {
6957 			phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
6958 			__set_bit(PHY_INTERFACE_MODE_MII,
6959 				  port->phylink_config.supported_interfaces);
6960 		}
6961 
6962 		if (comphy) {
6963 			/* If a COMPHY is present, we can support any of the
6964 			 * serdes modes and switch between them.
6965 			 */
6966 			__set_bit(PHY_INTERFACE_MODE_SGMII,
6967 				  port->phylink_config.supported_interfaces);
6968 			__set_bit(PHY_INTERFACE_MODE_1000BASEX,
6969 				  port->phylink_config.supported_interfaces);
6970 			__set_bit(PHY_INTERFACE_MODE_2500BASEX,
6971 				  port->phylink_config.supported_interfaces);
6972 		} else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
6973 			/* No COMPHY, with only 2500BASE-X mode supported */
6974 			__set_bit(PHY_INTERFACE_MODE_2500BASEX,
6975 				  port->phylink_config.supported_interfaces);
6976 		} else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
6977 			   phy_mode == PHY_INTERFACE_MODE_SGMII) {
6978 			/* No COMPHY, we can switch between 1000BASE-X and SGMII
6979 			 */
6980 			__set_bit(PHY_INTERFACE_MODE_1000BASEX,
6981 				  port->phylink_config.supported_interfaces);
6982 			__set_bit(PHY_INTERFACE_MODE_SGMII,
6983 				  port->phylink_config.supported_interfaces);
6984 		}
6985 
6986 		phylink = phylink_create(&port->phylink_config, port_fwnode,
6987 					 phy_mode, &mvpp2_phylink_ops);
6988 		if (IS_ERR(phylink)) {
6989 			err = PTR_ERR(phylink);
6990 			goto err_free_port_pcpu;
6991 		}
6992 		port->phylink = phylink;
6993 	} else {
6994 		dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
6995 		port->phylink = NULL;
6996 	}
6997 
6998 	/* Cycle the comphy to power it down, saving 270mW per port;
6999 	 * don't worry if powering it back up fails. Once the comphy
7000 	 * driver does this itself, this code can be removed.
7001 	 */
7002 	if (port->comphy) {
7003 		err = mvpp22_comphy_init(port, port->phy_interface);
7004 		if (err == 0)
7005 			phy_power_off(port->comphy);
7006 	}
7007 
7008 	err = register_netdev(dev);
7009 	if (err < 0) {
7010 		dev_err(&pdev->dev, "failed to register netdev\n");
7011 		goto err_phylink;
7012 	}
7013 	netdev_info(dev, "Using %s MAC address %pM\n", mac_from, dev->dev_addr);
7014 
7015 	priv->port_list[priv->port_count++] = port;
7016 
7017 	return 0;
7018 
7019 err_phylink:
7020 	if (port->phylink)
7021 		phylink_destroy(port->phylink);
7022 err_free_port_pcpu:
7023 	free_percpu(port->pcpu);
7024 err_free_txq_pcpu:
7025 	for (i = 0; i < port->ntxqs; i++)
7026 		free_percpu(port->txqs[i]->pcpu);
7027 err_free_stats:
7028 	free_percpu(port->stats);
7029 err_free_irq:
7030 	if (port->port_irq)
7031 		irq_dispose_mapping(port->port_irq);
7032 err_deinit_qvecs:
7033 	mvpp2_queue_vectors_deinit(port);
7034 err_free_netdev:
7035 	free_netdev(dev);
7036 	return err;
7037 }
7038 
7039 /* Ports removal routine */
7040 static void mvpp2_port_remove(struct mvpp2_port *port)
7041 {
7042 	int i;
7043 
7044 	unregister_netdev(port->dev);
7045 	if (port->phylink)
7046 		phylink_destroy(port->phylink);
7047 	free_percpu(port->pcpu);
7048 	free_percpu(port->stats);
7049 	for (i = 0; i < port->ntxqs; i++)
7050 		free_percpu(port->txqs[i]->pcpu);
7051 	mvpp2_queue_vectors_deinit(port);
7052 	if (port->port_irq)
7053 		irq_dispose_mapping(port->port_irq);
7054 	free_netdev(port->dev);
7055 }
7056 
7057 /* Initialize decoding windows */
7058 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
7059 				    struct mvpp2 *priv)
7060 {
7061 	u32 win_enable;
7062 	int i;
7063 
7064 	for (i = 0; i < 6; i++) {
7065 		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
7066 		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
7067 
7068 		if (i < 4)
7069 			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
7070 	}
7071 
7072 	win_enable = 0;
7073 
7074 	for (i = 0; i < dram->num_cs; i++) {
7075 		const struct mbus_dram_window *cs = dram->cs + i;
7076 
7077 		mvpp2_write(priv, MVPP2_WIN_BASE(i),
7078 			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
7079 			    dram->mbus_dram_target_id);
7080 
7081 		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
7082 			    (cs->size - 1) & 0xffff0000);
7083 
7084 		win_enable |= (1 << i);
7085 	}
7086 
7087 	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
7088 }
7089 
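
/* Worked example of the window encoding above (illustrative only; the
 * chip-select values are hypothetical, not from a real DRAM setup).
 * For a window with base 0x40000000, size 256MB, mbus_attr 0x1e and
 * target id 0:
 *   WIN_BASE = (0x40000000 & 0xffff0000) | (0x1e << 8) | 0 = 0x40001e00
 *   WIN_SIZE = (0x10000000 - 1) & 0xffff0000 = 0x0fff0000
 */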
7090 /* Initialize Rx FIFO's */
7091 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
7092 {
7093 	int port;
7094 
7095 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
7096 		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
7097 			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7098 		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
7099 			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
7100 	}
7101 
7102 	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7103 		    MVPP2_RX_FIFO_PORT_MIN_PKT);
7104 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7105 }
7106 
7107 static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
7108 {
7109 	int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);
7110 
7111 	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
7112 	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
7113 }
7114 
7115 /* Initialize RX FIFO's: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
7116  * A fixed 4kB of space must be assigned to the loopback port.
7117  * Redistribute the remaining available 44kB among all active ports.
7118  * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, which
7119  * is capable of a 2.5G SGMII link.
7120  */
7121 static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
7122 {
7123 	int remaining_ports_count;
7124 	unsigned long port_map;
7125 	int size_remainder;
7126 	int port, size;
7127 
7128 	/* The loopback port requires a fixed 4kB of FIFO space. */
7129 	mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
7130 			      MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7131 	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
7132 
7133 	/* Set RX FIFO size to 0 for inactive ports. */
7134 	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
7135 		mvpp22_rx_fifo_set_hw(priv, port, 0);
7136 
7137 	/* Assign remaining RX FIFO space among all active ports. */
7138 	size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
7139 	remaining_ports_count = hweight_long(port_map);
7140 
7141 	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
7142 		if (remaining_ports_count == 1)
7143 			size = size_remainder;
7144 		else if (port == 0)
7145 			size = max(size_remainder / remaining_ports_count,
7146 				   MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
7147 		else if (port == 1)
7148 			size = max(size_remainder / remaining_ports_count,
7149 				   MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
7150 		else
7151 			size = size_remainder / remaining_ports_count;
7152 
7153 		size_remainder -= size;
7154 		remaining_ports_count--;
7155 
7156 		mvpp22_rx_fifo_set_hw(priv, port, size);
7157 	}
7158 
7159 	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7160 		    MVPP2_RX_FIFO_PORT_MIN_PKT);
7161 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7162 }
7163 
7164 /* Configure Rx FIFO Flow control thresholds */
7165 static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
7166 {
7167 	int port, val;
7168 
7169 	/* Port 0: maximum speed 10Gb/s,
7170 	 *	   RX FIFO threshold of 9kB required by spec
7171 	 * Port 1: maximum speed 5Gb/s,
7172 	 *	   RX FIFO threshold of 4kB required by spec
7173 	 * Port 2: maximum speed 1Gb/s,
7174 	 *	   RX FIFO threshold of 2kB required by spec
7175 	 */
7176 
7177 	/* Without loopback port */
7178 	for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
7179 		if (port == 0) {
7180 			val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7181 				<< MVPP2_RX_FC_TRSH_OFFS;
7182 			val &= MVPP2_RX_FC_TRSH_MASK;
7183 			mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7184 		} else if (port == 1) {
7185 			val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7186 				<< MVPP2_RX_FC_TRSH_OFFS;
7187 			val &= MVPP2_RX_FC_TRSH_MASK;
7188 			mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7189 		} else {
7190 			val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7191 				<< MVPP2_RX_FC_TRSH_OFFS;
7192 			val &= MVPP2_RX_FC_TRSH_MASK;
7193 			mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7194 		}
7195 	}
7196 }
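
/* Worked example of the threshold encoding above (illustrative only, and
 * assuming MVPP2_RX_FC_TRSH_UNIT is 256B and MVPP2_RX_FC_TRSH_OFFS is 16;
 * check mvpp2.h for the authoritative values). For port 0 and its 9kB
 * threshold:
 *   val = (9216 / 256) << 16 = 36 << 16 = 0x240000
 * which already fits in MVPP2_RX_FC_TRSH_MASK, so the masking is a no-op.
 */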
7197 
7198 /* Enable/disable Rx FIFO Flow control for a given port */
7199 void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
7200 {
7201 	int val;
7202 
7203 	val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
7204 
7205 	if (en)
7206 		val |= MVPP2_RX_FC_EN;
7207 	else
7208 		val &= ~MVPP2_RX_FC_EN;
7209 
7210 	mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7211 }
7212 
7213 static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
7214 {
7215 	int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
7216 
7217 	mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
7218 	mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
7219 }
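
/* Worked example (illustrative only, and assuming MVPP2_TX_FIFO_THRESHOLD()
 * evaluates to size_in_kB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN with a 256B
 * minimum; check mvpp2.h for the authoritative definition): a 10kB TX FIFO
 * would then get a threshold of 10 * 1024 - 256 = 9984 bytes.
 */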
7220 
7221 /* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
7222  * A fixed 1kB of space must be assigned to the loopback port.
7223  * Redistribute the remaining available 18kB among all active ports.
7224  * The 10G interface should use 10kB (the maximum possible size per
7225  * single port).
7226  */
7227 static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
7228 {
7229 	int remaining_ports_count;
7230 	unsigned long port_map;
7231 	int size_remainder;
7232 	int port, size;
7233 
7234 	/* The loopback port requires a fixed 1kB of FIFO space. */
7235 	mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
7236 			      MVPP22_TX_FIFO_DATA_SIZE_1KB);
7237 	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
7238 
7239 	/* Set TX FIFO size to 0 for inactive ports. */
7240 	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
7241 		mvpp22_tx_fifo_set_hw(priv, port, 0);
7242 
7243 	/* Assign remaining TX FIFO space among all active ports. */
7244 	size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
7245 	remaining_ports_count = hweight_long(port_map);
7246 
7247 	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
7248 		if (remaining_ports_count == 1)
7249 			size = min(size_remainder,
7250 				   MVPP22_TX_FIFO_DATA_SIZE_10KB);
7251 		else if (port == 0)
7252 			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
7253 		else
7254 			size = size_remainder / remaining_ports_count;
7255 
7256 		size_remainder -= size;
7257 		remaining_ports_count--;
7258 
7259 		mvpp22_tx_fifo_set_hw(priv, port, size);
7260 	}
7261 }
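
/* Worked example of the TX split above (illustrative only): assume only
 * ports 0 and 2 are active, with 18kB remaining after the loopback port's
 * fixed 1kB:
 *   port 0: fixed 10kB (the per-port maximum); 8kB and 1 port remain
 *   port 2: the last port takes min(8kB, 10kB) = 8kB
 * Total: 10 + 8 = 18kB. The min() on the last active port keeps a
 * single-port configuration from exceeding the 10kB per-port limit.
 */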
7262 
7263 static void mvpp2_axi_init(struct mvpp2 *priv)
7264 {
7265 	u32 val, rdval, wrval;
7266 
7267 	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
7268 
7269 	/* AXI Bridge Configuration */
7270 
7271 	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
7272 		<< MVPP22_AXI_ATTR_CACHE_OFFS;
7273 	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7274 		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
7275 
7276 	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
7277 		<< MVPP22_AXI_ATTR_CACHE_OFFS;
7278 	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7279 		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
7280 
7281 	/* BM */
7282 	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
7283 	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
7284 
7285 	/* Descriptors */
7286 	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
7287 	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
7288 	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
7289 	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
7290 
7291 	/* Buffer Data */
7292 	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
7293 	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
7294 
7295 	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
7296 		<< MVPP22_AXI_CODE_CACHE_OFFS;
7297 	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
7298 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
7299 	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
7300 	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
7301 
7302 	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
7303 		<< MVPP22_AXI_CODE_CACHE_OFFS;
7304 	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7305 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
7306 
7307 	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
7308 
7309 	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
7310 		<< MVPP22_AXI_CODE_CACHE_OFFS;
7311 	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7312 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
7313 
7314 	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
7315 }
7316 
7317 /* Initialize network controller common part HW */
7318 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
7319 {
7320 	const struct mbus_dram_target_info *dram_target_info;
7321 	int err, i;
7322 	u32 val;
7323 
7324 	/* MBUS windows configuration */
7325 	dram_target_info = mv_mbus_dram_info();
7326 	if (dram_target_info)
7327 		mvpp2_conf_mbus_windows(dram_target_info, priv);
7328 
7329 	if (priv->hw_version >= MVPP22)
7330 		mvpp2_axi_init(priv);
7331 
7332 	/* Disable HW PHY polling */
7333 	if (priv->hw_version == MVPP21) {
7334 		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7335 		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
7336 		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7337 	} else {
7338 		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7339 		val &= ~MVPP22_SMI_POLLING_EN;
7340 		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7341 	}
7342 
7343 	/* Allocate and initialize aggregated TXQs */
7344 	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
7345 				       sizeof(*priv->aggr_txqs),
7346 				       GFP_KERNEL);
7347 	if (!priv->aggr_txqs)
7348 		return -ENOMEM;
7349 
7350 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7351 		priv->aggr_txqs[i].id = i;
7352 		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
7353 		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
7354 		if (err < 0)
7355 			return err;
7356 	}
7357 
7358 	/* FIFO init */
7359 	if (priv->hw_version == MVPP21) {
7360 		mvpp2_rx_fifo_init(priv);
7361 	} else {
7362 		mvpp22_rx_fifo_init(priv);
7363 		mvpp22_tx_fifo_init(priv);
7364 		if (priv->hw_version == MVPP23)
7365 			mvpp23_rx_fifo_fc_set_tresh(priv);
7366 	}
7367 
7368 	if (priv->hw_version == MVPP21)
7369 		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
7370 		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
7371 
7372 	/* Allow cache snoop when transmitting packets */
7373 	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
7374 
7375 	/* Buffer Manager initialization */
7376 	err = mvpp2_bm_init(&pdev->dev, priv);
7377 	if (err < 0)
7378 		return err;
7379 
7380 	/* Parser default initialization */
7381 	err = mvpp2_prs_default_init(pdev, priv);
7382 	if (err < 0)
7383 		return err;
7384 
7385 	/* Classifier default initialization */
7386 	mvpp2_cls_init(priv);
7387 
7388 	return 0;
7389 }
7390 
7391 static int mvpp2_get_sram(struct platform_device *pdev,
7392 			  struct mvpp2 *priv)
7393 {
7394 	struct resource *res;
7395 	void __iomem *base;
7396 
7397 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
7398 	if (!res) {
7399 		if (has_acpi_companion(&pdev->dev))
7400 			dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
7401 		else
7402 			dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
7403 		return 0;
7404 	}
7405 
7406 	base = devm_ioremap_resource(&pdev->dev, res);
7407 	if (IS_ERR(base))
7408 		return PTR_ERR(base);
7409 
7410 	priv->cm3_base = base;
7411 	return 0;
7412 }
7413 
7414 static int mvpp2_probe(struct platform_device *pdev)
7415 {
7416 	struct fwnode_handle *fwnode = pdev->dev.fwnode;
7417 	struct fwnode_handle *port_fwnode;
7418 	struct mvpp2 *priv;
7419 	struct resource *res;
7420 	void __iomem *base;
7421 	int i, shared;
7422 	int err;
7423 
7424 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
7425 	if (!priv)
7426 		return -ENOMEM;
7427 
7428 	priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);
7429 
7430 	/* Multi-queue mode isn't supported on PPv2.1; fall back to single
7431 	 * mode.
7432 	 */
7433 	if (priv->hw_version == MVPP21)
7434 		queue_mode = MVPP2_QDIST_SINGLE_MODE;
7435 
7436 	base = devm_platform_ioremap_resource(pdev, 0);
7437 	if (IS_ERR(base))
7438 		return PTR_ERR(base);
7439 
7440 	if (priv->hw_version == MVPP21) {
7441 		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
7442 		if (IS_ERR(priv->lms_base))
7443 			return PTR_ERR(priv->lms_base);
7444 	} else {
7445 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7446 		if (!res) {
7447 			dev_err(&pdev->dev, "Invalid resource\n");
7448 			return -EINVAL;
7449 		}
7450 		if (has_acpi_companion(&pdev->dev)) {
7451 			/* If the MDIO memory region is declared in
7452 			 * ACPI, it may already appear as 'in use'
7453 			 * in the OS. Because it overlaps the second
7454 			 * region of the network controller, make
7455 			 * sure it is released before requesting it again.
7456 			 * The mvpp2 driver takes care to avoid
7457 			 * concurrent access to this memory region.
7458 			 */
7459 			release_resource(res);
7460 		}
7461 		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
7462 		if (IS_ERR(priv->iface_base))
7463 			return PTR_ERR(priv->iface_base);
7464 
7465 		/* Map CM3 SRAM */
7466 		err = mvpp2_get_sram(pdev, priv);
7467 		if (err)
7468 			dev_warn(&pdev->dev, "Failed to map CM3 SRAM\n");
7469 
7470 		/* Enable global flow control only if the CM3 SRAM was mapped */
7471 		if (priv->cm3_base)
7472 			priv->global_tx_fc = true;
7473 	}
7474 
7475 	if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
7476 		priv->sysctrl_base =
7477 			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
7478 							"marvell,system-controller");
7479 		if (IS_ERR(priv->sysctrl_base))
7480 			/* The system controller regmap is optional for dt
7481 			 * compatibility reasons. When not provided, the
7482 			 * configuration of the GoP relies on the
7483 			 * firmware/bootloader.
7484 			 */
7485 			priv->sysctrl_base = NULL;
7486 	}
7487 
7488 	if (priv->hw_version >= MVPP22 &&
7489 	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
7490 		priv->percpu_pools = 1;
7491 
7492 	mvpp2_setup_bm_pool();
7493 
7494 
7495 	priv->nthreads = min_t(unsigned int, num_present_cpus(),
7496 			       MVPP2_MAX_THREADS);
7497 
7498 	shared = num_present_cpus() - priv->nthreads;
7499 	if (shared > 0)
7500 		bitmap_set(&priv->lock_map, 0,
7501 			    min_t(int, shared, MVPP2_MAX_THREADS));
7502 
7503 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7504 		u32 addr_space_sz;
7505 
7506 		addr_space_sz = (priv->hw_version == MVPP21 ?
7507 				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
7508 		priv->swth_base[i] = base + i * addr_space_sz;
7509 	}
7510 
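
	/* Worked example (illustrative only, assuming MVPP22_ADDR_SPACE_SZ
	 * is 64kB and MVPP21_ADDR_SPACE_SZ is 0, as defined in mvpp2.h): on
	 * PPv2.2, thread 1 accesses its per-thread register copy at
	 * base + 0x10000, while on PPv2.1 all threads share one window.
	 */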
7511 	if (priv->hw_version == MVPP21)
7512 		priv->max_port_rxqs = 8;
7513 	else
7514 		priv->max_port_rxqs = 32;
7515 
7516 	if (dev_of_node(&pdev->dev)) {
7517 		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
7518 		if (IS_ERR(priv->pp_clk))
7519 			return PTR_ERR(priv->pp_clk);
7520 		err = clk_prepare_enable(priv->pp_clk);
7521 		if (err < 0)
7522 			return err;
7523 
7524 		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
7525 		if (IS_ERR(priv->gop_clk)) {
7526 			err = PTR_ERR(priv->gop_clk);
7527 			goto err_pp_clk;
7528 		}
7529 		err = clk_prepare_enable(priv->gop_clk);
7530 		if (err < 0)
7531 			goto err_pp_clk;
7532 
7533 		if (priv->hw_version >= MVPP22) {
7534 			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
7535 			if (IS_ERR(priv->mg_clk)) {
7536 				err = PTR_ERR(priv->mg_clk);
7537 				goto err_gop_clk;
7538 			}
7539 
7540 			err = clk_prepare_enable(priv->mg_clk);
7541 			if (err < 0)
7542 				goto err_gop_clk;
7543 
7544 			priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk");
7545 			if (IS_ERR(priv->mg_core_clk)) {
7546 				err = PTR_ERR(priv->mg_core_clk);
7547 				goto err_mg_clk;
7548 			}
7549 
7550 			err = clk_prepare_enable(priv->mg_core_clk);
7551 			if (err < 0)
7552 				goto err_mg_clk;
7553 		}
7554 
7555 		priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk");
7556 		if (IS_ERR(priv->axi_clk)) {
7557 			err = PTR_ERR(priv->axi_clk);
7558 			goto err_mg_core_clk;
7559 		}
7560 
7561 		err = clk_prepare_enable(priv->axi_clk);
7562 		if (err < 0)
7563 			goto err_mg_core_clk;
7564 
7565 		/* Get system's tclk rate */
7566 		priv->tclk = clk_get_rate(priv->pp_clk);
7567 	} else {
7568 		err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk);
7569 		if (err) {
7570 			dev_err(&pdev->dev, "missing clock-frequency value\n");
7571 			return err;
7572 		}
7573 	}
7574 
7575 	if (priv->hw_version >= MVPP22) {
7576 		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
7577 		if (err)
7578 			goto err_axi_clk;
7579 		/* Sadly, the BM pools all share the same register to
7580 		 * store the high 32 bits of their address. So they
7581 		 * must all have the same high 32 bits, which forces
7582 		 * us to restrict coherent memory to DMA_BIT_MASK(32).
7583 		 */
7584 		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7585 		if (err)
7586 			goto err_axi_clk;
7587 	}
7588 
7589 	/* Map ports that are active in DT/ACPI. Must be done before the FIFO setup in mvpp2_init() */
7590 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7591 		if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
7592 			priv->port_map |= BIT(i);
7593 	}
7594 
7595 	if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
7596 		priv->hw_version = MVPP23;
7597 
7598 	/* Init mss lock */
7599 	spin_lock_init(&priv->mss_spinlock);
7600 
7601 	/* Initialize network controller */
7602 	err = mvpp2_init(pdev, priv);
7603 	if (err < 0) {
7604 		dev_err(&pdev->dev, "failed to initialize controller\n");
7605 		goto err_axi_clk;
7606 	}
7607 
7608 	err = mvpp22_tai_probe(&pdev->dev, priv);
7609 	if (err < 0)
7610 		goto err_axi_clk;
7611 
7612 	/* Initialize ports */
7613 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7614 		err = mvpp2_port_probe(pdev, port_fwnode, priv);
7615 		if (err < 0)
7616 			goto err_port_probe;
7617 	}
7618 
7619 	if (priv->port_count == 0) {
7620 		dev_err(&pdev->dev, "no ports enabled\n");
7621 		err = -ENODEV;
7622 		goto err_axi_clk;
7623 	}
7624 
7625 	/* Statistics must be gathered regularly because some of them (like
7626 	 * packet counters) are 32-bit registers and can overflow quickly.
7627 	 * At 10Gb/s line rate, a 32-bit byte counter wraps in under 4s and,
7628 	 * with the smallest packets (64B), a packet counter wraps in under
7629 	 * 5 minutes. Hence, use a workqueue to maintain 64-bit counters.
7630 	 */
7631 	snprintf(priv->queue_name, sizeof(priv->queue_name),
7632 		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
7633 		 priv->port_count > 1 ? "+" : "");
7634 	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
7635 	if (!priv->stats_queue) {
7636 		err = -ENOMEM;
7637 		goto err_port_probe;
7638 	}
7639 
7640 	if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
7641 		err = mvpp2_enable_global_fc(priv);
7642 		if (err)
7643 			dev_warn(&pdev->dev, "CM3 firmware 18.09 (or newer) and chip revision B0 required for flow control\n");
7644 	}
7645 
7646 	mvpp2_dbgfs_init(priv, pdev->name);
7647 
7648 	platform_set_drvdata(pdev, priv);
7649 	return 0;
7650 
7651 err_port_probe:
7652 	fwnode_handle_put(port_fwnode);
7653 
7654 	i = 0;
7655 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7656 		if (priv->port_list[i])
7657 			mvpp2_port_remove(priv->port_list[i]);
7658 		i++;
7659 	}
7660 err_axi_clk:
7661 	clk_disable_unprepare(priv->axi_clk);
7662 err_mg_core_clk:
7663 	clk_disable_unprepare(priv->mg_core_clk);
7664 err_mg_clk:
7665 	clk_disable_unprepare(priv->mg_clk);
7666 err_gop_clk:
7667 	clk_disable_unprepare(priv->gop_clk);
7668 err_pp_clk:
7669 	clk_disable_unprepare(priv->pp_clk);
7670 	return err;
7671 }
7672 
7673 static void mvpp2_remove(struct platform_device *pdev)
7674 {
7675 	struct mvpp2 *priv = platform_get_drvdata(pdev);
7676 	struct fwnode_handle *fwnode = pdev->dev.fwnode;
7677 	int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
7678 	struct fwnode_handle *port_fwnode;
7679 
7680 	mvpp2_dbgfs_cleanup(priv);
7681 
7682 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7683 		if (priv->port_list[i]) {
7684 			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
7685 			mvpp2_port_remove(priv->port_list[i]);
7686 		}
7687 		i++;
7688 	}
7689 
7690 	destroy_workqueue(priv->stats_queue);
7691 
7692 	if (priv->percpu_pools)
7693 		poolnum = mvpp2_get_nrxqs(priv) * 2;
7694 
7695 	for (i = 0; i < poolnum; i++) {
7696 		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
7697 
7698 		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
7699 	}
7700 
7701 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7702 		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
7703 
7704 		dma_free_coherent(&pdev->dev,
7705 				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
7706 				  aggr_txq->descs,
7707 				  aggr_txq->descs_dma);
7708 	}
7709 
7710 	if (is_acpi_node(port_fwnode))
7711 		return;
7712 
7713 	clk_disable_unprepare(priv->axi_clk);
7714 	clk_disable_unprepare(priv->mg_core_clk);
7715 	clk_disable_unprepare(priv->mg_clk);
7716 	clk_disable_unprepare(priv->pp_clk);
7717 	clk_disable_unprepare(priv->gop_clk);
7718 }
7719 
7720 static const struct of_device_id mvpp2_match[] = {
7721 	{
7722 		.compatible = "marvell,armada-375-pp2",
7723 		.data = (void *)MVPP21,
7724 	},
7725 	{
7726 		.compatible = "marvell,armada-7k-pp22",
7727 		.data = (void *)MVPP22,
7728 	},
7729 	{ }
7730 };
7731 MODULE_DEVICE_TABLE(of, mvpp2_match);
7732 
7733 #ifdef CONFIG_ACPI
7734 static const struct acpi_device_id mvpp2_acpi_match[] = {
7735 	{ "MRVL0110", MVPP22 },
7736 	{ },
7737 };
7738 MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
7739 #endif
7740 
7741 static struct platform_driver mvpp2_driver = {
7742 	.probe = mvpp2_probe,
7743 	.remove_new = mvpp2_remove,
7744 	.driver = {
7745 		.name = MVPP2_DRIVER_NAME,
7746 		.of_match_table = mvpp2_match,
7747 		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
7748 	},
7749 };
7750 
7751 static int __init mvpp2_driver_init(void)
7752 {
7753 	return platform_driver_register(&mvpp2_driver);
7754 }
7755 module_init(mvpp2_driver_init);
7756 
7757 static void __exit mvpp2_driver_exit(void)
7758 {
7759 	platform_driver_unregister(&mvpp2_driver);
7760 	mvpp2_dbgfs_exit();
7761 }
7762 module_exit(mvpp2_driver_exit);
7763 
7764 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
7765 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
7766 MODULE_LICENSE("GPL v2");
7767