// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
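
/* Illustrative usage (not part of the original source): queue_mode is a
 * read-only (0444) module parameter, so it can only be chosen at load time,
 * e.g.
 *
 *	modprobe mvpp2 queue_mode=0
 *
 * to force single-queue mode instead of the default multi-queue mode.
 */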

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}
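
/* Worked example (illustrative, with assumed numbers): on a system with more
 * CPUs than hardware threads, e.g. 16 CPUs and nthreads == 8, CPUs 0 and 8
 * both map to thread 0 and share one per-thread register window. The
 * bm_lock/lock_map handling in mvpp2_bm_pool_put() below exists to serialize
 * exactly this case.
 */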

static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cm3_base + offset);
}

static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cm3_base + offset);
}

static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = num,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = dma_dir,
		.offset = MVPP2_SKB_HEADROOM,
		.max_len = len,
	};

	return page_pool_create(&pp_params);
}
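
/* A typical call, mirroring what mvpp2_bm_init() does further down (shown
 * here only for illustration): one page pool per BM pool, mapped RX-only
 * unless an XDP program requires bidirectional DMA:
 *
 *	pp = mvpp2_create_page_pool(dev, mvpp2_pools[pn].buf_num,
 *				    mvpp2_pools[pn].pkt_size, DMA_FROM_DEVICE);
 *	if (IS_ERR(pp))
 *		return PTR_ERR(pp);
 */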

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP22_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}
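
/* Example of the paired-access pattern described in the comment above (a
 * sketch based on mvpp2_bm_pool_put() below, not new functionality): the
 * related registers must be written through the same thread window, e.g.
 *
 *	thread = mvpp2_cpu_to_thread(priv, get_cpu());
 *	mvpp2_thread_write(priv, thread, MVPP2_BM_VIRT_RLS_REG, cookie);
 *	mvpp2_thread_write(priv, thread, MVPP2_BM_PHY_RLS_REG(pool), dma);
 *	put_cpu();
 */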

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}
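
/* Worked example (illustrative; assumes MVPP2_TX_DESC_ALIGN is a low-bits
 * mask such as 0x1f, see mvpp2.h for the real value): for
 * dma_addr == 0x12345678 the descriptor would carry addr == 0x12345660 and
 * packet_offset == 0x18, so TX buffers do not themselves need to be
 * descriptor-aligned.
 */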

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->type = buf_type;
	if (buf_type == MVPP2_TYPE_SKB)
		tx_buf->skb = data;
	else
		tx_buf->xdpf = data;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get the maximum number of RXQs */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}
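
/* Worked example (illustrative): with 6 possible CPUs,
 * (6 + 3) & ~0x3 == 8, so eight RX queues are allocated; with 4 CPUs the
 * result is exactly 4. The MVPP2_PORT_MAX_RXQ cap still applies on top.
 */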

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
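
/* Worked example (illustrative; assuming MVPP2_MAX_TCONT == 6 and
 * MVPP2_MAX_TXQ == 8 as defined in mvpp2.h): port 1, logical txq 2 maps to
 * physical txq (6 + 1) * 8 + 2 == 58, i.e. each port owns a contiguous
 * block of MVPP2_MAX_TXQ physical queues.
 */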

/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
			      struct page_pool *page_pool)
{
	if (page_pool)
		return page_pool_dev_alloc_pages(page_pool);

	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);

	return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
			    struct page_pool *page_pool, void *data)
{
	if (page_pool)
		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;

	val &= ~MVPP2_BM_LOW_THRESH_MASK;
	val &= ~MVPP2_BM_HIGH_THRESH_MASK;

	/* Set 8 Pools BPPI threshold for MVPP23 */
	if (priv->hw_version == MVPP23) {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
	} else {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
	}

	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
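
/* Illustrative example (assuming MVPP2_POOL_BUF_SIZE_OFFSET == 5, i.e. a
 * 32-byte granularity; check mvpp2.h for the real value): a requested
 * buf_size of 1700 would be programmed as ALIGN(1700, 32) == 1728.
 */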

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version >= MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct page_pool *pp = NULL;
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	if (priv->percpu_pools)
		pp = priv->page_pool[bm_pool->id];

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		if (!pp)
			dma_unmap_single(dev, buf_dma_addr,
					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, pp, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	if (priv->percpu_pools) {
		page_pool_destroy(priv->page_pool[bm_pool->id]);
		priv->page_pool[bm_pool->id] = NULL;
	}

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

/* Enable the PPv23 8-pool mode */
static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{
	int val;

	val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
	val |= MVPP23_BM_8POOL_MODE;
	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}

/* Cleanup pool before actual initialization in the OS */
static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
	u32 val;
	int i;

	/* Drain the BM from all possible residues left by firmware */
	for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
		mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));

	put_cpu();

	/* Stop the BM pool */
	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_port *port;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Clean up the pool state in case it contains stale state */
	for (i = 0; i < poolnum; i++)
		mvpp2_bm_pool_cleanup(priv, i);

	if (priv->percpu_pools) {
		for (i = 0; i < priv->port_count; i++) {
			port = priv->port_list[i];
			if (port->xdp_prog) {
				dma_dir = DMA_BIDIRECTIONAL;
				break;
			}
		}

		for (i = 0; i < poolnum; i++) {
			/* the pool in use */
			int pn = i / (poolnum / 2);

			priv->page_pool[i] =
				mvpp2_create_page_pool(dev,
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
		}
	}

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	if (priv->hw_version == MVPP23)
		mvpp23_bm_set_8pool_mode(priv);

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     struct page_pool *page_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *data;

	data = mvpp2_frag_alloc(bm_pool, page_pool);
	if (!data)
		return NULL;

	if (page_pool) {
		page = (struct page *)data;
		dma_addr = page_pool_get_dma_addr(page);
		data = page_to_virt(page);
	} else {
		dma_addr = dma_map_single(port->dev->dev.parent, data,
					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
			mvpp2_frag_free(bm_pool, NULL, data);
			return NULL;
		}
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Enable flow control for RXQs */
static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, host_id, q;
	int fq = port->first_rxq;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Clear the Flow control enable bit to prevent a race between FW and
	 * kernel. If Flow control was enabled, it will be re-enabled below.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Set same Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set stop and start Flow control RXQ thresholds */
		val = MSS_THRESHOLD_START;
		val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
		/* Set RXQ port ID */
		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
		val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		/* Calculate RXQ host ID:
		 * In Single queue mode: Host ID equal to Host ID used for
		 * shared RX interrupt
		 * In Multi queue mode: Host ID equal to number of
		 * RXQ ID / number of CoS queues
		 * In Single resource mode: Host ID always equal to 0
		 */
		if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
			host_id = port->nqvecs;
		else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
			host_id = q;
		else
			host_id = 0;

		/* Set RXQ host ID */
		val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Disable flow control for RXQs */
static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, q;
	unsigned long flags;
	int fq = port->first_rxq;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Clear the Flow control enable bit to prevent a race between FW and
	 * kernel. If Flow control was enabled, it will be re-enabled below.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Disable Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set threshold 0 to disable Flow control */
		val = 0;
		val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));

		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));

		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Enable/disable flow control for a BM pool */
static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
				    struct mvpp2_bm_pool *pool,
				    bool en)
{
	int val, cm3_state;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Clear the Flow control enable bit to prevent a race between FW and
	 * kernel. If Flow control was enabled, it will be re-enabled below.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Check if BM pool should be enabled/disabled */
	if (en) {
		/* Set BM pool start and stop thresholds per port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val |= MSS_BUF_POOL_PORT_OFFS(port->id);
		val &= ~MSS_BUF_POOL_START_MASK;
		val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
		val &= ~MSS_BUF_POOL_STOP_MASK;
		val |= MSS_THRESHOLD_STOP;
		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	} else {
		/* Remove BM pool from the port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);

		/* Zero BM pool start and stop thresholds to disable pool
		 * flow control if pool empty (not used by any port)
		 */
		if (!pool->buf_num) {
			val &= ~MSS_BUF_POOL_START_MASK;
			val &= ~MSS_BUF_POOL_STOP_MASK;
		}

		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Enable/disable flow control for the BM pools on all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
	struct mvpp2_port *port;
	int i, j;

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		if (port->priv->percpu_pools) {
			for (j = 0; j < port->nrxqs; j++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
							port->tx_fc & en);
		} else {
			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
		}
	}
}

static int mvpp2_enable_global_fc(struct mvpp2 *priv)
{
	int val, timeout = 0;

	/* Enable global flow control. At this stage global flow control is
	 * enabled, but still disabled per port.
	 */
	val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	/* Check if Firmware is running; disable FC if it is not */
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	while (timeout < MSS_FC_MAX_TIMEOUT) {
		val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);

		if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
			return 0;
		usleep_range(10, 20);
		timeout++;
	}

	priv->global_tx_fc = false;
	return -EOPNOTSUPP;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version >= MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address.
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}
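
/* Note (editorial, derived from this file): because the cookie holds a
 * physical address, consumers recover the kernel virtual address with
 * phys_to_virt(); mvpp2_bm_bufs_free() above does exactly that:
 *
 *	data = (void *)phys_to_virt(buf_phys_addr);
 *
 * after reading the cookie back through MVPP2_BM_VIRT_ALLOC_REG.
 */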

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	struct page_pool *pp = NULL;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools\n");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	if (port->priv->percpu_pools)
		pp = port->priv->page_pool[bm_pool->id];
	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *bm_pool;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 127.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port,
							port->pool_short,
							false);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							false);
		}

		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);

		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							true);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_short,
							true);
		}

		/* Update L4 checksum when jumbo enable/disable on port */
		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
			dev->hw_features &= ~(NETIF_F_IP_CSUM |
					      NETIF_F_IPV6_CSUM);
		} else {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		}
	}

out_set:
	WRITE_ONCE(dev->mtu, mtu);
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 val, thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
			   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version == MVPP21)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
				   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
	}
}

/* Only GOP port 0 has an XLG MAC */
static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
{
	return port->gop_id == 0;
}

static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
	return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_5GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
	u32 old, val;

	old = val = readl(ptr);
	val &= ~mask;
	val |= set;
	if (old != val)
		writel(val, ptr);
}
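
/* Usage sketch (illustrative): mvpp2_modify() is a read-modify-write helper
 * that skips the MMIO write when nothing changes. Setting and clearing a bit
 * look like this, mirroring its use in mvpp22_gop_setup_irq() below:
 *
 *	mvpp2_modify(base + REG, SOME_MASK, SOME_MASK);	// set bits
 *	mvpp2_modify(base + REG, SOME_MASK, 0);		// clear bits
 */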

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2) {
		val |= GENCONF_CTRL0_PORT2_RGMII;
	} else if (port->gop_id == 3) {
		val |= GENCONF_CTRL0_PORT3_RGMII_MII;

		/* According to the specification, GENCONF_CTRL0_PORT3_RGMII
		 * should be set to 1 for RGMII and 0 for MII. However, tests
		 * show that it is the other way around. This is also what
		 * U-Boot does for mvpp2, so it is assumed to be correct.
		 */
		if (port->phy_interface == PHY_INTERFACE_MODE_MII)
			val |= GENCONF_CTRL0_PORT3_RGMII;
		else
			val &= ~GENCONF_CTRL0_PORT3_RGMII;
	}
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT2_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 val;

	val = readl(fca + MVPP22_FCA_CONTROL_REG);
	val &= ~MVPP22_FCA_ENABLE_PERIODIC;
	if (en)
		val |= MVPP22_FCA_ENABLE_PERIODIC;
	writel(val, fca + MVPP22_FCA_CONTROL_REG);
}

static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 lsb, msb;

	lsb = timer & MVPP22_FCA_REG_MASK;
	msb = timer >> MVPP22_FCA_REG_SIZE;

	writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
	writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
}

/* Set Flow Control timer x100 faster than pause quanta to ensure that link
 * partner won't send traffic if port is in XOFF mode.
 */
static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
{
	u32 timer;

	timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
		* FC_QUANTA;

	mvpp22_gop_fca_enable_periodic(port, false);

	mvpp22_gop_fca_set_timer(port, timer);

	mvpp22_gop_fca_enable_periodic(port, true);
}
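
/* Worked example (illustrative only; the real FC_CLK_DIVIDER and FC_QUANTA
 * values live in mvpp2.h): with tclk == 250 MHz, FC_CLK_DIVIDER == 100 and
 * FC_QUANTA == 0xffff, the periodic counter would be programmed to
 * (250000000 / (1000000 * 100)) * 0xffff == 2 * 0xffff ticks, i.e. the
 * refresh period scales linearly with the core clock.
 */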

static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!mvpp2_port_supports_rgmii(port))
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
		if (!mvpp2_port_supports_xlg(port))
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

	mvpp22_gop_fca_set_periodic_timer(port);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}
1700
mvpp22_gop_unmask_irq(struct mvpp2_port * port)1701 static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
1702 {
1703 u32 val;
1704
1705 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
1706 phy_interface_mode_is_8023z(port->phy_interface) ||
1707 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1708 /* Enable the GMAC link status irq for this port */
1709 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
1710 val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
1711 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
1712 }
1713
1714 if (mvpp2_port_supports_xlg(port)) {
1715 /* Enable the XLG/GIG irqs for this port */
1716 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
1717 if (mvpp2_is_xlg(port->phy_interface))
1718 val |= MVPP22_XLG_EXT_INT_MASK_XLG;
1719 else
1720 val |= MVPP22_XLG_EXT_INT_MASK_GIG;
1721 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
1722 }
1723 }
1724
1725 static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
1726 {
1727 u32 val;
1728
1729 if (mvpp2_port_supports_xlg(port)) {
1730 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
1731 val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
1732 MVPP22_XLG_EXT_INT_MASK_GIG);
1733 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
1734 }
1735
1736 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
1737 phy_interface_mode_is_8023z(port->phy_interface) ||
1738 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1739 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
1740 val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
1741 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
1742 }
1743 }
1744
1745 static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
1746 {
1747 u32 val;
1748
1749 mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
1750 MVPP22_GMAC_INT_SUM_MASK_PTP,
1751 MVPP22_GMAC_INT_SUM_MASK_PTP);
1752
1753 if (port->phylink ||
1754 phy_interface_mode_is_rgmii(port->phy_interface) ||
1755 phy_interface_mode_is_8023z(port->phy_interface) ||
1756 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1757 val = readl(port->base + MVPP22_GMAC_INT_MASK);
1758 val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
1759 writel(val, port->base + MVPP22_GMAC_INT_MASK);
1760 }
1761
1762 if (mvpp2_port_supports_xlg(port)) {
1763 val = readl(port->base + MVPP22_XLG_INT_MASK);
1764 val |= MVPP22_XLG_INT_MASK_LINK;
1765 writel(val, port->base + MVPP22_XLG_INT_MASK);
1766
1767 mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
1768 MVPP22_XLG_EXT_INT_MASK_PTP,
1769 MVPP22_XLG_EXT_INT_MASK_PTP);
1770 }
1771
1772 mvpp22_gop_unmask_irq(port);
1773 }
1774
1775 /* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
1776 *
1777 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
1778 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
1779 * differ.
1780 *
1781 * The COMPHY configures the serdes lanes regardless of the actual use of the
1782 * lanes by the physical layer. This is why configurations like
1783 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
1784 */
1785 static int mvpp22_comphy_init(struct mvpp2_port *port,
1786 phy_interface_t interface)
1787 {
1788 int ret;
1789
1790 if (!port->comphy)
1791 return 0;
1792
1793 ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface);
1794 if (ret)
1795 return ret;
1796
1797 return phy_power_on(port->comphy);
1798 }
1799
1800 static void mvpp2_port_enable(struct mvpp2_port *port)
1801 {
1802 u32 val;
1803
1804 if (mvpp2_port_supports_xlg(port) &&
1805 mvpp2_is_xlg(port->phy_interface)) {
1806 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
1807 val |= MVPP22_XLG_CTRL0_PORT_EN;
1808 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
1809 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
1810 } else {
1811 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1812 val |= MVPP2_GMAC_PORT_EN_MASK;
1813 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
1814 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1815 }
1816 }
1817
1818 static void mvpp2_port_disable(struct mvpp2_port *port)
1819 {
1820 u32 val;
1821
1822 if (mvpp2_port_supports_xlg(port) &&
1823 mvpp2_is_xlg(port->phy_interface)) {
1824 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
1825 val &= ~MVPP22_XLG_CTRL0_PORT_EN;
1826 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
1827 }
1828
1829 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1830 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
1831 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1832 }
1833
1834 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
1835 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
1836 {
1837 u32 val;
1838
1839 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
1840 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
1841 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
1842 }
1843
1844 /* Configure loopback port */
1845 static void mvpp2_port_loopback_set(struct mvpp2_port *port,
1846 const struct phylink_link_state *state)
1847 {
1848 u32 val;
1849
1850 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
1851
1852 if (state->speed == 1000)
1853 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
1854 else
1855 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
1856
1857 if (phy_interface_mode_is_8023z(state->interface) ||
1858 state->interface == PHY_INTERFACE_MODE_SGMII)
1859 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
1860 else
1861 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
1862
1863 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
1864 }
1865
1866 enum {
1867 ETHTOOL_XDP_REDIRECT,
1868 ETHTOOL_XDP_PASS,
1869 ETHTOOL_XDP_DROP,
1870 ETHTOOL_XDP_TX,
1871 ETHTOOL_XDP_TX_ERR,
1872 ETHTOOL_XDP_XMIT,
1873 ETHTOOL_XDP_XMIT_ERR,
1874 };
1875
1876 struct mvpp2_ethtool_counter {
1877 unsigned int offset;
1878 const char string[ETH_GSTRING_LEN];
1879 bool reg_is_64b;
1880 };
1881
1882 static u64 mvpp2_read_count(struct mvpp2_port *port,
1883 const struct mvpp2_ethtool_counter *counter)
1884 {
1885 u64 val;
1886
1887 val = readl(port->stats_base + counter->offset);
1888 if (counter->reg_is_64b)
1889 val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
1890
1891 return val;
1892 }
1893
1894 /* Some counters are accessed indirectly by first writing an index to
1895 * MVPP2_CTRS_IDX. Depending on the register we then access, the index can
1896 * represent various resources: a hit counter for some classification tables,
1897 * or a counter specific to a rxq, a txq or a buffer pool.
1898 */
1899 static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
1900 {
1901 mvpp2_write(priv, MVPP2_CTRS_IDX, index);
1902 return mvpp2_read(priv, reg);
1903 }
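/* Illustrative use, mirroring the calls in mvpp2_read_stats() below: reading
 * the per-txq "descriptors enqueued" counter for queue q of a port:
 *
 *   val = mvpp2_read_index(priv, MVPP22_CTRS_TX_CTR(port->id, q),
 *                          MVPP2_TX_DESC_ENQ_CTR);
 *
 * The write to MVPP2_CTRS_IDX selects the resource, and the subsequent read
 * returns that resource's counter.
 */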
1904
1905 /* Software statistics and hardware statistics are, by design, incremented
1906 * at different moments in the chain of packet processing. It is therefore
1907 * very likely that incoming packets are dropped after being counted by
1908 * hardware but before reaching the software statistics (most probably
1909 * multicast packets). In the opposite direction, during transmission, FCS
1910 * bytes are added in between, and TSO skbs are split with header bytes added.
1911 * Hence, statistics gathered from userspace with ifconfig (software) and
1912 * ethtool (hardware) cannot be compared.
1913 */
1914 static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
1915 { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
1916 { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
1917 { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
1918 { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
1919 { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
1920 { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
1921 { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
1922 { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
1923 { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
1924 { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
1925 { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
1926 { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
1927 { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
1928 { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
1929 { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
1930 { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
1931 { MVPP2_MIB_FC_SENT, "fc_sent" },
1932 { MVPP2_MIB_FC_RCVD, "fc_received" },
1933 { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
1934 { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
1935 { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
1936 { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
1937 { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
1938 { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
1939 { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
1940 { MVPP2_MIB_COLLISION, "collision" },
1941 { MVPP2_MIB_LATE_COLLISION, "late_collision" },
1942 };
1943
1944 static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
1945 { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
1946 { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
1947 };
1948
1949 static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
1950 { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
1951 { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
1952 { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
1953 { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
1954 { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
1955 { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
1956 { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
1957 { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
1958 { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
1959 };
1960
1961 static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
1962 { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
1963 { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
1964 { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
1965 { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
1966 };
1967
1968 static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
1969 { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
1970 { ETHTOOL_XDP_PASS, "rx_xdp_pass", },
1971 { ETHTOOL_XDP_DROP, "rx_xdp_drop", },
1972 { ETHTOOL_XDP_TX, "rx_xdp_tx", },
1973 { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
1974 { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
1975 { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
1976 };
1977
1978 #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
1979 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
1980 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
1981 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
1982 ARRAY_SIZE(mvpp2_ethtool_xdp))
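/* For example, with the table sizes above (27 MIB, 2 port, 9 per-txq and
 * 4 per-rxq counters, plus 7 XDP counters), a port with 8 txqs and 4 rxqs
 * exposes 27 + 2 + 9 * 8 + 4 * 4 + 7 = 124 ethtool statistics.
 */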
1983
1984 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
1985 u8 *data)
1986 {
1987 struct mvpp2_port *port = netdev_priv(netdev);
1988 const char *str;
1989 int i, q;
1990
1991 if (sset != ETH_SS_STATS)
1992 return;
1993
1994 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
1995 ethtool_puts(&data, mvpp2_ethtool_mib_regs[i].string);
1996
1997 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
1998 ethtool_puts(&data, mvpp2_ethtool_port_regs[i].string);
1999
2000 for (q = 0; q < port->ntxqs; q++)
2001 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
2002 str = mvpp2_ethtool_txq_regs[i].string;
2003 ethtool_sprintf(&data, str, q);
2004 }
2005
2006 for (q = 0; q < port->nrxqs; q++)
2007 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
2008 str = mvpp2_ethtool_rxq_regs[i].string;
2009 ethtool_sprintf(&data, str, q);
2010 }
2011
2012 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++)
2013 ethtool_puts(&data, mvpp2_ethtool_xdp[i].string);
2014 }
2015
2016 static void
2017 mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
2018 {
2019 unsigned int start;
2020 unsigned int cpu;
2021
2022 /* Gather XDP Statistics */
2023 for_each_possible_cpu(cpu) {
2024 struct mvpp2_pcpu_stats *cpu_stats;
2025 u64 xdp_redirect;
2026 u64 xdp_pass;
2027 u64 xdp_drop;
2028 u64 xdp_xmit;
2029 u64 xdp_xmit_err;
2030 u64 xdp_tx;
2031 u64 xdp_tx_err;
2032
2033 cpu_stats = per_cpu_ptr(port->stats, cpu);
2034 do {
2035 start = u64_stats_fetch_begin(&cpu_stats->syncp);
2036 xdp_redirect = cpu_stats->xdp_redirect;
2037 xdp_pass = cpu_stats->xdp_pass;
2038 xdp_drop = cpu_stats->xdp_drop;
2039 xdp_xmit = cpu_stats->xdp_xmit;
2040 xdp_xmit_err = cpu_stats->xdp_xmit_err;
2041 xdp_tx = cpu_stats->xdp_tx;
2042 xdp_tx_err = cpu_stats->xdp_tx_err;
2043 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
2044
2045 xdp_stats->xdp_redirect += xdp_redirect;
2046 xdp_stats->xdp_pass += xdp_pass;
2047 xdp_stats->xdp_drop += xdp_drop;
2048 xdp_stats->xdp_xmit += xdp_xmit;
2049 xdp_stats->xdp_xmit_err += xdp_xmit_err;
2050 xdp_stats->xdp_tx += xdp_tx;
2051 xdp_stats->xdp_tx_err += xdp_tx_err;
2052 }
2053 }
2054
2055 static void mvpp2_read_stats(struct mvpp2_port *port)
2056 {
2057 struct mvpp2_pcpu_stats xdp_stats = {};
2058 const struct mvpp2_ethtool_counter *s;
2059 u64 *pstats;
2060 int i, q;
2061
2062 pstats = port->ethtool_stats;
2063
2064 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
2065 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
2066
2067 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
2068 *pstats++ += mvpp2_read(port->priv,
2069 mvpp2_ethtool_port_regs[i].offset +
2070 4 * port->id);
2071
2072 for (q = 0; q < port->ntxqs; q++)
2073 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
2074 *pstats++ += mvpp2_read_index(port->priv,
2075 MVPP22_CTRS_TX_CTR(port->id, q),
2076 mvpp2_ethtool_txq_regs[i].offset);
2077
2078 /* Rxqs are numbered from 0 from the user's standpoint, but not from the
2079 * driver's. We need to add the port->first_rxq offset.
2080 */
2081 for (q = 0; q < port->nrxqs; q++)
2082 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
2083 *pstats++ += mvpp2_read_index(port->priv,
2084 port->first_rxq + q,
2085 mvpp2_ethtool_rxq_regs[i].offset);
2086
2087 /* Gather XDP Statistics */
2088 mvpp2_get_xdp_stats(port, &xdp_stats);
2089
2090 for (i = 0, s = mvpp2_ethtool_xdp;
2091 s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
2092 s++, i++) {
2093 switch (s->offset) {
2094 case ETHTOOL_XDP_REDIRECT:
2095 *pstats++ = xdp_stats.xdp_redirect;
2096 break;
2097 case ETHTOOL_XDP_PASS:
2098 *pstats++ = xdp_stats.xdp_pass;
2099 break;
2100 case ETHTOOL_XDP_DROP:
2101 *pstats++ = xdp_stats.xdp_drop;
2102 break;
2103 case ETHTOOL_XDP_TX:
2104 *pstats++ = xdp_stats.xdp_tx;
2105 break;
2106 case ETHTOOL_XDP_TX_ERR:
2107 *pstats++ = xdp_stats.xdp_tx_err;
2108 break;
2109 case ETHTOOL_XDP_XMIT:
2110 *pstats++ = xdp_stats.xdp_xmit;
2111 break;
2112 case ETHTOOL_XDP_XMIT_ERR:
2113 *pstats++ = xdp_stats.xdp_xmit_err;
2114 break;
2115 }
2116 }
2117 }
2118
2119 static void mvpp2_gather_hw_statistics(struct work_struct *work)
2120 {
2121 struct delayed_work *del_work = to_delayed_work(work);
2122 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
2123 stats_work);
2124
2125 mutex_lock(&port->gather_stats_lock);
2126
2127 mvpp2_read_stats(port);
2128
2129 /* There is no need to read the counters again right after this function
2130 * if it was called asynchronously by the user (i.e. via ethtool).
2131 */
2132 cancel_delayed_work(&port->stats_work);
2133 queue_delayed_work(port->priv->stats_queue, &port->stats_work,
2134 MVPP2_MIB_COUNTERS_STATS_DELAY);
2135
2136 mutex_unlock(&port->gather_stats_lock);
2137 }
2138
2139 static void mvpp2_ethtool_get_stats(struct net_device *dev,
2140 struct ethtool_stats *stats, u64 *data)
2141 {
2142 struct mvpp2_port *port = netdev_priv(dev);
2143
2144 /* Update statistics for the given port, then take the lock to avoid
2145 * concurrent accesses on the ethtool_stats structure during its copy.
2146 */
2147 mvpp2_gather_hw_statistics(&port->stats_work.work);
2148
2149 mutex_lock(&port->gather_stats_lock);
2150 memcpy(data, port->ethtool_stats,
2151 sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
2152 mutex_unlock(&port->gather_stats_lock);
2153 }
2154
2155 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
2156 {
2157 struct mvpp2_port *port = netdev_priv(dev);
2158
2159 if (sset == ETH_SS_STATS)
2160 return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
2161
2162 return -EOPNOTSUPP;
2163 }
2164
2165 static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
2166 {
2167 u32 val;
2168
2169 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
2170 MVPP2_GMAC_PORT_RESET_MASK;
2171 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2172
2173 if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) {
2174 val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
2175 ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
2176 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
2177 }
2178 }
2179
2180 static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
2181 {
2182 struct mvpp2 *priv = port->priv;
2183 void __iomem *mpcs, *xpcs;
2184 u32 val;
2185
2186 if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
2187 return;
2188
2189 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
2190 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
2191
2192 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
2193 val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
2194 val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
2195 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
2196
2197 val = readl(xpcs + MVPP22_XPCS_CFG0);
2198 writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
2199 }
2200
2201 static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
2202 phy_interface_t interface)
2203 {
2204 struct mvpp2 *priv = port->priv;
2205 void __iomem *mpcs, *xpcs;
2206 u32 val;
2207
2208 if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
2209 return;
2210
2211 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
2212 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
2213
2214 switch (interface) {
2215 case PHY_INTERFACE_MODE_5GBASER:
2216 case PHY_INTERFACE_MODE_10GBASER:
2217 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
2218 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
2219 MAC_CLK_RESET_SD_TX;
2220 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
2221 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
2222 break;
2223 case PHY_INTERFACE_MODE_XAUI:
2224 case PHY_INTERFACE_MODE_RXAUI:
2225 val = readl(xpcs + MVPP22_XPCS_CFG0);
2226 writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
2227 break;
2228 default:
2229 break;
2230 }
2231 }
2232
2233 /* Change maximum receive size of the port */
2234 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2235 {
2236 u32 val;
2237
2238 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2239 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2240 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2241 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2242 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2243 }
2244
2245 /* Change maximum receive size of the port */
2246 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
2247 {
2248 u32 val;
2249
2250 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
2251 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
2252 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2253 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
2254 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
2255 }
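/* In both helpers above, the divide by two suggests the hardware field is
 * programmed in units of two bytes, after subtracting the two-byte Marvell
 * header (MVPP2_MH_SIZE) that precedes each frame.
 */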
2256
2257 /* Set defaults to the MVPP2 port */
2258 static void mvpp2_defaults_set(struct mvpp2_port *port)
2259 {
2260 int tx_port_num, val, queue, lrxq;
2261
2262 if (port->priv->hw_version == MVPP21) {
2263 /* Update TX FIFO MIN Threshold */
2264 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2265 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2266 /* Min. TX threshold must be less than minimal packet length */
2267 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2268 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2269 }
2270
2271 /* Disable Legacy WRR, Disable EJP, Release from reset */
2272 tx_port_num = mvpp2_egress_port(port);
2273 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2274 tx_port_num);
2275 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2276
2277 /* Set TXQ scheduling to Round-Robin */
2278 mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
2279
2280 /* Close bandwidth for all queues */
2281 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
2282 mvpp2_write(port->priv,
2283 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
2284
2285 /* Set refill period to 1 usec, refill tokens
2286 * and bucket size to maximum
2287 */
2288 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
2289 port->priv->tclk / USEC_PER_SEC);
2290 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2291 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2292 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2293 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2294 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2295 val = MVPP2_TXP_TOKEN_SIZE_MAX;
2296 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2297
2298 /* Set MaximumLowLatencyPacketSize value to 256 */
2299 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2300 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2301 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2302
2303 /* Enable Rx cache snoop */
2304 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2305 queue = port->rxqs[lrxq]->id;
2306 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2307 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2308 MVPP2_SNOOP_BUF_HDR_MASK;
2309 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2310 }
2311
2312 /* By default, mask all interrupts on all present CPUs */
2313 mvpp2_interrupts_disable(port);
2314 }
2315
2316 /* Enable/disable receiving packets */
2317 static void mvpp2_ingress_enable(struct mvpp2_port *port)
2318 {
2319 u32 val;
2320 int lrxq, queue;
2321
2322 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2323 queue = port->rxqs[lrxq]->id;
2324 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2325 val &= ~MVPP2_RXQ_DISABLE_MASK;
2326 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2327 }
2328 }
2329
2330 static void mvpp2_ingress_disable(struct mvpp2_port *port)
2331 {
2332 u32 val;
2333 int lrxq, queue;
2334
2335 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2336 queue = port->rxqs[lrxq]->id;
2337 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2338 val |= MVPP2_RXQ_DISABLE_MASK;
2339 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2340 }
2341 }
2342
2343 /* Enable transmit via physical egress queue
2344 * - HW starts taking descriptors from DRAM
2345 */
2346 static void mvpp2_egress_enable(struct mvpp2_port *port)
2347 {
2348 u32 qmap;
2349 int queue;
2350 int tx_port_num = mvpp2_egress_port(port);
2351
2352 /* Enable all initialized TXs. */
2353 qmap = 0;
2354 for (queue = 0; queue < port->ntxqs; queue++) {
2355 struct mvpp2_tx_queue *txq = port->txqs[queue];
2356
2357 if (txq->descs)
2358 qmap |= (1 << queue);
2359 }
2360
2361 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2362 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2363 }
2364
2365 /* Disable transmit via physical egress queue
2366 * - HW stops taking descriptors from DRAM
2367 */
2368 static void mvpp2_egress_disable(struct mvpp2_port *port)
2369 {
2370 u32 reg_data;
2371 int delay;
2372 int tx_port_num = mvpp2_egress_port(port);
2373
2374 /* Issue stop command for active channels only */
2375 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2376 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2377 MVPP2_TXP_SCHED_ENQ_MASK;
2378 if (reg_data != 0)
2379 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2380 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2381
2382 /* Wait for all Tx activity to terminate. */
2383 delay = 0;
2384 do {
2385 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2386 netdev_warn(port->dev,
2387 "Tx stop timed out, status=0x%08x\n",
2388 reg_data);
2389 break;
2390 }
2391 mdelay(1);
2392 delay++;
2393
2394 /* Check the port TX Command register to verify that all
2395 * Tx queues are stopped
2396 */
2397 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2398 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2399 }
2400
2401 /* Rx descriptors helper methods */
2402
2403 /* Get number of Rx descriptors occupied by received packets */
2404 static inline int
2405 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2406 {
2407 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2408
2409 return val & MVPP2_RXQ_OCCUPIED_MASK;
2410 }
2411
2412 /* Update Rx queue status with the number of occupied and available
2413 * Rx descriptor slots.
2414 */
2415 static inline void
2416 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2417 int used_count, int free_count)
2418 {
2419 /* Decrement the number of used descriptors and
2420 * increment the number of free descriptors.
2421 */
2422 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2423
2424 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2425 }
2426
2427 /* Get pointer to next RX descriptor to be processed by SW */
2428 static inline struct mvpp2_rx_desc *
2429 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2430 {
2431 int rx_desc = rxq->next_desc_to_proc;
2432
2433 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2434 prefetch(rxq->descs + rxq->next_desc_to_proc);
2435 return rxq->descs + rx_desc;
2436 }
2437
2438 /* Set rx queue offset */
2439 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2440 int prxq, int offset)
2441 {
2442 u32 val;
2443
2444 /* Convert offset from bytes to units of 32 bytes */
2445 offset = offset >> 5;
2446
2447 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2448 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2449
2450 /* Offset is in units of 32 bytes */
2451 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2452 MVPP2_RXQ_PACKET_OFFSET_MASK);
2453
2454 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2455 }
2456
2457 /* Tx descriptors helper methods */
2458
2459 /* Get pointer to next Tx descriptor to be processed (send) by HW */
2460 static struct mvpp2_tx_desc *
2461 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2462 {
2463 int tx_desc = txq->next_desc_to_proc;
2464
2465 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2466 return txq->descs + tx_desc;
2467 }
2468
2469 /* Update HW with number of aggregated Tx descriptors to be sent
2470 *
2471 * Called only from mvpp2_tx(), so migration is disabled, using
2472 * smp_processor_id() is OK.
2473 */
2474 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2475 {
2476 /* aggregated access - relevant TXQ number is written in TX desc */
2477 mvpp2_thread_write(port->priv,
2478 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2479 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2480 }
2481
2482 /* Check if there are enough free descriptors in aggregated txq.
2483 * If not, update the number of occupied descriptors and repeat the check.
2484 *
2485 * Called only from mvpp2_tx(), so migration is disabled, using
2486 * smp_processor_id() is OK.
2487 */
2488 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
2489 struct mvpp2_tx_queue *aggr_txq, int num)
2490 {
2491 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
2492 /* Update number of occupied aggregated Tx descriptors */
2493 unsigned int thread =
2494 mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2495 u32 val = mvpp2_read_relaxed(port->priv,
2496 MVPP2_AGGR_TXQ_STATUS_REG(thread));
2497
2498 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
2499
2500 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
2501 return -ENOMEM;
2502 }
2503 return 0;
2504 }
2505
2506 /* Reserved Tx descriptors allocation request
2507 *
2508 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
2509 * only by mvpp2_tx(), so migration is disabled, using
2510 * smp_processor_id() is OK.
2511 */
2512 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
2513 struct mvpp2_tx_queue *txq, int num)
2514 {
2515 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2516 struct mvpp2 *priv = port->priv;
2517 u32 val;
2518
2519 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
2520 mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
2521
2522 val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
2523
2524 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
2525 }
2526
2527 /* Check if there are enough reserved descriptors for transmission.
2528 * If not, request a chunk of reserved descriptors and check again.
2529 */
2530 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
2531 struct mvpp2_tx_queue *txq,
2532 struct mvpp2_txq_pcpu *txq_pcpu,
2533 int num)
2534 {
2535 int req, desc_count;
2536 unsigned int thread;
2537
2538 if (txq_pcpu->reserved_num >= num)
2539 return 0;
2540
2541 /* Not enough descriptors reserved! Update the reserved descriptor
2542 * count and check again.
2543 */
2544
2545 desc_count = 0;
2546 /* Compute total of used descriptors */
2547 for (thread = 0; thread < port->priv->nthreads; thread++) {
2548 struct mvpp2_txq_pcpu *txq_pcpu_aux;
2549
2550 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
2551 desc_count += txq_pcpu_aux->count;
2552 desc_count += txq_pcpu_aux->reserved_num;
2553 }
2554
2555 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
2556 desc_count += req;
2557
2558 if (desc_count >
2559 (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
2560 return -ENOMEM;
2561
2562 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
2563
2564 /* OK, the descriptor count could have been updated: check again. */
2565 if (txq_pcpu->reserved_num < num)
2566 return -ENOMEM;
2567 return 0;
2568 }
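/* A sketch of the arithmetic, assuming MVPP2_CPU_DESC_CHUNK is 64: a thread
 * holding 10 reserved descriptors that needs 16 requests
 * max(64, 16 - 10) = 64 more, provided the global total (counts plus
 * reservations across all threads, plus the request) still leaves
 * MVPP2_MAX_THREADS * 64 descriptors of slack in the queue.
 */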
2569
2570 /* Release the last allocated Tx descriptor. Useful to handle DMA
2571 * mapping failures in the Tx path.
2572 */
2573 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2574 {
2575 if (txq->next_desc_to_proc == 0)
2576 txq->next_desc_to_proc = txq->last_desc - 1;
2577 else
2578 txq->next_desc_to_proc--;
2579 }
2580
2581 /* Set Tx descriptors fields relevant for CSUM calculation */
2582 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2583 int ip_hdr_len, int l4_proto)
2584 {
2585 u32 command;
2586
2587 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
2588 * G_L4_chk, L4_type required only for checksum calculation
2589 */
2590 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
2591 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
2592 command |= MVPP2_TXD_IP_CSUM_DISABLE;
2593
2594 if (l3_proto == htons(ETH_P_IP)) {
2595 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
2596 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
2597 } else {
2598 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
2599 }
2600
2601 if (l4_proto == IPPROTO_TCP) {
2602 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
2603 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
2604 } else if (l4_proto == IPPROTO_UDP) {
2605 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
2606 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
2607 } else {
2608 command |= MVPP2_TXD_L4_CSUM_NOT;
2609 }
2610
2611 return command;
2612 }
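/* Illustrative call for a TCP-over-IPv4 skb (a sketch, not the driver's
 * actual Tx path helper):
 *
 *   command = mvpp2_txq_desc_csum(skb_network_offset(skb),
 *                                 htons(ETH_P_IP),
 *                                 ip_hdr(skb)->ihl,
 *                                 IPPROTO_TCP);
 *
 * which enables IPv4 header and TCP checksum generation in the descriptor.
 */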
2613
2614 /* Get number of sent descriptors and decrement counter.
2615 * The number of sent descriptors is returned.
2616 * Per-thread access
2617 *
2618 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
2619 * (migration disabled) and from the TX completion tasklet (migration
2620 * disabled) so using smp_processor_id() is OK.
2621 */
2622 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2623 struct mvpp2_tx_queue *txq)
2624 {
2625 u32 val;
2626
2627 /* Reading status reg resets transmitted descriptor counter */
2628 val = mvpp2_thread_read_relaxed(port->priv,
2629 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2630 MVPP2_TXQ_SENT_REG(txq->id));
2631
2632 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2633 MVPP2_TRANSMITTED_COUNT_OFFSET;
2634 }
2635
2636 /* Called through on_each_cpu(), so runs on all CPUs, with migration
2637 * disabled, therefore using smp_processor_id() is OK.
2638 */
2639 static void mvpp2_txq_sent_counter_clear(void *arg)
2640 {
2641 struct mvpp2_port *port = arg;
2642 int queue;
2643
2644 /* If the thread isn't used, don't do anything */
2645 if (smp_processor_id() >= port->priv->nthreads)
2646 return;
2647
2648 for (queue = 0; queue < port->ntxqs; queue++) {
2649 int id = port->txqs[queue]->id;
2650
2651 mvpp2_thread_read(port->priv,
2652 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2653 MVPP2_TXQ_SENT_REG(id));
2654 }
2655 }
2656
2657 /* Set max sizes for Tx queues */
2658 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2659 {
2660 u32 val, size, mtu;
2661 int txq, tx_port_num;
2662
2663 mtu = port->pkt_size * 8;
2664 if (mtu > MVPP2_TXP_MTU_MAX)
2665 mtu = MVPP2_TXP_MTU_MAX;
2666
2667 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
2668 mtu = 3 * mtu;
2669
2670 /* Indirect access to registers */
2671 tx_port_num = mvpp2_egress_port(port);
2672 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2673
2674 /* Set MTU */
2675 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2676 val &= ~MVPP2_TXP_MTU_MAX;
2677 val |= mtu;
2678 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2679
2680 /* TXP token size and all TXQs token size must be larger than the MTU */
2681 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2682 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2683 if (size < mtu) {
2684 size = mtu;
2685 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2686 val |= size;
2687 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2688 }
2689
2690 for (txq = 0; txq < port->ntxqs; txq++) {
2691 val = mvpp2_read(port->priv,
2692 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2693 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2694
2695 if (size < mtu) {
2696 size = mtu;
2697 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2698 val |= size;
2699 mvpp2_write(port->priv,
2700 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2701 val);
2702 }
2703 }
2704 }
2705
2706 /* Set the number of non-occupied descriptors threshold */
2707 static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
2708 struct mvpp2_rx_queue *rxq)
2709 {
2710 u32 val;
2711
2712 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
2713
2714 val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
2715 val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
2716 val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
2717 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
2718 }
2719
2720 /* Set the number of packets that will be received before an Rx interrupt
2721 * is generated by HW.
2722 */
2723 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2724 struct mvpp2_rx_queue *rxq)
2725 {
2726 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2727
2728 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2729 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2730
2731 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2732 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2733 rxq->pkts_coal);
2734
2735 put_cpu();
2736 }
2737
2738 /* For some reason in the LSP this is done on each CPU. Why? */
2739 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2740 struct mvpp2_tx_queue *txq)
2741 {
2742 unsigned int thread;
2743 u32 val;
2744
2745 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2746 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2747
2748 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2749 /* PKT-coalescing registers are per-queue + per-thread */
2750 for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
2751 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2752 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2753 }
2754 }
2755
2756 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2757 {
2758 u64 tmp = (u64)clk_hz * usec;
2759
2760 do_div(tmp, USEC_PER_SEC);
2761
2762 return tmp > U32_MAX ? U32_MAX : tmp;
2763 }
2764
2765 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2766 {
2767 u64 tmp = (u64)cycles * USEC_PER_SEC;
2768
2769 do_div(tmp, clk_hz);
2770
2771 return tmp > U32_MAX ? U32_MAX : tmp;
2772 }
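/* Worked example with a hypothetical 333 MHz tclk: a 100 usec coalescing
 * delay maps to 333000000 * 100 / 1000000 = 33300 cycles, and 33300 cycles
 * convert back to 100 usec. Results that would overflow the register are
 * clamped to U32_MAX.
 */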
2773
2774 /* Set the time delay in usec before Rx interrupt */
2775 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2776 struct mvpp2_rx_queue *rxq)
2777 {
2778 unsigned long freq = port->priv->tclk;
2779 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2780
2781 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2782 rxq->time_coal =
2783 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2784
2785 /* re-evaluate to get actual register value */
2786 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2787 }
2788
2789 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2790 }
2791
2792 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2793 {
2794 unsigned long freq = port->priv->tclk;
2795 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2796
2797 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2798 port->tx_time_coal =
2799 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2800
2801 /* re-evaluate to get actual register value */
2802 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2803 }
2804
2805 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
2806 }
2807
2808 /* Free Tx queue skbuffs */
2809 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2810 struct mvpp2_tx_queue *txq,
2811 struct mvpp2_txq_pcpu *txq_pcpu, int num)
2812 {
2813 struct xdp_frame_bulk bq;
2814 int i;
2815
2816 xdp_frame_bulk_init(&bq);
2817
2818 rcu_read_lock(); /* needed for xdp_return_frame_bulk */
2819
2820 for (i = 0; i < num; i++) {
2821 struct mvpp2_txq_pcpu_buf *tx_buf =
2822 txq_pcpu->buffs + txq_pcpu->txq_get_index;
2823
2824 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
2825 tx_buf->type != MVPP2_TYPE_XDP_TX)
2826 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2827 tx_buf->size, DMA_TO_DEVICE);
2828 if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
2829 dev_kfree_skb_any(tx_buf->skb);
2830 else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
2831 tx_buf->type == MVPP2_TYPE_XDP_NDO)
2832 xdp_return_frame_bulk(tx_buf->xdpf, &bq);
2833
2834 mvpp2_txq_inc_get(txq_pcpu);
2835 }
2836 xdp_flush_frame_bulk(&bq);
2837
2838 rcu_read_unlock();
2839 }
2840
2841 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2842 u32 cause)
2843 {
2844 int queue = fls(cause) - 1;
2845
2846 return port->rxqs[queue];
2847 }
2848
2849 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2850 u32 cause)
2851 {
2852 int queue = fls(cause) - 1;
2853
2854 return port->txqs[queue];
2855 }
2856
2857 /* Handle end of transmission */
2858 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2859 struct mvpp2_txq_pcpu *txq_pcpu)
2860 {
2861 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2862 int tx_done;
2863
2864 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
2865 netdev_err(port->dev, "wrong cpu at the end of Tx processing\n");
2866
2867 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2868 if (!tx_done)
2869 return;
2870 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2871
2872 txq_pcpu->count -= tx_done;
2873
2874 if (netif_tx_queue_stopped(nq))
2875 if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2876 netif_tx_wake_queue(nq);
2877 }
2878
2879 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2880 unsigned int thread)
2881 {
2882 struct mvpp2_tx_queue *txq;
2883 struct mvpp2_txq_pcpu *txq_pcpu;
2884 unsigned int tx_todo = 0;
2885
2886 while (cause) {
2887 txq = mvpp2_get_tx_queue(port, cause);
2888 if (!txq)
2889 break;
2890
2891 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2892
2893 if (txq_pcpu->count) {
2894 mvpp2_txq_done(port, txq, txq_pcpu);
2895 tx_todo += txq_pcpu->count;
2896 }
2897
2898 cause &= ~(1 << txq->log_id);
2899 }
2900 return tx_todo;
2901 }
2902
2903 /* Rx/Tx queue initialization/cleanup methods */
2904
2905 /* Allocate and initialize descriptors for aggr TXQ */
2906 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2907 struct mvpp2_tx_queue *aggr_txq,
2908 unsigned int thread, struct mvpp2 *priv)
2909 {
2910 u32 txq_dma;
2911
2912 /* Allocate memory for TX descriptors */
2913 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2914 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2915 &aggr_txq->descs_dma, GFP_KERNEL);
2916 if (!aggr_txq->descs)
2917 return -ENOMEM;
2918
2919 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2920
2921 /* Workaround for the aggregated TXQ not being reset: resync the index */
2922 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2923 MVPP2_AGGR_TXQ_INDEX_REG(thread));
2924
2925 /* Set Tx descriptors queue starting address - indirect
2926 * access
2927 */
2928 if (priv->hw_version == MVPP21)
2929 txq_dma = aggr_txq->descs_dma;
2930 else
2931 txq_dma = aggr_txq->descs_dma >>
2932 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2933
2934 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2935 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2936 MVPP2_AGGR_TXQ_SIZE);
2937
2938 return 0;
2939 }
2940
2941 /* Create a specified Rx queue */
2942 static int mvpp2_rxq_init(struct mvpp2_port *port,
2943 struct mvpp2_rx_queue *rxq)
2944 {
2945 struct mvpp2 *priv = port->priv;
2946 unsigned int thread;
2947 u32 rxq_dma;
2948 int err;
2949
2950 rxq->size = port->rx_ring_size;
2951
2952 /* Allocate memory for RX descriptors */
2953 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2954 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2955 &rxq->descs_dma, GFP_KERNEL);
2956 if (!rxq->descs)
2957 return -ENOMEM;
2958
2959 rxq->last_desc = rxq->size - 1;
2960
2961 /* Zero occupied and non-occupied counters - direct access */
2962 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2963
2964 /* Set Rx descriptors queue starting address - indirect access */
2965 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2966 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2967 if (port->priv->hw_version == MVPP21)
2968 rxq_dma = rxq->descs_dma;
2969 else
2970 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2971 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2972 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2973 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2974 put_cpu();
2975
2976 /* Set Offset */
2977 mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
2978
2979 /* Set coalescing pkts and time */
2980 mvpp2_rx_pkts_coal_set(port, rxq);
2981 mvpp2_rx_time_coal_set(port, rxq);
2982
2983 /* Set the number of non-occupied descriptors threshold */
2984 mvpp2_set_rxq_free_tresh(port, rxq);
2985
2986 /* Add number of descriptors ready for receiving packets */
2987 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2988
2989 if (priv->percpu_pools) {
2990 err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
2991 if (err < 0)
2992 goto err_free_dma;
2993
2994 err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
2995 if (err < 0)
2996 goto err_unregister_rxq_short;
2997
2998 /* Every RXQ has a pool for short and another for long packets */
2999 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
3000 MEM_TYPE_PAGE_POOL,
3001 priv->page_pool[rxq->logic_rxq]);
3002 if (err < 0)
3003 goto err_unregister_rxq_long;
3004
3005 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
3006 MEM_TYPE_PAGE_POOL,
3007 priv->page_pool[rxq->logic_rxq +
3008 port->nrxqs]);
3009 if (err < 0)
3010 goto err_unregister_mem_rxq_short;
3011 }
3012
3013 return 0;
3014
3015 err_unregister_mem_rxq_short:
3016 xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
3017 err_unregister_rxq_long:
3018 xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
3019 err_unregister_rxq_short:
3020 xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
3021 err_free_dma:
3022 dma_free_coherent(port->dev->dev.parent,
3023 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3024 rxq->descs, rxq->descs_dma);
3025 return err;
3026 }
3027
3028 /* Push packets received by the RXQ back to the BM pool */
3029 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3030 struct mvpp2_rx_queue *rxq)
3031 {
3032 int rx_received, i;
3033
3034 rx_received = mvpp2_rxq_received(port, rxq->id);
3035 if (!rx_received)
3036 return;
3037
3038 for (i = 0; i < rx_received; i++) {
3039 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3040 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3041 int pool;
3042
3043 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3044 MVPP2_RXD_BM_POOL_ID_OFFS;
3045
3046 mvpp2_bm_pool_put(port, pool,
3047 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3048 mvpp2_rxdesc_cookie_get(port, rx_desc));
3049 }
3050 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3051 }
3052
3053 /* Cleanup Rx queue */
3054 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3055 struct mvpp2_rx_queue *rxq)
3056 {
3057 unsigned int thread;
3058
3059 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
3060 xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
3061
3062 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
3063 xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
3064
3065 mvpp2_rxq_drop_pkts(port, rxq);
3066
3067 if (rxq->descs)
3068 dma_free_coherent(port->dev->dev.parent,
3069 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3070 rxq->descs,
3071 rxq->descs_dma);
3072
3073 rxq->descs = NULL;
3074 rxq->last_desc = 0;
3075 rxq->next_desc_to_proc = 0;
3076 rxq->descs_dma = 0;
3077
3078 /* Clear the Rx descriptor queue starting address and size,
3079 * and the free descriptor count
3080 */
3081 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3082 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3083 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
3084 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
3085 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
3086 put_cpu();
3087 }
3088
3089 /* Create and initialize a Tx queue */
3090 static int mvpp2_txq_init(struct mvpp2_port *port,
3091 struct mvpp2_tx_queue *txq)
3092 {
3093 u32 val;
3094 unsigned int thread;
3095 int desc, desc_per_txq, tx_port_num;
3096 struct mvpp2_txq_pcpu *txq_pcpu;
3097
3098 txq->size = port->tx_ring_size;
3099
3100 /* Allocate memory for Tx descriptors */
3101 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
3102 txq->size * MVPP2_DESC_ALIGNED_SIZE,
3103 &txq->descs_dma, GFP_KERNEL);
3104 if (!txq->descs)
3105 return -ENOMEM;
3106
3107 txq->last_desc = txq->size - 1;
3108
3109 /* Set Tx descriptors queue starting address - indirect access */
3110 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3111 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3112 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
3113 txq->descs_dma);
3114 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
3115 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
3116 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
3117 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
3118 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3119 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
3120 val &= ~MVPP2_TXQ_PENDING_MASK;
3121 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
3122
3123 /* Calculate the base address in the prefetch buffer. We reserve 16
3124 * descriptors for each existing TXQ.
3125 * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT;
3126 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
3127 */
3128 desc_per_txq = 16;
3129 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3130 (txq->log_id * desc_per_txq);
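/* For example, assuming MVPP2_MAX_TXQ is 8: port 1, logical txq 2 gets
 * base (1 * 8 * 16) + (2 * 16) = 160 in the prefetch buffer.
 */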
3131
3132 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
3133 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3134 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
3135 put_cpu();
3136
3137 /* WRR / EJP configuration - indirect access */
3138 tx_port_num = mvpp2_egress_port(port);
3139 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3140
3141 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3142 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3143 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3144 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3145 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3146
3147 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3148 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3149 val);
3150
3151 for (thread = 0; thread < port->priv->nthreads; thread++) {
3152 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3153 txq_pcpu->size = txq->size;
3154 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
3155 sizeof(*txq_pcpu->buffs),
3156 GFP_KERNEL);
3157 if (!txq_pcpu->buffs)
3158 return -ENOMEM;
3159
3160 txq_pcpu->count = 0;
3161 txq_pcpu->reserved_num = 0;
3162 txq_pcpu->txq_put_index = 0;
3163 txq_pcpu->txq_get_index = 0;
3164 txq_pcpu->tso_headers = NULL;
3165
3166 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
3167 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
3168
3169 txq_pcpu->tso_headers =
3170 dma_alloc_coherent(port->dev->dev.parent,
3171 txq_pcpu->size * TSO_HEADER_SIZE,
3172 &txq_pcpu->tso_headers_dma,
3173 GFP_KERNEL);
3174 if (!txq_pcpu->tso_headers)
3175 return -ENOMEM;
3176 }
3177
3178 return 0;
3179 }
3180
3181 /* Free allocated TXQ resources */
3182 static void mvpp2_txq_deinit(struct mvpp2_port *port,
3183 struct mvpp2_tx_queue *txq)
3184 {
3185 struct mvpp2_txq_pcpu *txq_pcpu;
3186 unsigned int thread;
3187
3188 for (thread = 0; thread < port->priv->nthreads; thread++) {
3189 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3190 kfree(txq_pcpu->buffs);
3191
3192 if (txq_pcpu->tso_headers)
3193 dma_free_coherent(port->dev->dev.parent,
3194 txq_pcpu->size * TSO_HEADER_SIZE,
3195 txq_pcpu->tso_headers,
3196 txq_pcpu->tso_headers_dma);
3197
3198 txq_pcpu->tso_headers = NULL;
3199 }
3200
3201 if (txq->descs)
3202 dma_free_coherent(port->dev->dev.parent,
3203 txq->size * MVPP2_DESC_ALIGNED_SIZE,
3204 txq->descs, txq->descs_dma);
3205
3206 txq->descs = NULL;
3207 txq->last_desc = 0;
3208 txq->next_desc_to_proc = 0;
3209 txq->descs_dma = 0;
3210
3211 /* Set minimum bandwidth for disabled TXQs */
3212 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
3213
3214 /* Set Tx descriptors queue starting address and size */
3215 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3216 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3217 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
3218 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
3219 put_cpu();
3220 }
3221
3222 /* Clean up a Tx queue */
3223 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3224 {
3225 struct mvpp2_txq_pcpu *txq_pcpu;
3226 int delay, pending;
3227 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3228 u32 val;
3229
3230 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3231 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
3232 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3233 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3234
3235 /* The napi queue has been stopped, so wait for all packets
3236 * to be transmitted.
3237 */
3238 delay = 0;
3239 do {
3240 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3241 netdev_warn(port->dev,
3242 "port %d: cleaning queue %d timed out\n",
3243 port->id, txq->log_id);
3244 break;
3245 }
3246 mdelay(1);
3247 delay++;
3248
3249 pending = mvpp2_thread_read(port->priv, thread,
3250 MVPP2_TXQ_PENDING_REG);
3251 pending &= MVPP2_TXQ_PENDING_MASK;
3252 } while (pending);
3253
3254 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3255 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3256 put_cpu();
3257
3258 for (thread = 0; thread < port->priv->nthreads; thread++) {
3259 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3260
3261 /* Release all packets */
3262 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3263
3264 /* Reset queue */
3265 txq_pcpu->count = 0;
3266 txq_pcpu->txq_put_index = 0;
3267 txq_pcpu->txq_get_index = 0;
3268 }
3269 }
3270
3271 /* Cleanup all Tx queues */
3272 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3273 {
3274 struct mvpp2_tx_queue *txq;
3275 int queue;
3276 u32 val;
3277
3278 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3279
3280 /* Reset Tx ports and delete Tx queues */
3281 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3282 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3283
3284 for (queue = 0; queue < port->ntxqs; queue++) {
3285 txq = port->txqs[queue];
3286 mvpp2_txq_clean(port, txq);
3287 mvpp2_txq_deinit(port, txq);
3288 }
3289
3290 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3291
3292 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3293 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3294 }
3295
3296 /* Cleanup all Rx queues */
3297 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3298 {
3299 int queue;
3300
3301 for (queue = 0; queue < port->nrxqs; queue++)
3302 mvpp2_rxq_deinit(port, port->rxqs[queue]);
3303
3304 if (port->tx_fc)
3305 mvpp2_rxq_disable_fc(port);
3306 }
3307
3308 /* Init all Rx queues for port */
3309 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3310 {
3311 int queue, err;
3312
3313 for (queue = 0; queue < port->nrxqs; queue++) {
3314 err = mvpp2_rxq_init(port, port->rxqs[queue]);
3315 if (err)
3316 goto err_cleanup;
3317 }
3318
3319 if (port->tx_fc)
3320 mvpp2_rxq_enable_fc(port);
3321
3322 return 0;
3323
3324 err_cleanup:
3325 mvpp2_cleanup_rxqs(port);
3326 return err;
3327 }
3328
3329 /* Init all Tx queues for port */
3330 static int mvpp2_setup_txqs(struct mvpp2_port *port)
3331 {
3332 struct mvpp2_tx_queue *txq;
3333 int queue, err;
3334
3335 for (queue = 0; queue < port->ntxqs; queue++) {
3336 txq = port->txqs[queue];
3337 err = mvpp2_txq_init(port, txq);
3338 if (err)
3339 goto err_cleanup;
3340
3341 /* Assign this queue to a CPU */
3342 if (queue < num_possible_cpus())
3343 netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
3344 }
3345
3346 if (port->has_tx_irqs) {
3347 mvpp2_tx_time_coal_set(port);
3348 for (queue = 0; queue < port->ntxqs; queue++) {
3349 txq = port->txqs[queue];
3350 mvpp2_tx_pkts_coal_set(port, txq);
3351 }
3352 }
3353
3354 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3355 return 0;
3356
3357 err_cleanup:
3358 mvpp2_cleanup_txqs(port);
3359 return err;
3360 }
3361
3362 /* The callback for per-port interrupt */
3363 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
3364 {
3365 struct mvpp2_queue_vector *qv = dev_id;
3366
3367 mvpp2_qvec_interrupt_disable(qv);
3368
3369 napi_schedule(&qv->napi);
3370
3371 return IRQ_HANDLED;
3372 }
3373
3374 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
3375 {
3376 struct skb_shared_hwtstamps shhwtstamps;
3377 struct mvpp2_hwtstamp_queue *queue;
3378 struct sk_buff *skb;
3379 void __iomem *ptp_q;
3380 unsigned int id;
3381 u32 r0, r1, r2;
3382
3383 ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3384 if (nq)
3385 ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
3386
3387 queue = &port->tx_hwtstamp_queue[nq];
3388
3389 while (1) {
3390 r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
3391 if (!r0)
3392 break;
3393
3394 r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
3395 r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
3396
3397 id = (r0 >> 1) & 31;
3398
3399 skb = queue->skb[id];
3400 queue->skb[id] = NULL;
3401 if (skb) {
3402 u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
3403
3404 mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
3405 skb_tstamp_tx(skb, &shhwtstamps);
3406 dev_kfree_skb_any(skb);
3407 }
3408 }
3409 }
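
/* [Editor's example] The loop above stitches one 32-bit timestamp out
 * of three 16-bit PTP registers: r0 contributes its top 3 bits, r1 all
 * 16, and r2 the bits from 19 upward. A self-contained sketch of that
 * reassembly, with made-up register values:
 */
#if 0 /* illustrative sketch, excluded from the build */
#include <stdio.h>
#include <stdint.h>

static uint32_t rebuild_ts(uint16_t r0, uint16_t r1, uint16_t r2)
{
	/* same shifts as the ISR: r2 << 19 | r1 << 3 | r0 >> 13 */
	return (uint32_t)r2 << 19 | (uint32_t)r1 << 3 | r0 >> 13;
}

int main(void)
{
	uint16_t r0 = 0xa000, r1 = 0x1234, r2 = 0x0005; /* hypothetical */

	printf("ts = 0x%08x\n", rebuild_ts(r0, r1, r2));
	return 0;
}
#endif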
3410
3411 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
3412 {
3413 void __iomem *ptp;
3414 u32 val;
3415
3416 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3417 val = readl(ptp + MVPP22_PTP_INT_CAUSE);
3418 if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
3419 mvpp2_isr_handle_ptp_queue(port, 0);
3420 if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
3421 mvpp2_isr_handle_ptp_queue(port, 1);
3422 }
3423
3424 static void mvpp2_isr_handle_link(struct mvpp2_port *port,
3425 struct phylink_pcs *pcs, bool link)
3426 {
3427 struct net_device *dev = port->dev;
3428
3429 if (port->phylink) {
3430 phylink_pcs_change(pcs, link);
3431 return;
3432 }
3433
3434 if (!netif_running(dev))
3435 return;
3436
3437 if (link) {
3438 mvpp2_interrupts_enable(port);
3439
3440 mvpp2_egress_enable(port);
3441 mvpp2_ingress_enable(port);
3442 netif_carrier_on(dev);
3443 netif_tx_wake_all_queues(dev);
3444 } else {
3445 netif_tx_stop_all_queues(dev);
3446 netif_carrier_off(dev);
3447 mvpp2_ingress_disable(port);
3448 mvpp2_egress_disable(port);
3449
3450 mvpp2_interrupts_disable(port);
3451 }
3452 }
3453
3454 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
3455 {
3456 bool link;
3457 u32 val;
3458
3459 val = readl(port->base + MVPP22_XLG_INT_STAT);
3460 if (val & MVPP22_XLG_INT_STAT_LINK) {
3461 val = readl(port->base + MVPP22_XLG_STATUS);
3462 link = (val & MVPP22_XLG_STATUS_LINK_UP);
3463 mvpp2_isr_handle_link(port, &port->pcs_xlg, link);
3464 }
3465 }
3466
3467 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
3468 {
3469 bool link;
3470 u32 val;
3471
3472 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
3473 phy_interface_mode_is_8023z(port->phy_interface) ||
3474 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
3475 val = readl(port->base + MVPP22_GMAC_INT_STAT);
3476 if (val & MVPP22_GMAC_INT_STAT_LINK) {
3477 val = readl(port->base + MVPP2_GMAC_STATUS0);
3478 link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
3479 mvpp2_isr_handle_link(port, &port->pcs_gmac, link);
3480 }
3481 }
3482 }
3483
3484 /* Per-port interrupt for link status changes */
3485 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
3486 {
3487 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
3488 u32 val;
3489
3490 mvpp22_gop_mask_irq(port);
3491
3492 if (mvpp2_port_supports_xlg(port) &&
3493 mvpp2_is_xlg(port->phy_interface)) {
3494 /* Check the external status register */
3495 val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
3496 if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
3497 mvpp2_isr_handle_xlg(port);
3498 if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
3499 mvpp2_isr_handle_ptp(port);
3500 } else {
3501 /* If it's not the XLG, we must be using the GMAC.
3502 * Check the summary status.
3503 */
3504 val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
3505 if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
3506 mvpp2_isr_handle_gmac_internal(port);
3507 if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
3508 mvpp2_isr_handle_ptp(port);
3509 }
3510
3511 mvpp22_gop_unmask_irq(port);
3512 return IRQ_HANDLED;
3513 }
3514
3515 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
3516 {
3517 struct net_device *dev;
3518 struct mvpp2_port *port;
3519 struct mvpp2_port_pcpu *port_pcpu;
3520 unsigned int tx_todo, cause;
3521
3522 port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
3523 dev = port_pcpu->dev;
3524
3525 if (!netif_running(dev))
3526 return HRTIMER_NORESTART;
3527
3528 port_pcpu->timer_scheduled = false;
3529 port = netdev_priv(dev);
3530
3531 /* Process all the Tx queues */
3532 cause = (1 << port->ntxqs) - 1;
3533 tx_todo = mvpp2_tx_done(port, cause,
3534 mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
3535
3536 /* Set the timer in case not all the packets were processed */
3537 if (tx_todo && !port_pcpu->timer_scheduled) {
3538 port_pcpu->timer_scheduled = true;
3539 hrtimer_forward_now(&port_pcpu->tx_done_timer,
3540 MVPP2_TXDONE_HRTIMER_PERIOD_NS);
3541
3542 return HRTIMER_RESTART;
3543 }
3544 return HRTIMER_NORESTART;
3545 }
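
/* [Editor's example] The callback either lets the timer die
 * (HRTIMER_NORESTART) or re-arms it while tx-done work remains. A toy
 * model of that decision, with the hrtimer machinery and per-cpu state
 * replaced by plain variables (all names here are invented):
 */
#if 0 /* illustrative sketch, excluded from the build */
#include <stdbool.h>
#include <stdio.h>

static unsigned int tx_todo = 3;	/* fake backlog of tx-done work */

/* Stand-in for mvpp2_tx_done(): handle up to 'budget' and report
 * what is left. */
static unsigned int process_tx_done(unsigned int budget)
{
	unsigned int done = tx_todo < budget ? tx_todo : budget;

	tx_todo -= done;
	return tx_todo;
}

/* Same decision as mvpp2_hr_timer_cb(): re-arm only while work
 * remains; true stands for HRTIMER_RESTART. */
static bool timer_cb(void)
{
	return process_tx_done(1) != 0;
}

int main(void)
{
	unsigned int fires = 1;

	while (timer_cb())
		fires++;
	printf("timer fired %u times before going idle\n", fires);
	return 0;
}
#endif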
3546
3547 /* Main RX/TX processing routines */
3548
3549 /* Display more error info */
3550 static void mvpp2_rx_error(struct mvpp2_port *port,
3551 struct mvpp2_rx_desc *rx_desc)
3552 {
3553 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3554 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3555 char *err_str = NULL;
3556
3557 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3558 case MVPP2_RXD_ERR_CRC:
3559 err_str = "crc";
3560 break;
3561 case MVPP2_RXD_ERR_OVERRUN:
3562 err_str = "overrun";
3563 break;
3564 case MVPP2_RXD_ERR_RESOURCE:
3565 err_str = "resource";
3566 break;
3567 }
3568 if (err_str && net_ratelimit())
3569 netdev_err(port->dev,
3570 "bad rx status %08x (%s error), size=%zu\n",
3571 status, err_str, sz);
3572 }
3573
3574 /* Handle RX checksum offload */
3575 static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status)
3576 {
3577 if (((status & MVPP2_RXD_L3_IP4) &&
3578 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
3579 (status & MVPP2_RXD_L3_IP6))
3580 if (((status & MVPP2_RXD_L4_UDP) ||
3581 (status & MVPP2_RXD_L4_TCP)) &&
3582 (status & MVPP2_RXD_L4_CSUM_OK))
3583 return CHECKSUM_UNNECESSARY;
3584
3585 return CHECKSUM_NONE;
3586 }
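
/* [Editor's example] The nested conditions above reduce to: a good L3
 * (IPv4 without header errors, or IPv6) plus a verified TCP/UDP
 * checksum lets the stack skip software checksumming. Sketch of the
 * same decision tree; the bit positions below are placeholders, the
 * real masks are the MVPP2_RXD_* definitions in mvpp2.h:
 */
#if 0 /* illustrative sketch, excluded from the build */
#include <stdio.h>
#include <stdint.h>

#define RXD_L3_IP4        (1u << 0)	/* placeholder bits, not the HW encoding */
#define RXD_IP4_HDR_ERR   (1u << 1)
#define RXD_L3_IP6        (1u << 2)
#define RXD_L4_TCP        (1u << 3)
#define RXD_L4_UDP        (1u << 4)
#define RXD_L4_CSUM_OK    (1u << 5)

static const char *csum_verdict(uint32_t status)
{
	int l3_ok = ((status & RXD_L3_IP4) && !(status & RXD_IP4_HDR_ERR)) ||
		    (status & RXD_L3_IP6);
	int l4_ok = (status & (RXD_L4_TCP | RXD_L4_UDP)) &&
		    (status & RXD_L4_CSUM_OK);

	return (l3_ok && l4_ok) ? "CHECKSUM_UNNECESSARY" : "CHECKSUM_NONE";
}

int main(void)
{
	printf("%s\n", csum_verdict(RXD_L3_IP4 | RXD_L4_TCP | RXD_L4_CSUM_OK));
	printf("%s\n", csum_verdict(RXD_L3_IP4 | RXD_IP4_HDR_ERR |
				    RXD_L4_TCP | RXD_L4_CSUM_OK));
	return 0;
}
#endif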
3587
3588 /* Allocate a new buffer and add it to the BM pool */
3589 static int mvpp2_rx_refill(struct mvpp2_port *port,
3590 struct mvpp2_bm_pool *bm_pool,
3591 struct page_pool *page_pool, int pool)
3592 {
3593 dma_addr_t dma_addr;
3594 phys_addr_t phys_addr;
3595 void *buf;
3596
3597 buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
3598 &dma_addr, &phys_addr, GFP_ATOMIC);
3599 if (!buf)
3600 return -ENOMEM;
3601
3602 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3603
3604 return 0;
3605 }
3606
3607 /* Handle Tx checksum offload */
3608 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
3609 {
3610 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3611 int ip_hdr_len = 0;
3612 u8 l4_proto;
3613 __be16 l3_proto = vlan_get_protocol(skb);
3614
3615 if (l3_proto == htons(ETH_P_IP)) {
3616 struct iphdr *ip4h = ip_hdr(skb);
3617
3618 /* Calculate IPv4 checksum and L4 checksum */
3619 ip_hdr_len = ip4h->ihl;
3620 l4_proto = ip4h->protocol;
3621 } else if (l3_proto == htons(ETH_P_IPV6)) {
3622 struct ipv6hdr *ip6h = ipv6_hdr(skb);
3623
3624 /* Read l4_protocol from one of IPv6 extra headers */
3625 if (skb_network_header_len(skb) > 0)
3626 ip_hdr_len = (skb_network_header_len(skb) >> 2);
3627 l4_proto = ip6h->nexthdr;
3628 } else {
3629 return MVPP2_TXD_L4_CSUM_NOT;
3630 }
3631
3632 return mvpp2_txq_desc_csum(skb_network_offset(skb),
3633 l3_proto, ip_hdr_len, l4_proto);
3634 }
3635
3636 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
3637 }
3638
3639 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
3640 {
3641 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3642 struct mvpp2_tx_queue *aggr_txq;
3643 struct mvpp2_txq_pcpu *txq_pcpu;
3644 struct mvpp2_tx_queue *txq;
3645 struct netdev_queue *nq;
3646
3647 txq = port->txqs[txq_id];
3648 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3649 nq = netdev_get_tx_queue(port->dev, txq_id);
3650 aggr_txq = &port->priv->aggr_txqs[thread];
3651
3652 txq_pcpu->reserved_num -= nxmit;
3653 txq_pcpu->count += nxmit;
3654 aggr_txq->count += nxmit;
3655
3656 /* Enable transmit */
3657 wmb();
3658 mvpp2_aggr_txq_pend_desc_add(port, nxmit);
3659
3660 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3661 netif_tx_stop_queue(nq);
3662
3663 /* Finalize TX processing */
3664 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3665 mvpp2_txq_done(port, txq, txq_pcpu);
3666 }
3667
3668 static int
3669 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
3670 struct xdp_frame *xdpf, bool dma_map)
3671 {
3672 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3673 u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
3674 MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3675 enum mvpp2_tx_buf_type buf_type;
3676 struct mvpp2_txq_pcpu *txq_pcpu;
3677 struct mvpp2_tx_queue *aggr_txq;
3678 struct mvpp2_tx_desc *tx_desc;
3679 struct mvpp2_tx_queue *txq;
3680 int ret = MVPP2_XDP_TX;
3681 dma_addr_t dma_addr;
3682
3683 txq = port->txqs[txq_id];
3684 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3685 aggr_txq = &port->priv->aggr_txqs[thread];
3686
3687 /* Check number of available descriptors */
3688 if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
3689 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
3690 ret = MVPP2_XDP_DROPPED;
3691 goto out;
3692 }
3693
3694 /* Get a descriptor for the first part of the packet */
3695 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3696 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3697 mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
3698
3699 if (dma_map) {
3700 /* XDP_REDIRECT or AF_XDP */
3701 dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
3702 xdpf->len, DMA_TO_DEVICE);
3703
3704 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3705 mvpp2_txq_desc_put(txq);
3706 ret = MVPP2_XDP_DROPPED;
3707 goto out;
3708 }
3709
3710 buf_type = MVPP2_TYPE_XDP_NDO;
3711 } else {
3712 /* XDP_TX */
3713 struct page *page = virt_to_page(xdpf->data);
3714
3715 dma_addr = page_pool_get_dma_addr(page) +
3716 sizeof(*xdpf) + xdpf->headroom;
3717 dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
3718 xdpf->len, DMA_BIDIRECTIONAL);
3719
3720 buf_type = MVPP2_TYPE_XDP_TX;
3721 }
3722
3723 mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
3724
3725 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3726 mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
3727
3728 out:
3729 return ret;
3730 }
3731
3732 static int
3733 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
3734 {
3735 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3736 struct xdp_frame *xdpf;
3737 u16 txq_id;
3738 int ret;
3739
3740 xdpf = xdp_convert_buff_to_frame(xdp);
3741 if (unlikely(!xdpf))
3742 return MVPP2_XDP_DROPPED;
3743
3744 /* The first half of the TX queues is used for XPS,
3745 * the second half for XDP_TX.
3746 */
3747 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3748
3749 ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
3750 if (ret == MVPP2_XDP_TX) {
3751 u64_stats_update_begin(&stats->syncp);
3752 stats->tx_bytes += xdpf->len;
3753 stats->tx_packets++;
3754 stats->xdp_tx++;
3755 u64_stats_update_end(&stats->syncp);
3756
3757 mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
3758 } else {
3759 u64_stats_update_begin(&stats->syncp);
3760 stats->xdp_tx_err++;
3761 u64_stats_update_end(&stats->syncp);
3762 }
3763
3764 return ret;
3765 }
3766
3767 static int
3768 mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
3769 struct xdp_frame **frames, u32 flags)
3770 {
3771 struct mvpp2_port *port = netdev_priv(dev);
3772 int i, nxmit_byte = 0, nxmit = 0;
3773 struct mvpp2_pcpu_stats *stats;
3774 u16 txq_id;
3775 u32 ret;
3776
3777 if (unlikely(test_bit(0, &port->state)))
3778 return -ENETDOWN;
3779
3780 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3781 return -EINVAL;
3782
3783 /* The first half of the TX queues is used for XPS,
3784 * the second half for XDP_TX.
3785 */
3786 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3787
3788 for (i = 0; i < num_frame; i++) {
3789 ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
3790 if (ret != MVPP2_XDP_TX)
3791 break;
3792
3793 nxmit_byte += frames[i]->len;
3794 nxmit++;
3795 }
3796
3797 if (likely(nxmit > 0))
3798 mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
3799
3800 stats = this_cpu_ptr(port->stats);
3801 u64_stats_update_begin(&stats->syncp);
3802 stats->tx_bytes += nxmit_byte;
3803 stats->tx_packets += nxmit;
3804 stats->xdp_xmit += nxmit;
3805 stats->xdp_xmit_err += num_frame - nxmit;
3806 u64_stats_update_end(&stats->syncp);
3807
3808 return nxmit;
3809 }
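
/* [Editor's example] ndo_xdp_xmit() semantics as implemented above:
 * stop at the first frame the hardware refuses, report how many made
 * it out, and account the remainder as errors. Sketch with a fake
 * submit function that accepts only two frames (all names here are
 * hypothetical):
 */
#if 0 /* illustrative sketch, excluded from the build */
#include <stdio.h>

#define SUBMIT_OK 0

static int capacity = 2;	/* fake ring space */

static int submit_frame(int frame)
{
	(void)frame;
	return capacity-- > 0 ? SUBMIT_OK : -1;
}

int main(void)
{
	int lens[] = { 64, 128, 256 };
	int num = 3, nxmit = 0, nxmit_byte = 0, i;

	/* Everything before the first failure counts as sent,
	 * everything after it as an error, like mvpp2_xdp_xmit(). */
	for (i = 0; i < num; i++) {
		if (submit_frame(i) != SUBMIT_OK)
			break;
		nxmit_byte += lens[i];
		nxmit++;
	}

	printf("sent %d/%d frames (%d bytes), %d errors\n",
	       nxmit, num, nxmit_byte, num - nxmit);
	return 0;
}
#endif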
3810
3811 static int
3812 mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
3813 struct xdp_buff *xdp, struct page_pool *pp,
3814 struct mvpp2_pcpu_stats *stats)
3815 {
3816 unsigned int len, sync, err;
3817 struct page *page;
3818 u32 ret, act;
3819
3820 len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3821 act = bpf_prog_run_xdp(prog, xdp);
3822
3823 /* Due to xdp_adjust_tail(): the for_device DMA sync must cover the max length the CPU may have touched */
3824 sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3825 sync = max(sync, len);
3826
3827 switch (act) {
3828 case XDP_PASS:
3829 stats->xdp_pass++;
3830 ret = MVPP2_XDP_PASS;
3831 break;
3832 case XDP_REDIRECT:
3833 err = xdp_do_redirect(port->dev, xdp, prog);
3834 if (unlikely(err)) {
3835 ret = MVPP2_XDP_DROPPED;
3836 page = virt_to_head_page(xdp->data);
3837 page_pool_put_page(pp, page, sync, true);
3838 } else {
3839 ret = MVPP2_XDP_REDIR;
3840 stats->xdp_redirect++;
3841 }
3842 break;
3843 case XDP_TX:
3844 ret = mvpp2_xdp_xmit_back(port, xdp);
3845 if (ret != MVPP2_XDP_TX) {
3846 page = virt_to_head_page(xdp->data);
3847 page_pool_put_page(pp, page, sync, true);
3848 }
3849 break;
3850 default:
3851 bpf_warn_invalid_xdp_action(port->dev, prog, act);
3852 fallthrough;
3853 case XDP_ABORTED:
3854 trace_xdp_exception(port->dev, prog, act);
3855 fallthrough;
3856 case XDP_DROP:
3857 page = virt_to_head_page(xdp->data);
3858 page_pool_put_page(pp, page, sync, true);
3859 ret = MVPP2_XDP_DROPPED;
3860 stats->xdp_drop++;
3861 break;
3862 }
3863
3864 return ret;
3865 }
3866
3867 static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
3868 int pool, u32 rx_status)
3869 {
3870 phys_addr_t phys_addr, phys_addr_next;
3871 dma_addr_t dma_addr, dma_addr_next;
3872 struct mvpp2_buff_hdr *buff_hdr;
3873
3874 phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3875 dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3876
3877 do {
3878 buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
3879
3880 phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
3881 dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
3882
3883 if (port->priv->hw_version >= MVPP22) {
3884 phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
3885 dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
3886 }
3887
3888 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3889
3890 phys_addr = phys_addr_next;
3891 dma_addr = dma_addr_next;
3892
3893 } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
3894 }
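
/* [Editor's example] mvpp2_buff_hdr_pool_put() walks a hardware-built
 * chain of buffer headers, recombining each 32-bit low word with an
 * 8-bit high part into a full address, until the IS_LAST flag. Sketch
 * with plain pointers in place of phys_to_virt() and an invented
 * struct layout:
 */
#if 0 /* illustrative sketch, excluded from the build */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct buff_hdr {
	uint32_t next_phys_lo;
	uint8_t  next_phys_hi;
	int      is_last;
	struct buff_hdr *next;	/* sketch only: pointer, not phys_to_virt() */
};

static uint64_t full_addr(uint32_t lo, uint8_t hi)
{
	/* same reassembly as the hw_version >= MVPP22 branch above */
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	struct buff_hdr c = { 0, 0, 1, NULL };
	struct buff_hdr b = { 0xdeadbe00u, 0x01, 0, &c };
	struct buff_hdr a = { 0xcafe0000u, 0x02, 0, &b };
	struct buff_hdr *h;

	/* Return every buffer in the chain, stopping at IS_LAST. */
	for (h = &a; ; h = h->next) {
		printf("return buffer, next at 0x%llx\n",
		       (unsigned long long)full_addr(h->next_phys_lo,
						     h->next_phys_hi));
		if (h->is_last)
			break;
	}
	return 0;
}
#endif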
3895
3896 /* Main rx processing */
3897 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
3898 int rx_todo, struct mvpp2_rx_queue *rxq)
3899 {
3900 struct net_device *dev = port->dev;
3901 struct mvpp2_pcpu_stats ps = {};
3902 enum dma_data_direction dma_dir;
3903 struct bpf_prog *xdp_prog;
3904 struct xdp_buff xdp;
3905 int rx_received;
3906 int rx_done = 0;
3907 u32 xdp_ret = 0;
3908
3909 xdp_prog = READ_ONCE(port->xdp_prog);
3910
3911 /* Get the number of received packets and clamp the budget */
3912 rx_received = mvpp2_rxq_received(port, rxq->id);
3913 if (rx_todo > rx_received)
3914 rx_todo = rx_received;
3915
3916 while (rx_done < rx_todo) {
3917 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3918 u32 rx_status, timestamp, metasize = 0;
3919 struct mvpp2_bm_pool *bm_pool;
3920 struct page_pool *pp = NULL;
3921 struct sk_buff *skb;
3922 unsigned int frag_size;
3923 dma_addr_t dma_addr;
3924 phys_addr_t phys_addr;
3925 int pool, rx_bytes, err, ret;
3926 struct page *page;
3927 void *data;
3928
3929 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3930 data = (void *)phys_to_virt(phys_addr);
3931 page = virt_to_page(data);
3932 prefetch(page);
3933
3934 rx_done++;
3935 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3936 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3937 rx_bytes -= MVPP2_MH_SIZE;
3938 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3939
3940 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3941 MVPP2_RXD_BM_POOL_ID_OFFS;
3942 bm_pool = &port->priv->bm_pools[pool];
3943
3944 if (port->priv->percpu_pools) {
3945 pp = port->priv->page_pool[pool];
3946 dma_dir = page_pool_get_dma_dir(pp);
3947 } else {
3948 dma_dir = DMA_FROM_DEVICE;
3949 }
3950
3951 dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3952 rx_bytes + MVPP2_MH_SIZE,
3953 dma_dir);
3954
3955 /* Buffer header not supported */
3956 if (rx_status & MVPP2_RXD_BUF_HDR)
3957 goto err_drop_frame;
3958
3959 /* In case of an error, release the buffer back to the
3960 * Buffer Manager. This return process is controlled by
3961 * the hardware, and the information about the buffer is
3962 * carried in the RX descriptor.
3963 */
3964 if (rx_status & MVPP2_RXD_ERR_SUMMARY)
3965 goto err_drop_frame;
3966
3967 /* Prefetch header */
3968 prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
3969
3970 if (bm_pool->frag_size > PAGE_SIZE)
3971 frag_size = 0;
3972 else
3973 frag_size = bm_pool->frag_size;
3974
3975 if (xdp_prog) {
3976 struct xdp_rxq_info *xdp_rxq;
3977
3978 if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
3979 xdp_rxq = &rxq->xdp_rxq_short;
3980 else
3981 xdp_rxq = &rxq->xdp_rxq_long;
3982
3983 xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
3984 xdp_prepare_buff(&xdp, data,
3985 MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
3986 rx_bytes, true);
3987
3988 ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
3989
3990 if (ret) {
3991 xdp_ret |= ret;
3992 err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3993 if (err) {
3994 netdev_err(port->dev, "failed to refill BM pools\n");
3995 goto err_drop_frame;
3996 }
3997
3998 ps.rx_packets++;
3999 ps.rx_bytes += rx_bytes;
4000 continue;
4001 }
4002
4003 metasize = xdp.data - xdp.data_meta;
4004 }
4005
4006 if (frag_size)
4007 skb = build_skb(data, frag_size);
4008 else
4009 skb = slab_build_skb(data);
4010 if (!skb) {
4011 netdev_warn(port->dev, "skb build failed\n");
4012 goto err_drop_frame;
4013 }
4014
4015 /* If we have RX hardware timestamping enabled, grab the
4016 * timestamp from the queue and convert.
4017 */
4018 if (mvpp22_rx_hwtstamping(port)) {
4019 timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
4020 mvpp22_tai_tstamp(port->priv->tai, timestamp,
4021 skb_hwtstamps(skb));
4022 }
4023
4024 err = mvpp2_rx_refill(port, bm_pool, pp, pool);
4025 if (err) {
4026 netdev_err(port->dev, "failed to refill BM pools\n");
4027 dev_kfree_skb_any(skb);
4028 goto err_drop_frame;
4029 }
4030
4031 if (pp)
4032 skb_mark_for_recycle(skb);
4033 else
4034 dma_unmap_single_attrs(dev->dev.parent, dma_addr,
4035 bm_pool->buf_size, DMA_FROM_DEVICE,
4036 DMA_ATTR_SKIP_CPU_SYNC);
4037
4038 ps.rx_packets++;
4039 ps.rx_bytes += rx_bytes;
4040
4041 skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
4042 skb_put(skb, rx_bytes);
4043 if (metasize)
4044 skb_metadata_set(skb, metasize);
4045 skb->ip_summed = mvpp2_rx_csum(port, rx_status);
4046 skb->protocol = eth_type_trans(skb, dev);
4047
4048 napi_gro_receive(napi, skb);
4049 continue;
4050
4051 err_drop_frame:
4052 dev->stats.rx_errors++;
4053 mvpp2_rx_error(port, rx_desc);
4054 /* Return the buffer to the pool */
4055 if (rx_status & MVPP2_RXD_BUF_HDR)
4056 mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
4057 else
4058 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
4059 }
4060
4061 if (xdp_ret & MVPP2_XDP_REDIR)
4062 xdp_do_flush();
4063
4064 if (ps.rx_packets) {
4065 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
4066
4067 u64_stats_update_begin(&stats->syncp);
4068 stats->rx_packets += ps.rx_packets;
4069 stats->rx_bytes += ps.rx_bytes;
4070 /* xdp */
4071 stats->xdp_redirect += ps.xdp_redirect;
4072 stats->xdp_pass += ps.xdp_pass;
4073 stats->xdp_drop += ps.xdp_drop;
4074 u64_stats_update_end(&stats->syncp);
4075 }
4076
4077 /* Update Rx queue management counters */
4078 wmb();
4079 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
4080
4081 return rx_todo;
4082 }
4083
4084 static inline void
4085 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4086 struct mvpp2_tx_desc *desc)
4087 {
4088 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4089 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4090
4091 dma_addr_t buf_dma_addr =
4092 mvpp2_txdesc_dma_addr_get(port, desc);
4093 size_t buf_sz =
4094 mvpp2_txdesc_size_get(port, desc);
4095 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
4096 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
4097 buf_sz, DMA_TO_DEVICE);
4098 mvpp2_txq_desc_put(txq);
4099 }
4100
4101 static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
4102 struct mvpp2_tx_desc *desc)
4103 {
4104 /* We only need to clear the low bits */
4105 if (port->priv->hw_version >= MVPP22)
4106 desc->pp22.ptp_descriptor &=
4107 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4108 }
4109
4110 static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
4111 struct mvpp2_tx_desc *tx_desc,
4112 struct sk_buff *skb)
4113 {
4114 struct mvpp2_hwtstamp_queue *queue;
4115 unsigned int mtype, type, i;
4116 struct ptp_header *hdr;
4117 u64 ptpdesc;
4118
4119 if (port->priv->hw_version == MVPP21 ||
4120 port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
4121 return false;
4122
4123 type = ptp_classify_raw(skb);
4124 if (!type)
4125 return false;
4126
4127 hdr = ptp_parse_header(skb, type);
4128 if (!hdr)
4129 return false;
4130
4131 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4132
4133 ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
4134 MVPP22_PTP_ACTION_CAPTURE;
4135 queue = &port->tx_hwtstamp_queue[0];
4136
4137 switch (type & PTP_CLASS_VMASK) {
4138 case PTP_CLASS_V1:
4139 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
4140 break;
4141
4142 case PTP_CLASS_V2:
4143 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
4144 mtype = hdr->tsmt & 15;
4145 /* Direct PTP Sync messages to queue 1 */
4146 if (mtype == 0) {
4147 ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
4148 queue = &port->tx_hwtstamp_queue[1];
4149 }
4150 break;
4151 }
4152
4153 /* Take a reference on the skb and insert into our queue */
4154 i = queue->next;
4155 queue->next = (i + 1) & 31;
4156 if (queue->skb[i])
4157 dev_kfree_skb_any(queue->skb[i]);
4158 queue->skb[i] = skb_get(skb);
4159
4160 ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
4161
4162 /*
4163 * 3:0 - PTPAction
4164 * 6:4 - PTPPacketFormat
4165 * 7 - PTP_CF_WraparoundCheckEn
4166 * 9:8 - IngressTimestampSeconds[1:0]
4167 * 10 - Reserved
4168 * 11 - MACTimestampingEn
4169 * 17:12 - PTP_TimestampQueueEntryID[5:0]
4170 * 18 - PTPTimestampQueueSelect
4171 * 19 - UDPChecksumUpdateEn
4172 * 27:20 - TimestampOffset
4173 * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
4174 * NTPTs, Y.1731 - L3 to timestamp entry
4175 * 35:28 - UDP Checksum Offset
4176 *
4177 * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
4178 */
4179 tx_desc->pp22.ptp_descriptor &=
4180 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4181 tx_desc->pp22.ptp_descriptor |=
4182 cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
4183 tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
4184 tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
4185
4186 return true;
4187 }
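
/* [Editor's example] Only bits 11:0 of the 36-bit PTP descriptor fit
 * in the ptp_descriptor word; bits 35:12 are parked in bits 63:40 of
 * buf_dma_addr_ptp via the (ptpdesc >> 12) << 40 shift. Worked example
 * of that split; the mask value below is a placeholder for the real
 * MVPP22_PTP_DESC_MASK_LOW in mvpp2.h:
 */
#if 0 /* illustrative sketch, excluded from the build */
#include <stdio.h>
#include <stdint.h>

#define PTP_DESC_MASK_LOW 0xfffULL	/* bits 11:0, placeholder value */

int main(void)
{
	/* hypothetical 36-bit PTP descriptor per the layout comment */
	uint64_t ptpdesc = 0x123456789ULL;
	uint64_t desc_low, dma_high;

	/* bits 11:0 land in the ptp_descriptor word ... */
	desc_low = ptpdesc & PTP_DESC_MASK_LOW;
	/* ... and bits 35:12 in bits 63:40 of buf_dma_addr_ptp */
	dma_high = (ptpdesc >> 12) << 40;

	printf("low 12 bits  : 0x%03llx\n", (unsigned long long)desc_low);
	printf("high 24 bits : 0x%016llx\n", (unsigned long long)dma_high);
	return 0;
}
#endif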
4188
4189 /* Handle tx fragmentation processing */
4190 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
4191 struct mvpp2_tx_queue *aggr_txq,
4192 struct mvpp2_tx_queue *txq)
4193 {
4194 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4195 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4196 struct mvpp2_tx_desc *tx_desc;
4197 int i;
4198 dma_addr_t buf_dma_addr;
4199
4200 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4201 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4202 void *addr = skb_frag_address(frag);
4203
4204 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4205 mvpp2_txdesc_clear_ptp(port, tx_desc);
4206 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4207 mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
4208
4209 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
4210 skb_frag_size(frag),
4211 DMA_TO_DEVICE);
4212 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
4213 mvpp2_txq_desc_put(txq);
4214 goto cleanup;
4215 }
4216
4217 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4218
4219 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
4220 /* Last descriptor */
4221 mvpp2_txdesc_cmd_set(port, tx_desc,
4222 MVPP2_TXD_L_DESC);
4223 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4224 } else {
4225 /* Descriptor in the middle: Not First, Not Last */
4226 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
4227 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4228 }
4229 }
4230
4231 return 0;
4232 cleanup:
4233 /* Release all descriptors that were used to map fragments of
4234 * this packet, as well as the corresponding DMA mappings
4235 */
4236 for (i = i - 1; i >= 0; i--) {
4237 tx_desc = txq->descs + i;
4238 tx_desc_unmap_put(port, txq, tx_desc);
4239 }
4240
4241 return -ENOMEM;
4242 }
4243
4244 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
4245 struct net_device *dev,
4246 struct mvpp2_tx_queue *txq,
4247 struct mvpp2_tx_queue *aggr_txq,
4248 struct mvpp2_txq_pcpu *txq_pcpu,
4249 int hdr_sz)
4250 {
4251 struct mvpp2_port *port = netdev_priv(dev);
4252 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4253 dma_addr_t addr;
4254
4255 mvpp2_txdesc_clear_ptp(port, tx_desc);
4256 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4257 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
4258
4259 addr = txq_pcpu->tso_headers_dma +
4260 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
4261 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
4262
4263 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
4264 MVPP2_TXD_F_DESC |
4265 MVPP2_TXD_PADDING_DISABLE);
4266 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4267 }
4268
4269 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
4270 struct net_device *dev, struct tso_t *tso,
4271 struct mvpp2_tx_queue *txq,
4272 struct mvpp2_tx_queue *aggr_txq,
4273 struct mvpp2_txq_pcpu *txq_pcpu,
4274 int sz, bool left, bool last)
4275 {
4276 struct mvpp2_port *port = netdev_priv(dev);
4277 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4278 dma_addr_t buf_dma_addr;
4279
4280 mvpp2_txdesc_clear_ptp(port, tx_desc);
4281 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4282 mvpp2_txdesc_size_set(port, tx_desc, sz);
4283
4284 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
4285 DMA_TO_DEVICE);
4286 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
4287 mvpp2_txq_desc_put(txq);
4288 return -ENOMEM;
4289 }
4290
4291 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4292
4293 if (!left) {
4294 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
4295 if (last) {
4296 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4297 return 0;
4298 }
4299 } else {
4300 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
4301 }
4302
4303 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4304 return 0;
4305 }
4306
4307 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
4308 struct mvpp2_tx_queue *txq,
4309 struct mvpp2_tx_queue *aggr_txq,
4310 struct mvpp2_txq_pcpu *txq_pcpu)
4311 {
4312 struct mvpp2_port *port = netdev_priv(dev);
4313 int hdr_sz, i, len, descs = 0;
4314 struct tso_t tso;
4315
4316 /* Check number of available descriptors */
4317 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
4318 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
4319 tso_count_descs(skb)))
4320 return 0;
4321
4322 hdr_sz = tso_start(skb, &tso);
4323
4324 len = skb->len - hdr_sz;
4325 while (len > 0) {
4326 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
4327 char *hdr = txq_pcpu->tso_headers +
4328 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
4329
4330 len -= left;
4331 descs++;
4332
4333 tso_build_hdr(skb, hdr, &tso, left, len == 0);
4334 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
4335
4336 while (left > 0) {
4337 int sz = min_t(int, tso.size, left);
4338 left -= sz;
4339 descs++;
4340
4341 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
4342 txq_pcpu, sz, left, len == 0))
4343 goto release;
4344 tso_build_data(skb, &tso, sz);
4345 }
4346 }
4347
4348 return descs;
4349
4350 release:
4351 for (i = descs - 1; i >= 0; i--) {
4352 struct mvpp2_tx_desc *tx_desc = txq->descs + i;
4353 tx_desc_unmap_put(port, txq, tx_desc);
4354 }
4355 return 0;
4356 }
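
/* [Editor's example] mvpp2_tx_tso() consumes one header descriptor per
 * segment plus one data descriptor per chunk of that segment's payload.
 * A simplified counter for that descriptor budget, using a fixed chunk
 * size where the real code walks tso_t over skb fragments:
 */
#if 0 /* illustrative sketch, excluded from the build */
#include <stdio.h>

static int count_tso_descs(int payload, int gso_size, int chunk)
{
	int descs = 0;

	while (payload > 0) {
		int seg = payload < gso_size ? payload : gso_size;

		descs++;		/* header descriptor */
		payload -= seg;
		while (seg > 0) {	/* data descriptors */
			int sz = seg < chunk ? seg : chunk;

			seg -= sz;
			descs++;
		}
	}
	return descs;
}

int main(void)
{
	/* e.g. 4000 bytes of payload, 1400-byte segments, 2KB chunks:
	 * three segments, each needing one header and one data desc */
	printf("descs = %d\n", count_tso_descs(4000, 1400, 2048));
	return 0;
}
#endif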
4357
4358 /* Main tx processing */
4359 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
4360 {
4361 struct mvpp2_port *port = netdev_priv(dev);
4362 struct mvpp2_tx_queue *txq, *aggr_txq;
4363 struct mvpp2_txq_pcpu *txq_pcpu;
4364 struct mvpp2_tx_desc *tx_desc;
4365 dma_addr_t buf_dma_addr;
4366 unsigned long flags = 0;
4367 unsigned int thread;
4368 int frags = 0;
4369 u16 txq_id;
4370 u32 tx_cmd;
4371
4372 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4373
4374 txq_id = skb_get_queue_mapping(skb);
4375 txq = port->txqs[txq_id];
4376 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4377 aggr_txq = &port->priv->aggr_txqs[thread];
4378
4379 if (test_bit(thread, &port->priv->lock_map))
4380 spin_lock_irqsave(&port->tx_lock[thread], flags);
4381
4382 if (skb_is_gso(skb)) {
4383 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
4384 goto out;
4385 }
4386 frags = skb_shinfo(skb)->nr_frags + 1;
4387
4388 /* Check number of available descriptors */
4389 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
4390 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
4391 frags = 0;
4392 goto out;
4393 }
4394
4395 /* Get a descriptor for the first part of the packet */
4396 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4397 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
4398 !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
4399 mvpp2_txdesc_clear_ptp(port, tx_desc);
4400 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4401 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
4402
4403 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
4404 skb_headlen(skb), DMA_TO_DEVICE);
4405 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
4406 mvpp2_txq_desc_put(txq);
4407 frags = 0;
4408 goto out;
4409 }
4410
4411 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4412
4413 tx_cmd = mvpp2_skb_tx_csum(port, skb);
4414
4415 if (frags == 1) {
4416 /* First and Last descriptor */
4417 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
4418 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4419 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4420 } else {
4421 /* First but not Last */
4422 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
4423 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4424 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4425
4426 /* Continue with other skb fragments */
4427 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
4428 tx_desc_unmap_put(port, txq, tx_desc);
4429 frags = 0;
4430 }
4431 }
4432
4433 out:
4434 if (frags > 0) {
4435 struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
4436 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
4437
4438 txq_pcpu->reserved_num -= frags;
4439 txq_pcpu->count += frags;
4440 aggr_txq->count += frags;
4441
4442 /* Enable transmit */
4443 wmb();
4444 mvpp2_aggr_txq_pend_desc_add(port, frags);
4445
4446 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
4447 netif_tx_stop_queue(nq);
4448
4449 u64_stats_update_begin(&stats->syncp);
4450 stats->tx_packets++;
4451 stats->tx_bytes += skb->len;
4452 u64_stats_update_end(&stats->syncp);
4453 } else {
4454 dev->stats.tx_dropped++;
4455 dev_kfree_skb_any(skb);
4456 }
4457
4458 /* Finalize TX processing */
4459 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
4460 mvpp2_txq_done(port, txq, txq_pcpu);
4461
4462 /* Set the timer in case not all frags were processed */
4463 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
4464 txq_pcpu->count > 0) {
4465 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
4466
4467 if (!port_pcpu->timer_scheduled) {
4468 port_pcpu->timer_scheduled = true;
4469 hrtimer_start(&port_pcpu->tx_done_timer,
4470 MVPP2_TXDONE_HRTIMER_PERIOD_NS,
4471 HRTIMER_MODE_REL_PINNED_SOFT);
4472 }
4473 }
4474
4475 if (test_bit(thread, &port->priv->lock_map))
4476 spin_unlock_irqrestore(&port->tx_lock[thread], flags);
4477
4478 return NETDEV_TX_OK;
4479 }
4480
4481 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
4482 {
4483 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
4484 netdev_err(dev, "FCS error\n");
4485 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
4486 netdev_err(dev, "rx fifo overrun error\n");
4487 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
4488 netdev_err(dev, "tx fifo underrun error\n");
4489 }
4490
4491 static int mvpp2_poll(struct napi_struct *napi, int budget)
4492 {
4493 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
4494 int rx_done = 0;
4495 struct mvpp2_port *port = netdev_priv(napi->dev);
4496 struct mvpp2_queue_vector *qv;
4497 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4498
4499 qv = container_of(napi, struct mvpp2_queue_vector, napi);
4500
4501 /* Rx/Tx cause register
4502 *
4503 * Bits 0-15: each bit indicates received packets on the Rx queue
4504 * (bit 0 is for Rx queue 0).
4505 *
4506 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
4507 * (bit 16 is for Tx queue 0).
4508 *
4509 * Each CPU has its own Rx/Tx cause register
4510 */
4511 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
4512 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
4513
4514 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
4515 if (cause_misc) {
4516 mvpp2_cause_error(port->dev, cause_misc);
4517
4518 /* Clear the cause register */
4519 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
4520 mvpp2_thread_write(port->priv, thread,
4521 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
4522 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
4523 }
4524
4525 if (port->has_tx_irqs) {
4526 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4527 if (cause_tx) {
4528 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
4529 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
4530 }
4531 }
4532
4533 /* Process RX packets */
4534 cause_rx = cause_rx_tx &
4535 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
4536 cause_rx <<= qv->first_rxq;
4537 cause_rx |= qv->pending_cause_rx;
4538 while (cause_rx && budget > 0) {
4539 int count;
4540 struct mvpp2_rx_queue *rxq;
4541
4542 rxq = mvpp2_get_rx_queue(port, cause_rx);
4543 if (!rxq)
4544 break;
4545
4546 count = mvpp2_rx(port, napi, budget, rxq);
4547 rx_done += count;
4548 budget -= count;
4549 if (budget > 0) {
4550 /* Clear the bit associated to this Rx queue
4551 * so that next iteration will continue from
4552 * the next Rx queue.
4553 */
4554 cause_rx &= ~(1 << rxq->logic_rxq);
4555 }
4556 }
4557
4558 if (budget > 0) {
4559 cause_rx = 0;
4560 napi_complete_done(napi, rx_done);
4561
4562 mvpp2_qvec_interrupt_enable(qv);
4563 }
4564 qv->pending_cause_rx = cause_rx;
4565 return rx_done;
4566 }
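
/* [Editor's example] Decoding the Rx/Tx cause register layout described
 * in the comment above: bits 0-15 flag per-RXQ work, bits 16-23 per-TXQ
 * completions. The masks below are placeholders for the MVPP2_CAUSE_*
 * definitions in mvpp2.h:
 */
#if 0 /* illustrative sketch, excluded from the build */
#include <stdio.h>
#include <stdint.h>

#define CAUSE_RXQ_MASK   0x0000ffffu
#define CAUSE_TXQ_MASK   0x00ff0000u
#define CAUSE_TXQ_SHIFT  16

int main(void)
{
	uint32_t cause_rx_tx = 0x00050009u; /* TXQs 0,2 + RXQs 0,3 */
	uint32_t cause_rx = cause_rx_tx & CAUSE_RXQ_MASK;
	uint32_t cause_tx = (cause_rx_tx & CAUSE_TXQ_MASK) >> CAUSE_TXQ_SHIFT;
	int q;

	for (q = 0; q < 16; q++)
		if (cause_rx & (1u << q))
			printf("RX queue %d has work\n", q);
	for (q = 0; q < 8; q++)
		if (cause_tx & (1u << q))
			printf("TX queue %d completed packets\n", q);
	return 0;
}
#endif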
4567
4568 static void mvpp22_mode_reconfigure(struct mvpp2_port *port,
4569 phy_interface_t interface)
4570 {
4571 u32 ctrl3;
4572
4573 /* Set the GMAC & XLG MAC in reset */
4574 mvpp2_mac_reset_assert(port);
4575
4576 /* Set the MPCS and XPCS in reset */
4577 mvpp22_pcs_reset_assert(port);
4578
4579 /* comphy reconfiguration */
4580 mvpp22_comphy_init(port, interface);
4581
4582 /* gop reconfiguration */
4583 mvpp22_gop_init(port, interface);
4584
4585 mvpp22_pcs_reset_deassert(port, interface);
4586
4587 if (mvpp2_port_supports_xlg(port)) {
4588 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
4589 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4590
4591 if (mvpp2_is_xlg(interface))
4592 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4593 else
4594 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4595
4596 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
4597 }
4598
4599 if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface))
4600 mvpp2_xlg_max_rx_size_set(port);
4601 else
4602 mvpp2_gmac_max_rx_size_set(port);
4603 }
4604
4605 /* Set hw internals when starting port */
4606 static void mvpp2_start_dev(struct mvpp2_port *port)
4607 {
4608 int i;
4609
4610 mvpp2_txp_max_tx_size_set(port);
4611
4612 for (i = 0; i < port->nqvecs; i++)
4613 napi_enable(&port->qvecs[i].napi);
4614
4615 /* Enable interrupts on all threads */
4616 mvpp2_interrupts_enable(port);
4617
4618 if (port->priv->hw_version >= MVPP22)
4619 mvpp22_mode_reconfigure(port, port->phy_interface);
4620
4621 if (port->phylink) {
4622 phylink_start(port->phylink);
4623 } else {
4624 mvpp2_acpi_start(port);
4625 }
4626
4627 netif_tx_start_all_queues(port->dev);
4628
4629 clear_bit(0, &port->state);
4630 }
4631
4632 /* Set hw internals when stopping port */
4633 static void mvpp2_stop_dev(struct mvpp2_port *port)
4634 {
4635 int i;
4636
4637 set_bit(0, &port->state);
4638
4639 /* Disable interrupts on all threads */
4640 mvpp2_interrupts_disable(port);
4641
4642 for (i = 0; i < port->nqvecs; i++)
4643 napi_disable(&port->qvecs[i].napi);
4644
4645 if (port->phylink)
4646 phylink_stop(port->phylink);
4647 phy_power_off(port->comphy);
4648 }
4649
4650 static int mvpp2_check_ringparam_valid(struct net_device *dev,
4651 struct ethtool_ringparam *ring)
4652 {
4653 u16 new_rx_pending = ring->rx_pending;
4654 u16 new_tx_pending = ring->tx_pending;
4655
4656 if (ring->rx_pending == 0 || ring->tx_pending == 0)
4657 return -EINVAL;
4658
4659 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
4660 new_rx_pending = MVPP2_MAX_RXD_MAX;
4661 else if (ring->rx_pending < MSS_THRESHOLD_START)
4662 new_rx_pending = MSS_THRESHOLD_START;
4663 else if (!IS_ALIGNED(ring->rx_pending, 16))
4664 new_rx_pending = ALIGN(ring->rx_pending, 16);
4665
4666 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
4667 new_tx_pending = MVPP2_MAX_TXD_MAX;
4668 else if (!IS_ALIGNED(ring->tx_pending, 32))
4669 new_tx_pending = ALIGN(ring->tx_pending, 32);
4670
4671 /* The Tx ring size cannot be smaller than the minimum number of
4672 * descriptors needed for TSO.
4673 */
4674 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
4675 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
4676
4677 if (ring->rx_pending != new_rx_pending) {
4678 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
4679 ring->rx_pending, new_rx_pending);
4680 ring->rx_pending = new_rx_pending;
4681 }
4682
4683 if (ring->tx_pending != new_tx_pending) {
4684 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
4685 ring->tx_pending, new_tx_pending);
4686 ring->tx_pending = new_tx_pending;
4687 }
4688
4689 return 0;
4690 }
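
/* [Editor's example] The ring-size checks rely on the kernel's
 * IS_ALIGNED()/ALIGN() macros; for power-of-two alignments ALIGN(x, a)
 * is just (x + a - 1) & ~(a - 1). Userspace sketch of the Rx
 * (16-descriptor) and Tx (32-descriptor) rounding performed above:
 */
#if 0 /* illustrative sketch, excluded from the build */
#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)
#define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int req[] = { 100, 512, 1000 };
	int i;

	for (i = 0; i < 3; i++)
		printf("rx %u -> %u, tx %u -> %u\n",
		       req[i],
		       IS_ALIGNED(req[i], 16) ? req[i] : ALIGN(req[i], 16),
		       req[i],
		       IS_ALIGNED(req[i], 32) ? req[i] : ALIGN(req[i], 32));
	return 0;
}
#endif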
4691
4692 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
4693 {
4694 u32 mac_addr_l, mac_addr_m, mac_addr_h;
4695
4696 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4697 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
4698 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
4699 addr[0] = (mac_addr_h >> 24) & 0xFF;
4700 addr[1] = (mac_addr_h >> 16) & 0xFF;
4701 addr[2] = (mac_addr_h >> 8) & 0xFF;
4702 addr[3] = mac_addr_h & 0xFF;
4703 addr[4] = mac_addr_m & 0xFF;
4704 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
4705 }
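
/* [Editor's example] The MAC address is scattered across three
 * registers: four bytes in the high word, one in the middle word, and
 * one at a hardware-defined offset in the low word. Sketch of the same
 * byte surgery with hypothetical register contents and a placeholder
 * for MVPP2_GMAC_SA_LOW_OFFS:
 */
#if 0 /* illustrative sketch, excluded from the build */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mac_h = 0x00506977u;	/* bytes 0-3 of the MAC */
	uint32_t mac_m = 0x00000042u;	/* byte 4 in its low byte */
	uint32_t mac_l = 0x00001500u;	/* byte 5 at a HW-defined offset */
	unsigned int sa_low_offs = 8;	/* placeholder offset */
	uint8_t addr[6];

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = mac_m & 0xff;
	addr[5] = (mac_l >> sa_low_offs) & 0xff;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}
#endif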
4706
4707 static int mvpp2_irqs_init(struct mvpp2_port *port)
4708 {
4709 int err, i;
4710
4711 for (i = 0; i < port->nqvecs; i++) {
4712 struct mvpp2_queue_vector *qv = port->qvecs + i;
4713
4714 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4715 qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
4716 if (!qv->mask) {
4717 err = -ENOMEM;
4718 goto err;
4719 }
4720
4721 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
4722 }
4723
4724 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
4725 if (err)
4726 goto err;
4727
4728 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4729 unsigned int cpu;
4730
4731 for_each_present_cpu(cpu) {
4732 if (mvpp2_cpu_to_thread(port->priv, cpu) ==
4733 qv->sw_thread_id)
4734 cpumask_set_cpu(cpu, qv->mask);
4735 }
4736
4737 irq_set_affinity_hint(qv->irq, qv->mask);
4738 }
4739 }
4740
4741 return 0;
4742 err:
4743 for (i = 0; i < port->nqvecs; i++) {
4744 struct mvpp2_queue_vector *qv = port->qvecs + i;
4745
4746 irq_set_affinity_hint(qv->irq, NULL);
4747 kfree(qv->mask);
4748 qv->mask = NULL;
4749 free_irq(qv->irq, qv);
4750 }
4751
4752 return err;
4753 }
4754
4755 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
4756 {
4757 int i;
4758
4759 for (i = 0; i < port->nqvecs; i++) {
4760 struct mvpp2_queue_vector *qv = port->qvecs + i;
4761
4762 irq_set_affinity_hint(qv->irq, NULL);
4763 kfree(qv->mask);
4764 qv->mask = NULL;
4765 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
4766 free_irq(qv->irq, qv);
4767 }
4768 }
4769
4770 static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
4771 {
4772 return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
4773 !(port->flags & MVPP2_F_LOOPBACK);
4774 }
4775
4776 static int mvpp2_open(struct net_device *dev)
4777 {
4778 struct mvpp2_port *port = netdev_priv(dev);
4779 struct mvpp2 *priv = port->priv;
4780 unsigned char mac_bcast[ETH_ALEN] = {
4781 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4782 bool valid = false;
4783 int err;
4784
4785 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
4786 if (err) {
4787 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
4788 return err;
4789 }
4790 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
4791 if (err) {
4792 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
4793 return err;
4794 }
4795 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
4796 if (err) {
4797 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
4798 return err;
4799 }
4800 err = mvpp2_prs_def_flow(port);
4801 if (err) {
4802 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
4803 return err;
4804 }
4805
4806 /* Allocate the Rx/Tx queues */
4807 err = mvpp2_setup_rxqs(port);
4808 if (err) {
4809 netdev_err(port->dev, "cannot allocate Rx queues\n");
4810 return err;
4811 }
4812
4813 err = mvpp2_setup_txqs(port);
4814 if (err) {
4815 netdev_err(port->dev, "cannot allocate Tx queues\n");
4816 goto err_cleanup_rxqs;
4817 }
4818
4819 err = mvpp2_irqs_init(port);
4820 if (err) {
4821 netdev_err(port->dev, "cannot init IRQs\n");
4822 goto err_cleanup_txqs;
4823 }
4824
4825 if (port->phylink) {
4826 err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
4827 if (err) {
4828 netdev_err(port->dev, "could not attach PHY (%d)\n",
4829 err);
4830 goto err_free_irq;
4831 }
4832
4833 valid = true;
4834 }
4835
4836 if (priv->hw_version >= MVPP22 && port->port_irq) {
4837 err = request_irq(port->port_irq, mvpp2_port_isr, 0,
4838 dev->name, port);
4839 if (err) {
4840 netdev_err(port->dev,
4841 "cannot request port link/ptp IRQ %d\n",
4842 port->port_irq);
4843 goto err_free_irq;
4844 }
4845
4846 mvpp22_gop_setup_irq(port);
4847
4848 /* By default, the link is down */
4849 netif_carrier_off(port->dev);
4850
4851 valid = true;
4852 } else {
4853 port->port_irq = 0;
4854 }
4855
4856 if (!valid) {
4857 netdev_err(port->dev,
4858 "invalid configuration: no dt or link IRQ");
4859 err = -ENOENT;
4860 goto err_free_irq;
4861 }
4862
4863 /* Unmask interrupts on all CPUs */
4864 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
4865 mvpp2_shared_interrupt_mask_unmask(port, false);
4866
4867 mvpp2_start_dev(port);
4868
4869 /* Start hardware statistics gathering */
4870 queue_delayed_work(priv->stats_queue, &port->stats_work,
4871 MVPP2_MIB_COUNTERS_STATS_DELAY);
4872
4873 return 0;
4874
4875 err_free_irq:
4876 mvpp2_irqs_deinit(port);
4877 err_cleanup_txqs:
4878 mvpp2_cleanup_txqs(port);
4879 err_cleanup_rxqs:
4880 mvpp2_cleanup_rxqs(port);
4881 return err;
4882 }
4883
4884 static int mvpp2_stop(struct net_device *dev)
4885 {
4886 struct mvpp2_port *port = netdev_priv(dev);
4887 struct mvpp2_port_pcpu *port_pcpu;
4888 unsigned int thread;
4889
4890 mvpp2_stop_dev(port);
4891
4892 /* Mask interrupts on all threads */
4893 on_each_cpu(mvpp2_interrupts_mask, port, 1);
4894 mvpp2_shared_interrupt_mask_unmask(port, true);
4895
4896 if (port->phylink)
4897 phylink_disconnect_phy(port->phylink);
4898 if (port->port_irq)
4899 free_irq(port->port_irq, port);
4900
4901 mvpp2_irqs_deinit(port);
4902 if (!port->has_tx_irqs) {
4903 for (thread = 0; thread < port->priv->nthreads; thread++) {
4904 port_pcpu = per_cpu_ptr(port->pcpu, thread);
4905
4906 hrtimer_cancel(&port_pcpu->tx_done_timer);
4907 port_pcpu->timer_scheduled = false;
4908 }
4909 }
4910 mvpp2_cleanup_rxqs(port);
4911 mvpp2_cleanup_txqs(port);
4912
4913 cancel_delayed_work_sync(&port->stats_work);
4914
4915 mvpp2_mac_reset_assert(port);
4916 mvpp22_pcs_reset_assert(port);
4917
4918 return 0;
4919 }
4920
4921 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
4922 struct netdev_hw_addr_list *list)
4923 {
4924 struct netdev_hw_addr *ha;
4925 int ret;
4926
4927 netdev_hw_addr_list_for_each(ha, list) {
4928 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
4929 if (ret)
4930 return ret;
4931 }
4932
4933 return 0;
4934 }
4935
4936 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
4937 {
4938 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
4939 mvpp2_prs_vid_enable_filtering(port);
4940 else
4941 mvpp2_prs_vid_disable_filtering(port);
4942
4943 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4944 MVPP2_PRS_L2_UNI_CAST, enable);
4945
4946 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4947 MVPP2_PRS_L2_MULTI_CAST, enable);
4948 }
4949
4950 static void mvpp2_set_rx_mode(struct net_device *dev)
4951 {
4952 struct mvpp2_port *port = netdev_priv(dev);
4953
4954 /* Clear the whole UC and MC list */
4955 mvpp2_prs_mac_del_all(port);
4956
4957 if (dev->flags & IFF_PROMISC) {
4958 mvpp2_set_rx_promisc(port, true);
4959 return;
4960 }
4961
4962 mvpp2_set_rx_promisc(port, false);
4963
4964 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
4965 mvpp2_prs_mac_da_accept_list(port, &dev->uc))
4966 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4967 MVPP2_PRS_L2_UNI_CAST, true);
4968
4969 if (dev->flags & IFF_ALLMULTI) {
4970 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4971 MVPP2_PRS_L2_MULTI_CAST, true);
4972 return;
4973 }
4974
4975 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
4976 mvpp2_prs_mac_da_accept_list(port, &dev->mc))
4977 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4978 MVPP2_PRS_L2_MULTI_CAST, true);
4979 }
4980
4981 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
4982 {
4983 const struct sockaddr *addr = p;
4984 int err;
4985
4986 if (!is_valid_ether_addr(addr->sa_data))
4987 return -EADDRNOTAVAIL;
4988
4989 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
4990 if (err) {
4991 /* Reconfigure the parser to accept the original MAC address */
4992 mvpp2_prs_update_mac_da(dev, dev->dev_addr);
4993 netdev_err(dev, "failed to change MAC address\n");
4994 }
4995 return err;
4996 }
4997
4998 /* Shut down all the ports, reconfigure the pools as percpu or shared,
4999 * then bring all ports back up.
5000 */
5001 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
5002 {
5003 bool change_percpu = (percpu != priv->percpu_pools);
5004 int numbufs = MVPP2_BM_POOLS_NUM, i;
5005 struct mvpp2_port *port = NULL;
5006 bool status[MVPP2_MAX_PORTS];
5007
5008 for (i = 0; i < priv->port_count; i++) {
5009 port = priv->port_list[i];
5010 status[i] = netif_running(port->dev);
5011 if (status[i])
5012 mvpp2_stop(port->dev);
5013 }
5014
5015 /* nrxqs is the same for all ports */
5016 if (priv->percpu_pools)
5017 numbufs = port->nrxqs * 2;
5018
5019 if (change_percpu)
5020 mvpp2_bm_pool_update_priv_fc(priv, false);
5021
5022 for (i = 0; i < numbufs; i++)
5023 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
5024
5025 devm_kfree(port->dev->dev.parent, priv->bm_pools);
5026 priv->percpu_pools = percpu;
5027 mvpp2_bm_init(port->dev->dev.parent, priv);
5028
5029 for (i = 0; i < priv->port_count; i++) {
5030 port = priv->port_list[i];
5031 if (percpu && port->ntxqs >= num_possible_cpus() * 2)
5032 xdp_set_features_flag(port->dev,
5033 NETDEV_XDP_ACT_BASIC |
5034 NETDEV_XDP_ACT_REDIRECT |
5035 NETDEV_XDP_ACT_NDO_XMIT);
5036 else
5037 xdp_clear_features_flag(port->dev);
5038
5039 mvpp2_swf_bm_pool_init(port);
5040 if (status[i])
5041 mvpp2_open(port->dev);
5042 }
5043
5044 if (change_percpu)
5045 mvpp2_bm_pool_update_priv_fc(priv, true);
5046
5047 return 0;
5048 }
5049
5050 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5051 {
5052 struct mvpp2_port *port = netdev_priv(dev);
5053 bool running = netif_running(dev);
5054 struct mvpp2 *priv = port->priv;
5055 int err;
5056
5057 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5058 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5059 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5060 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5061 }
5062
5063 if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
5064 netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
5065 mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
5066 return -EINVAL;
5067 }
5068
5069 if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
5070 if (priv->percpu_pools) {
5071 netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
5072 mvpp2_bm_switch_buffers(priv, false);
5073 }
5074 } else {
5075 bool jumbo = false;
5076 int i;
5077
5078 for (i = 0; i < priv->port_count; i++)
5079 if (priv->port_list[i] != port &&
5080 MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
5081 MVPP2_BM_LONG_PKT_SIZE) {
5082 jumbo = true;
5083 break;
5084 }
5085
5086 /* No port is using jumbo frames */
5087 if (!jumbo) {
5088 dev_info(port->dev->dev.parent,
5089 "all ports have a low MTU, switching to per-cpu buffers");
5090 mvpp2_bm_switch_buffers(priv, true);
5091 }
5092 }
5093
5094 if (running)
5095 mvpp2_stop_dev(port);
5096
5097 err = mvpp2_bm_update_mtu(dev, mtu);
5098 if (err) {
5099 netdev_err(dev, "failed to change MTU\n");
5100 /* Reconfigure BM to the original MTU */
5101 mvpp2_bm_update_mtu(dev, dev->mtu);
5102 } else {
5103 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5104 }
5105
5106 if (running) {
5107 mvpp2_start_dev(port);
5108 mvpp2_egress_enable(port);
5109 mvpp2_ingress_enable(port);
5110 }
5111
5112 return err;
5113 }
5114
5115 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
5116 {
5117 enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
5118 struct mvpp2 *priv = port->priv;
5119 int err = -1, i;
5120
5121 if (!priv->percpu_pools)
5122 return err;
5123
5124 if (!priv->page_pool[0])
5125 return -ENOMEM;
5126
5127 for (i = 0; i < priv->port_count; i++) {
5128 port = priv->port_list[i];
5129 if (port->xdp_prog) {
5130 dma_dir = DMA_BIDIRECTIONAL;
5131 break;
5132 }
5133 }
5134
5135 /* All pools are equal in terms of DMA direction */
5136 if (priv->page_pool[0]->p.dma_dir != dma_dir)
5137 err = mvpp2_bm_switch_buffers(priv, true);
5138
5139 return err;
5140 }
5141
5142 static void
5143 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5144 {
5145 struct mvpp2_port *port = netdev_priv(dev);
5146 unsigned int start;
5147 unsigned int cpu;
5148
5149 for_each_possible_cpu(cpu) {
5150 struct mvpp2_pcpu_stats *cpu_stats;
5151 u64 rx_packets;
5152 u64 rx_bytes;
5153 u64 tx_packets;
5154 u64 tx_bytes;
5155
5156 cpu_stats = per_cpu_ptr(port->stats, cpu);
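/* Snapshot the counters under the u64_stats seqcount so the 64-bit
 * values stay consistent on 32-bit systems; retry if a writer
 * updated them mid-read.
 */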
5157 do {
5158 start = u64_stats_fetch_begin(&cpu_stats->syncp);
5159 rx_packets = cpu_stats->rx_packets;
5160 rx_bytes = cpu_stats->rx_bytes;
5161 tx_packets = cpu_stats->tx_packets;
5162 tx_bytes = cpu_stats->tx_bytes;
5163 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
5164
5165 stats->rx_packets += rx_packets;
5166 stats->rx_bytes += rx_bytes;
5167 stats->tx_packets += tx_packets;
5168 stats->tx_bytes += tx_bytes;
5169 }
5170
5171 stats->rx_errors = dev->stats.rx_errors;
5172 stats->rx_dropped = dev->stats.rx_dropped;
5173 stats->tx_dropped = dev->stats.tx_dropped;
5174 }
5175
5176 static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
5177 {
5178 struct hwtstamp_config config;
5179 void __iomem *ptp;
5180 u32 gcr, int_mask;
5181
5182 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5183 return -EFAULT;
5184
5185 if (config.tx_type != HWTSTAMP_TX_OFF &&
5186 config.tx_type != HWTSTAMP_TX_ON)
5187 return -ERANGE;
5188
5189 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
5190
5191 int_mask = gcr = 0;
5192 if (config.tx_type != HWTSTAMP_TX_OFF) {
5193 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
5194 int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
5195 MVPP22_PTP_INT_MASK_QUEUE0;
5196 }
5197
5198 /* It seems we must also release the TX reset when enabling the TSU */
5199 if (config.rx_filter != HWTSTAMP_FILTER_NONE)
5200 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
5201 MVPP22_PTP_GCR_TX_RESET;
5202
5203 if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
5204 mvpp22_tai_start(port->priv->tai);
5205
5206 if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
5207 config.rx_filter = HWTSTAMP_FILTER_ALL;
5208 mvpp2_modify(ptp + MVPP22_PTP_GCR,
5209 MVPP22_PTP_GCR_RX_RESET |
5210 MVPP22_PTP_GCR_TX_RESET |
5211 MVPP22_PTP_GCR_TSU_ENABLE, gcr);
5212 port->rx_hwtstamp = true;
5213 } else {
5214 port->rx_hwtstamp = false;
5215 mvpp2_modify(ptp + MVPP22_PTP_GCR,
5216 MVPP22_PTP_GCR_RX_RESET |
5217 MVPP22_PTP_GCR_TX_RESET |
5218 MVPP22_PTP_GCR_TSU_ENABLE, gcr);
5219 }
5220
5221 mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
5222 MVPP22_PTP_INT_MASK_QUEUE1 |
5223 MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
5224
5225 if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
5226 mvpp22_tai_stop(port->priv->tai);
5227
5228 port->tx_hwtstamp_type = config.tx_type;
5229
5230 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
5231 return -EFAULT;
5232
5233 return 0;
5234 }
5235
5236 static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
5237 {
5238 struct hwtstamp_config config;
5239
5240 memset(&config, 0, sizeof(config));
5241
5242 config.tx_type = port->tx_hwtstamp_type;
5243 config.rx_filter = port->rx_hwtstamp ?
5244 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
5245
5246 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
5247 return -EFAULT;
5248
5249 return 0;
5250 }
5251
5252 static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
5253 struct kernel_ethtool_ts_info *info)
5254 {
5255 struct mvpp2_port *port = netdev_priv(dev);
5256
5257 if (!port->hwtstamp)
5258 return -EOPNOTSUPP;
5259
5260 info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
5261 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5262 SOF_TIMESTAMPING_TX_HARDWARE |
5263 SOF_TIMESTAMPING_RX_HARDWARE |
5264 SOF_TIMESTAMPING_RAW_HARDWARE;
5265 info->tx_types = BIT(HWTSTAMP_TX_OFF) |
5266 BIT(HWTSTAMP_TX_ON);
5267 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
5268 BIT(HWTSTAMP_FILTER_ALL);
5269
5270 return 0;
5271 }
5272
5273 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5274 {
5275 struct mvpp2_port *port = netdev_priv(dev);
5276
5277 switch (cmd) {
5278 case SIOCSHWTSTAMP:
5279 if (port->hwtstamp)
5280 return mvpp2_set_ts_config(port, ifr);
5281 break;
5282
5283 case SIOCGHWTSTAMP:
5284 if (port->hwtstamp)
5285 return mvpp2_get_ts_config(port, ifr);
5286 break;
5287 }
5288
5289 if (!port->phylink)
5290 return -ENOTSUPP;
5291
5292 return phylink_mii_ioctl(port->phylink, ifr, cmd);
5293 }
5294
5295 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
5296 {
5297 struct mvpp2_port *port = netdev_priv(dev);
5298 int ret;
5299
5300 ret = mvpp2_prs_vid_entry_add(port, vid);
5301 if (ret)
5302 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
5303 MVPP2_PRS_VLAN_FILT_MAX - 1);
5304 return ret;
5305 }
5306
5307 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
5308 {
5309 struct mvpp2_port *port = netdev_priv(dev);
5310
5311 mvpp2_prs_vid_entry_remove(port, vid);
5312 return 0;
5313 }
5314
5315 static int mvpp2_set_features(struct net_device *dev,
5316 netdev_features_t features)
5317 {
5318 netdev_features_t changed = dev->features ^ features;
5319 struct mvpp2_port *port = netdev_priv(dev);
5320
5321 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
5322 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
5323 mvpp2_prs_vid_enable_filtering(port);
5324 } else {
5325 /* Invalidate all registered VID filters for this
5326 * port
5327 */
5328 mvpp2_prs_vid_remove_all(port);
5329
5330 mvpp2_prs_vid_disable_filtering(port);
5331 }
5332 }
5333
5334 if (changed & NETIF_F_RXHASH) {
5335 if (features & NETIF_F_RXHASH)
5336 mvpp22_port_rss_enable(port);
5337 else
5338 mvpp22_port_rss_disable(port);
5339 }
5340
5341 return 0;
5342 }
5343
5344 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
5345 {
5346 struct bpf_prog *prog = bpf->prog, *old_prog;
5347 bool running = netif_running(port->dev);
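/* A reset is needed only when transitioning between "no program"
 * and "some program"; replacing one program with another keeps the
 * existing RXQ and MTU setup.
 */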
5348 bool reset = !prog != !port->xdp_prog;
5349
5350 if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
5351 NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
5352 return -EOPNOTSUPP;
5353 }
5354
5355 if (!port->priv->percpu_pools) {
5356 NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
5357 return -EOPNOTSUPP;
5358 }
5359
5360 if (port->ntxqs < num_possible_cpus() * 2) {
5361 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
5362 return -EOPNOTSUPP;
5363 }
5364
5365 /* device is up and bpf is added/removed, must set up the RX queues */
5366 if (running && reset)
5367 mvpp2_stop(port->dev);
5368
5369 old_prog = xchg(&port->xdp_prog, prog);
5370 if (old_prog)
5371 bpf_prog_put(old_prog);
5372
5373 /* bpf is just replaced, RXQ and MTU are already set up */
5374 if (!reset)
5375 return 0;
5376
5377 /* device was up, restore the link */
5378 if (running)
5379 mvpp2_open(port->dev);
5380
5381 /* Check Page Pool DMA Direction */
5382 mvpp2_check_pagepool_dma(port);
5383
5384 return 0;
5385 }
5386
5387 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5388 {
5389 struct mvpp2_port *port = netdev_priv(dev);
5390
5391 switch (xdp->command) {
5392 case XDP_SETUP_PROG:
5393 return mvpp2_xdp_setup(port, xdp);
5394 default:
5395 return -EINVAL;
5396 }
5397 }
5398
5399 /* Ethtool methods */
5400
5401 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
5402 {
5403 struct mvpp2_port *port = netdev_priv(dev);
5404
5405 if (!port->phylink)
5406 return -ENOTSUPP;
5407
5408 return phylink_ethtool_nway_reset(port->phylink);
5409 }
5410
5411 /* Set interrupt coalescing for ethtools */
5412 static int
5413 mvpp2_ethtool_set_coalesce(struct net_device *dev,
5414 struct ethtool_coalesce *c,
5415 struct kernel_ethtool_coalesce *kernel_coal,
5416 struct netlink_ext_ack *extack)
5417 {
5418 struct mvpp2_port *port = netdev_priv(dev);
5419 int queue;
5420
5421 for (queue = 0; queue < port->nrxqs; queue++) {
5422 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5423
5424 rxq->time_coal = c->rx_coalesce_usecs;
5425 rxq->pkts_coal = c->rx_max_coalesced_frames;
5426 mvpp2_rx_pkts_coal_set(port, rxq);
5427 mvpp2_rx_time_coal_set(port, rxq);
5428 }
5429
5430 if (port->has_tx_irqs) {
5431 port->tx_time_coal = c->tx_coalesce_usecs;
5432 mvpp2_tx_time_coal_set(port);
5433 }
5434
5435 for (queue = 0; queue < port->ntxqs; queue++) {
5436 struct mvpp2_tx_queue *txq = port->txqs[queue];
5437
5438 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5439
5440 if (port->has_tx_irqs)
5441 mvpp2_tx_pkts_coal_set(port, txq);
5442 }
5443
5444 return 0;
5445 }
5446
5447 /* get coalescing for ethtools */
5448 static int
5449 mvpp2_ethtool_get_coalesce(struct net_device *dev,
5450 struct ethtool_coalesce *c,
5451 struct kernel_ethtool_coalesce *kernel_coal,
5452 struct netlink_ext_ack *extack)
5453 {
5454 struct mvpp2_port *port = netdev_priv(dev);
5455
5456 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5457 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5458 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5459 c->tx_coalesce_usecs = port->tx_time_coal;
5460 return 0;
5461 }
5462
5463 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5464 struct ethtool_drvinfo *drvinfo)
5465 {
5466 strscpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5467 sizeof(drvinfo->driver));
5468 strscpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5469 sizeof(drvinfo->version));
5470 strscpy(drvinfo->bus_info, dev_name(&dev->dev),
5471 sizeof(drvinfo->bus_info));
5472 }
5473
5474 static void
5475 mvpp2_ethtool_get_ringparam(struct net_device *dev,
5476 struct ethtool_ringparam *ring,
5477 struct kernel_ethtool_ringparam *kernel_ring,
5478 struct netlink_ext_ack *extack)
5479 {
5480 struct mvpp2_port *port = netdev_priv(dev);
5481
5482 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
5483 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
5484 ring->rx_pending = port->rx_ring_size;
5485 ring->tx_pending = port->tx_ring_size;
5486 }
5487
5488 static int
5489 mvpp2_ethtool_set_ringparam(struct net_device *dev,
5490 struct ethtool_ringparam *ring,
5491 struct kernel_ethtool_ringparam *kernel_ring,
5492 struct netlink_ext_ack *extack)
5493 {
5494 struct mvpp2_port *port = netdev_priv(dev);
5495 u16 prev_rx_ring_size = port->rx_ring_size;
5496 u16 prev_tx_ring_size = port->tx_ring_size;
5497 int err;
5498
5499 err = mvpp2_check_ringparam_valid(dev, ring);
5500 if (err)
5501 return err;
5502
5503 if (!netif_running(dev)) {
5504 port->rx_ring_size = ring->rx_pending;
5505 port->tx_ring_size = ring->tx_pending;
5506 return 0;
5507 }
5508
5509 /* The interface is running, so we have to force a
5510 * reallocation of the queues
5511 */
5512 mvpp2_stop_dev(port);
5513 mvpp2_cleanup_rxqs(port);
5514 mvpp2_cleanup_txqs(port);
5515
5516 port->rx_ring_size = ring->rx_pending;
5517 port->tx_ring_size = ring->tx_pending;
5518
5519 err = mvpp2_setup_rxqs(port);
5520 if (err) {
5521 /* Reallocate Rx queues with the original ring size */
5522 port->rx_ring_size = prev_rx_ring_size;
5523 ring->rx_pending = prev_rx_ring_size;
5524 err = mvpp2_setup_rxqs(port);
5525 if (err)
5526 goto err_out;
5527 }
5528 err = mvpp2_setup_txqs(port);
5529 if (err) {
5530 /* Reallocate Tx queues with the original ring size */
5531 port->tx_ring_size = prev_tx_ring_size;
5532 ring->tx_pending = prev_tx_ring_size;
5533 err = mvpp2_setup_txqs(port);
5534 if (err)
5535 goto err_clean_rxqs;
5536 }
5537
5538 mvpp2_start_dev(port);
5539 mvpp2_egress_enable(port);
5540 mvpp2_ingress_enable(port);
5541
5542 return 0;
5543
5544 err_clean_rxqs:
5545 mvpp2_cleanup_rxqs(port);
5546 err_out:
5547 netdev_err(dev, "failed to change ring parameters\n");
5548 return err;
5549 }
5550
5551 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
5552 struct ethtool_pauseparam *pause)
5553 {
5554 struct mvpp2_port *port = netdev_priv(dev);
5555
5556 if (!port->phylink)
5557 return;
5558
5559 phylink_ethtool_get_pauseparam(port->phylink, pause);
5560 }
5561
5562 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
5563 struct ethtool_pauseparam *pause)
5564 {
5565 struct mvpp2_port *port = netdev_priv(dev);
5566
5567 if (!port->phylink)
5568 return -ENOTSUPP;
5569
5570 return phylink_ethtool_set_pauseparam(port->phylink, pause);
5571 }
5572
5573 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
5574 struct ethtool_link_ksettings *cmd)
5575 {
5576 struct mvpp2_port *port = netdev_priv(dev);
5577
5578 if (!port->phylink)
5579 return -ENOTSUPP;
5580
5581 return phylink_ethtool_ksettings_get(port->phylink, cmd);
5582 }
5583
5584 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
5585 const struct ethtool_link_ksettings *cmd)
5586 {
5587 struct mvpp2_port *port = netdev_priv(dev);
5588
5589 if (!port->phylink)
5590 return -ENOTSUPP;
5591
5592 return phylink_ethtool_ksettings_set(port->phylink, cmd);
5593 }
5594
5595 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
5596 struct ethtool_rxnfc *info, u32 *rules)
5597 {
5598 struct mvpp2_port *port = netdev_priv(dev);
5599 int ret = 0, i, loc = 0;
5600
5601 if (!mvpp22_rss_is_supported(port))
5602 return -EOPNOTSUPP;
5603
5604 switch (info->cmd) {
5605 case ETHTOOL_GRXFH:
5606 ret = mvpp2_ethtool_rxfh_get(port, info);
5607 break;
5608 case ETHTOOL_GRXRINGS:
5609 info->data = port->nrxqs;
5610 break;
5611 case ETHTOOL_GRXCLSRLCNT:
5612 info->rule_cnt = port->n_rfs_rules;
5613 break;
5614 case ETHTOOL_GRXCLSRULE:
5615 ret = mvpp2_ethtool_cls_rule_get(port, info);
5616 break;
5617 case ETHTOOL_GRXCLSRLALL:
5618 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
5619 if (loc == info->rule_cnt) {
5620 ret = -EMSGSIZE;
5621 break;
5622 }
5623
5624 if (port->rfs_rules[i])
5625 rules[loc++] = i;
5626 }
5627 break;
5628 default:
5629 return -ENOTSUPP;
5630 }
5631
5632 return ret;
5633 }
5634
5635 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
5636 struct ethtool_rxnfc *info)
5637 {
5638 struct mvpp2_port *port = netdev_priv(dev);
5639 int ret = 0;
5640
5641 if (!mvpp22_rss_is_supported(port))
5642 return -EOPNOTSUPP;
5643
5644 switch (info->cmd) {
5645 case ETHTOOL_SRXFH:
5646 ret = mvpp2_ethtool_rxfh_set(port, info);
5647 break;
5648 case ETHTOOL_SRXCLSRLINS:
5649 ret = mvpp2_ethtool_cls_rule_ins(port, info);
5650 break;
5651 case ETHTOOL_SRXCLSRLDEL:
5652 ret = mvpp2_ethtool_cls_rule_del(port, info);
5653 break;
5654 default:
5655 return -EOPNOTSUPP;
5656 }
5657 return ret;
5658 }
5659
5660 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
5661 {
5662 struct mvpp2_port *port = netdev_priv(dev);
5663
5664 return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
5665 }
5666
5667 static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
5668 struct ethtool_rxfh_param *rxfh)
5669 {
5670 struct mvpp2_port *port = netdev_priv(dev);
5671 u32 rss_context = rxfh->rss_context;
5672 int ret = 0;
5673
5674 if (!mvpp22_rss_is_supported(port))
5675 return -EOPNOTSUPP;
5676 if (rss_context >= MVPP22_N_RSS_TABLES)
5677 return -EINVAL;
5678
5679 rxfh->hfunc = ETH_RSS_HASH_CRC32;
5680
5681 if (rxfh->indir)
5682 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context,
5683 rxfh->indir);
5684
5685 return ret;
5686 }
5687
5688 static bool mvpp2_ethtool_rxfh_okay(struct mvpp2_port *port,
5689 const struct ethtool_rxfh_param *rxfh)
5690 {
5691 if (!mvpp22_rss_is_supported(port))
5692 return false;
5693
5694 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
5695 rxfh->hfunc != ETH_RSS_HASH_CRC32)
5696 return false;
5697
5698 if (rxfh->key)
5699 return false;
5700
5701 return true;
5702 }
5703
5704 static int mvpp2_create_rxfh_context(struct net_device *dev,
5705 struct ethtool_rxfh_context *ctx,
5706 const struct ethtool_rxfh_param *rxfh,
5707 struct netlink_ext_ack *extack)
5708 {
5709 struct mvpp2_port *port = netdev_priv(dev);
5710 int ret = 0;
5711
5712 if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
5713 return -EOPNOTSUPP;
5714
5715 ctx->hfunc = ETH_RSS_HASH_CRC32;
5716
5717 ret = mvpp22_port_rss_ctx_create(port, rxfh->rss_context);
5718 if (ret)
5719 return ret;
5720
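/* If no indirection table was supplied, report back the default
 * table the hardware chose for the new context; otherwise program
 * the user-provided one.
 */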
5721 if (!rxfh->indir)
5722 ret = mvpp22_port_rss_ctx_indir_get(port, rxfh->rss_context,
5723 ethtool_rxfh_context_indir(ctx));
5724 else
5725 ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
5726 rxfh->indir);
5727 return ret;
5728 }
5729
5730 static int mvpp2_modify_rxfh_context(struct net_device *dev,
5731 struct ethtool_rxfh_context *ctx,
5732 const struct ethtool_rxfh_param *rxfh,
5733 struct netlink_ext_ack *extack)
5734 {
5735 struct mvpp2_port *port = netdev_priv(dev);
5736 int ret = 0;
5737
5738 if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
5739 return -EOPNOTSUPP;
5740
5741 if (rxfh->indir)
5742 ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
5743 rxfh->indir);
5744 return ret;
5745 }
5746
5747 static int mvpp2_remove_rxfh_context(struct net_device *dev,
5748 struct ethtool_rxfh_context *ctx,
5749 u32 rss_context,
5750 struct netlink_ext_ack *extack)
5751 {
5752 struct mvpp2_port *port = netdev_priv(dev);
5753
5754 return mvpp22_port_rss_ctx_delete(port, rss_context);
5755 }
5756
5757 static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
5758 struct ethtool_rxfh_param *rxfh,
5759 struct netlink_ext_ack *extack)
5760 {
5761 return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
5762 }
5763
5764 static int mvpp2_ethtool_get_eee(struct net_device *dev,
5765 struct ethtool_keee *eee)
5766 {
5767 struct mvpp2_port *port = netdev_priv(dev);
5768
5769 if (!port->phylink)
5770 return -EOPNOTSUPP;
5771
5772 return phylink_ethtool_get_eee(port->phylink, eee);
5773 }
5774
5775 static int mvpp2_ethtool_set_eee(struct net_device *dev,
5776 struct ethtool_keee *eee)
5777 {
5778 struct mvpp2_port *port = netdev_priv(dev);
5779
5780 if (!port->phylink)
5781 return -EOPNOTSUPP;
5782
5783 return phylink_ethtool_set_eee(port->phylink, eee);
5784 }
5785
5786 /* Device ops */
5787
5788 static const struct net_device_ops mvpp2_netdev_ops = {
5789 .ndo_open = mvpp2_open,
5790 .ndo_stop = mvpp2_stop,
5791 .ndo_start_xmit = mvpp2_tx,
5792 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5793 .ndo_set_mac_address = mvpp2_set_mac_address,
5794 .ndo_change_mtu = mvpp2_change_mtu,
5795 .ndo_get_stats64 = mvpp2_get_stats64,
5796 .ndo_eth_ioctl = mvpp2_ioctl,
5797 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
5798 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
5799 .ndo_set_features = mvpp2_set_features,
5800 .ndo_bpf = mvpp2_xdp,
5801 .ndo_xdp_xmit = mvpp2_xdp_xmit,
5802 };
5803
5804 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5805 .rxfh_max_num_contexts = MVPP22_N_RSS_TABLES,
5806 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5807 ETHTOOL_COALESCE_MAX_FRAMES,
5808 .nway_reset = mvpp2_ethtool_nway_reset,
5809 .get_link = ethtool_op_get_link,
5810 .get_ts_info = mvpp2_ethtool_get_ts_info,
5811 .set_coalesce = mvpp2_ethtool_set_coalesce,
5812 .get_coalesce = mvpp2_ethtool_get_coalesce,
5813 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5814 .get_ringparam = mvpp2_ethtool_get_ringparam,
5815 .set_ringparam = mvpp2_ethtool_set_ringparam,
5816 .get_strings = mvpp2_ethtool_get_strings,
5817 .get_ethtool_stats = mvpp2_ethtool_get_stats,
5818 .get_sset_count = mvpp2_ethtool_get_sset_count,
5819 .get_pauseparam = mvpp2_ethtool_get_pause_param,
5820 .set_pauseparam = mvpp2_ethtool_set_pause_param,
5821 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
5822 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
5823 .get_rxnfc = mvpp2_ethtool_get_rxnfc,
5824 .set_rxnfc = mvpp2_ethtool_set_rxnfc,
5825 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
5826 .get_rxfh = mvpp2_ethtool_get_rxfh,
5827 .set_rxfh = mvpp2_ethtool_set_rxfh,
5828 .create_rxfh_context = mvpp2_create_rxfh_context,
5829 .modify_rxfh_context = mvpp2_modify_rxfh_context,
5830 .remove_rxfh_context = mvpp2_remove_rxfh_context,
5831 .get_eee = mvpp2_ethtool_get_eee,
5832 .set_eee = mvpp2_ethtool_set_eee,
5833 };
5834
5835 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
5836 * had a single IRQ defined per-port.
5837 */
5838 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
5839 struct device_node *port_node)
5840 {
5841 struct mvpp2_queue_vector *v = &port->qvecs[0];
5842
5843 v->first_rxq = 0;
5844 v->nrxqs = port->nrxqs;
5845 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5846 v->sw_thread_id = 0;
5847 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
5848 v->port = port;
5849 v->irq = irq_of_parse_and_map(port_node, 0);
5850 if (v->irq <= 0)
5851 return -EINVAL;
5852 netif_napi_add(port->dev, &v->napi, mvpp2_poll);
5853
5854 port->nqvecs = 1;
5855
5856 return 0;
5857 }
5858
5859 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
5860 struct device_node *port_node)
5861 {
5862 struct mvpp2 *priv = port->priv;
5863 struct mvpp2_queue_vector *v;
5864 int i, ret;
5865
5866 switch (queue_mode) {
5867 case MVPP2_QDIST_SINGLE_MODE:
5868 port->nqvecs = priv->nthreads + 1;
5869 break;
5870 case MVPP2_QDIST_MULTI_MODE:
5871 port->nqvecs = priv->nthreads;
5872 break;
5873 }
5874
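/* In single mode the first nthreads vectors are private and handle
 * TX completion only, while one extra, shared vector (the last one)
 * services all RX queues; in multi mode each of the nthreads vectors
 * is private and owns exactly one RX queue.
 */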
5875 for (i = 0; i < port->nqvecs; i++) {
5876 char irqname[16];
5877
5878 v = port->qvecs + i;
5879
5880 v->port = port;
5881 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
5882 v->sw_thread_id = i;
5883 v->sw_thread_mask = BIT(i);
5884
5885 if (port->flags & MVPP2_F_DT_COMPAT)
5886 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5887 else
5888 snprintf(irqname, sizeof(irqname), "hif%d", i);
5889
5890 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
5891 v->first_rxq = i;
5892 v->nrxqs = 1;
5893 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
5894 i == (port->nqvecs - 1)) {
5895 v->first_rxq = 0;
5896 v->nrxqs = port->nrxqs;
5897 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5898
5899 if (port->flags & MVPP2_F_DT_COMPAT)
5900 strscpy(irqname, "rx-shared", sizeof(irqname));
5901 }
5902
5903 if (port_node)
5904 v->irq = of_irq_get_byname(port_node, irqname);
5905 else
5906 v->irq = fwnode_irq_get(port->fwnode, i);
5907 if (v->irq <= 0) {
5908 ret = -EINVAL;
5909 goto err;
5910 }
5911
5912 netif_napi_add(port->dev, &v->napi, mvpp2_poll);
5913 }
5914
5915 return 0;
5916
5917 err:
5918 for (i = 0; i < port->nqvecs; i++)
5919 irq_dispose_mapping(port->qvecs[i].irq);
5920 return ret;
5921 }
5922
5923 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
5924 struct device_node *port_node)
5925 {
5926 if (port->has_tx_irqs)
5927 return mvpp2_multi_queue_vectors_init(port, port_node);
5928 else
5929 return mvpp2_simple_queue_vectors_init(port, port_node);
5930 }
5931
5932 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
5933 {
5934 int i;
5935
5936 for (i = 0; i < port->nqvecs; i++)
5937 irq_dispose_mapping(port->qvecs[i].irq);
5938 }
5939
5940 /* Configure Rx queue group interrupt for this port */
5941 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
5942 {
5943 struct mvpp2 *priv = port->priv;
5944 u32 val;
5945 int i;
5946
5947 if (priv->hw_version == MVPP21) {
5948 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
5949 port->nrxqs);
5950 return;
5951 }
5952
5953 /* Handle the more complicated PPv2.2 and PPv2.3 case */
5954 for (i = 0; i < port->nqvecs; i++) {
5955 struct mvpp2_queue_vector *qv = port->qvecs + i;
5956
5957 if (!qv->nrxqs)
5958 continue;
5959
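/* Select the (port, sw thread) group via the index register, then
 * program this vector's RXQ range (first queue and count) into the
 * sub-group config register.
 */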
5960 val = qv->sw_thread_id;
5961 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
5962 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
5963
5964 val = qv->first_rxq;
5965 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
5966 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
5967 }
5968 }
5969
5970 /* Initialize port HW */
5971 static int mvpp2_port_init(struct mvpp2_port *port)
5972 {
5973 struct device *dev = port->dev->dev.parent;
5974 struct mvpp2 *priv = port->priv;
5975 struct mvpp2_txq_pcpu *txq_pcpu;
5976 unsigned int thread;
5977 int queue, err, val;
5978
5979 /* Checks for hardware constraints */
5980 if (port->first_rxq + port->nrxqs >
5981 MVPP2_MAX_PORTS * priv->max_port_rxqs)
5982 return -EINVAL;
5983
5984 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
5985 return -EINVAL;
5986
5987 /* Disable port */
5988 mvpp2_egress_disable(port);
5989 mvpp2_port_disable(port);
5990
5991 if (mvpp2_is_xlg(port->phy_interface)) {
5992 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5993 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5994 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5995 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5996 } else {
5997 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5998 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5999 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
6000 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6001 }
6002
6003 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
6004
6005 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
6006 GFP_KERNEL);
6007 if (!port->txqs)
6008 return -ENOMEM;
6009
6010 /* Associate physical Tx queues with this port and initialize them.
6011 * The mapping is predefined.
6012 */
6013 for (queue = 0; queue < port->ntxqs; queue++) {
6014 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6015 struct mvpp2_tx_queue *txq;
6016
6017 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
6018 if (!txq) {
6019 err = -ENOMEM;
6020 goto err_free_percpu;
6021 }
6022
6023 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6024 if (!txq->pcpu) {
6025 err = -ENOMEM;
6026 goto err_free_percpu;
6027 }
6028
6029 txq->id = queue_phy_id;
6030 txq->log_id = queue;
6031 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6032 for (thread = 0; thread < priv->nthreads; thread++) {
6033 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
6034 txq_pcpu->thread = thread;
6035 }
6036
6037 port->txqs[queue] = txq;
6038 }
6039
6040 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
6041 GFP_KERNEL);
6042 if (!port->rxqs) {
6043 err = -ENOMEM;
6044 goto err_free_percpu;
6045 }
6046
6047 /* Allocate and initialize Rx queue for this port */
6048 for (queue = 0; queue < port->nrxqs; queue++) {
6049 struct mvpp2_rx_queue *rxq;
6050
6051 /* Map physical Rx queue to port's logical Rx queue */
6052 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6053 if (!rxq) {
6054 err = -ENOMEM;
6055 goto err_free_percpu;
6056 }
6057 /* Map this Rx queue to a physical queue */
6058 rxq->id = port->first_rxq + queue;
6059 rxq->port = port->id;
6060 rxq->logic_rxq = queue;
6061
6062 port->rxqs[queue] = rxq;
6063 }
6064
6065 mvpp2_rx_irqs_setup(port);
6066
6067 /* Create Rx descriptor rings */
6068 for (queue = 0; queue < port->nrxqs; queue++) {
6069 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6070
6071 rxq->size = port->rx_ring_size;
6072 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6073 rxq->time_coal = MVPP2_RX_COAL_USEC;
6074 }
6075
6076 mvpp2_ingress_disable(port);
6077
6078 /* Port default configuration */
6079 mvpp2_defaults_set(port);
6080
6081 /* Port's classifier configuration */
6082 mvpp2_cls_oversize_rxq_set(port);
6083 mvpp2_cls_port_config(port);
6084
6085 if (mvpp22_rss_is_supported(port))
6086 mvpp22_port_rss_init(port);
6087
6088 /* Provide an initial Rx packet size */
6089 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6090
6091 /* Initialize pools for swf */
6092 err = mvpp2_swf_bm_pool_init(port);
6093 if (err)
6094 goto err_free_percpu;
6095
6096 /* Clear all port stats */
6097 mvpp2_read_stats(port);
6098 memset(port->ethtool_stats, 0,
6099 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
6100
6101 return 0;
6102
6103 err_free_percpu:
6104 for (queue = 0; queue < port->ntxqs; queue++) {
6105 if (!port->txqs[queue])
6106 continue;
6107 free_percpu(port->txqs[queue]->pcpu);
6108 }
6109 return err;
6110 }
6111
6112 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
6113 unsigned long *flags)
6114 {
6115 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
6116 "tx-cpu3" };
6117 int i;
6118
6119 for (i = 0; i < 5; i++)
6120 if (of_property_match_string(port_node, "interrupt-names",
6121 irqs[i]) < 0)
6122 return false;
6123
6124 *flags |= MVPP2_F_DT_COMPAT;
6125 return true;
6126 }
6127
6128 /* Checks if the port dt description has the required Tx interrupts:
6129 * - PPv2.1: there are no such interrupts.
6130 * - PPv2.2 and PPv2.3:
6131 * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
6132 * - The new ones have: "hifX" with X in [0..8]
6133 *
6134 * All these variants are supported to keep backward compatibility.
6135 */
6136 static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
6137 struct device_node *port_node,
6138 unsigned long *flags)
6139 {
6140 char name[5];
6141 int i;
6142
6143 /* ACPI */
6144 if (!port_node)
6145 return true;
6146
6147 if (priv->hw_version == MVPP21)
6148 return false;
6149
6150 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
6151 return true;
6152
6153 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
6154 snprintf(name, 5, "hif%d", i);
6155 if (of_property_match_string(port_node, "interrupt-names",
6156 name) < 0)
6157 return false;
6158 }
6159
6160 return true;
6161 }
6162
6163 static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
6164 struct fwnode_handle *fwnode,
6165 char **mac_from)
6166 {
6167 struct mvpp2_port *port = netdev_priv(dev);
6168 char hw_mac_addr[ETH_ALEN] = {0};
6169 char fw_mac_addr[ETH_ALEN];
6170 int ret;
6171
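/* MAC address selection order: firmware node property first, then
 * (PPv2.1 only) whatever the hardware registers hold, then an nvmem
 * cell, and finally a random address as the last resort.
 */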
6172 if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
6173 *mac_from = "firmware node";
6174 eth_hw_addr_set(dev, fw_mac_addr);
6175 return 0;
6176 }
6177
6178 if (priv->hw_version == MVPP21) {
6179 mvpp21_get_mac_address(port, hw_mac_addr);
6180 if (is_valid_ether_addr(hw_mac_addr)) {
6181 *mac_from = "hardware";
6182 eth_hw_addr_set(dev, hw_mac_addr);
6183 return 0;
6184 }
6185 }
6186
6187 /* Only valid on OF enabled platforms */
6188 ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr);
6189 if (ret == -EPROBE_DEFER)
6190 return ret;
6191 if (!ret) {
6192 *mac_from = "nvmem cell";
6193 eth_hw_addr_set(dev, fw_mac_addr);
6194 return 0;
6195 }
6196
6197 *mac_from = "random";
6198 eth_hw_addr_random(dev);
6199
6200 return 0;
6201 }
6202
6203 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
6204 {
6205 return container_of(config, struct mvpp2_port, phylink_config);
6206 }
6207
6208 static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs)
6209 {
6210 return container_of(pcs, struct mvpp2_port, pcs_xlg);
6211 }
6212
6213 static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
6214 {
6215 return container_of(pcs, struct mvpp2_port, pcs_gmac);
6216 }
6217
6218 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
6219 unsigned int neg_mode,
6220 struct phylink_link_state *state)
6221 {
6222 struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
6223 u32 val;
6224
6225 if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER)
6226 state->speed = SPEED_5000;
6227 else
6228 state->speed = SPEED_10000;
6229 state->duplex = 1;
6230 state->an_complete = 1;
6231
6232 val = readl(port->base + MVPP22_XLG_STATUS);
6233 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
6234
6235 state->pause = 0;
6236 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6237 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
6238 state->pause |= MLO_PAUSE_TX;
6239 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
6240 state->pause |= MLO_PAUSE_RX;
6241 }
6242
6243 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
6244 phy_interface_t interface,
6245 const unsigned long *advertising,
6246 bool permit_pause_to_mac)
6247 {
6248 return 0;
6249 }
6250
6251 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
6252 .pcs_get_state = mvpp2_xlg_pcs_get_state,
6253 .pcs_config = mvpp2_xlg_pcs_config,
6254 };
6255
6256 static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs,
6257 phy_interface_t interface)
6258 {
6259 /* When operating in an 802.3z mode, we must have AN enabled:
6260 * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
6261 * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
6262 * Therefore, inband is "required".
6263 */
6264 if (phy_interface_mode_is_8023z(interface))
6265 return LINK_INBAND_ENABLE;
6266
6267 /* SGMII and RGMII can be configured to use inband signalling of the
6268 * AN result. Indicate these as "possible".
6269 */
6270 if (interface == PHY_INTERFACE_MODE_SGMII ||
6271 phy_interface_mode_is_rgmii(interface))
6272 return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
6273
6274 /* For any other modes, indicate that inband is not supported. */
6275 return LINK_INBAND_DISABLE;
6276 }
6277
6278 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
6279 unsigned int neg_mode,
6280 struct phylink_link_state *state)
6281 {
6282 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6283 u32 val;
6284
6285 val = readl(port->base + MVPP2_GMAC_STATUS0);
6286
6287 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
6288 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
6289 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
6290
6291 switch (port->phy_interface) {
6292 case PHY_INTERFACE_MODE_1000BASEX:
6293 state->speed = SPEED_1000;
6294 break;
6295 case PHY_INTERFACE_MODE_2500BASEX:
6296 state->speed = SPEED_2500;
6297 break;
6298 default:
6299 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
6300 state->speed = SPEED_1000;
6301 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
6302 state->speed = SPEED_100;
6303 else
6304 state->speed = SPEED_10;
6305 }
6306
6307 state->pause = 0;
6308 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
6309 state->pause |= MLO_PAUSE_RX;
6310 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
6311 state->pause |= MLO_PAUSE_TX;
6312 }
6313
6314 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
6315 phy_interface_t interface,
6316 const unsigned long *advertising,
6317 bool permit_pause_to_mac)
6318 {
6319 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6320 u32 mask, val, an, old_an, changed;
6321
6322 mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
6323 MVPP2_GMAC_IN_BAND_AUTONEG |
6324 MVPP2_GMAC_AN_SPEED_EN |
6325 MVPP2_GMAC_FLOW_CTRL_AUTONEG |
6326 MVPP2_GMAC_AN_DUPLEX_EN;
6327
6328 if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
6329 mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
6330 MVPP2_GMAC_CONFIG_GMII_SPEED |
6331 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6332 val = MVPP2_GMAC_IN_BAND_AUTONEG;
6333
6334 if (interface == PHY_INTERFACE_MODE_SGMII) {
6335 /* SGMII mode receives the speed and duplex from PHY */
6336 val |= MVPP2_GMAC_AN_SPEED_EN |
6337 MVPP2_GMAC_AN_DUPLEX_EN;
6338 } else {
6339 /* 802.3z mode has fixed speed and duplex */
6340 val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
6341 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6342
6343 /* The FLOW_CTRL_AUTONEG bit selects whether the GMAC pause
6344 * modes are controlled automatically by the hardware or
6345 * manually via the bits in MVPP22_GMAC_CTRL_4_REG.
6346 */
6347 if (permit_pause_to_mac)
6348 val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
6349
6350 /* Configure advertisement bits */
6351 mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
6352 if (phylink_test(advertising, Pause))
6353 val |= MVPP2_GMAC_FC_ADV_EN;
6354 if (phylink_test(advertising, Asym_Pause))
6355 val |= MVPP2_GMAC_FC_ADV_ASM_EN;
6356 }
6357 } else {
6358 val = 0;
6359 }
6360
6361 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6362 an = (an & ~mask) | val;
6363 changed = an ^ old_an;
6364 if (changed)
6365 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6366
6367 /* We are only interested in the advertisement bits changing */
6368 return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
6369 }
6370
6371 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
6372 {
6373 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6374 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6375
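/* Pulse the restart bit: setting then clearing it requests a new
 * in-band auto-negotiation cycle.
 */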
6376 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
6377 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6378 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
6379 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6380 }
6381
6382 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
6383 .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps,
6384 .pcs_get_state = mvpp2_gmac_pcs_get_state,
6385 .pcs_config = mvpp2_gmac_pcs_config,
6386 .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
6387 };
6388
6389 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
6390 const struct phylink_link_state *state)
6391 {
6392 u32 val;
6393
6394 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6395 MVPP22_XLG_CTRL0_MAC_RESET_DIS,
6396 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
6397 mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
6398 MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
6399 MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
6400 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
6401 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
6402
6403 /* Wait for reset to deassert */
6404 do {
6405 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6406 } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
6407 }
6408
6409 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
6410 const struct phylink_link_state *state)
6411 {
6412 u32 old_ctrl0, ctrl0;
6413 u32 old_ctrl2, ctrl2;
6414 u32 old_ctrl4, ctrl4;
6415
6416 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
6417 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
6418 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
6419
6420 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
6421 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK);
6422
6423 /* Configure port type */
6424 if (phy_interface_mode_is_8023z(state->interface)) {
6425 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
6426 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6427 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6428 MVPP22_CTRL4_DP_CLK_SEL |
6429 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6430 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6431 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
6432 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6433 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6434 MVPP22_CTRL4_DP_CLK_SEL |
6435 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6436 } else if (phy_interface_mode_is_rgmii(state->interface)) {
6437 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
6438 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
6439 MVPP22_CTRL4_SYNC_BYPASS_DIS |
6440 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6441 }
6442
6443 /* Configure negotiation style */
6444 if (!phylink_autoneg_inband(mode)) {
6445 /* Phy or fixed speed - no in-band AN, nothing to do, leave the
6446 * configured speed, duplex and flow control as-is.
6447 */
6448 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6449 /* SGMII in-band mode receives the speed and duplex from
6450 * the PHY. Flow control information is not received. */
6451 } else if (phy_interface_mode_is_8023z(state->interface)) {
6452 /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
6453 * they negotiate duplex: they are always operating with a fixed
6454 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
6455 * speed and full duplex here.
6456 */
6457 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
6458 }
6459
6460 if (old_ctrl0 != ctrl0)
6461 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
6462 if (old_ctrl2 != ctrl2)
6463 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
6464 if (old_ctrl4 != ctrl4)
6465 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
6466 }
6467
6468 static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config,
6469 phy_interface_t interface)
6470 {
6471 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6472
6473 /* Select the appropriate PCS operations depending on the
6474 * configured interface mode. We will only switch to a mode
6475 * that the validate() checks have already passed.
6476 */
6477 if (mvpp2_is_xlg(interface))
6478 return &port->pcs_xlg;
6479 else
6480 return &port->pcs_gmac;
6481 }
6482
6483 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
6484 phy_interface_t interface)
6485 {
6486 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6487
6488 /* Check for invalid configuration */
6489 if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
6490 netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
6491 return -EINVAL;
6492 }
6493
6494 if (port->phy_interface != interface ||
6495 phylink_autoneg_inband(mode)) {
6496 /* Force the link down when changing the interface or if in
6497 * in-band mode to ensure we do not change the configuration
6498 * while the hardware is indicating link is up. We force both
6499 * XLG and GMAC down to ensure that they're both in a known
6500 * state.
6501 */
6502 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6503 MVPP2_GMAC_FORCE_LINK_PASS |
6504 MVPP2_GMAC_FORCE_LINK_DOWN,
6505 MVPP2_GMAC_FORCE_LINK_DOWN);
6506
6507 if (mvpp2_port_supports_xlg(port))
6508 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6509 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6510 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
6511 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
6512 }
6513
6514 /* Make sure the port is disabled when reconfiguring the mode */
6515 mvpp2_port_disable(port);
6516
6517 if (port->phy_interface != interface) {
6518 /* Place GMAC into reset */
6519 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6520 MVPP2_GMAC_PORT_RESET_MASK,
6521 MVPP2_GMAC_PORT_RESET_MASK);
6522
6523 if (port->priv->hw_version >= MVPP22) {
6524 mvpp22_gop_mask_irq(port);
6525
6526 phy_power_off(port->comphy);
6527
6528 /* Reconfigure the serdes lanes */
6529 mvpp22_mode_reconfigure(port, interface);
6530 }
6531 }
6532
6533 return 0;
6534 }
6535
6536 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6537 const struct phylink_link_state *state)
6538 {
6539 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6540
6541 /* mac (re)configuration */
6542 if (mvpp2_is_xlg(state->interface))
6543 mvpp2_xlg_config(port, mode, state);
6544 else if (phy_interface_mode_is_rgmii(state->interface) ||
6545 phy_interface_mode_is_8023z(state->interface) ||
6546 state->interface == PHY_INTERFACE_MODE_SGMII)
6547 mvpp2_gmac_config(port, mode, state);
6548
6549 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
6550 mvpp2_port_loopback_set(port, state);
6551 }
6552
6553 static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
6554 phy_interface_t interface)
6555 {
6556 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6557
6558 if (port->priv->hw_version >= MVPP22 &&
6559 port->phy_interface != interface) {
6560 port->phy_interface = interface;
6561
6562 /* Unmask interrupts */
6563 mvpp22_gop_unmask_irq(port);
6564 }
6565
6566 if (!mvpp2_is_xlg(interface)) {
6567 /* Release GMAC reset and wait */
6568 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6569 MVPP2_GMAC_PORT_RESET_MASK, 0);
6570
6571 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
6572 MVPP2_GMAC_PORT_RESET_MASK)
6573 continue;
6574 }
6575
6576 mvpp2_port_enable(port);
6577
6578 /* Allow the link to come up if in in-band mode, otherwise the
6579 * link is forced via mac_link_down()/mac_link_up()
6580 */
6581 if (phylink_autoneg_inband(mode)) {
6582 if (mvpp2_is_xlg(interface))
6583 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6584 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6585 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
6586 else
6587 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6588 MVPP2_GMAC_FORCE_LINK_PASS |
6589 MVPP2_GMAC_FORCE_LINK_DOWN, 0);
6590 }
6591
6592 return 0;
6593 }
6594
6595 static void mvpp2_mac_link_up(struct phylink_config *config,
6596 struct phy_device *phy,
6597 unsigned int mode, phy_interface_t interface,
6598 int speed, int duplex,
6599 bool tx_pause, bool rx_pause)
6600 {
6601 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6602 u32 val;
6603 int i;
6604
6605 if (mvpp2_is_xlg(interface)) {
6606 if (!phylink_autoneg_inband(mode)) {
6607 val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6608 if (tx_pause)
6609 val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
6610 if (rx_pause)
6611 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
6612
6613 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6614 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
6615 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6616 MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
6617 MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
6618 }
6619 } else {
6620 if (!phylink_autoneg_inband(mode)) {
6621 val = MVPP2_GMAC_FORCE_LINK_PASS;
6622
6623 if (speed == SPEED_1000 || speed == SPEED_2500)
6624 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6625 else if (speed == SPEED_100)
6626 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6627
6628 if (duplex == DUPLEX_FULL)
6629 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6630
6631 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6632 MVPP2_GMAC_FORCE_LINK_DOWN |
6633 MVPP2_GMAC_FORCE_LINK_PASS |
6634 MVPP2_GMAC_CONFIG_MII_SPEED |
6635 MVPP2_GMAC_CONFIG_GMII_SPEED |
6636 MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
6637 }
6638
6639 /* We can always update the flow control enable bits;
6640 * these will only be effective if flow control AN
6641 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
6642 */
6643 val = 0;
6644 if (tx_pause)
6645 val |= MVPP22_CTRL4_TX_FC_EN;
6646 if (rx_pause)
6647 val |= MVPP22_CTRL4_RX_FC_EN;
6648
6649 mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
6650 MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
6651 val);
6652 }
6653
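/* When global flow control is enabled, propagate the negotiated
 * pause state to the RXQ thresholds and to every BM pool this port
 * uses, and, on PPv2.3, to the RX FIFO as well.
 */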
6654 if (port->priv->global_tx_fc) {
6655 port->tx_fc = tx_pause;
6656 if (tx_pause)
6657 mvpp2_rxq_enable_fc(port);
6658 else
6659 mvpp2_rxq_disable_fc(port);
6660 if (port->priv->percpu_pools) {
6661 for (i = 0; i < port->nrxqs; i++)
6662 mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause);
6663 } else {
6664 mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause);
6665 mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause);
6666 }
6667 if (port->priv->hw_version == MVPP23)
6668 mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause);
6669 }
6670
6671 mvpp2_port_enable(port);
6672
6673 mvpp2_egress_enable(port);
6674 mvpp2_ingress_enable(port);
6675 netif_tx_wake_all_queues(port->dev);
6676 }
6677
6678 static void mvpp2_mac_link_down(struct phylink_config *config,
6679 unsigned int mode, phy_interface_t interface)
6680 {
6681 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6682 u32 val;
6683
6684 if (!phylink_autoneg_inband(mode)) {
6685 if (mvpp2_is_xlg(interface)) {
6686 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6687 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6688 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
6689 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
6690 } else {
6691 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6692 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
6693 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
6694 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6695 }
6696 }
6697
6698 netif_tx_stop_all_queues(port->dev);
6699 mvpp2_egress_disable(port);
6700 mvpp2_ingress_disable(port);
6701
6702 mvpp2_port_disable(port);
6703 }
6704
6705 static void mvpp2_mac_disable_tx_lpi(struct phylink_config *config)
6706 {
6707 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6708
6709 mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL1,
6710 MVPP2_GMAC_LPI_CTRL1_REQ_EN, 0);
6711 }
6712
6713 static int mvpp2_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
6714 bool tx_clk_stop)
6715 {
6716 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6717 u32 ts, tw, lpi1, status;
6718
6719 status = readl(port->base + MVPP2_GMAC_STATUS0);
6720 if (status & MVPP2_GMAC_STATUS0_GMII_SPEED) {
6721 /* At 1G speeds, the timer resolution is 1us, and
6722 * 802.3 says tw is 16.5us. Round up to 17us.
6723 */
6724 tw = 17;
6725 ts = timer;
6726 } else {
6727 /* At 100M speeds, the timer resolution is 10us, and
6728 * 802.3 says tw is 30us.
6729 */
6730 tw = 3;
6731 ts = DIV_ROUND_UP(timer, 10);
6732 }
6733
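/* Clamp ts to the largest value the TS field can hold */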
6734 if (ts > 255)
6735 ts = 255;
6736
6737 /* Configure ts */
6738 mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL0,
6739 MVPP2_GMAC_LPI_CTRL0_TS_MASK,
6740 FIELD_PREP(MVPP2_GMAC_LPI_CTRL0_TS_MASK, ts));
6741
6742 lpi1 = readl(port->base + MVPP2_GMAC_LPI_CTRL1);
6743
6744 /* Configure tw */
6745 lpi1 = u32_replace_bits(lpi1, tw, MVPP2_GMAC_LPI_CTRL1_TW_MASK);
6746
6747 /* Enable LPI generation */
6748 writel(lpi1 | MVPP2_GMAC_LPI_CTRL1_REQ_EN,
6749 port->base + MVPP2_GMAC_LPI_CTRL1);
6750
6751 return 0;
6752 }
6753
6754 static const struct phylink_mac_ops mvpp2_phylink_ops = {
6755 .mac_select_pcs = mvpp2_select_pcs,
6756 .mac_prepare = mvpp2_mac_prepare,
6757 .mac_config = mvpp2_mac_config,
6758 .mac_finish = mvpp2_mac_finish,
6759 .mac_link_up = mvpp2_mac_link_up,
6760 .mac_link_down = mvpp2_mac_link_down,
6761 .mac_enable_tx_lpi = mvpp2_mac_enable_tx_lpi,
6762 .mac_disable_tx_lpi = mvpp2_mac_disable_tx_lpi,
6763 };
6764
6765 /* Work-around for ACPI */
6766 static void mvpp2_acpi_start(struct mvpp2_port *port)
6767 {
6768 /* Phylink isn't used as of now for ACPI, so the MAC has to be
6769 * configured manually when the interface is started. This will
6770 * be removed as soon as phylink ACPI support lands.
6771 */
6772 struct phylink_link_state state = {
6773 .interface = port->phy_interface,
6774 };
6775 struct phylink_pcs *pcs;
6776
6777 pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface);
6778
6779 mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND,
6780 port->phy_interface);
6781 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
6782 pcs->ops->pcs_config(pcs, PHYLINK_PCS_NEG_INBAND_ENABLED,
6783 port->phy_interface, state.advertising,
6784 false);
6785 mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
6786 port->phy_interface);
6787 mvpp2_mac_link_up(&port->phylink_config, NULL,
6788 MLO_AN_INBAND, port->phy_interface,
6789 SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
6790 }
6791
6792 /* In order to ensure backward compatibility for ACPI, check whether the
6793 * port firmware node provides the description required to use phylink.
6794 */
6795 static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode)
6796 {
6797 if (!is_acpi_node(port_fwnode))
6798 return false;
6799
6800 return (!fwnode_property_present(port_fwnode, "phy-handle") &&
6801 !fwnode_property_present(port_fwnode, "managed") &&
6802 !fwnode_get_named_child_node(port_fwnode, "fixed-link"));
6803 }
6804
6805 /* Ports initialization */
6806 static int mvpp2_port_probe(struct platform_device *pdev,
6807 struct fwnode_handle *port_fwnode,
6808 struct mvpp2 *priv)
6809 {
6810 struct phy *comphy = NULL;
6811 struct mvpp2_port *port;
6812 struct mvpp2_port_pcpu *port_pcpu;
6813 struct device_node *port_node = to_of_node(port_fwnode);
6814 netdev_features_t features;
6815 struct net_device *dev;
6816 struct phylink *phylink;
6817 char *mac_from = "";
6818 unsigned int ntxqs, nrxqs, thread;
6819 unsigned long flags = 0;
6820 bool has_tx_irqs;
6821 u32 id;
6822 int phy_mode;
6823 int err, i;
6824
6825 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
6826 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
6827 dev_err(&pdev->dev,
6828 "not enough IRQs to support multi queue mode\n");
6829 return -EINVAL;
6830 }
6831
6832 ntxqs = MVPP2_MAX_TXQ;
6833 nrxqs = mvpp2_get_nrxqs(priv);
6834
6835 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
6836 if (!dev)
6837 return -ENOMEM;
6838
6839 phy_mode = fwnode_get_phy_mode(port_fwnode);
6840 if (phy_mode < 0) {
6841 dev_err(&pdev->dev, "incorrect phy mode\n");
6842 err = phy_mode;
6843 goto err_free_netdev;
6844 }
6845
6846 /*
6847 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
6848 * Existing usage of 10GBASE-KR is not correct; no backplane
6849 * negotiation is done, and this driver does not actually support
6850 * 10GBASE-KR.
6851 */
6852 if (phy_mode == PHY_INTERFACE_MODE_10GKR)
6853 phy_mode = PHY_INTERFACE_MODE_10GBASER;
6854
6855 if (port_node) {
6856 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
6857 if (IS_ERR(comphy)) {
6858 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
6859 err = -EPROBE_DEFER;
6860 goto err_free_netdev;
6861 }
6862 comphy = NULL;
6863 }
6864 }
6865
6866 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
6867 err = -EINVAL;
6868 dev_err(&pdev->dev, "missing port-id value\n");
6869 goto err_free_netdev;
6870 }
6871
6872 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
6873 dev->watchdog_timeo = 5 * HZ;
6874 dev->netdev_ops = &mvpp2_netdev_ops;
6875 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6876
6877 port = netdev_priv(dev);
6878 port->dev = dev;
6879 port->fwnode = port_fwnode;
6880 port->ntxqs = ntxqs;
6881 port->nrxqs = nrxqs;
6882 port->priv = priv;
6883 port->has_tx_irqs = has_tx_irqs;
6884 port->flags = flags;
6885
6886 err = mvpp2_queue_vectors_init(port, port_node);
6887 if (err)
6888 goto err_free_netdev;
6889
6890 if (port_node)
6891 port->port_irq = of_irq_get_byname(port_node, "link");
6892 else
6893 port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
6894 if (port->port_irq == -EPROBE_DEFER) {
6895 err = -EPROBE_DEFER;
6896 goto err_deinit_qvecs;
6897 }
6898 if (port->port_irq <= 0)
6899 /* the link irq is optional */
6900 port->port_irq = 0;
6901
6902 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
6903 port->flags |= MVPP2_F_LOOPBACK;
6904
6905 port->id = id;
6906 if (priv->hw_version == MVPP21)
6907 port->first_rxq = port->id * port->nrxqs;
6908 else
6909 port->first_rxq = port->id * priv->max_port_rxqs;
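
	/* Illustrative example: on PPv2.2/2.3, max_port_rxqs is 32 (set in
	 * mvpp2_probe() below), so port 2 starts at hardware RXQ 64; on
	 * PPv2.1 the ports are packed back to back by their actual nrxqs.
	 */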

	port->of_node = port_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		port->base = devm_platform_ioremap_resource(pdev, 2 + id);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;

		/* We may want a property to describe whether we should use
		 * MAC hardware timestamping.
		 */
		if (priv->tai)
			port->hwtstamp = true;
	}

	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
	if (err < 0)
		goto err_free_stats;

	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

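	/* Without per-queue TX-done interrupts, TX completion is handled by
	 * a deferred per-CPU hrtimer (mvpp2_hr_timer_cb), set up below.
	 */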
	if (!port->has_tx_irqs) {
		for (thread = 0; thread < priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC,
				      HRTIMER_MODE_REL_PINNED_SOFT);
			port_pcpu->timer_scheduled = false;
			port_pcpu->dev = dev;
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mvpp22_rss_is_supported(port)) {
		dev->hw_features |= NETIF_F_RXHASH;
		dev->features |= NETIF_F_NTUPLE;
	}
	if (!port->priv->percpu_pools)
		mvpp2_set_hw_csum(port, port->pool_long->id);
	else if (port->ntxqs >= num_possible_cpus() * 2)
		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
				    NETDEV_XDP_ACT_REDIRECT |
				    NETDEV_XDP_ACT_NDO_XMIT;

	dev->vlan_features |= features;
	netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);

	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20, rounded down to a multiple of 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
	device_set_node(&dev->dev, port_fwnode);
	dev->dev_port = port->id;

	port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
	port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;

	if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
		port->phylink_config.dev = &dev->dev;
		port->phylink_config.type = PHYLINK_NETDEV;
		port->phylink_config.mac_capabilities =
			MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;

		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  port->phylink_config.lpi_interfaces);

		port->phylink_config.lpi_capabilities = MAC_1000FD | MAC_100FD;

		/* Setup EEE. Choose 250us idle. */
		port->phylink_config.lpi_timer_default = 250;
		port->phylink_config.eee_enabled_default = true;

		if (port->priv->global_tx_fc)
			port->phylink_config.mac_capabilities |=
				MAC_SYM_PAUSE | MAC_ASYM_PAUSE;

		if (mvpp2_port_supports_xlg(port)) {
			/* If a COMPHY is present, we can support any of
			 * the serdes modes and switch between them.
			 */
			if (comphy) {
				__set_bit(PHY_INTERFACE_MODE_5GBASER,
					  port->phylink_config.supported_interfaces);
				__set_bit(PHY_INTERFACE_MODE_10GBASER,
					  port->phylink_config.supported_interfaces);
				__set_bit(PHY_INTERFACE_MODE_XAUI,
					  port->phylink_config.supported_interfaces);
			} else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) {
				__set_bit(PHY_INTERFACE_MODE_5GBASER,
					  port->phylink_config.supported_interfaces);
			} else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) {
				__set_bit(PHY_INTERFACE_MODE_10GBASER,
					  port->phylink_config.supported_interfaces);
			} else if (phy_mode == PHY_INTERFACE_MODE_XAUI) {
				__set_bit(PHY_INTERFACE_MODE_XAUI,
					  port->phylink_config.supported_interfaces);
			}

			if (comphy)
				port->phylink_config.mac_capabilities |=
					MAC_10000FD | MAC_5000FD;
			else if (phy_mode == PHY_INTERFACE_MODE_5GBASER)
				port->phylink_config.mac_capabilities |=
					MAC_5000FD;
			else
				port->phylink_config.mac_capabilities |=
					MAC_10000FD;
		}

		if (mvpp2_port_supports_rgmii(port)) {
			phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_MII,
				  port->phylink_config.supported_interfaces);
		}

		if (comphy) {
			/* If a COMPHY is present, we can support any of the
			 * serdes modes and switch between them.
			 */
			__set_bit(PHY_INTERFACE_MODE_SGMII,
				  port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_1000BASEX,
				  port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_2500BASEX,
				  port->phylink_config.supported_interfaces);
		} else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
			/* No COMPHY, with only 2500BASE-X mode supported */
			__set_bit(PHY_INTERFACE_MODE_2500BASEX,
				  port->phylink_config.supported_interfaces);
		} else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
			   phy_mode == PHY_INTERFACE_MODE_SGMII) {
			/* No COMPHY, we can switch between 1000BASE-X and
			 * SGMII
			 */
			__set_bit(PHY_INTERFACE_MODE_1000BASEX,
				  port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_SGMII,
				  port->phylink_config.supported_interfaces);
		}

		phylink = phylink_create(&port->phylink_config, port_fwnode,
					 phy_mode, &mvpp2_phylink_ops);
		if (IS_ERR(phylink)) {
			err = PTR_ERR(phylink);
			goto err_free_port_pcpu;
		}
		port->phylink = phylink;

		mvpp2_mac_disable_tx_lpi(&port->phylink_config);
	} else {
		dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
		port->phylink = NULL;
	}

	/* Cycle the comphy to power it down, saving 270mW per port -
	 * don't worry about an error powering it up. When the comphy
	 * driver does this, we can remove this code.
	 */
	if (port->comphy) {
		err = mvpp22_comphy_init(port, port->phy_interface);
		if (err == 0)
			phy_power_off(port->comphy);
	}

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_phylink;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_phylink:
	if (port->phylink)
		phylink_destroy(port->phylink);
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->port_irq)
		irq_dispose_mapping(port->port_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	if (port->phylink)
		phylink_destroy(port->phylink);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->port_irq)
		irq_dispose_mapping(port->port_irq);
	free_netdev(port->dev);
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
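
/* Illustrative example (hypothetical values): a CS window with base
 * 0x40000000, mbus_attr 0x1e and target id 0 programs WIN_BASE with
 * 0x40001e00, i.e. the upper 16 bits of the base, the attribute in
 * bits 15:8 and the target id in the low byte.
 */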

/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
{
	int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
}

/* Initialize Rx FIFOs: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
 * 4kB of fixed space must be assigned to the loopback port.
 * Redistribute the remaining available 44kB among all active ports.
 * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, which
 * is capable of a 2.5G SGMII link.
 */
static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int remaining_ports_count;
	unsigned long port_map;
	int size_remainder;
	int port, size;

	/* The loopback requires fixed 4kB of the FIFO space assignment. */
	mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
			      MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);

	/* Set RX FIFO size to 0 for inactive ports. */
	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
		mvpp22_rx_fifo_set_hw(priv, port, 0);

	/* Assign remaining RX FIFO space among all active ports. */
	size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
	remaining_ports_count = hweight_long(port_map);

	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
		if (remaining_ports_count == 1)
			size = size_remainder;
		else if (port == 0)
			size = max(size_remainder / remaining_ports_count,
				   MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
		else if (port == 1)
			size = max(size_remainder / remaining_ports_count,
				   MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
		else
			size = size_remainder / remaining_ports_count;

		size_remainder -= size;
		remaining_ports_count--;

		mvpp22_rx_fifo_set_hw(priv, port, size);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
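
/* Worked example (illustrative): with ports 0, 1 and 2 all active, the
 * 44kB pool is split as 32kB for port 0 (max(44/3, 32)), then 8kB for
 * port 1 (max(12/2, 8)) and the remaining 4kB for port 2, matching the
 * loop above with integer division.
 */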

/* Configure Rx FIFO Flow control thresholds */
static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
{
	int port, val;

	/* Port 0: maximum speed 10Gb/s port,
	 *	   spec requires an RX FIFO threshold of 9KB.
	 * Port 1: maximum speed 5Gb/s port,
	 *	   spec requires an RX FIFO threshold of 4KB.
	 * Port 2: maximum speed 1Gb/s port,
	 *	   spec requires an RX FIFO threshold of 2KB.
	 */

	/* Without loopback port */
	for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
		if (port == 0)
			val = MVPP23_PORT0_FIFO_TRSH;
		else if (port == 1)
			val = MVPP23_PORT1_FIFO_TRSH;
		else
			val = MVPP23_PORT2_FIFO_TRSH;

		val = (val / MVPP2_RX_FC_TRSH_UNIT) << MVPP2_RX_FC_TRSH_OFFS;
		val &= MVPP2_RX_FC_TRSH_MASK;
		mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
	}
}

/* Enable/disable Rx FIFO Flow control */
void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
{
	int val;

	val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));

	if (en)
		val |= MVPP2_RX_FC_EN;
	else
		val &= ~MVPP2_RX_FC_EN;

	mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
}

static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
{
	int threshold = MVPP2_TX_FIFO_THRESHOLD(size);

	mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
	mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
}

/* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
 * 1kB of fixed space must be assigned to the loopback port.
 * Redistribute the remaining available 18kB among all active ports.
 * The 10G interface should use 10kB (the maximum possible size per
 * single port).
 */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int remaining_ports_count;
	unsigned long port_map;
	int size_remainder;
	int port, size;

	/* The loopback requires fixed 1kB of the FIFO space assignment. */
	mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
			      MVPP22_TX_FIFO_DATA_SIZE_1KB);
	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);

	/* Set TX FIFO size to 0 for inactive ports. */
	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
		mvpp22_tx_fifo_set_hw(priv, port, 0);

	/* Assign remaining TX FIFO space among all active ports. */
	size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
	remaining_ports_count = hweight_long(port_map);

	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
		if (remaining_ports_count == 1)
			size = min(size_remainder,
				   MVPP22_TX_FIFO_DATA_SIZE_10KB);
		else if (port == 0)
			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
		else
			size = size_remainder / remaining_ports_count;

		size_remainder -= size;
		remaining_ports_count--;

		mvpp22_tx_fifo_set_hw(priv, port, size);
	}
}
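
/* Worked example (illustrative): with ports 0, 1 and 2 all active, the
 * 18kB pool is split as 10kB for port 0, then 4kB each for ports 1 and
 * 2; with only port 0 active it is capped at 10kB by the min() above.
 */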

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version >= MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
		if (priv->hw_version == MVPP23)
			mvpp23_rx_fifo_fc_set_tresh(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(&pdev->dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

static int mvpp2_get_sram(struct platform_device *pdev,
			  struct mvpp2 *priv)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!res) {
		if (has_acpi_companion(&pdev->dev))
			dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
		else
			dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
		return 0;
	}

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	priv->cm3_base = base;
	return 0;
}

static int mvpp2_probe(struct platform_device *pdev)
{
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i, shared;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);

	/* multi queue mode isn't supported on PPV2.1, fall back to single
	 * mode
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res) {
			dev_err(&pdev->dev, "Invalid resource\n");
			return -EINVAL;
		}
		if (has_acpi_companion(&pdev->dev)) {
			/* If the MDIO memory region is declared in ACPI,
			 * it may already appear as 'in-use' in the OS.
			 * Because it is overlapped by the second region of
			 * the network controller, make sure it is released
			 * before requesting it again. The mvpp2 driver
			 * takes care to avoid concurrent access to this
			 * memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		/* Map CM3 SRAM */
		err = mvpp2_get_sram(pdev, priv);
		if (err)
			dev_warn(&pdev->dev, "Failed to map CM3 SRAM\n");

		/* Enable global Flow Control only if the SRAM mapping
		 * succeeded
		 */
		if (priv->cm3_base)
			priv->global_tx_fc = true;
	}

	if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	if (priv->hw_version >= MVPP22 &&
	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
		priv->percpu_pools = 1;

	mvpp2_setup_bm_pool();

	priv->nthreads = min_t(unsigned int, num_present_cpus(),
			       MVPP2_MAX_THREADS);

	shared = num_present_cpus() - priv->nthreads;
	if (shared > 0)
		bitmap_set(&priv->lock_map, 0,
			   min_t(int, shared, MVPP2_MAX_THREADS));
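
	/* Illustrative example: assuming MVPP2_MAX_THREADS is 9, a system
	 * with 12 present CPUs gets nthreads = 9; the 3 CPUs left over
	 * share register windows, so those threads are marked in lock_map
	 * to serialize access.
	 */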

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}
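
	/* Per-thread register windows: on PPv2.2/2.3 each thread gets its
	 * own window (base + i * MVPP22_ADDR_SPACE_SZ); on PPv2.1,
	 * MVPP21_ADDR_SPACE_SZ is expected to be 0, so all threads alias
	 * the same window.
	 */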

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version >= MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				err = PTR_ERR(priv->mg_core_clk);
				goto err_mg_clk;
			}

			err = clk_prepare_enable(priv->mg_core_clk);
			if (err < 0)
				goto err_mg_clk;
		}

		priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			goto err_mg_core_clk;
		}

		err = clk_prepare_enable(priv->axi_clk);
		if (err < 0)
			goto err_mg_core_clk;

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else {
		err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk);
		if (err) {
			dev_err(&pdev->dev, "missing clock-frequency value\n");
			return err;
		}
	}

	if (priv->hw_version >= MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}

	/* Map the ports marked active in DT/ACPI. Must be done before the
	 * FIFO configuration in mvpp2_init().
	 */
	device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
		if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
			priv->port_map |= BIT(i);
	}

	if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
		priv->hw_version = MVPP23;

	/* Init mss lock */
	spin_lock_init(&priv->mss_spinlock);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	err = mvpp22_tai_probe(&pdev->dev, priv);
	if (err < 0)
		goto err_axi_clk;

	/* Initialize ports */
	device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packets counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. A workqueue is therefore used to maintain 64-bit
	 * counters.
	 */
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
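	/* e.g. "stats-wq-eth0+" when several ports share the queue (the
	 * interface name is illustrative).
	 */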
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
		err = mvpp2_enable_global_fc(priv);
		if (err)
			dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	for (i = 0; i < priv->port_count; i++)
		mvpp2_port_remove(priv->port_list[i]);
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
	clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static void mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	int i, poolnum = MVPP2_BM_POOLS_NUM;

	mvpp2_dbgfs_cleanup(priv);

	for (i = 0; i < priv->port_count; i++) {
		mutex_destroy(&priv->port_list[i]->gather_stats_lock);
		mvpp2_port_remove(priv->port_list[i]);
	}

	destroy_workqueue(priv->stats_queue);

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	for (i = 0; i < poolnum; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (!dev_of_node(&pdev->dev))
		return;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
#endif

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

static int __init mvpp2_driver_init(void)
{
	return platform_driver_register(&mvpp2_driver);
}
module_init(mvpp2_driver_init);

static void __exit mvpp2_driver_exit(void)
{
	platform_driver_unregister(&mvpp2_driver);
	mvpp2_dbgfs_exit();
}
module_exit(mvpp2_driver_exit);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");