xref: /linux/drivers/net/ethernet/marvell/mvneta.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 /*
2  * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
3  *
4  * Copyright (C) 2012 Marvell
5  *
6  * Rami Rosen <rosenr@marvell.com>
7  * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
8  *
9  * This file is licensed under the terms of the GNU General Public
10  * License version 2. This program is licensed "as is" without any
11  * warranty of any kind, whether express or implied.
12  */
13 
14 #include <linux/clk.h>
15 #include <linux/cpu.h>
16 #include <linux/etherdevice.h>
17 #include <linux/if_vlan.h>
18 #include <linux/inetdevice.h>
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/kernel.h>
22 #include <linux/mbus.h>
23 #include <linux/module.h>
24 #include <linux/netdevice.h>
25 #include <linux/of.h>
26 #include <linux/of_address.h>
27 #include <linux/of_irq.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_net.h>
30 #include <linux/phy/phy.h>
31 #include <linux/phy.h>
32 #include <linux/phylink.h>
33 #include <linux/platform_device.h>
34 #include <linux/skbuff.h>
35 #include <net/hwbm.h>
36 #include "mvneta_bm.h"
37 #include <net/ip.h>
38 #include <net/ipv6.h>
39 #include <net/tso.h>
40 #include <net/page_pool/helpers.h>
41 #include <net/pkt_sched.h>
42 #include <linux/bpf_trace.h>
43 
44 /* Registers */
45 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
46 #define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
47 #define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
48 #define      MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
49 #define      MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
50 #define      MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
51 #define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
52 #define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
53 #define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
54 #define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
55 #define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
56 #define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
57 #define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
58 #define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
59 #define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
60 #define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
61 #define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
62 #define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
63 #define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
64 #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
65 #define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
66 #define      MVNETA_PORT_POOL_BUFFER_SZ_MASK	0xfff8
67 #define MVNETA_PORT_RX_RESET                    0x1cc0
68 #define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
69 #define MVNETA_PHY_ADDR                         0x2000
70 #define      MVNETA_PHY_ADDR_MASK               0x1f
71 #define MVNETA_MBUS_RETRY                       0x2010
72 #define MVNETA_UNIT_INTR_CAUSE                  0x2080
73 #define MVNETA_UNIT_CONTROL                     0x20B0
74 #define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
75 #define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
76 #define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
77 #define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
78 #define MVNETA_BASE_ADDR_ENABLE                 0x2290
79 #define      MVNETA_AC5_CNM_DDR_TARGET		0x2
80 #define      MVNETA_AC5_CNM_DDR_ATTR		0xb
81 #define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
82 #define MVNETA_PORT_CONFIG                      0x2400
83 #define      MVNETA_UNI_PROMISC_MODE            BIT(0)
84 #define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
85 #define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
86 #define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
87 #define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
88 #define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
89 #define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
90 #define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
91 #define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
92 						 MVNETA_DEF_RXQ_ARP(q)	 | \
93 						 MVNETA_DEF_RXQ_TCP(q)	 | \
94 						 MVNETA_DEF_RXQ_UDP(q)	 | \
95 						 MVNETA_DEF_RXQ_BPDU(q)	 | \
96 						 MVNETA_TX_UNSET_ERR_SUM | \
97 						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
98 #define MVNETA_PORT_CONFIG_EXTEND                0x2404
99 #define MVNETA_MAC_ADDR_LOW                      0x2414
100 #define MVNETA_MAC_ADDR_HIGH                     0x2418
101 #define MVNETA_SDMA_CONFIG                       0x241c
102 #define      MVNETA_SDMA_BRST_SIZE_16            4
103 #define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
104 #define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
105 #define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
106 #define      MVNETA_DESC_SWAP                    BIT(6)
107 #define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
108 #define	MVNETA_VLAN_PRIO_TO_RXQ			 0x2440
109 #define      MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
110 #define MVNETA_PORT_STATUS                       0x2444
111 #define      MVNETA_TX_IN_PRGRS                  BIT(0)
112 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
113 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
114 /* Only exists on Armada XP and Armada 370 */
115 #define MVNETA_SERDES_CFG			 0x24A0
116 #define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
117 #define      MVNETA_QSGMII_SERDES_PROTO		 0x0667
118 #define      MVNETA_HSGMII_SERDES_PROTO		 0x1107
119 #define MVNETA_TYPE_PRIO                         0x24bc
120 #define      MVNETA_FORCE_UNI                    BIT(21)
121 #define MVNETA_TXQ_CMD_1                         0x24e4
122 #define MVNETA_TXQ_CMD                           0x2448
123 #define      MVNETA_TXQ_DISABLE_SHIFT            8
124 #define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
125 #define MVNETA_RX_DISCARD_FRAME_COUNT		 0x2484
126 #define MVNETA_OVERRUN_FRAME_COUNT		 0x2488
127 #define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
128 #define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
129 #define MVNETA_ACC_MODE                          0x2500
130 #define MVNETA_BM_ADDRESS                        0x2504
131 #define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
132 #define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
133 #define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
134 #define      MVNETA_CPU_RXQ_ACCESS(rxq)		 BIT(rxq)
135 #define      MVNETA_CPU_TXQ_ACCESS(txq)		 BIT(txq + 8)
136 #define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
137 
138 /* Exception Interrupt Port/Queue Cause register
139  *
140  * Their behavior depends on the mapping done using the PCPX2Q
141  * registers. For a given CPU, if the bit associated with a queue is
142  * not set, then a read of the register from this CPU always returns
143  * 0 and a write has no effect.
144  */
145 
146 #define MVNETA_INTR_NEW_CAUSE                    0x25a0
147 #define MVNETA_INTR_NEW_MASK                     0x25a4
148 
149 /* bits  0..7  = TXQ SENT, one bit per queue.
150  * bits  8..15 = RXQ OCCUP, one bit per queue.
151  * bits 16..23 = RXQ FREE, one bit per queue.
152  * bit  29 = OLD_REG_SUM, summary bit for the old cause register
153  * bit  30 = TX_ERR_SUM, one bit for 4 ports
154  * bit  31 = MISC_SUM,   one bit for 4 ports
155  */
156 #define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
157 #define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
158 #define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
159 #define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
160 #define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
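/* For example, with all eight queues in use:
 *   MVNETA_TX_INTR_MASK(8) == 0x000000ff  (TXQ SENT, bits 0..7)
 *   MVNETA_RX_INTR_MASK(8) == 0x0000ff00  (RXQ OCCUP, bits 8..15)
 */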
161 
162 #define MVNETA_INTR_OLD_CAUSE                    0x25a8
163 #define MVNETA_INTR_OLD_MASK                     0x25ac
164 
165 /* Data Path Port/Queue Cause Register */
166 #define MVNETA_INTR_MISC_CAUSE                   0x25b0
167 #define MVNETA_INTR_MISC_MASK                    0x25b4
168 
169 #define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
170 #define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
171 #define      MVNETA_CAUSE_PTP                    BIT(4)
172 
173 #define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
174 #define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
175 #define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
176 #define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
177 #define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
178 #define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
179 #define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
180 #define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
181 
182 #define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
183 #define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
184 #define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
185 
186 #define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
187 #define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
188 #define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
189 
190 #define MVNETA_INTR_ENABLE                       0x25b8
191 #define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
192 #define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff
193 
194 #define MVNETA_RXQ_CMD                           0x2680
195 #define      MVNETA_RXQ_DISABLE_SHIFT            8
196 #define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
197 #define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
198 #define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
199 #define MVNETA_GMAC_CTRL_0                       0x2c00
200 #define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
201 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
202 #define      MVNETA_GMAC0_PORT_1000BASE_X        BIT(1)
203 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
204 #define MVNETA_GMAC_CTRL_2                       0x2c08
205 #define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
206 #define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
207 #define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
208 #define      MVNETA_GMAC2_PORT_RESET             BIT(6)
209 #define MVNETA_GMAC_STATUS                       0x2c10
210 #define      MVNETA_GMAC_LINK_UP                 BIT(0)
211 #define      MVNETA_GMAC_SPEED_1000              BIT(1)
212 #define      MVNETA_GMAC_SPEED_100               BIT(2)
213 #define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
214 #define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
215 #define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
216 #define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
217 #define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
218 #define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
219 #define      MVNETA_GMAC_SYNC_OK                 BIT(14)
220 #define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
221 #define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
222 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
223 #define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
224 #define      MVNETA_GMAC_AN_BYPASS_ENABLE        BIT(3)
225 #define      MVNETA_GMAC_INBAND_RESTART_AN       BIT(4)
226 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
227 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
228 #define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
229 #define      MVNETA_GMAC_CONFIG_FLOW_CTRL        BIT(8)
230 #define      MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL    BIT(9)
231 #define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
232 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
233 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
234 #define MVNETA_GMAC_CTRL_4                       0x2c90
235 #define      MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE  BIT(1)
236 #define MVNETA_MIB_COUNTERS_BASE                 0x3000
237 #define      MVNETA_MIB_LATE_COLLISION           0x7c
238 #define MVNETA_DA_FILT_SPEC_MCAST                0x3400
239 #define MVNETA_DA_FILT_OTH_MCAST                 0x3500
240 #define MVNETA_DA_FILT_UCAST_BASE                0x3600
241 #define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
242 #define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
243 #define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
244 #define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
245 #define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
246 #define      MVNETA_TXQ_DEC_SENT_SHIFT           16
247 #define      MVNETA_TXQ_DEC_SENT_MASK            0xff
248 #define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
249 #define      MVNETA_TXQ_SENT_DESC_SHIFT          16
250 #define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
251 #define MVNETA_PORT_TX_RESET                     0x3cf0
252 #define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
253 #define MVNETA_TXQ_CMD1_REG			 0x3e00
254 #define      MVNETA_TXQ_CMD1_BW_LIM_SEL_V1	 BIT(3)
255 #define      MVNETA_TXQ_CMD1_BW_LIM_EN		 BIT(0)
256 #define MVNETA_REFILL_NUM_CLK_REG		 0x3e08
257 #define      MVNETA_REFILL_MAX_NUM_CLK		 0x0000ffff
258 #define MVNETA_TX_MTU                            0x3e0c
259 #define MVNETA_TX_TOKEN_SIZE                     0x3e14
260 #define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
261 #define MVNETA_TXQ_BUCKET_REFILL_REG(q)		 (0x3e20 + ((q) << 2))
262 #define      MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK	0x3ff00000
263 #define      MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT	20
264 #define      MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX	 0x0007ffff
265 #define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
266 #define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
267 
268 /* The values of the bucket refill base period and refill period are taken from
269  * the reference manual, and add up to a base resolution of 10Kbps.
270  * This allows covering all rate-limit values from 10Kbps up to 5Gbps.
271  */
272 
273 /* Base period for the rate limit algorithm */
274 #define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS	100
275 
276 /* Number of Base Period to wait between each bucket refill */
277 #define MVNETA_TXQ_BUCKET_REFILL_PERIOD	1000
278 
279 /* The base resolution for rate limiting, in bps. Any max_rate value should be
280  * a multiple of that value.
281  */
282 #define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \
283 					 (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
284 					  MVNETA_TXQ_BUCKET_REFILL_PERIOD))
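/* Worked example: NSEC_PER_SEC / (100 * 1000) = 1e9 / 1e5 = 10000,
 * i.e. the 10Kbps base resolution described above
 */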
285 
286 #define MVNETA_LPI_CTRL_0                        0x2cc0
287 #define      MVNETA_LPI_CTRL_0_TS                (0xff << 8)
288 #define MVNETA_LPI_CTRL_1                        0x2cc4
289 #define      MVNETA_LPI_CTRL_1_REQUEST_ENABLE    BIT(0)
290 #define      MVNETA_LPI_CTRL_1_REQUEST_FORCE     BIT(1)
291 #define      MVNETA_LPI_CTRL_1_MANUAL_MODE       BIT(2)
292 #define      MVNETA_LPI_CTRL_1_TW                (0xfff << 4)
293 #define MVNETA_LPI_CTRL_2                        0x2cc8
294 #define MVNETA_LPI_STATUS                        0x2ccc
295 
296 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff
297 
298 /* Descriptor ring Macros */
299 #define MVNETA_QUEUE_NEXT_DESC(q, index)	\
300 	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
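/* e.g. for a 512-entry ring where last_desc is 511: index 510 advances
 * to 511, and index 511 wraps back to 0
 */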
301 
302 /* Various constants */
303 
304 /* Coalescing */
305 #define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
306 #define MVNETA_RX_COAL_PKTS		32
307 #define MVNETA_RX_COAL_USEC		100
308 
309 /* The two-byte Marvell header. It either contains a special value
310  * used by Marvell switches when a specific hardware mode is enabled
311  * (not supported by this driver) or is automatically filled with
312  * zeroes on the RX side. Since those two bytes sit at the front of
313  * the Ethernet header, they automatically align the IP header on a
314  * 4-byte boundary: the hardware skips those two bytes on its own.
315  */
317 #define MVNETA_MH_SIZE			2
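/* e.g. the MH (2 bytes) plus the Ethernet header (ETH_HLEN == 14) ends
 * at offset 16, so the IP header that follows starts 4-byte aligned
 */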
318 
319 #define MVNETA_VLAN_TAG_LEN             4
320 
321 #define MVNETA_TX_CSUM_DEF_SIZE		1600
322 #define MVNETA_TX_CSUM_MAX_SIZE		9800
323 #define MVNETA_ACC_MODE_EXT1		1
324 #define MVNETA_ACC_MODE_EXT2		2
325 
326 #define MVNETA_MAX_DECODE_WIN		6
327 
328 /* Timeout constants */
329 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
330 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
331 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000
332 
333 #define MVNETA_TX_MTU_MAX		0x3ffff
334 
335 /* The RSS lookup table actually has 256 entries but we do not use
336  * them yet
337  */
338 #define MVNETA_RSS_LU_TABLE_SIZE	1
339 
340 /* Max number of Rx descriptors */
341 #define MVNETA_MAX_RXD 512
342 
343 /* Max number of Tx descriptors */
344 #define MVNETA_MAX_TXD 1024
345 
346 /* Max number of allowed TCP segments for software TSO */
347 #define MVNETA_MAX_TSO_SEGS 100
348 
349 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
350 
351 /* The size of a TSO header page */
352 #define MVNETA_TSO_PAGE_SIZE (2 * PAGE_SIZE)
353 
354 /* Number of TSO headers per page. This should be a power of 2 */
355 #define MVNETA_TSO_PER_PAGE (MVNETA_TSO_PAGE_SIZE / TSO_HEADER_SIZE)
356 
357 /* Maximum number of TSO header pages */
358 #define MVNETA_MAX_TSO_PAGES (MVNETA_MAX_TXD / MVNETA_TSO_PER_PAGE)
359 
360 /* descriptor aligned size */
361 #define MVNETA_DESC_ALIGNED_SIZE	32
362 
363 /* Number of bytes to be taken into account by HW when putting incoming data
364  * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
365  * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
366  */
367 #define MVNETA_RX_PKT_OFFSET_CORRECTION		64
368 
369 #define MVNETA_RX_PKT_SIZE(mtu) \
370 	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
371 	      ETH_HLEN + ETH_FCS_LEN,			     \
372 	      cache_line_size())
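/* Worked example, assuming a 64-byte cache line: for a 1500-byte MTU,
 * 1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1524,
 * which ALIGN() rounds up to 1536
 */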
373 
374 /* Driver assumes that the last 3 bits are 0 */
375 #define MVNETA_SKB_HEADROOM	ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
376 #define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
377 			 MVNETA_SKB_HEADROOM))
378 #define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)
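/* Sizing sketch, assuming 4 KiB pages and XDP_PACKET_HEADROOM == 256
 * (>= NET_SKB_PAD in common configs): MVNETA_SKB_HEADROOM is then 256,
 * and MVNETA_MAX_RX_BUF_SIZE is the 4096-byte page minus the aligned
 * skb_shared_info-plus-headroom pad
 */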
379 
380 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
381 	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
382 
383 enum {
384 	ETHTOOL_STAT_EEE_WAKEUP,
385 	ETHTOOL_STAT_SKB_ALLOC_ERR,
386 	ETHTOOL_STAT_REFILL_ERR,
387 	ETHTOOL_XDP_REDIRECT,
388 	ETHTOOL_XDP_PASS,
389 	ETHTOOL_XDP_DROP,
390 	ETHTOOL_XDP_TX,
391 	ETHTOOL_XDP_TX_ERR,
392 	ETHTOOL_XDP_XMIT,
393 	ETHTOOL_XDP_XMIT_ERR,
394 	ETHTOOL_MAX_STATS,
395 };
396 
397 struct mvneta_statistic {
398 	unsigned short offset;
399 	unsigned short type;
400 	const char name[ETH_GSTRING_LEN];
401 };
402 
403 #define T_REG_32	32
404 #define T_REG_64	64
405 #define T_SW		1
406 
407 #define MVNETA_XDP_PASS		0
408 #define MVNETA_XDP_DROPPED	BIT(0)
409 #define MVNETA_XDP_TX		BIT(1)
410 #define MVNETA_XDP_REDIR	BIT(2)
411 
412 static const struct mvneta_statistic mvneta_statistics[] = {
413 	{ 0x3000, T_REG_64, "good_octets_received", },
414 	{ 0x3010, T_REG_32, "good_frames_received", },
415 	{ 0x3008, T_REG_32, "bad_octets_received", },
416 	{ 0x3014, T_REG_32, "bad_frames_received", },
417 	{ 0x3018, T_REG_32, "broadcast_frames_received", },
418 	{ 0x301c, T_REG_32, "multicast_frames_received", },
419 	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
420 	{ 0x3058, T_REG_32, "good_fc_received", },
421 	{ 0x305c, T_REG_32, "bad_fc_received", },
422 	{ 0x3060, T_REG_32, "undersize_received", },
423 	{ 0x3064, T_REG_32, "fragments_received", },
424 	{ 0x3068, T_REG_32, "oversize_received", },
425 	{ 0x306c, T_REG_32, "jabber_received", },
426 	{ 0x3070, T_REG_32, "mac_receive_error", },
427 	{ 0x3074, T_REG_32, "bad_crc_event", },
428 	{ 0x3078, T_REG_32, "collision", },
429 	{ 0x307c, T_REG_32, "late_collision", },
430 	{ 0x2484, T_REG_32, "rx_discard", },
431 	{ 0x2488, T_REG_32, "rx_overrun", },
432 	{ 0x3020, T_REG_32, "frames_64_octets", },
433 	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
434 	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
435 	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
436 	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
437 	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
438 	{ 0x3038, T_REG_64, "good_octets_sent", },
439 	{ 0x3040, T_REG_32, "good_frames_sent", },
440 	{ 0x3044, T_REG_32, "excessive_collision", },
441 	{ 0x3048, T_REG_32, "multicast_frames_sent", },
442 	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
443 	{ 0x3054, T_REG_32, "fc_sent", },
444 	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
445 	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
446 	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
447 	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
448 	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
449 	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
450 	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
451 	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
452 	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
453 	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
454 	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
455 };
456 
457 struct mvneta_stats {
458 	u64	rx_packets;
459 	u64	rx_bytes;
460 	u64	tx_packets;
461 	u64	tx_bytes;
462 	/* xdp */
463 	u64	xdp_redirect;
464 	u64	xdp_pass;
465 	u64	xdp_drop;
466 	u64	xdp_xmit;
467 	u64	xdp_xmit_err;
468 	u64	xdp_tx;
469 	u64	xdp_tx_err;
470 };
471 
472 struct mvneta_ethtool_stats {
473 	struct mvneta_stats ps;
474 	u64	skb_alloc_error;
475 	u64	refill_error;
476 };
477 
478 struct mvneta_pcpu_stats {
479 	struct u64_stats_sync syncp;
480 
481 	struct mvneta_ethtool_stats es;
482 	u64	rx_dropped;
483 	u64	rx_errors;
484 };
485 
486 struct mvneta_pcpu_port {
487 	/* Pointer to the shared port */
488 	struct mvneta_port	*pp;
489 
490 	/* Pointer to the CPU-local NAPI struct */
491 	struct napi_struct	napi;
492 
493 	/* Cause of the previous interrupt */
494 	u32			cause_rx_tx;
495 };
496 
497 enum {
498 	__MVNETA_DOWN,
499 };
500 
501 struct mvneta_port {
502 	u8 id;
503 	struct mvneta_pcpu_port __percpu	*ports;
504 	struct mvneta_pcpu_stats __percpu	*stats;
505 
506 	unsigned long state;
507 
508 	int pkt_size;
509 	void __iomem *base;
510 	struct mvneta_rx_queue *rxqs;
511 	struct mvneta_tx_queue *txqs;
512 	struct net_device *dev;
513 	struct hlist_node node_online;
514 	struct hlist_node node_dead;
515 	int rxq_def;
516 	/* Protect the access to the percpu interrupt registers,
517 	 * ensuring that the configuration remains coherent.
518 	 */
519 	spinlock_t lock;
520 	bool is_stopped;
521 
522 	u32 cause_rx_tx;
523 	struct napi_struct napi;
524 
525 	struct bpf_prog *xdp_prog;
526 
527 	/* Core clock */
528 	struct clk *clk;
529 	/* AXI clock */
530 	struct clk *clk_bus;
531 	u8 mcast_count[256];
532 	u16 tx_ring_size;
533 	u16 rx_ring_size;
534 
535 	phy_interface_t phy_interface;
536 	struct device_node *dn;
537 	unsigned int tx_csum_limit;
538 	struct phylink *phylink;
539 	struct phylink_config phylink_config;
540 	struct phylink_pcs phylink_pcs;
541 	struct phy *comphy;
542 
543 	struct mvneta_bm *bm_priv;
544 	struct mvneta_bm_pool *pool_long;
545 	struct mvneta_bm_pool *pool_short;
546 	int bm_win_id;
547 
548 	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
549 
550 	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
551 
552 	/* Flags for special SoC configurations */
553 	bool neta_armada3700;
554 	bool neta_ac5;
555 	u16 rx_offset_correction;
556 	const struct mbus_dram_target_info *dram_target_info;
557 };
558 
559 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
560  * layout of the transmit and receive DMA descriptors; that layout
561  * is therefore defined by the hardware design.
562  */
563 
564 #define MVNETA_TX_L3_OFF_SHIFT	0
565 #define MVNETA_TX_IP_HLEN_SHIFT	8
566 #define MVNETA_TX_L4_UDP	BIT(16)
567 #define MVNETA_TX_L3_IP6	BIT(17)
568 #define MVNETA_TXD_IP_CSUM	BIT(18)
569 #define MVNETA_TXD_Z_PAD	BIT(19)
570 #define MVNETA_TXD_L_DESC	BIT(20)
571 #define MVNETA_TXD_F_DESC	BIT(21)
572 #define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
573 				 MVNETA_TXD_L_DESC | \
574 				 MVNETA_TXD_F_DESC)
575 #define MVNETA_TX_L4_CSUM_FULL	BIT(30)
576 #define MVNETA_TX_L4_CSUM_NOT	BIT(31)
577 
578 #define MVNETA_RXD_ERR_CRC		0x0
579 #define MVNETA_RXD_BM_POOL_SHIFT	13
580 #define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
581 #define MVNETA_RXD_ERR_SUMMARY		BIT(16)
582 #define MVNETA_RXD_ERR_OVERRUN		BIT(17)
583 #define MVNETA_RXD_ERR_LEN		BIT(18)
584 #define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
585 #define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
586 #define MVNETA_RXD_L3_IP4		BIT(25)
587 #define MVNETA_RXD_LAST_DESC		BIT(26)
588 #define MVNETA_RXD_FIRST_DESC		BIT(27)
589 #define MVNETA_RXD_FIRST_LAST_DESC	(MVNETA_RXD_FIRST_DESC | \
590 					 MVNETA_RXD_LAST_DESC)
591 #define MVNETA_RXD_L4_CSUM_OK		BIT(30)
592 
593 #if defined(__LITTLE_ENDIAN)
594 struct mvneta_tx_desc {
595 	u32  command;		/* Options used by HW for packet transmitting.*/
596 	u16  reserved1;		/* csum_l4 (for future use)		*/
597 	u16  data_size;		/* Data size of transmitted packet in bytes */
598 	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
599 	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
600 	u32  reserved3[4];	/* Reserved - (for future use)		*/
601 };
602 
603 struct mvneta_rx_desc {
604 	u32  status;		/* Info about received packet		*/
605 	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
606 	u16  data_size;		/* Size of received packet in bytes	*/
607 
608 	u32  buf_phys_addr;	/* Physical address of the buffer	*/
609 	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
610 
611 	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
612 	u16  reserved3;		/* prefetch_cmd, for future use		*/
613 	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
614 
615 	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
616 	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
617 };
618 #else
619 struct mvneta_tx_desc {
620 	u16  data_size;		/* Data size of transmitted packet in bytes */
621 	u16  reserved1;		/* csum_l4 (for future use)		*/
622 	u32  command;		/* Options used by HW for packet transmitting.*/
623 	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
624 	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
625 	u32  reserved3[4];	/* Reserved - (for future use)		*/
626 };
627 
628 struct mvneta_rx_desc {
629 	u16  data_size;		/* Size of received packet in bytes	*/
630 	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
631 	u32  status;		/* Info about received packet		*/
632 
633 	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
634 	u32  buf_phys_addr;	/* Physical address of the buffer	*/
635 
636 	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
637 	u16  reserved3;		/* prefetch_cmd, for future use		*/
638 	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
639 
640 	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
641 	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
642 };
643 #endif
644 
645 enum mvneta_tx_buf_type {
646 	MVNETA_TYPE_TSO,
647 	MVNETA_TYPE_SKB,
648 	MVNETA_TYPE_XDP_TX,
649 	MVNETA_TYPE_XDP_NDO,
650 };
651 
652 struct mvneta_tx_buf {
653 	enum mvneta_tx_buf_type type;
654 	union {
655 		struct xdp_frame *xdpf;
656 		struct sk_buff *skb;
657 	};
658 };
659 
660 struct mvneta_tx_queue {
661 	/* Number of this TX queue, in the range 0-7 */
662 	u8 id;
663 
664 	/* Number of TX DMA descriptors in the descriptor ring */
665 	int size;
666 
667 	/* Number of currently used TX DMA descriptor in the
668 	 * descriptor ring
669 	 */
670 	int count;
671 	int pending;
672 	int tx_stop_threshold;
673 	int tx_wake_threshold;
674 
675 	/* Array of transmitted buffers */
676 	struct mvneta_tx_buf *buf;
677 
678 	/* Index of last TX DMA descriptor that was inserted */
679 	int txq_put_index;
680 
681 	/* Index of the TX DMA descriptor to be cleaned up */
682 	int txq_get_index;
683 
684 	u32 done_pkts_coal;
685 
686 	/* Virtual address of the TX DMA descriptors array */
687 	struct mvneta_tx_desc *descs;
688 
689 	/* DMA address of the TX DMA descriptors array */
690 	dma_addr_t descs_phys;
691 
692 	/* Index of the last TX DMA descriptor */
693 	int last_desc;
694 
695 	/* Index of the next TX DMA descriptor to process */
696 	int next_desc_to_proc;
697 
698 	/* DMA buffers for TSO headers */
699 	char *tso_hdrs[MVNETA_MAX_TSO_PAGES];
700 
701 	/* DMA address of TSO headers */
702 	dma_addr_t tso_hdrs_phys[MVNETA_MAX_TSO_PAGES];
703 
704 	/* Affinity mask for CPUs */
705 	cpumask_t affinity_mask;
706 };
707 
708 struct mvneta_rx_queue {
709 	/* rx queue number, in the range 0-7 */
710 	u8 id;
711 
712 	/* num of rx descriptors in the rx descriptor ring */
713 	int size;
714 
715 	u32 pkts_coal;
716 	u32 time_coal;
717 
718 	/* page_pool */
719 	struct page_pool *page_pool;
720 	struct xdp_rxq_info xdp_rxq;
721 
722 	/* Virtual address of the RX buffer */
723 	void  **buf_virt_addr;
724 
725 	/* Virtual address of the RX DMA descriptors array */
726 	struct mvneta_rx_desc *descs;
727 
728 	/* DMA address of the RX DMA descriptors array */
729 	dma_addr_t descs_phys;
730 
731 	/* Index of the last RX DMA descriptor */
732 	int last_desc;
733 
734 	/* Index of the next RX DMA descriptor to process */
735 	int next_desc_to_proc;
736 
737 	/* Index of first RX DMA descriptor to refill */
738 	int first_to_refill;
739 	u32 refill_num;
740 };
741 
742 static enum cpuhp_state online_hpstate;
743 /* The hardware supports eight (8) RX queues. All eight are allocated,
744  * but by default only the queue selected by rxq_def receives traffic.
745  */
746 static int rxq_number = 8;
747 static int txq_number = 8;
748 
749 static int rxq_def;
750 
751 static int rx_copybreak __read_mostly = 256;
752 
753 /* HW BM requires that each port be identified by a unique ID */
754 static int global_port_id;
755 
756 #define MVNETA_DRIVER_NAME "mvneta"
757 #define MVNETA_DRIVER_VERSION "1.0"
758 
759 /* Utility/helper methods */
760 
761 /* Write helper method */
762 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
763 {
764 	writel(data, pp->base + offset);
765 }
766 
767 /* Read helper method */
768 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
769 {
770 	return readl(pp->base + offset);
771 }
772 
773 /* Increment txq get counter */
774 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
775 {
776 	txq->txq_get_index++;
777 	if (txq->txq_get_index == txq->size)
778 		txq->txq_get_index = 0;
779 }
780 
781 /* Increment txq put counter */
782 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
783 {
784 	txq->txq_put_index++;
785 	if (txq->txq_put_index == txq->size)
786 		txq->txq_put_index = 0;
787 }
788 
790 /* Clear all MIB counters */
791 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
792 {
793 	int i;
794 
795 	/* Perform dummy reads from MIB counters */
796 	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
797 		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
798 	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
799 	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
800 }
801 
802 /* Get System Network Statistics */
803 static void
804 mvneta_get_stats64(struct net_device *dev,
805 		   struct rtnl_link_stats64 *stats)
806 {
807 	struct mvneta_port *pp = netdev_priv(dev);
808 	unsigned int start;
809 	int cpu;
810 
811 	for_each_possible_cpu(cpu) {
812 		struct mvneta_pcpu_stats *cpu_stats;
813 		u64 rx_packets;
814 		u64 rx_bytes;
815 		u64 rx_dropped;
816 		u64 rx_errors;
817 		u64 tx_packets;
818 		u64 tx_bytes;
819 
820 		cpu_stats = per_cpu_ptr(pp->stats, cpu);
821 		do {
822 			start = u64_stats_fetch_begin(&cpu_stats->syncp);
823 			rx_packets = cpu_stats->es.ps.rx_packets;
824 			rx_bytes   = cpu_stats->es.ps.rx_bytes;
825 			rx_dropped = cpu_stats->rx_dropped;
826 			rx_errors  = cpu_stats->rx_errors;
827 			tx_packets = cpu_stats->es.ps.tx_packets;
828 			tx_bytes   = cpu_stats->es.ps.tx_bytes;
829 		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
830 
831 		stats->rx_packets += rx_packets;
832 		stats->rx_bytes   += rx_bytes;
833 		stats->rx_dropped += rx_dropped;
834 		stats->rx_errors  += rx_errors;
835 		stats->tx_packets += tx_packets;
836 		stats->tx_bytes   += tx_bytes;
837 	}
838 
839 	stats->tx_dropped	= dev->stats.tx_dropped;
840 }
841 
842 /* Rx descriptors helper methods */
843 
844 /* Check whether the RX descriptor with this status is both the first
845  * and the last descriptor of the RX packet. Each RX packet is currently
846  * received through a single RX descriptor, so an RX descriptor without
847  * both its first and last bits set is an error.
848  */
849 static int mvneta_rxq_desc_is_first_last(u32 status)
850 {
851 	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
852 		MVNETA_RXD_FIRST_LAST_DESC;
853 }
854 
855 /* Add number of descriptors ready to receive new packets */
856 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
857 					  struct mvneta_rx_queue *rxq,
858 					  int ndescs)
859 {
860 	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
861 	 * be added at once
862 	 */
863 	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
864 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
865 			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
866 			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
867 		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
868 	}
869 
870 	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
871 		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
872 }
873 
874 /* Get number of RX descriptors occupied by received packets */
875 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
876 					struct mvneta_rx_queue *rxq)
877 {
878 	u32 val;
879 
880 	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
881 	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
882 }
883 
884 /* Update the number of RX descriptors; called upon return from the RX
885  * path or from mvneta_rxq_drop_pkts().
886  */
887 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
888 				       struct mvneta_rx_queue *rxq,
889 				       int rx_done, int rx_filled)
890 {
891 	u32 val;
892 
893 	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
894 		val = rx_done |
895 		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
896 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
897 		return;
898 	}
899 
900 	/* Only 255 descriptors can be added at once */
901 	while ((rx_done > 0) || (rx_filled > 0)) {
902 		if (rx_done <= 0xff) {
903 			val = rx_done;
904 			rx_done = 0;
905 		} else {
906 			val = 0xff;
907 			rx_done -= 0xff;
908 		}
909 		if (rx_filled <= 0xff) {
910 			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
911 			rx_filled = 0;
912 		} else {
913 			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
914 			rx_filled -= 0xff;
915 		}
916 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
917 	}
918 }
919 
920 /* Get pointer to next RX descriptor to be processed by SW */
921 static struct mvneta_rx_desc *
922 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
923 {
924 	int rx_desc = rxq->next_desc_to_proc;
925 
926 	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
927 	prefetch(rxq->descs + rxq->next_desc_to_proc);
928 	return rxq->descs + rx_desc;
929 }
930 
931 /* Change maximum receive size of the port. */
932 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
933 {
934 	u32 val;
935 
936 	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
937 	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
938 	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
939 		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
940 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
941 }
942 
944 /* Set rx queue offset */
945 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
946 				  struct mvneta_rx_queue *rxq,
947 				  int offset)
948 {
949 	u32 val;
950 
951 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
952 	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
953 
954 	/* Offset is programmed in units of 8 bytes, hence the >> 3 below */
955 	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
956 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
957 }
958 
960 /* Tx descriptors helper methods */
961 
962 /* Update HW with number of TX descriptors to be sent */
963 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
964 				     struct mvneta_tx_queue *txq,
965 				     int pend_desc)
966 {
967 	u32 val;
968 
969 	pend_desc += txq->pending;
970 
971 	/* Only 255 Tx descriptors can be added at once */
972 	do {
973 		val = min(pend_desc, 255);
974 		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
975 		pend_desc -= val;
976 	} while (pend_desc > 0);
977 	txq->pending = 0;
978 }
979 
980 /* Get pointer to next TX descriptor to be processed (sent) by HW */
981 static struct mvneta_tx_desc *
982 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
983 {
984 	int tx_desc = txq->next_desc_to_proc;
985 
986 	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
987 	return txq->descs + tx_desc;
988 }
989 
990 /* Release the last allocated TX descriptor. Useful to handle DMA
991  * mapping failures in the TX path.
992  */
993 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
994 {
995 	if (txq->next_desc_to_proc == 0)
996 		txq->next_desc_to_proc = txq->last_desc - 1;
997 	else
998 		txq->next_desc_to_proc--;
999 }
1000 
1001 /* Set rxq buf size */
1002 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
1003 				    struct mvneta_rx_queue *rxq,
1004 				    int buf_size)
1005 {
1006 	u32 val;
1007 
1008 	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
1009 
1010 	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
1011 	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
1012 
1013 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
1014 }
1015 
1016 /* Disable buffer management (BM) */
1017 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
1018 				  struct mvneta_rx_queue *rxq)
1019 {
1020 	u32 val;
1021 
1022 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1023 	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
1024 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1025 }
1026 
1027 /* Enable buffer management (BM) */
1028 static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
1029 				 struct mvneta_rx_queue *rxq)
1030 {
1031 	u32 val;
1032 
1033 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1034 	val |= MVNETA_RXQ_HW_BUF_ALLOC;
1035 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1036 }
1037 
1038 /* Notify HW about port's assignment of pool for bigger packets */
1039 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
1040 				     struct mvneta_rx_queue *rxq)
1041 {
1042 	u32 val;
1043 
1044 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1045 	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
1046 	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
1047 
1048 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1049 }
1050 
1051 /* Notify HW about port's assignment of pool for smaller packets */
1052 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
1053 				      struct mvneta_rx_queue *rxq)
1054 {
1055 	u32 val;
1056 
1057 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1058 	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
1059 	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
1060 
1061 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1062 }
1063 
1064 /* Set port's receive buffer size for assigned BM pool */
1065 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
1066 					      int buf_size,
1067 					      u8 pool_id)
1068 {
1069 	u32 val;
1070 
1071 	if (!IS_ALIGNED(buf_size, 8)) {
1072 		dev_warn(pp->dev->dev.parent,
1073 			 "illegal buf_size value %d, round to %d\n",
1074 			 buf_size, ALIGN(buf_size, 8));
1075 		buf_size = ALIGN(buf_size, 8);
1076 	}
1077 
1078 	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
1079 	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
1080 	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
1081 }
1082 
1083 /* Configure an MBUS window to enable access to the BM internal SRAM */
1084 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
1085 				  u8 target, u8 attr)
1086 {
1087 	u32 win_enable, win_protect;
1088 	int i;
1089 
1090 	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
1091 
1092 	if (pp->bm_win_id < 0) {
1093 		/* Find the first unoccupied window */
1094 		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
1095 			if (win_enable & (1 << i)) {
1096 				pp->bm_win_id = i;
1097 				break;
1098 			}
1099 		}
1100 		if (i == MVNETA_MAX_DECODE_WIN)
1101 			return -ENOMEM;
1102 	} else {
1103 		i = pp->bm_win_id;
1104 	}
1105 
1106 	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1107 	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1108 
1109 	if (i < 4)
1110 		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1111 
1112 	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1113 		    (attr << 8) | target);
1114 
1115 	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1116 
1117 	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1118 	win_protect |= 3 << (2 * i);
1119 	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1120 
1121 	win_enable &= ~(1 << i);
1122 	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1123 
1124 	return 0;
1125 }
1126 
1127 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1128 {
1129 	u32 wsize;
1130 	u8 target, attr;
1131 	int err;
1132 
1133 	/* Get BM window information */
1134 	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1135 					 &target, &attr);
1136 	if (err < 0)
1137 		return err;
1138 
1139 	pp->bm_win_id = -1;
1140 
1141 	/* Open NETA -> BM window */
1142 	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1143 				     target, attr);
1144 	if (err < 0) {
1145 		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1146 		return err;
1147 	}
1148 	return 0;
1149 }
1150 
1151 /* Assign and initialize pools for the port. In case of failure, the
1152  * buffer manager will remain disabled for the current port.
1153  */
1154 static int mvneta_bm_port_init(struct platform_device *pdev,
1155 			       struct mvneta_port *pp)
1156 {
1157 	struct device_node *dn = pdev->dev.of_node;
1158 	u32 long_pool_id, short_pool_id;
1159 
1160 	if (!pp->neta_armada3700) {
1161 		int ret;
1162 
1163 		ret = mvneta_bm_port_mbus_init(pp);
1164 		if (ret)
1165 			return ret;
1166 	}
1167 
1168 	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
1169 		netdev_info(pp->dev, "missing long pool id\n");
1170 		return -EINVAL;
1171 	}
1172 
1173 	/* Create port's long pool depending on mtu */
1174 	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1175 					   MVNETA_BM_LONG, pp->id,
1176 					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1177 	if (!pp->pool_long) {
1178 		netdev_info(pp->dev, "fail to obtain long pool for port\n");
1179 		return -ENOMEM;
1180 	}
1181 
1182 	pp->pool_long->port_map |= 1 << pp->id;
1183 
1184 	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1185 				   pp->pool_long->id);
1186 
1187 	/* If short pool id is not defined, assume using single pool */
1188 	/* If the short pool id is not defined, assume a single pool is used */
1189 		short_pool_id = long_pool_id;
1190 
1191 	/* Create port's short pool */
1192 	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1193 					    MVNETA_BM_SHORT, pp->id,
1194 					    MVNETA_BM_SHORT_PKT_SIZE);
1195 	if (!pp->pool_short) {
1196 		netdev_info(pp->dev, "fail to obtain short pool for port\n");
1197 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1198 		return -ENOMEM;
1199 	}
1200 
1201 	if (short_pool_id != long_pool_id) {
1202 		pp->pool_short->port_map |= 1 << pp->id;
1203 		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1204 					   pp->pool_short->id);
1205 	}
1206 
1207 	return 0;
1208 }
1209 
1210 /* Update settings of a pool for bigger packets */
1211 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1212 {
1213 	struct mvneta_bm_pool *bm_pool = pp->pool_long;
1214 	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
1215 	int num;
1216 
1217 	/* Release all buffers from long pool */
1218 	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1219 	if (hwbm_pool->buf_num) {
1220 		WARN(1, "cannot free all buffers in pool %d\n",
1221 		     bm_pool->id);
1222 		goto bm_mtu_err;
1223 	}
1224 
1225 	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
1226 	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
1227 	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1228 			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
1229 
1230 	/* Fill entire long pool */
1231 	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
1232 	if (num != hwbm_pool->size) {
1233 		WARN(1, "pool %d: %d of %d allocated\n",
1234 		     bm_pool->id, num, hwbm_pool->size);
1235 		goto bm_mtu_err;
1236 	}
1237 	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1238 
1239 	return;
1240 
1241 bm_mtu_err:
1242 	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1243 	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1244 
1245 	pp->bm_priv = NULL;
1246 	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
1247 	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1248 	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1249 }
1250 
1251 /* Start the Ethernet port RX and TX activity */
1252 static void mvneta_port_up(struct mvneta_port *pp)
1253 {
1254 	int queue;
1255 	u32 q_map;
1256 
1257 	/* Enable all initialized TXs. */
1258 	q_map = 0;
1259 	for (queue = 0; queue < txq_number; queue++) {
1260 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
1261 		if (txq->descs)
1262 			q_map |= (1 << queue);
1263 	}
1264 	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1265 
1266 	q_map = 0;
1267 	/* Enable all initialized RXQs. */
1268 	for (queue = 0; queue < rxq_number; queue++) {
1269 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1270 
1271 		if (rxq->descs)
1272 			q_map |= (1 << queue);
1273 	}
1274 	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1275 }
1276 
1277 /* Stop the Ethernet port activity */
1278 static void mvneta_port_down(struct mvneta_port *pp)
1279 {
1280 	u32 val;
1281 	int count;
1282 
1283 	/* Stop Rx port activity. Check port Rx activity. */
1284 	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1285 
1286 	/* Issue stop command for active channels only */
1287 	if (val != 0)
1288 		mvreg_write(pp, MVNETA_RXQ_CMD,
1289 			    val << MVNETA_RXQ_DISABLE_SHIFT);
1290 
1291 	/* Wait for all Rx activity to terminate. */
1292 	count = 0;
1293 	do {
1294 		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
1295 			netdev_warn(pp->dev,
1296 				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1297 				    val);
1298 			break;
1299 		}
1300 		mdelay(1);
1301 
1302 		val = mvreg_read(pp, MVNETA_RXQ_CMD);
1303 	} while (val & MVNETA_RXQ_ENABLE_MASK);
1304 
1305 	/* Stop Tx port activity. Check port Tx activity. Issue stop
1306 	 * command for active channels only
1307 	 */
1308 	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1309 
1310 	if (val != 0)
1311 		mvreg_write(pp, MVNETA_TXQ_CMD,
1312 			    (val << MVNETA_TXQ_DISABLE_SHIFT));
1313 
1314 	/* Wait for all Tx activity to terminate. */
1315 	count = 0;
1316 	do {
1317 		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
1318 			netdev_warn(pp->dev,
1319 				    "TIMEOUT for TX stopped status=0x%08x\n",
1320 				    val);
1321 			break;
1322 		}
1323 		mdelay(1);
1324 
1325 		/* Check TX Command reg that all Txqs are stopped */
1326 		val = mvreg_read(pp, MVNETA_TXQ_CMD);
1327 
1328 	} while (val & MVNETA_TXQ_ENABLE_MASK);
1329 
1330 	/* Double check to verify that TX FIFO is empty */
1331 	count = 0;
1332 	do {
1333 		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
1334 			netdev_warn(pp->dev,
1335 				    "TX FIFO empty timeout status=0x%08x\n",
1336 				    val);
1337 			break;
1338 		}
1339 		mdelay(1);
1340 
1341 		val = mvreg_read(pp, MVNETA_PORT_STATUS);
1342 	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
1343 		 (val & MVNETA_TX_IN_PRGRS));
1344 
1345 	udelay(200);
1346 }
1347 
1348 /* Enable the port by setting the port enable bit of the MAC control register */
1349 static void mvneta_port_enable(struct mvneta_port *pp)
1350 {
1351 	u32 val;
1352 
1353 	/* Enable port */
1354 	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1355 	val |= MVNETA_GMAC0_PORT_ENABLE;
1356 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1357 }
1358 
1359 /* Disable the port and wait for about 200 usec before returning */
1360 static void mvneta_port_disable(struct mvneta_port *pp)
1361 {
1362 	u32 val;
1363 
1364 	/* Reset the Enable bit in the Serial Control Register */
1365 	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1366 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
1367 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1368 
1369 	udelay(200);
1370 }
1371 
1372 /* Multicast tables methods */
1373 
1374 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1375 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1376 {
1377 	int offset;
1378 	u32 val;
1379 
1380 	if (queue == -1) {
1381 		val = 0;
1382 	} else {
1383 		val = 0x1 | (queue << 1);
1384 		val |= (val << 24) | (val << 16) | (val << 8);
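		/* e.g. queue 0 yields val = 0x01010101, one "accept on
		 * queue 0" byte per table entry in the register
		 */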
1385 	}
1386 
1387 	for (offset = 0; offset <= 0xc; offset += 4)
1388 		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1389 }
1390 
1391 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1392 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1393 {
1394 	int offset;
1395 	u32 val;
1396 
1397 	if (queue == -1) {
1398 		val = 0;
1399 	} else {
1400 		val = 0x1 | (queue << 1);
1401 		val |= (val << 24) | (val << 16) | (val << 8);
1402 	}
1403 
1404 	for (offset = 0; offset <= 0xfc; offset += 4)
1405 		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1406 
1407 }
1408 
1409 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1410 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1411 {
1412 	int offset;
1413 	u32 val;
1414 
1415 	if (queue == -1) {
1416 		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1417 		val = 0;
1418 	} else {
1419 		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1420 		val = 0x1 | (queue << 1);
1421 		val |= (val << 24) | (val << 16) | (val << 8);
1422 	}
1423 
1424 	for (offset = 0; offset <= 0xfc; offset += 4)
1425 		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1426 }
1427 
1428 static void mvneta_percpu_unmask_interrupt(void *arg)
1429 {
1430 	struct mvneta_port *pp = arg;
1431 
1432 	/* All the queues are unmasked, but actually only the ones
1433 	 * mapped to this CPU will be unmasked
1434 	 */
1435 	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1436 		    MVNETA_RX_INTR_MASK_ALL |
1437 		    MVNETA_TX_INTR_MASK_ALL |
1438 		    MVNETA_MISCINTR_INTR_MASK);
1439 }
1440 
1441 static void mvneta_percpu_mask_interrupt(void *arg)
1442 {
1443 	struct mvneta_port *pp = arg;
1444 
1445 	/* All the queues are masked, but actually only the ones
1446 	 * mapped to this CPU will be masked
1447 	 */
1448 	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1449 	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1450 	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1451 }
1452 
1453 static void mvneta_percpu_clear_intr_cause(void *arg)
1454 {
1455 	struct mvneta_port *pp = arg;
1456 
1457 	/* All the queues are cleared, but actually only the ones
1458 	 * mapped to this CPU will be cleared
1459 	 */
1460 	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1461 	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1462 	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1463 }
1464 
1465 /* This method sets defaults to the NETA port:
1466  *	Clears interrupt Cause and Mask registers.
1467  *	Clears all MAC tables.
1468  *	Sets defaults to all registers.
1469  *	Resets RX and TX descriptor rings.
1470  *	Resets PHY.
1471  * This method can be called after mvneta_port_down() to return the port
1472  *	settings to defaults.
1473  */
1474 static void mvneta_defaults_set(struct mvneta_port *pp)
1475 {
1476 	int cpu;
1477 	int queue;
1478 	u32 val;
1479 	int max_cpu = num_present_cpus();
1480 
1481 	/* Clear all Cause registers */
1482 	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1483 
1484 	/* Mask all interrupts */
1485 	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1486 	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1487 
1488 	/* Enable MBUS Retry bit16 */
1489 	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1490 
1491 	/* Set CPU queue access map. CPUs are assigned to the RX and TX
1492 	 * queues modulo their number (e.g. with two CPUs, CPU 0 gets the
1493 	 * even queues, CPU 1 the odd). If there is only one TX queue, it
1494 	 * is assigned to the CPU associated with the default RX queue.
1495 	 */
1496 	for_each_present_cpu(cpu) {
1497 		int rxq_map = 0, txq_map = 0;
1498 		int rxq, txq;
1499 		if (!pp->neta_armada3700) {
1500 			for (rxq = 0; rxq < rxq_number; rxq++)
1501 				if ((rxq % max_cpu) == cpu)
1502 					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
1503 
1504 			for (txq = 0; txq < txq_number; txq++)
1505 				if ((txq % max_cpu) == cpu)
1506 					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
1507 
1508 			/* With only one TX queue we configure a special case
1509 			 * which allows getting all the IRQs on a single
1510 			 * CPU.
1511 			 */
1512 			if (txq_number == 1)
1513 				txq_map = (cpu == pp->rxq_def) ?
1514 					MVNETA_CPU_TXQ_ACCESS(0) : 0;
1515 
1516 		} else {
1517 			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
1518 			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
1519 		}
1520 
1521 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1522 	}
1523 
1524 	/* Reset RX and TX DMAs */
1525 	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1526 	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1527 
1528 	/* Disable Legacy WRR, Disable EJP, Release from reset */
1529 	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1530 	for (queue = 0; queue < txq_number; queue++) {
1531 		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1532 		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1533 	}
1534 
1535 	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1536 	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1537 
1538 	/* Set Port Acceleration Mode */
1539 	if (pp->bm_priv)
1540 		/* HW buffer management + legacy parser */
1541 		val = MVNETA_ACC_MODE_EXT2;
1542 	else
1543 		/* SW buffer management + legacy parser */
1544 		val = MVNETA_ACC_MODE_EXT1;
1545 	mvreg_write(pp, MVNETA_ACC_MODE, val);
1546 
1547 	if (pp->bm_priv)
1548 		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1549 
1550 	/* Update the portCfg register value according to all RxQueue types */
1551 	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1552 	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1553 
1554 	val = 0;
1555 	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1556 	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1557 
1558 	/* Build PORT_SDMA_CONFIG_REG */
1559 	val = 0;
1560 
1561 	/* Default burst size */
1562 	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1563 	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1564 	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1565 
1566 #if defined(__BIG_ENDIAN)
1567 	val |= MVNETA_DESC_SWAP;
1568 #endif
1569 
1570 	/* Assign port SDMA configuration */
1571 	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1572 
1573 	/* Disable PHY polling in hardware, since we're using the
1574 	 * kernel phylib to do this.
1575 	 */
1576 	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1577 	val &= ~MVNETA_PHY_POLLING_ENABLE;
1578 	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1579 
1580 	mvneta_set_ucast_table(pp, -1);
1581 	mvneta_set_special_mcast_table(pp, -1);
1582 	mvneta_set_other_mcast_table(pp, -1);
1583 
1584 	/* Set port interrupt enable register - default enable all */
1585 	mvreg_write(pp, MVNETA_INTR_ENABLE,
1586 		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1587 		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1588 
1589 	mvneta_mib_counters_clear(pp);
1590 }
1591 
1592 /* Set max sizes for tx queues */
1593 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1594 {
1596 	u32 val, size, mtu;
1597 	int queue;
1598 
1599 	mtu = max_tx_size * 8;
1600 	if (mtu > MVNETA_TX_MTU_MAX)
1601 		mtu = MVNETA_TX_MTU_MAX;
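	/* e.g. max_tx_size = 9800 gives mtu = 78400, well below
	 * MVNETA_TX_MTU_MAX (0x3ffff = 262143), so no clamping occurs
	 */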
1602 
1603 	/* Set MTU */
1604 	val = mvreg_read(pp, MVNETA_TX_MTU);
1605 	val &= ~MVNETA_TX_MTU_MAX;
1606 	val |= mtu;
1607 	mvreg_write(pp, MVNETA_TX_MTU, val);
1608 
1609 	/* TX token size and all TXQs token size must be larger than MTU */
1610 	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1611 
1612 	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1613 	if (size < mtu) {
1614 		size = mtu;
1615 		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1616 		val |= size;
1617 		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1618 	}
1619 	for (queue = 0; queue < txq_number; queue++) {
1620 		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1621 
1622 		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1623 		if (size < mtu) {
1624 			size = mtu;
1625 			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1626 			val |= size;
1627 			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1628 		}
1629 	}
1630 }
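
/* Illustrative sketch of the token-size clamping done above: the field
 * is raised to at least the MTU-derived value with a read-modify-write
 * under its mask.  Standalone userspace demo, compiled out; the mask
 * value is an editorial assumption, not the real register layout.
 */
#if 0
#include <stdio.h>

#define TOKEN_SIZE_MAX	0x3fffffff	/* assumed field mask */

static unsigned int token_size_raise(unsigned int reg, unsigned int mtu)
{
	unsigned int size = reg & TOKEN_SIZE_MAX;

	if (size < mtu) {
		reg &= ~TOKEN_SIZE_MAX;	/* clear the field ... */
		reg |= mtu;		/* ... and raise it    */
	}
	return reg;
}

int main(void)
{
	printf("0x%x\n", token_size_raise(0x100, 0x4000));	/* raised */
	printf("0x%x\n", token_size_raise(0x8000, 0x4000));	/* kept   */
	return 0;
}
#endif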
1631 
1632 /* Set unicast address */
1633 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1634 				  int queue)
1635 {
1636 	unsigned int unicast_reg;
1637 	unsigned int tbl_offset;
1638 	unsigned int reg_offset;
1639 
1640 	/* Locate the Unicast table entry */
1641 	last_nibble = (0xf & last_nibble);
1642 
1643 	/* offset from unicast tbl base */
1644 	tbl_offset = (last_nibble / 4) * 4;
1645 
1646 	/* offset within the above reg  */
1647 	reg_offset = last_nibble % 4;
1648 
1649 	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1650 
1651 	if (queue == -1) {
1652 		/* Clear accepts frame bit at specified unicast DA tbl entry */
1653 		unicast_reg &= ~(0xff << (8 * reg_offset));
1654 	} else {
1655 		unicast_reg &= ~(0xff << (8 * reg_offset));
1656 		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1657 	}
1658 
1659 	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1660 }
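
/* Illustrative sketch of the DA-filter indexing used above: each 32-bit
 * table register packs four one-byte entries, so the nibble selects the
 * register ((nibble / 4) * 4 bytes from the base) and the byte lane
 * (nibble % 4) within it.  Standalone userspace demo, compiled out.
 */
#if 0
#include <stdio.h>

/* queue == -1 clears the entry, otherwise store (pass bit | queue << 1) */
static unsigned int filt_entry_set(unsigned int reg, int nibble, int queue)
{
	int lane = nibble % 4;

	reg &= ~(0xffu << (8 * lane));
	if (queue != -1)
		reg |= (0x01u | (unsigned int)(queue << 1)) << (8 * lane);
	return reg;
}

int main(void)
{
	int nibble = 0xa;

	printf("register byte offset: %d\n", (nibble / 4) * 4);	/* 8 */
	printf("set:   0x%08x\n", filt_entry_set(0x00000000, nibble, 2));
	printf("clear: 0x%08x\n", filt_entry_set(0xffffffff, nibble, -1));
	return 0;
}
#endif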
1661 
1662 /* Set mac address */
1663 static void mvneta_mac_addr_set(struct mvneta_port *pp,
1664 				const unsigned char *addr, int queue)
1665 {
1666 	unsigned int mac_h;
1667 	unsigned int mac_l;
1668 
1669 	if (queue != -1) {
1670 		mac_l = (addr[4] << 8) | (addr[5]);
1671 		mac_h = (addr[0] << 24) | (addr[1] << 16) |
1672 			(addr[2] << 8) | (addr[3] << 0);
1673 
1674 		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1675 		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1676 	}
1677 
1678 	/* Accept frames of this address */
1679 	mvneta_set_ucast_addr(pp, addr[5], queue);
1680 }
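
/* Illustrative sketch of the MAC register packing used above: the last
 * two address bytes go into the low register, the first four into the
 * high one.  Standalone userspace demo, compiled out.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const unsigned char addr[6] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };
	unsigned int mac_l, mac_h;

	mac_l = (addr[4] << 8) | addr[5];
	mac_h = (addr[0] << 24) | (addr[1] << 16) |
		(addr[2] << 8)  | (addr[3] << 0);

	/* prints HIGH=0x00504312 LOW=0x3456 */
	printf("HIGH=0x%08x LOW=0x%04x\n", mac_h, mac_l);
	return 0;
}
#endif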
1681 
1682 /* Set the number of packets that will be received before an RX
1683  * interrupt is generated by the HW.
1684  */
1685 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1686 				    struct mvneta_rx_queue *rxq, u32 value)
1687 {
1688 	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1689 		    value | MVNETA_RXQ_NON_OCCUPIED(0));
1690 }
1691 
1692 /* Set the time delay in usec before an RX interrupt is generated
1693  * by the HW.
1694  */
1695 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1696 				    struct mvneta_rx_queue *rxq, u32 value)
1697 {
1698 	u32 val;
1699 	unsigned long clk_rate;
1700 
1701 	clk_rate = clk_get_rate(pp->clk);
1702 	val = (clk_rate / 1000000) * value;
1703 
1704 	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1705 }
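
/* Illustrative sketch of the usec -> clock-cycle conversion done above.
 * Standalone userspace demo, compiled out; the clock rate is an assumed
 * example value.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long clk_rate = 166666667;	/* assumed core clock, Hz */
	unsigned int usec = 100;

	/* integer cycles-per-usec times the requested delay */
	unsigned int val = (clk_rate / 1000000) * usec;

	printf("RXQ_TIME_COAL = %u cycles\n", val);	/* 16600 */
	return 0;
}
#endif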
1706 
1707 /* Set threshold for TX_DONE pkts coalescing */
1708 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1709 					 struct mvneta_tx_queue *txq, u32 value)
1710 {
1711 	u32 val;
1712 
1713 	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1714 
1715 	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1716 	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1717 
1718 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1719 }
1720 
1721 /* Handle rx descriptor fill by setting buf_phys_addr and buf_virt_addr */
1722 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1723 				u32 phys_addr, void *virt_addr,
1724 				struct mvneta_rx_queue *rxq)
1725 {
1726 	int i;
1727 
1728 	rx_desc->buf_phys_addr = phys_addr;
1729 	i = rx_desc - rxq->descs;
1730 	rxq->buf_virt_addr[i] = virt_addr;
1731 }
1732 
1733 /* Decrement sent descriptors counter */
1734 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1735 				     struct mvneta_tx_queue *txq,
1736 				     int sent_desc)
1737 {
1738 	u32 val;
1739 
1740 	/* Only 255 TX descriptors can be updated at once */
1741 	while (sent_desc > 0xff) {
1742 		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1743 		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1744 		sent_desc = sent_desc - 0xff;
1745 	}
1746 
1747 	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1748 	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1749 }
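
/* Illustrative sketch of the chunked decrement done above: the hardware
 * field is 8 bits wide, so at most 255 descriptors can be acknowledged
 * per register write.  Standalone userspace demo, compiled out; the
 * shift value is an editorial assumption.
 */
#if 0
#include <stdio.h>

#define DEC_SENT_SHIFT	16	/* assumed field position */

int main(void)
{
	int sent_desc = 600;

	while (sent_desc > 0xff) {
		printf("write 0x%08x\n", 0xff << DEC_SENT_SHIFT);
		sent_desc -= 0xff;
	}
	/* 600 = 255 + 255 + 90, i.e. three register writes */
	printf("write 0x%08x\n", sent_desc << DEC_SENT_SHIFT);
	return 0;
}
#endif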
1750 
1751 /* Get number of TX descriptors already sent by HW */
1752 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1753 					struct mvneta_tx_queue *txq)
1754 {
1755 	u32 val;
1756 	int sent_desc;
1757 
1758 	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1759 	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1760 		MVNETA_TXQ_SENT_DESC_SHIFT;
1761 
1762 	return sent_desc;
1763 }
1764 
1765 /* Get the number of sent descriptors and decrement the counter.
1766  * Return the number of sent descriptors.
1767  */
1768 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1769 				     struct mvneta_tx_queue *txq)
1770 {
1771 	int sent_desc;
1772 
1773 	/* Get number of sent descriptors */
1774 	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1775 
1776 	/* Decrement sent descriptors counter */
1777 	if (sent_desc)
1778 		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1779 
1780 	return sent_desc;
1781 }
1782 
1783 /* Set TXQ descriptors fields relevant for CSUM calculation */
1784 static u32 mvneta_txq_desc_csum(int l3_offs, __be16 l3_proto,
1785 				int ip_hdr_len, int l4_proto)
1786 {
1787 	u32 command;
1788 
1789 	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1790 	 * G_L4_chk, L4_type; required only for checksum
1791 	 * calculation
1792 	 */
1793 	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
1794 	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1795 
1796 	if (l3_proto == htons(ETH_P_IP))
1797 		command |= MVNETA_TXD_IP_CSUM;
1798 	else
1799 		command |= MVNETA_TX_L3_IP6;
1800 
1801 	if (l4_proto == IPPROTO_TCP)
1802 		command |=  MVNETA_TX_L4_CSUM_FULL;
1803 	else if (l4_proto == IPPROTO_UDP)
1804 		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1805 	else
1806 		command |= MVNETA_TX_L4_CSUM_NOT;
1807 
1808 	return command;
1809 }
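
/* Illustrative sketch of the checksum command-word composition above:
 * the L3 offset and IP header length are shifted into their fields and
 * the L3/L4 flags OR'ed on top.  Standalone userspace demo, compiled
 * out; all shift/flag values here are editorial assumptions, not the
 * real descriptor layout.
 */
#if 0
#include <stdio.h>

#define TX_L3_OFF_SHIFT		0		/* assumed */
#define TX_IP_HLEN_SHIFT	8		/* assumed */
#define TXD_IP_CSUM		(1u << 18)	/* assumed */
#define TX_L4_CSUM_FULL		(1u << 30)	/* assumed */

int main(void)
{
	int l3_offs = 14;	/* Ethernet header length */
	int ip_hdr_len = 5;	/* IPv4 IHL in 32-bit words */
	unsigned int cmd;

	cmd  = l3_offs    << TX_L3_OFF_SHIFT;
	cmd |= ip_hdr_len << TX_IP_HLEN_SHIFT;
	cmd |= TXD_IP_CSUM | TX_L4_CSUM_FULL;	/* IPv4 + TCP */

	printf("tx command = 0x%08x\n", cmd);
	return 0;
}
#endif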
1810 
1812 /* Display more error info */
1813 static void mvneta_rx_error(struct mvneta_port *pp,
1814 			    struct mvneta_rx_desc *rx_desc)
1815 {
1816 	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1817 	u32 status = rx_desc->status;
1818 
1819 	/* update per-cpu counter */
1820 	u64_stats_update_begin(&stats->syncp);
1821 	stats->rx_errors++;
1822 	u64_stats_update_end(&stats->syncp);
1823 
1824 	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1825 	case MVNETA_RXD_ERR_CRC:
1826 		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1827 			   status, rx_desc->data_size);
1828 		break;
1829 	case MVNETA_RXD_ERR_OVERRUN:
1830 		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1831 			   status, rx_desc->data_size);
1832 		break;
1833 	case MVNETA_RXD_ERR_LEN:
1834 		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1835 			   status, rx_desc->data_size);
1836 		break;
1837 	case MVNETA_RXD_ERR_RESOURCE:
1838 		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1839 			   status, rx_desc->data_size);
1840 		break;
1841 	}
1842 }
1843 
1844 /* Handle RX checksum offload based on the descriptor's status */
1845 static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
1846 {
1847 	if ((pp->dev->features & NETIF_F_RXCSUM) &&
1848 	    (status & MVNETA_RXD_L3_IP4) &&
1849 	    (status & MVNETA_RXD_L4_CSUM_OK))
1850 		return CHECKSUM_UNNECESSARY;
1851 
1852 	return CHECKSUM_NONE;
1853 }
1854 
1855 /* Return the tx queue pointer (find last set bit) according to <cause>
1856  * returned from the tx_done reg. <cause> must not be null. The return
1857  * value is always a valid queue, i.e. the highest queue set in <cause>.
1858  */
1859 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1860 						     u32 cause)
1861 {
1862 	int queue = fls(cause) - 1;
1863 
1864 	return &pp->txqs[queue];
1865 }
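
/* Illustrative sketch of the fls()-based policy above: the highest
 * queue bit set in <cause> is serviced first.  Standalone userspace
 * demo, compiled out, with a local fls() built on __builtin_clz().
 */
#if 0
#include <stdio.h>

static int my_fls(unsigned int x)	/* 1-based, like the kernel fls() */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int cause = 0x06;	/* TXQ1 and TXQ2 pending */

	printf("service queue %d first\n", my_fls(cause) - 1);	/* 2 */
	return 0;
}
#endif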
1866 
1867 /* Free tx queue skbuffs */
1868 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1869 				 struct mvneta_tx_queue *txq, int num,
1870 				 struct netdev_queue *nq, bool napi)
1871 {
1872 	unsigned int bytes_compl = 0, pkts_compl = 0;
1873 	struct xdp_frame_bulk bq;
1874 	int i;
1875 
1876 	xdp_frame_bulk_init(&bq);
1877 
1878 	rcu_read_lock(); /* needed for xdp_return_frame_bulk */
1879 
1880 	for (i = 0; i < num; i++) {
1881 		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
1882 		struct mvneta_tx_desc *tx_desc = txq->descs +
1883 			txq->txq_get_index;
1884 
1885 		mvneta_txq_inc_get(txq);
1886 
1887 		if (buf->type == MVNETA_TYPE_XDP_NDO ||
1888 		    buf->type == MVNETA_TYPE_SKB)
1889 			dma_unmap_single(pp->dev->dev.parent,
1890 					 tx_desc->buf_phys_addr,
1891 					 tx_desc->data_size, DMA_TO_DEVICE);
1892 		if ((buf->type == MVNETA_TYPE_TSO ||
1893 		     buf->type == MVNETA_TYPE_SKB) && buf->skb) {
1894 			bytes_compl += buf->skb->len;
1895 			pkts_compl++;
1896 			dev_kfree_skb_any(buf->skb);
1897 		} else if ((buf->type == MVNETA_TYPE_XDP_TX ||
1898 			    buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) {
1899 			if (napi && buf->type == MVNETA_TYPE_XDP_TX)
1900 				xdp_return_frame_rx_napi(buf->xdpf);
1901 			else
1902 				xdp_return_frame_bulk(buf->xdpf, &bq);
1903 		}
1904 	}
1905 	xdp_flush_frame_bulk(&bq);
1906 
1907 	rcu_read_unlock();
1908 
1909 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1910 }
1911 
1912 /* Handle end of transmission */
1913 static void mvneta_txq_done(struct mvneta_port *pp,
1914 			   struct mvneta_tx_queue *txq)
1915 {
1916 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1917 	int tx_done;
1918 
1919 	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1920 	if (!tx_done)
1921 		return;
1922 
1923 	mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
1924 
1925 	txq->count -= tx_done;
1926 
1927 	if (netif_tx_queue_stopped(nq)) {
1928 		if (txq->count <= txq->tx_wake_threshold)
1929 			netif_tx_wake_queue(nq);
1930 	}
1931 }
1932 
1933 /* Refill for SW buffer management: allocate one page per descriptor */
1935 static int mvneta_rx_refill(struct mvneta_port *pp,
1936 			    struct mvneta_rx_desc *rx_desc,
1937 			    struct mvneta_rx_queue *rxq,
1938 			    gfp_t gfp_mask)
1939 {
1940 	dma_addr_t phys_addr;
1941 	struct page *page;
1942 
1943 	page = page_pool_alloc_pages(rxq->page_pool,
1944 				     gfp_mask | __GFP_NOWARN);
1945 	if (!page)
1946 		return -ENOMEM;
1947 
1948 	phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
1949 	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1950 
1951 	return 0;
1952 }
1953 
1954 /* Handle tx checksum */
1955 static u32 mvneta_skb_tx_csum(struct sk_buff *skb)
1956 {
1957 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1958 		int ip_hdr_len = 0;
1959 		__be16 l3_proto = vlan_get_protocol(skb);
1960 		u8 l4_proto;
1961 
1962 		if (l3_proto == htons(ETH_P_IP)) {
1963 			struct iphdr *ip4h = ip_hdr(skb);
1964 
1965 			/* Calculate IPv4 checksum and L4 checksum */
1966 			ip_hdr_len = ip4h->ihl;
1967 			l4_proto = ip4h->protocol;
1968 		} else if (l3_proto == htons(ETH_P_IPV6)) {
1969 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
1970 
1971 			/* Read l4_protocol from one of IPv6 extra headers */
1972 			if (skb_network_header_len(skb) > 0)
1973 				ip_hdr_len = (skb_network_header_len(skb) >> 2);
1974 			l4_proto = ip6h->nexthdr;
1975 		} else {
1976 			return MVNETA_TX_L4_CSUM_NOT;
1977 		}

1978 		return mvneta_txq_desc_csum(skb_network_offset(skb),
1979 					    l3_proto, ip_hdr_len, l4_proto);
1980 	}
1981 
1982 	return MVNETA_TX_L4_CSUM_NOT;
1983 }
1984 
1985 /* Drop packets received by the RXQ and free buffers */
1986 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1987 				 struct mvneta_rx_queue *rxq)
1988 {
1989 	int rx_done, i;
1990 
1991 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1992 	if (rx_done)
1993 		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1994 
1995 	if (pp->bm_priv) {
1996 		for (i = 0; i < rx_done; i++) {
1997 			struct mvneta_rx_desc *rx_desc =
1998 						  mvneta_rxq_next_desc_get(rxq);
1999 			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2000 			struct mvneta_bm_pool *bm_pool;
2001 
2002 			bm_pool = &pp->bm_priv->bm_pools[pool_id];
2003 			/* Return dropped buffer to the pool */
2004 			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2005 					      rx_desc->buf_phys_addr);
2006 		}
2007 		return;
2008 	}
2009 
2010 	for (i = 0; i < rxq->size; i++) {
2011 		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
2012 		void *data = rxq->buf_virt_addr[i];
2013 		void *data = rxq->buf_virt_addr[i];

2014 			continue;
2015 
2016 		page_pool_put_full_page(rxq->page_pool, data, false);
2017 	}
2018 	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
2019 		xdp_rxq_info_unreg(&rxq->xdp_rxq);
2020 	page_pool_destroy(rxq->page_pool);
2021 	rxq->page_pool = NULL;
2022 }
2023 
2024 static void
2025 mvneta_update_stats(struct mvneta_port *pp,
2026 		    struct mvneta_stats *ps)
2027 {
2028 	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2029 
2030 	u64_stats_update_begin(&stats->syncp);
2031 	stats->es.ps.rx_packets += ps->rx_packets;
2032 	stats->es.ps.rx_bytes += ps->rx_bytes;
2033 	/* xdp */
2034 	stats->es.ps.xdp_redirect += ps->xdp_redirect;
2035 	stats->es.ps.xdp_pass += ps->xdp_pass;
2036 	stats->es.ps.xdp_drop += ps->xdp_drop;
2037 	u64_stats_update_end(&stats->syncp);
2038 }
2039 
2040 static inline
2041 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
2042 {
2043 	struct mvneta_rx_desc *rx_desc;
2044 	int curr_desc = rxq->first_to_refill;
2045 	int i;
2046 
2047 	for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
2048 		rx_desc = rxq->descs + curr_desc;
2049 		if (!(rx_desc->buf_phys_addr)) {
2050 			if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
2051 				struct mvneta_pcpu_stats *stats;
2052 
2053 				pr_err("Can't refill queue %d. Done %d from %d\n",
2054 				       rxq->id, i, rxq->refill_num);
2055 
2056 				stats = this_cpu_ptr(pp->stats);
2057 				u64_stats_update_begin(&stats->syncp);
2058 				stats->es.refill_error++;
2059 				u64_stats_update_end(&stats->syncp);
2060 				break;
2061 			}
2062 		}
2063 		curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
2064 	}
2065 	rxq->refill_num -= i;
2066 	rxq->first_to_refill = curr_desc;
2067 
2068 	return i;
2069 }
2070 
2071 static void
2072 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2073 		    struct xdp_buff *xdp, int sync_len)
2074 {
2075 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2076 	int i;
2077 
2078 	if (likely(!xdp_buff_has_frags(xdp)))
2079 		goto out;
2080 
2081 	for (i = 0; i < sinfo->nr_frags; i++)
2082 		page_pool_put_full_page(rxq->page_pool,
2083 					skb_frag_page(&sinfo->frags[i]), true);
2084 
2085 out:
2086 	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
2087 			   sync_len, true);
2088 }
2089 
2090 static int
2091 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
2092 			struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map)
2093 {
2094 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
2095 	struct device *dev = pp->dev->dev.parent;
2096 	struct mvneta_tx_desc *tx_desc;
2097 	int i, num_frames = 1;
2098 	struct page *page;
2099 
2100 	if (unlikely(xdp_frame_has_frags(xdpf)))
2101 		num_frames += sinfo->nr_frags;
2102 
2103 	if (txq->count + num_frames >= txq->size)
2104 		return MVNETA_XDP_DROPPED;
2105 
2106 	for (i = 0; i < num_frames; i++) {
2107 		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2108 		skb_frag_t *frag = NULL;
2109 		int len = xdpf->len;
2110 		dma_addr_t dma_addr;
2111 
2112 		if (unlikely(i)) { /* paged area */
2113 			frag = &sinfo->frags[i - 1];
2114 			len = skb_frag_size(frag);
2115 		}
2116 
2117 		tx_desc = mvneta_txq_next_desc_get(txq);
2118 		if (dma_map) {
2119 			/* ndo_xdp_xmit */
2120 			void *data;
2121 
2122 			data = unlikely(frag) ? skb_frag_address(frag)
2123 					      : xdpf->data;
2124 			dma_addr = dma_map_single(dev, data, len,
2125 						  DMA_TO_DEVICE);
2126 			if (dma_mapping_error(dev, dma_addr)) {
2127 				mvneta_txq_desc_put(txq);
2128 				goto unmap;
2129 			}
2130 
2131 			buf->type = MVNETA_TYPE_XDP_NDO;
2132 		} else {
2133 			page = unlikely(frag) ? skb_frag_page(frag)
2134 					      : virt_to_page(xdpf->data);
2135 			dma_addr = page_pool_get_dma_addr(page);
2136 			if (unlikely(frag))
2137 				dma_addr += skb_frag_off(frag);
2138 			else
2139 				dma_addr += sizeof(*xdpf) + xdpf->headroom;
2140 			dma_sync_single_for_device(dev, dma_addr, len,
2141 						   DMA_BIDIRECTIONAL);
2142 			buf->type = MVNETA_TYPE_XDP_TX;
2143 		}
2144 		buf->xdpf = unlikely(i) ? NULL : xdpf;
2145 
2146 		tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC;
2147 		tx_desc->buf_phys_addr = dma_addr;
2148 		tx_desc->data_size = len;
2149 		*nxmit_byte += len;
2150 
2151 		mvneta_txq_inc_put(txq);
2152 	}
2153 	/* last descriptor */
2154 	tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2155 
2156 	txq->pending += num_frames;
2157 	txq->count += num_frames;
2158 
2159 	return MVNETA_XDP_TX;
2160 
2161 unmap:
2162 	for (i--; i >= 0; i--) {
2163 		mvneta_txq_desc_put(txq);
2164 		tx_desc = txq->descs + txq->next_desc_to_proc;
2165 		dma_unmap_single(dev, tx_desc->buf_phys_addr,
2166 				 tx_desc->data_size,
2167 				 DMA_TO_DEVICE);
2168 	}
2169 
2170 	return MVNETA_XDP_DROPPED;
2171 }
2172 
2173 static int
2174 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
2175 {
2176 	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2177 	struct mvneta_tx_queue *txq;
2178 	struct netdev_queue *nq;
2179 	int cpu, nxmit_byte = 0;
2180 	struct xdp_frame *xdpf;
2181 	u32 ret;
2182 
2183 	xdpf = xdp_convert_buff_to_frame(xdp);
2184 	if (unlikely(!xdpf))
2185 		return MVNETA_XDP_DROPPED;
2186 
2187 	cpu = smp_processor_id();
2188 	txq = &pp->txqs[cpu % txq_number];
2189 	nq = netdev_get_tx_queue(pp->dev, txq->id);
2190 
2191 	__netif_tx_lock(nq, cpu);
2192 	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
2193 	if (ret == MVNETA_XDP_TX) {
2194 		u64_stats_update_begin(&stats->syncp);
2195 		stats->es.ps.tx_bytes += nxmit_byte;
2196 		stats->es.ps.tx_packets++;
2197 		stats->es.ps.xdp_tx++;
2198 		u64_stats_update_end(&stats->syncp);
2199 
2200 		mvneta_txq_pend_desc_add(pp, txq, 0);
2201 	} else {
2202 		u64_stats_update_begin(&stats->syncp);
2203 		stats->es.ps.xdp_tx_err++;
2204 		u64_stats_update_end(&stats->syncp);
2205 	}
2206 	__netif_tx_unlock(nq);
2207 
2208 	return ret;
2209 }
2210 
2211 static int
2212 mvneta_xdp_xmit(struct net_device *dev, int num_frame,
2213 		struct xdp_frame **frames, u32 flags)
2214 {
2215 	struct mvneta_port *pp = netdev_priv(dev);
2216 	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2217 	int i, nxmit_byte = 0, nxmit = 0;
2218 	int cpu = smp_processor_id();
2219 	struct mvneta_tx_queue *txq;
2220 	struct netdev_queue *nq;
2221 	u32 ret;
2222 
2223 	if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
2224 		return -ENETDOWN;
2225 
2226 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2227 		return -EINVAL;
2228 
2229 	txq = &pp->txqs[cpu % txq_number];
2230 	nq = netdev_get_tx_queue(pp->dev, txq->id);
2231 
2232 	__netif_tx_lock(nq, cpu);
2233 	for (i = 0; i < num_frame; i++) {
2234 		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
2235 					      true);
2236 		if (ret != MVNETA_XDP_TX)
2237 			break;
2238 
2239 		nxmit++;
2240 	}
2241 
2242 	if (unlikely(flags & XDP_XMIT_FLUSH))
2243 		mvneta_txq_pend_desc_add(pp, txq, 0);
2244 	__netif_tx_unlock(nq);
2245 
2246 	u64_stats_update_begin(&stats->syncp);
2247 	stats->es.ps.tx_bytes += nxmit_byte;
2248 	stats->es.ps.tx_packets += nxmit;
2249 	stats->es.ps.xdp_xmit += nxmit;
2250 	stats->es.ps.xdp_xmit_err += num_frame - nxmit;
2251 	u64_stats_update_end(&stats->syncp);
2252 
2253 	return nxmit;
2254 }
2255 
2256 static int
2257 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2258 	       struct bpf_prog *prog, struct xdp_buff *xdp,
2259 	       u32 frame_sz, struct mvneta_stats *stats)
2260 {
2261 	unsigned int len, data_len, sync;
2262 	u32 ret, act;
2263 
2264 	len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2265 	data_len = xdp->data_end - xdp->data;
2266 	act = bpf_prog_run_xdp(prog, xdp);
2267 
2268 	/* Due to xdp_adjust_tail: DMA sync for_device must cover max CPU-touched len */
2269 	sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2270 	sync = max(sync, len);
2271 
2272 	switch (act) {
2273 	case XDP_PASS:
2274 		stats->xdp_pass++;
2275 		return MVNETA_XDP_PASS;
2276 	case XDP_REDIRECT: {
2277 		int err;
2278 
2279 		err = xdp_do_redirect(pp->dev, xdp, prog);
2280 		if (unlikely(err)) {
2281 			mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2282 			ret = MVNETA_XDP_DROPPED;
2283 		} else {
2284 			ret = MVNETA_XDP_REDIR;
2285 			stats->xdp_redirect++;
2286 		}
2287 		break;
2288 	}
2289 	case XDP_TX:
2290 		ret = mvneta_xdp_xmit_back(pp, xdp);
2291 		if (ret != MVNETA_XDP_TX)
2292 			mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2293 		break;
2294 	default:
2295 		bpf_warn_invalid_xdp_action(pp->dev, prog, act);
2296 		fallthrough;
2297 	case XDP_ABORTED:
2298 		trace_xdp_exception(pp->dev, prog, act);
2299 		fallthrough;
2300 	case XDP_DROP:
2301 		mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2302 		ret = MVNETA_XDP_DROPPED;
2303 		stats->xdp_drop++;
2304 		break;
2305 	}
2306 
2307 	stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len;
2308 	stats->rx_packets++;
2309 
2310 	return ret;
2311 }
2312 
2313 static void
2314 mvneta_swbm_rx_frame(struct mvneta_port *pp,
2315 		     struct mvneta_rx_desc *rx_desc,
2316 		     struct mvneta_rx_queue *rxq,
2317 		     struct xdp_buff *xdp, int *size,
2318 		     struct page *page)
2319 {
2320 	unsigned char *data = page_address(page);
2321 	int data_len = -MVNETA_MH_SIZE, len;
2322 	struct net_device *dev = pp->dev;
2323 	enum dma_data_direction dma_dir;
2324 
2325 	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
2326 		len = MVNETA_MAX_RX_BUF_SIZE;
2327 		data_len += len;
2328 	} else {
2329 		len = *size;
2330 		data_len += len - ETH_FCS_LEN;
2331 	}
2332 	*size = *size - len;
2333 
2334 	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2335 	dma_sync_single_for_cpu(dev->dev.parent,
2336 				rx_desc->buf_phys_addr,
2337 				len, dma_dir);
2338 
2339 	rx_desc->buf_phys_addr = 0;
2340 
2341 	/* Prefetch header */
2342 	prefetch(data);
2343 	xdp_buff_clear_frags_flag(xdp);
2344 	xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
2345 			 data_len, true);
2346 }
2347 
2348 static void
2349 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
2350 			    struct mvneta_rx_desc *rx_desc,
2351 			    struct mvneta_rx_queue *rxq,
2352 			    struct xdp_buff *xdp, int *size,
2353 			    struct page *page)
2354 {
2355 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2356 	struct net_device *dev = pp->dev;
2357 	enum dma_data_direction dma_dir;
2358 	int data_len, len;
2359 
2360 	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
2361 		len = MVNETA_MAX_RX_BUF_SIZE;
2362 		data_len = len;
2363 	} else {
2364 		len = *size;
2365 		data_len = len - ETH_FCS_LEN;
2366 	}
2367 	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2368 	dma_sync_single_for_cpu(dev->dev.parent,
2369 				rx_desc->buf_phys_addr,
2370 				len, dma_dir);
2371 	rx_desc->buf_phys_addr = 0;
2372 
2373 	if (!xdp_buff_has_frags(xdp))
2374 		sinfo->nr_frags = 0;
2375 
2376 	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
2377 		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
2378 
2379 		skb_frag_fill_page_desc(frag, page,
2380 					pp->rx_offset_correction, data_len);
2381 
2382 		if (!xdp_buff_has_frags(xdp)) {
2383 			sinfo->xdp_frags_size = *size;
2384 			xdp_buff_set_frags_flag(xdp);
2385 		}
2386 		if (page_is_pfmemalloc(page))
2387 			xdp_buff_set_frag_pfmemalloc(xdp);
2388 	} else {
2389 		page_pool_put_full_page(rxq->page_pool, page, true);
2390 	}
2391 	*size -= len;
2392 }
2393 
2394 static struct sk_buff *
2395 mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
2396 		      struct xdp_buff *xdp, u32 desc_status)
2397 {
2398 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2399 	u32 metasize = xdp->data - xdp->data_meta;
2400 	struct sk_buff *skb;
2401 	u8 num_frags;
2402 
2403 	if (unlikely(xdp_buff_has_frags(xdp)))
2404 		num_frags = sinfo->nr_frags;
2405 
2406 	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
2407 	if (!skb)
2408 		return ERR_PTR(-ENOMEM);
2409 
2410 	skb_mark_for_recycle(skb);
2411 
2412 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2413 	skb_put(skb, xdp->data_end - xdp->data);
2414 	if (metasize)
2415 		skb_metadata_set(skb, metasize);
2416 	skb->ip_summed = mvneta_rx_csum(pp, desc_status);
2417 
2418 	if (unlikely(xdp_buff_has_frags(xdp)))
2419 		xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
2420 					  num_frags * xdp->frame_sz,
2421 					  xdp_buff_get_skb_flags(xdp));
2422 
2423 	return skb;
2424 }
2425 
2426 /* Main rx processing when using software buffer management */
2427 static int mvneta_rx_swbm(struct napi_struct *napi,
2428 			  struct mvneta_port *pp, int budget,
2429 			  struct mvneta_rx_queue *rxq)
2430 {
2431 	int rx_proc = 0, rx_todo, refill, size = 0;
2432 	struct net_device *dev = pp->dev;
2433 	struct mvneta_stats ps = {};
2434 	struct bpf_prog *xdp_prog;
2435 	u32 desc_status, frame_sz;
2436 	struct xdp_buff xdp_buf;
2437 
2438 	xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
2439 	xdp_buf.data_hard_start = NULL;
2440 
2441 	/* Get number of received packets */
2442 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2443 
2444 	xdp_prog = READ_ONCE(pp->xdp_prog);
2445 
2446 	/* Fairness NAPI loop */
2447 	while (rx_proc < budget && rx_proc < rx_todo) {
2448 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2449 		u32 rx_status, index;
2450 		struct sk_buff *skb;
2451 		struct page *page;
2452 
2453 		index = rx_desc - rxq->descs;
2454 		page = (struct page *)rxq->buf_virt_addr[index];
2455 
2456 		rx_status = rx_desc->status;
2457 		rx_proc++;
2458 		rxq->refill_num++;
2459 
2460 		if (rx_status & MVNETA_RXD_FIRST_DESC) {
2461 			/* Check errors only for FIRST descriptor */
2462 			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
2463 				mvneta_rx_error(pp, rx_desc);
2464 				goto next;
2465 			}
2466 
2467 			size = rx_desc->data_size;
2468 			frame_sz = size - ETH_FCS_LEN;
2469 			desc_status = rx_status;
2470 
2471 			mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2472 					     &size, page);
2473 		} else {
2474 			if (unlikely(!xdp_buf.data_hard_start)) {
2475 				rx_desc->buf_phys_addr = 0;
2476 				page_pool_put_full_page(rxq->page_pool, page,
2477 							true);
2478 				goto next;
2479 			}
2480 
2481 			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
2482 						    &size, page);
2483 		} /* Middle or Last descriptor */
2484 
2485 		if (!(rx_status & MVNETA_RXD_LAST_DESC))
2486 			/* no last descriptor this time */
2487 			continue;
2488 
2489 		if (size) {
2490 			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2491 			goto next;
2492 		}
2493 
2494 		if (xdp_prog &&
2495 		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
2496 			goto next;
2497 
2498 		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
2499 		if (IS_ERR(skb)) {
2500 			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2501 
2502 			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2503 
2504 			u64_stats_update_begin(&stats->syncp);
2505 			stats->es.skb_alloc_error++;
2506 			stats->rx_dropped++;
2507 			u64_stats_update_end(&stats->syncp);
2508 
2509 			goto next;
2510 		}
2511 
2512 		ps.rx_bytes += skb->len;
2513 		ps.rx_packets++;
2514 
2515 		skb->protocol = eth_type_trans(skb, dev);
2516 		napi_gro_receive(napi, skb);
2517 next:
2518 		xdp_buf.data_hard_start = NULL;
2519 	}
2520 
2521 	if (xdp_buf.data_hard_start)
2522 		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2523 
2524 	if (ps.xdp_redirect)
2525 		xdp_do_flush();
2526 
2527 	if (ps.rx_packets)
2528 		mvneta_update_stats(pp, &ps);
2529 
2530 	/* return some buffers to the hardware queue; one at a time is too slow */
2531 	refill = mvneta_rx_refill_queue(pp, rxq);
2532 
2533 	/* Update rxq management counters */
2534 	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2535 
2536 	return ps.rx_packets;
2537 }
2538 
2539 /* Main rx processing when using hardware buffer management */
2540 static int mvneta_rx_hwbm(struct napi_struct *napi,
2541 			  struct mvneta_port *pp, int rx_todo,
2542 			  struct mvneta_rx_queue *rxq)
2543 {
2544 	struct net_device *dev = pp->dev;
2545 	int rx_done;
2546 	u32 rcvd_pkts = 0;
2547 	u32 rcvd_bytes = 0;
2548 
2549 	/* Get number of received packets */
2550 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2551 
2552 	if (rx_todo > rx_done)
2553 		rx_todo = rx_done;
2554 
2555 	rx_done = 0;
2556 
2557 	/* Fairness NAPI loop */
2558 	while (rx_done < rx_todo) {
2559 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2560 		struct mvneta_bm_pool *bm_pool = NULL;
2561 		struct sk_buff *skb;
2562 		unsigned char *data;
2563 		dma_addr_t phys_addr;
2564 		u32 rx_status, frag_size;
2565 		int rx_bytes, err;
2566 		u8 pool_id;
2567 
2568 		rx_done++;
2569 		rx_status = rx_desc->status;
2570 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2571 		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2572 		phys_addr = rx_desc->buf_phys_addr;
2573 		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2574 		bm_pool = &pp->bm_priv->bm_pools[pool_id];
2575 
2576 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2577 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2578 err_drop_frame_ret_pool:
2579 			/* Return the buffer to the pool */
2580 			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2581 					      rx_desc->buf_phys_addr);
2582 err_drop_frame:
2583 			mvneta_rx_error(pp, rx_desc);
2584 			/* leave the descriptor untouched */
2585 			continue;
2586 		}
2587 
2588 		if (rx_bytes <= rx_copybreak) {
2589 			/* better to copy a small frame than unmap the DMA region */
2590 			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2591 			if (unlikely(!skb))
2592 				goto err_drop_frame_ret_pool;
2593 
2594 			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2595 			                              rx_desc->buf_phys_addr,
2596 			                              MVNETA_MH_SIZE + NET_SKB_PAD,
2597 			                              rx_bytes,
2598 			                              DMA_FROM_DEVICE);
2599 			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2600 				     rx_bytes);
2601 
2602 			skb->protocol = eth_type_trans(skb, dev);
2603 			skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2604 			napi_gro_receive(napi, skb);
2605 
2606 			rcvd_pkts++;
2607 			rcvd_bytes += rx_bytes;
2608 
2609 			/* Return the buffer to the pool */
2610 			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2611 					      rx_desc->buf_phys_addr);
2612 
2613 			/* leave the descriptor and buffer untouched */
2614 			continue;
2615 		}
2616 
2617 		/* Refill processing */
2618 		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2619 		if (err) {
2620 			struct mvneta_pcpu_stats *stats;
2621 
2622 			netdev_err(dev, "Linux processing - Can't refill\n");
2623 
2624 			stats = this_cpu_ptr(pp->stats);
2625 			u64_stats_update_begin(&stats->syncp);
2626 			stats->es.refill_error++;
2627 			u64_stats_update_end(&stats->syncp);
2628 
2629 			goto err_drop_frame_ret_pool;
2630 		}
2631 
2632 		frag_size = bm_pool->hwbm_pool.frag_size;
2633 
2634 		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2635 
2636 		/* After the refill, the old buffer has to be unmapped
2637 		 * regardless of whether the skb was built successfully.
2638 		 */
2639 		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2640 				 bm_pool->buf_size, DMA_FROM_DEVICE);
2641 		if (!skb)
2642 			goto err_drop_frame;
2643 
2644 		rcvd_pkts++;
2645 		rcvd_bytes += rx_bytes;
2646 
2647 		/* Linux processing */
2648 		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2649 		skb_put(skb, rx_bytes);
2650 
2651 		skb->protocol = eth_type_trans(skb, dev);
2652 		skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2653 
2654 		napi_gro_receive(napi, skb);
2655 	}
2656 
2657 	if (rcvd_pkts) {
2658 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2659 
2660 		u64_stats_update_begin(&stats->syncp);
2661 		stats->es.ps.rx_packets += rcvd_pkts;
2662 		stats->es.ps.rx_bytes += rcvd_bytes;
2663 		u64_stats_update_end(&stats->syncp);
2664 	}
2665 
2666 	/* Update rxq management counters */
2667 	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2668 
2669 	return rx_done;
2670 }
2671 
2672 static void mvneta_free_tso_hdrs(struct mvneta_port *pp,
2673 				 struct mvneta_tx_queue *txq)
2674 {
2675 	struct device *dev = pp->dev->dev.parent;
2676 	int i;
2677 
2678 	for (i = 0; i < MVNETA_MAX_TSO_PAGES; i++) {
2679 		if (txq->tso_hdrs[i]) {
2680 			dma_free_coherent(dev, MVNETA_TSO_PAGE_SIZE,
2681 					  txq->tso_hdrs[i],
2682 					  txq->tso_hdrs_phys[i]);
2683 			txq->tso_hdrs[i] = NULL;
2684 		}
2685 	}
2686 }
2687 
2688 static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp,
2689 				 struct mvneta_tx_queue *txq)
2690 {
2691 	struct device *dev = pp->dev->dev.parent;
2692 	int i, num;
2693 
2694 	num = DIV_ROUND_UP(txq->size, MVNETA_TSO_PER_PAGE);
2695 	for (i = 0; i < num; i++) {
2696 		txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE,
2697 						      &txq->tso_hdrs_phys[i],
2698 						      GFP_KERNEL);
2699 		if (!txq->tso_hdrs[i]) {
2700 			mvneta_free_tso_hdrs(pp, txq);
2701 			return -ENOMEM;
2702 		}
2703 	}
2704 
2705 	return 0;
2706 }
2707 
2708 static char *mvneta_get_tso_hdr(struct mvneta_tx_queue *txq, dma_addr_t *dma)
2709 {
2710 	int index, offset;
2711 
2712 	index = txq->txq_put_index / MVNETA_TSO_PER_PAGE;
2713 	offset = (txq->txq_put_index % MVNETA_TSO_PER_PAGE) * TSO_HEADER_SIZE;
2714 
2715 	*dma = txq->tso_hdrs_phys[index] + offset;
2716 
2717 	return txq->tso_hdrs[index] + offset;
2718 }
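
/* Illustrative sketch of the TSO header slot lookup above: headers are
 * carved out of shared DMA pages, so the put index selects a page and
 * a fixed-size offset within it.  Standalone userspace demo, compiled
 * out; the slot and page sizes are editorial assumptions.
 */
#if 0
#include <stdio.h>

#define TSO_HDR_SIZE	256			/* assumed slot size */
#define TSO_PER_PAGE	(4096 / TSO_HDR_SIZE)	/* slots per page    */

int main(void)
{
	int put_index = 37;	/* descriptor index in the TX ring */

	int page   = put_index / TSO_PER_PAGE;
	int offset = (put_index % TSO_PER_PAGE) * TSO_HDR_SIZE;

	/* the header lives at tso_hdrs[page] + offset, CPU and DMA alike */
	printf("page %d, offset %d\n", page, offset);	/* page 2, 1280 */
	return 0;
}
#endif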
2719 
2720 static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq,
2721 			       struct tso_t *tso, int size, bool is_last)
2722 {
2723 	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2724 	int hdr_len = skb_tcp_all_headers(skb);
2725 	struct mvneta_tx_desc *tx_desc;
2726 	dma_addr_t hdr_phys;
2727 	char *hdr;
2728 
2729 	hdr = mvneta_get_tso_hdr(txq, &hdr_phys);
2730 	tso_build_hdr(skb, hdr, tso, size, is_last);
2731 
2732 	tx_desc = mvneta_txq_next_desc_get(txq);
2733 	tx_desc->data_size = hdr_len;
2734 	tx_desc->command = mvneta_skb_tx_csum(skb);
2735 	tx_desc->command |= MVNETA_TXD_F_DESC;
2736 	tx_desc->buf_phys_addr = hdr_phys;
2737 	buf->type = MVNETA_TYPE_TSO;
2738 	buf->skb = NULL;
2739 
2740 	mvneta_txq_inc_put(txq);
2741 }
2742 
2743 static inline int
2744 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2745 		    struct sk_buff *skb, char *data, int size,
2746 		    bool last_tcp, bool is_last)
2747 {
2748 	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2749 	struct mvneta_tx_desc *tx_desc;
2750 
2751 	tx_desc = mvneta_txq_next_desc_get(txq);
2752 	tx_desc->data_size = size;
2753 	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2754 						size, DMA_TO_DEVICE);
2755 	if (unlikely(dma_mapping_error(dev->dev.parent,
2756 		     tx_desc->buf_phys_addr))) {
2757 		mvneta_txq_desc_put(txq);
2758 		return -ENOMEM;
2759 	}
2760 
2761 	tx_desc->command = 0;
2762 	buf->type = MVNETA_TYPE_SKB;
2763 	buf->skb = NULL;
2764 
2765 	if (last_tcp) {
2766 		/* last descriptor in the TCP packet */
2767 		tx_desc->command = MVNETA_TXD_L_DESC;
2768 
2769 		/* last descriptor in SKB */
2770 		if (is_last)
2771 			buf->skb = skb;
2772 	}
2773 	mvneta_txq_inc_put(txq);
2774 	return 0;
2775 }
2776 
2777 static void mvneta_release_descs(struct mvneta_port *pp,
2778 				 struct mvneta_tx_queue *txq,
2779 				 int first, int num)
2780 {
2781 	int desc_idx, i;
2782 
2783 	desc_idx = first + num;
2784 	if (desc_idx >= txq->size)
2785 		desc_idx -= txq->size;
2786 
2787 	for (i = num; i >= 0; i--) {
2788 		struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx;
2789 		struct mvneta_tx_buf *buf = &txq->buf[desc_idx];
2790 
2791 		if (buf->type == MVNETA_TYPE_SKB)
2792 			dma_unmap_single(pp->dev->dev.parent,
2793 					 tx_desc->buf_phys_addr,
2794 					 tx_desc->data_size,
2795 					 DMA_TO_DEVICE);
2796 
2797 		mvneta_txq_desc_put(txq);
2798 
2799 		if (desc_idx == 0)
2800 			desc_idx = txq->size;
2801 		desc_idx -= 1;
2802 	}
2803 }
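
/* Illustrative sketch of the wrap-around release walk above: start at
 * first + num, then step backwards through the ring, wrapping at index
 * zero.  Standalone userspace demo, compiled out.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int size = 8, first = 6, num = 3;
	int idx = first + num, i;

	if (idx >= size)
		idx -= size;

	for (i = num; i >= 0; i--) {
		printf("release desc %d\n", idx);	/* 1, 0, 7, 6 */
		if (idx == 0)
			idx = size;
		idx -= 1;
	}
	return 0;
}
#endif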
2804 
2805 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2806 			 struct mvneta_tx_queue *txq)
2807 {
2808 	int hdr_len, total_len, data_left;
2809 	int first_desc, desc_count = 0;
2810 	struct mvneta_port *pp = netdev_priv(dev);
2811 	struct tso_t tso;
2812 
2813 	/* Count needed descriptors */
2814 	if ((txq->count + tso_count_descs(skb)) >= txq->size)
2815 		return 0;
2816 
2817 	if (skb_headlen(skb) < skb_tcp_all_headers(skb)) {
2818 		pr_info("*** Is this even possible?\n");
2819 		return 0;
2820 	}
2821 
2822 	first_desc = txq->txq_put_index;
2823 
2824 	/* Initialize the TSO handler, and prepare the first payload */
2825 	hdr_len = tso_start(skb, &tso);
2826 
2827 	total_len = skb->len - hdr_len;
2828 	while (total_len > 0) {
2829 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2830 		total_len -= data_left;
2831 		desc_count++;
2832 
2833 		/* prepare packet headers: MAC + IP + TCP */
2834 		mvneta_tso_put_hdr(skb, txq, &tso, data_left, total_len == 0);
2835 
2836 		while (data_left > 0) {
2837 			int size;

2838 			desc_count++;
2839 
2840 			size = min_t(int, tso.size, data_left);
2841 
2842 			if (mvneta_tso_put_data(dev, txq, skb,
2843 						 tso.data, size,
2844 						 size == data_left,
2845 						 total_len == 0))
2846 				goto err_release;
2847 			data_left -= size;
2848 
2849 			tso_build_data(skb, &tso, size);
2850 		}
2851 	}
2852 
2853 	return desc_count;
2854 
2855 err_release:
2856 	/* Release all used data descriptors; header descriptors must not
2857 	 * be DMA-unmapped.
2858 	 */
2859 	mvneta_release_descs(pp, txq, first_desc, desc_count - 1);
2860 	return 0;
2861 }
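
/* Illustrative sketch of the TSO descriptor accounting above: the
 * payload is cut into gso_size segments, each needing one header
 * descriptor plus one data descriptor per contiguous chunk.
 * Standalone userspace demo, compiled out; the sizes are assumed
 * example values.
 */
#if 0
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int gso_size = 1448, hdr_len = 54, chunk = 4096;
	int total_len = 10000 - hdr_len;
	int desc_count = 0;

	while (total_len > 0) {
		int data_left = min_int(gso_size, total_len);

		total_len -= data_left;
		desc_count++;				/* header desc */

		while (data_left > 0) {
			int size = min_int(chunk, data_left);

			desc_count++;			/* data desc */
			data_left -= size;
		}
	}
	printf("descriptors needed: %d\n", desc_count);
	return 0;
}
#endif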
2862 
2863 /* Handle tx fragmentation processing */
2864 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2865 				  struct mvneta_tx_queue *txq)
2866 {
2867 	struct mvneta_tx_desc *tx_desc;
2868 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
2869 	int first_desc = txq->txq_put_index;
2870 
2871 	for (i = 0; i < nr_frags; i++) {
2872 		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2873 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2874 		void *addr = skb_frag_address(frag);
2875 
2876 		tx_desc = mvneta_txq_next_desc_get(txq);
2877 		tx_desc->data_size = skb_frag_size(frag);
2878 
2879 		tx_desc->buf_phys_addr =
2880 			dma_map_single(pp->dev->dev.parent, addr,
2881 				       tx_desc->data_size, DMA_TO_DEVICE);
2882 
2883 		if (dma_mapping_error(pp->dev->dev.parent,
2884 				      tx_desc->buf_phys_addr)) {
2885 			mvneta_txq_desc_put(txq);
2886 			goto error;
2887 		}
2888 
2889 		if (i == nr_frags - 1) {
2890 			/* Last descriptor */
2891 			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2892 			buf->skb = skb;
2893 		} else {
2894 			/* Descriptor in the middle: Not First, Not Last */
2895 			tx_desc->command = 0;
2896 			buf->skb = NULL;
2897 		}
2898 		buf->type = MVNETA_TYPE_SKB;
2899 		mvneta_txq_inc_put(txq);
2900 	}
2901 
2902 	return 0;
2903 
2904 error:
2905 	/* Release all descriptors that were used to map fragments of
2906 	 * this packet, as well as the corresponding DMA mappings
2907 	 */
2908 	mvneta_release_descs(pp, txq, first_desc, i - 1);
2909 	return -ENOMEM;
2910 }
2911 
2912 /* Main tx processing */
2913 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2914 {
2915 	struct mvneta_port *pp = netdev_priv(dev);
2916 	u16 txq_id = skb_get_queue_mapping(skb);
2917 	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2918 	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2919 	struct mvneta_tx_desc *tx_desc;
2920 	int len = skb->len;
2921 	int frags = 0;
2922 	u32 tx_cmd;
2923 
2924 	if (!netif_running(dev))
2925 		goto out;
2926 
2927 	if (skb_is_gso(skb)) {
2928 		frags = mvneta_tx_tso(skb, dev, txq);
2929 		goto out;
2930 	}
2931 
2932 	frags = skb_shinfo(skb)->nr_frags + 1;
2933 
2934 	/* Get a descriptor for the first part of the packet */
2935 	tx_desc = mvneta_txq_next_desc_get(txq);
2936 
2937 	tx_cmd = mvneta_skb_tx_csum(skb);
2938 
2939 	tx_desc->data_size = skb_headlen(skb);
2940 
2941 	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2942 						tx_desc->data_size,
2943 						DMA_TO_DEVICE);
2944 	if (unlikely(dma_mapping_error(dev->dev.parent,
2945 				       tx_desc->buf_phys_addr))) {
2946 		mvneta_txq_desc_put(txq);
2947 		frags = 0;
2948 		goto out;
2949 	}
2950 
2951 	buf->type = MVNETA_TYPE_SKB;
2952 	if (frags == 1) {
2953 		/* First and Last descriptor */
2954 		tx_cmd |= MVNETA_TXD_FLZ_DESC;
2955 		tx_desc->command = tx_cmd;
2956 		buf->skb = skb;
2957 		mvneta_txq_inc_put(txq);
2958 	} else {
2959 		/* First but not Last */
2960 		tx_cmd |= MVNETA_TXD_F_DESC;
2961 		buf->skb = NULL;
2962 		mvneta_txq_inc_put(txq);
2963 		tx_desc->command = tx_cmd;
2964 		/* Continue with other skb fragments */
2965 		if (mvneta_tx_frag_process(pp, skb, txq)) {
2966 			dma_unmap_single(dev->dev.parent,
2967 					 tx_desc->buf_phys_addr,
2968 					 tx_desc->data_size,
2969 					 DMA_TO_DEVICE);
2970 			mvneta_txq_desc_put(txq);
2971 			frags = 0;
2972 			goto out;
2973 		}
2974 	}
2975 
2976 out:
2977 	if (frags > 0) {
2978 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2979 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2980 
2981 		netdev_tx_sent_queue(nq, len);
2982 
2983 		txq->count += frags;
2984 		if (txq->count >= txq->tx_stop_threshold)
2985 			netif_tx_stop_queue(nq);
2986 
2987 		/* This is not really the true transmit point, since we batch
2988 		 * up several before hitting the hardware, but is the best we
2989 		 * can do without more complexity to walk the packets in the
2990 		 * pending section of the transmit queue.
2991 		 */
2992 		skb_tx_timestamp(skb);
2993 
2994 		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
2995 		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2996 			mvneta_txq_pend_desc_add(pp, txq, frags);
2997 		else
2998 			txq->pending += frags;
2999 
3000 		u64_stats_update_begin(&stats->syncp);
3001 		stats->es.ps.tx_bytes += len;
3002 		stats->es.ps.tx_packets++;
3003 		u64_stats_update_end(&stats->syncp);
3004 	} else {
3005 		dev->stats.tx_dropped++;
3006 		dev_kfree_skb_any(skb);
3007 	}
3008 
3009 	return NETDEV_TX_OK;
3010 }
3011 
3013 /* Free tx resources, when resetting a port */
3014 static void mvneta_txq_done_force(struct mvneta_port *pp,
3015 				  struct mvneta_tx_queue *txq)
3017 {
3018 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3019 	int tx_done = txq->count;
3020 
3021 	mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
3022 
3023 	/* reset txq */
3024 	txq->count = 0;
3025 	txq->txq_put_index = 0;
3026 	txq->txq_get_index = 0;
3027 }
3028 
3029 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
3030  * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
3031  */
3032 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
3033 {
3034 	struct mvneta_tx_queue *txq;
3035 	struct netdev_queue *nq;
3036 	int cpu = smp_processor_id();
3037 
3038 	while (cause_tx_done) {
3039 		txq = mvneta_tx_done_policy(pp, cause_tx_done);
3040 
3041 		nq = netdev_get_tx_queue(pp->dev, txq->id);
3042 		__netif_tx_lock(nq, cpu);
3043 
3044 		if (txq->count)
3045 			mvneta_txq_done(pp, txq);
3046 
3047 		__netif_tx_unlock(nq);
3048 		cause_tx_done &= ~((1 << txq->id));
3049 	}
3050 }
3051 
3052 /* Compute the crc8 of the specified address, using a unique algorithm
3053  * defined by the hw spec, different from the generic crc8 algorithm.
3054  */
3055 static int mvneta_addr_crc(unsigned char *addr)
3056 {
3057 	int crc = 0;
3058 	int i;
3059 
3060 	for (i = 0; i < ETH_ALEN; i++) {
3061 		int j;
3062 
3063 		crc = (crc ^ addr[i]) << 8;
3064 		for (j = 7; j >= 0; j--) {
3065 			if (crc & (0x100 << j))
3066 				crc ^= 0x107 << j;
3067 		}
3068 	}
3069 
3070 	return crc;
3071 }
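
/* Illustrative sketch of the hw-specific CRC-8 above, usable as a
 * standalone userspace demo (compiled out): polynomial 0x107, i.e.
 * x^8 + x^2 + x + 1, reduced bit by bit from the top.
 */
#if 0
#include <stdio.h>

#define ETH_ALEN 6

static int addr_crc(const unsigned char *addr)
{
	int crc = 0, i, j;

	for (i = 0; i < ETH_ALEN; i++) {
		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--)
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
	}
	return crc;		/* always ends up in [0, 255] */
}

int main(void)
{
	const unsigned char mac[ETH_ALEN] = {
		0x01, 0x00, 0x5e, 0x01, 0x02, 0x03
	};

	/* index into the 256-entry Other Multicast table */
	printf("crc8 = 0x%02x\n", addr_crc(mac));
	return 0;
}
#endif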
3072 
3073 /* This method controls the net device special MAC multicast support.
3074  * The Special Multicast Table for MAC addresses supports MAC of the form
3075  * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
3076  * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
3077  * Table entries in the DA-Filter table. This method sets the
3078  * appropriate Special Multicast Table entry.
3079  */
3080 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
3081 					  unsigned char last_byte,
3082 					  int queue)
3083 {
3084 	unsigned int smc_table_reg;
3085 	unsigned int tbl_offset;
3086 	unsigned int reg_offset;
3087 
3088 	/* Register offset from SMC table base    */
3089 	tbl_offset = (last_byte / 4);
3090 	/* Entry offset within the above reg */
3091 	reg_offset = last_byte % 4;
3092 
3093 	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
3094 					+ tbl_offset * 4));
3095 
3096 	if (queue == -1) {
3097 		smc_table_reg &= ~(0xff << (8 * reg_offset));
3098 	} else {
3099 		smc_table_reg &= ~(0xff << (8 * reg_offset));
3100 		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
3101 	}
3102 
3103 	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
3104 		    smc_table_reg);
3105 }
3106 
3107 /* This method controls the network device Other MAC multicast support.
3108  * The Other Multicast Table is used for multicast of another type.
3109  * A CRC-8 is used as an index to the Other Multicast Table entries
3110  * in the DA-Filter table.
3111  * The method gets the CRC-8 value from the calling routine and
3112  * sets the appropriate Other Multicast Table entry according to the
3113  * specified CRC-8.
3114  */
3115 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
3116 					unsigned char crc8,
3117 					int queue)
3118 {
3119 	unsigned int omc_table_reg;
3120 	unsigned int tbl_offset;
3121 	unsigned int reg_offset;
3122 
3123 	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
3124 	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */
3125 
3126 	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
3127 
3128 	if (queue == -1) {
3129 		/* Clear accepts frame bit at specified Other DA table entry */
3130 		omc_table_reg &= ~(0xff << (8 * reg_offset));
3131 	} else {
3132 		omc_table_reg &= ~(0xff << (8 * reg_offset));
3133 		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
3134 	}
3135 
3136 	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
3137 }
3138 
3139 /* The network device supports multicast using two tables:
3140  *    1) Special Multicast Table for MAC addresses of the form
3141  *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
3142  *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
3143  *       Table entries in the DA-Filter table.
3144  *    2) Other Multicast Table for multicast of another type. A CRC-8 value
3145  *       is used as an index to the Other Multicast Table entries in the
3146  *       DA-Filter table.
3147  */
3148 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
3149 				 int queue)
3150 {
3151 	unsigned char crc_result = 0;
3152 
3153 	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
3154 		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3155 		return 0;
3156 	}
3157 
3158 	crc_result = mvneta_addr_crc(p_addr);
3159 	if (queue == -1) {
3160 		if (pp->mcast_count[crc_result] == 0) {
3161 			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
3162 				    crc_result);
3163 			return -EINVAL;
3164 		}
3165 
3166 		pp->mcast_count[crc_result]--;
3167 		if (pp->mcast_count[crc_result] != 0) {
3168 			netdev_info(pp->dev,
3169 				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
3170 				    pp->mcast_count[crc_result], crc_result);
3171 			return -EINVAL;
3172 		}
3173 	} else
3174 		pp->mcast_count[crc_result]++;
3175 
3176 	mvneta_set_other_mcast_addr(pp, crc_result, queue);
3177 
3178 	return 0;
3179 }
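
/* Illustrative sketch of the special-vs-other multicast split above:
 * 01:00:5e:00:00:XX is keyed by its last byte in the Special table,
 * anything else by its CRC-8 in the Other table.  Standalone userspace
 * demo, compiled out.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned char a[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x42 };
	const unsigned char b[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };

	printf("a -> %s\n", !memcmp(a, "\x01\x00\x5e\x00\x00", 5) ?
	       "special table, index 0x42" : "other table, index crc8");
	printf("b -> %s\n", !memcmp(b, "\x01\x00\x5e\x00\x00", 5) ?
	       "special table" : "other table, index crc8");
	return 0;
}
#endif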
3180 
3181 /* Configure the filtering mode of the Ethernet port */
3182 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
3183 					  int is_promisc)
3184 {
3185 	u32 port_cfg_reg, val;
3186 
3187 	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
3188 
3189 	val = mvreg_read(pp, MVNETA_TYPE_PRIO);
3190 
3191 	/* Set / Clear UPM bit in port configuration register */
3192 	if (is_promisc) {
3193 		/* Accept all Unicast addresses */
3194 		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
3195 		val |= MVNETA_FORCE_UNI;
3196 		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
3197 		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
3198 	} else {
3199 		/* Reject all Unicast addresses */
3200 		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
3201 		val &= ~MVNETA_FORCE_UNI;
3202 	}
3203 
3204 	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
3205 	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
3206 }
3207 
3208 /* register unicast and multicast addresses */
3209 static void mvneta_set_rx_mode(struct net_device *dev)
3210 {
3211 	struct mvneta_port *pp = netdev_priv(dev);
3212 	struct netdev_hw_addr *ha;
3213 
3214 	if (dev->flags & IFF_PROMISC) {
3215 		/* Accept all: Multicast + Unicast */
3216 		mvneta_rx_unicast_promisc_set(pp, 1);
3217 		mvneta_set_ucast_table(pp, pp->rxq_def);
3218 		mvneta_set_special_mcast_table(pp, pp->rxq_def);
3219 		mvneta_set_other_mcast_table(pp, pp->rxq_def);
3220 	} else {
3221 		/* Accept single Unicast */
3222 		mvneta_rx_unicast_promisc_set(pp, 0);
3223 		mvneta_set_ucast_table(pp, -1);
3224 		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3225 
3226 		if (dev->flags & IFF_ALLMULTI) {
3227 			/* Accept all multicast */
3228 			mvneta_set_special_mcast_table(pp, pp->rxq_def);
3229 			mvneta_set_other_mcast_table(pp, pp->rxq_def);
3230 		} else {
3231 			/* Accept only initialized multicast */
3232 			mvneta_set_special_mcast_table(pp, -1);
3233 			mvneta_set_other_mcast_table(pp, -1);
3234 
3235 			if (!netdev_mc_empty(dev)) {
3236 				netdev_for_each_mc_addr(ha, dev) {
3237 					mvneta_mcast_addr_set(pp, ha->addr,
3238 							      pp->rxq_def);
3239 				}
3240 			}
3241 		}
3242 	}
3243 }
3244 
3245 /* Interrupt handling - the callback for request_irq() */
3246 static irqreturn_t mvneta_isr(int irq, void *dev_id)
3247 {
3248 	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
3249 
3250 	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
3251 	napi_schedule(&pp->napi);
3252 
3253 	return IRQ_HANDLED;
3254 }
3255 
3256 /* Interrupt handling - the callback for request_percpu_irq() */
3257 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
3258 {
3259 	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
3260 
3261 	disable_percpu_irq(port->pp->dev->irq);
3262 	napi_schedule(&port->napi);
3263 
3264 	return IRQ_HANDLED;
3265 }
3266 
3267 static void mvneta_link_change(struct mvneta_port *pp)
3268 {
3269 	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3270 
3271 	phylink_pcs_change(&pp->phylink_pcs,
3272 			   !!(gmac_stat & MVNETA_GMAC_LINK_UP));
3273 }
3274 
3275 /* NAPI handler
3276  * Bits 0 - 7 of the causeRxTx register indicate that packets were
3277  * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
3278  * Bits 8 - 15 of the causeRxTx register indicate that packets were
3279  * received on the corresponding RXQ (Bit 8 is for RX queue 0).
3280  * Each CPU has its own causeRxTx register.
3281  */
3282 static int mvneta_poll(struct napi_struct *napi, int budget)
3283 {
3284 	int rx_done = 0;
3285 	u32 cause_rx_tx;
3286 	int rx_queue;
3287 	struct mvneta_port *pp = netdev_priv(napi->dev);
3288 	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
3289 
3290 	if (!netif_running(pp->dev)) {
3291 		napi_complete(napi);
3292 		return rx_done;
3293 	}
3294 
3295 	/* Read cause register */
3296 	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
3297 	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
3298 		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
3299 
3300 		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3301 
3302 		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
3303 				  MVNETA_CAUSE_LINK_CHANGE))
3304 			mvneta_link_change(pp);
3305 	}
3306 
3307 	/* Release Tx descriptors */
3308 	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
3309 		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3310 		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
3311 	}
3312 
3313 	/* For the case where the last mvneta_poll did not process all
3314 	 * RX packets
3315 	 */
3316 	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
3317 		port->cause_rx_tx;
3318 
3319 	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
3320 	if (rx_queue) {
3321 		rx_queue = rx_queue - 1;
3322 		if (pp->bm_priv)
3323 			rx_done = mvneta_rx_hwbm(napi, pp, budget,
3324 						 &pp->rxqs[rx_queue]);
3325 		else
3326 			rx_done = mvneta_rx_swbm(napi, pp, budget,
3327 						 &pp->rxqs[rx_queue]);
3328 	}
3329 
3330 	if (rx_done < budget) {
3331 		cause_rx_tx = 0;
3332 		napi_complete_done(napi, rx_done);
3333 
3334 		if (pp->neta_armada3700) {
3335 			unsigned long flags;
3336 
3337 			local_irq_save(flags);
3338 			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
3339 				    MVNETA_RX_INTR_MASK(rxq_number) |
3340 				    MVNETA_TX_INTR_MASK(txq_number) |
3341 				    MVNETA_MISCINTR_INTR_MASK);
3342 			local_irq_restore(flags);
3343 		} else {
3344 			enable_percpu_irq(pp->dev->irq, 0);
3345 		}
3346 	}
3347 
3348 	if (pp->neta_armada3700)
3349 		pp->cause_rx_tx = cause_rx_tx;
3350 	else
3351 		port->cause_rx_tx = cause_rx_tx;
3352 
3353 	return rx_done;
3354 }
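
/* Illustrative sketch of the cause-register decode above: the RX cause
 * bits live in [15:8], and the highest pending queue is serviced.
 * Standalone userspace demo, compiled out, reusing a local fls().
 */
#if 0
#include <stdio.h>

static int my_fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int cause_rx_tx = 0x0500;	/* RXQ0 and RXQ2 pending */
	int rx_queue = my_fls((cause_rx_tx >> 8) & 0xff);

	if (rx_queue)
		printf("service RXQ %d\n", rx_queue - 1);	/* RXQ 2 */
	return 0;
}
#endif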
3355 
3356 static int mvneta_create_page_pool(struct mvneta_port *pp,
3357 				   struct mvneta_rx_queue *rxq, int size)
3358 {
3359 	struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
3360 	struct page_pool_params pp_params = {
3361 		.order = 0,
3362 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
3363 		.pool_size = size,
3364 		.nid = NUMA_NO_NODE,
3365 		.dev = pp->dev->dev.parent,
3366 		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
3367 		.offset = pp->rx_offset_correction,
3368 		.max_len = MVNETA_MAX_RX_BUF_SIZE,
3369 	};
3370 	int err;
3371 
3372 	rxq->page_pool = page_pool_create(&pp_params);
3373 	if (IS_ERR(rxq->page_pool)) {
3374 		err = PTR_ERR(rxq->page_pool);
3375 		rxq->page_pool = NULL;
3376 		return err;
3377 	}
3378 
3379 	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
3380 				 PAGE_SIZE);
3381 	if (err < 0)
3382 		goto err_free_pp;
3383 
3384 	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
3385 					 rxq->page_pool);
3386 	if (err)
3387 		goto err_unregister_rxq;
3388 
3389 	return 0;
3390 
3391 err_unregister_rxq:
3392 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
3393 err_free_pp:
3394 	page_pool_destroy(rxq->page_pool);
3395 	rxq->page_pool = NULL;
3396 	return err;
3397 }
3398 
3399 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
3400 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3401 			   int num)
3402 {
3403 	int i, err;
3404 
3405 	err = mvneta_create_page_pool(pp, rxq, num);
3406 	if (err < 0)
3407 		return err;
3408 
3409 	for (i = 0; i < num; i++) {
3410 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
3411 		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3412 				     GFP_KERNEL) != 0) {
3413 			netdev_err(pp->dev,
3414 				   "%s: rxq %d, %d of %d buffs filled\n",
3415 				   __func__, rxq->id, i, num);
3416 			break;
3417 		}
3418 	}
3419 
3420 	/* Add this number of RX descriptors as non-occupied (ready to
3421 	 * receive packets)
3422 	 */
3423 	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3424 
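	/* Return the number of buffers actually placed in the ring; the
	 * caller currently ignores a partial fill and simply runs with a
	 * smaller effective ring.
	 */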
3425 	return i;
3426 }
3427 
3428 /* Free all packets pending transmit from all TXQs and reset TX port */
3429 static void mvneta_tx_reset(struct mvneta_port *pp)
3430 {
3431 	int queue;
3432 
3433 	/* Free the skbs in the tx ring */
3434 	for (queue = 0; queue < txq_number; queue++)
3435 		mvneta_txq_done_force(pp, &pp->txqs[queue]);
3436 
3437 	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
3438 	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
3439 }
3440 
3441 static void mvneta_rx_reset(struct mvneta_port *pp)
3442 {
3443 	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
3444 	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
3445 }
3446 
3447 /* Rx/Tx queue initialization/cleanup methods */
3448 
3449 static int mvneta_rxq_sw_init(struct mvneta_port *pp,
3450 			      struct mvneta_rx_queue *rxq)
3451 {
3452 	rxq->size = pp->rx_ring_size;
3453 
3454 	/* Allocate memory for RX descriptors */
3455 	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3456 					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3457 					&rxq->descs_phys, GFP_KERNEL);
3458 	if (!rxq->descs)
3459 		return -ENOMEM;
3460 
3461 	rxq->last_desc = rxq->size - 1;
3462 
3463 	return 0;
3464 }
3465 
3466 static void mvneta_rxq_hw_init(struct mvneta_port *pp,
3467 			       struct mvneta_rx_queue *rxq)
3468 {
3469 	/* Set Rx descriptors queue starting address */
3470 	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3471 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3472 
3473 	/* Set coalescing pkts and time */
3474 	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3475 	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3476 
3477 	if (!pp->bm_priv) {
3478 		/* Set Offset */
3479 		mvneta_rxq_offset_set(pp, rxq, 0);
3480 		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3481 					MVNETA_MAX_RX_BUF_SIZE :
3482 					MVNETA_RX_BUF_SIZE(pp->pkt_size));
3483 		mvneta_rxq_bm_disable(pp, rxq);
3484 		mvneta_rxq_fill(pp, rxq, rxq->size);
3485 	} else {
3486 		/* Set Offset */
3487 		mvneta_rxq_offset_set(pp, rxq,
3488 				      NET_SKB_PAD - pp->rx_offset_correction);
3489 
3490 		mvneta_rxq_bm_enable(pp, rxq);
3491 		/* Fill RXQ with buffers from RX pool */
3492 		mvneta_rxq_long_pool_set(pp, rxq);
3493 		mvneta_rxq_short_pool_set(pp, rxq);
3494 		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3495 	}
3496 }
3497 
3498 /* Create a specified RX queue */
3499 static int mvneta_rxq_init(struct mvneta_port *pp,
3500 			   struct mvneta_rx_queue *rxq)
3502 {
3503 	int ret;
3504 
3505 	ret = mvneta_rxq_sw_init(pp, rxq);
3506 	if (ret < 0)
3507 		return ret;
3508 
3509 	mvneta_rxq_hw_init(pp, rxq);
3510 
3511 	return 0;
3512 }
3513 
3514 /* Cleanup Rx queue */
3515 static void mvneta_rxq_deinit(struct mvneta_port *pp,
3516 			      struct mvneta_rx_queue *rxq)
3517 {
3518 	mvneta_rxq_drop_pkts(pp, rxq);
3519 
3520 	if (rxq->descs)
3521 		dma_free_coherent(pp->dev->dev.parent,
3522 				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3523 				  rxq->descs,
3524 				  rxq->descs_phys);
3525 
3526 	rxq->descs             = NULL;
3527 	rxq->last_desc         = 0;
3528 	rxq->next_desc_to_proc = 0;
3529 	rxq->descs_phys        = 0;
3530 	rxq->first_to_refill   = 0;
3531 	rxq->refill_num        = 0;
3532 }
3533 
3534 static int mvneta_txq_sw_init(struct mvneta_port *pp,
3535 			      struct mvneta_tx_queue *txq)
3536 {
3537 	int cpu, err;
3538 
3539 	txq->size = pp->tx_ring_size;
3540 
3541 	/* A queue must always have room for at least one skb.
3542 	 * Therefore, stop the queue when the number of free entries
3543 	 * reaches the maximum number of descriptors per skb.
3544 	 */
3545 	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
3546 	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
3547 
3548 	/* Allocate memory for TX descriptors */
3549 	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3550 					txq->size * MVNETA_DESC_ALIGNED_SIZE,
3551 					&txq->descs_phys, GFP_KERNEL);
3552 	if (!txq->descs)
3553 		return -ENOMEM;
3554 
3555 	txq->last_desc = txq->size - 1;
3556 
3557 	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
3558 	if (!txq->buf)
3559 		return -ENOMEM;
3560 
3561 	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
3562 	err = mvneta_alloc_tso_hdrs(pp, txq);
3563 	if (err)
3564 		return err;
3565 
3566 	/* Setup XPS mapping */
3567 	if (pp->neta_armada3700)
3568 		cpu = 0;
3569 	else if (txq_number > 1)
3570 		cpu = txq->id % num_present_cpus();
3571 	else
3572 		cpu = pp->rxq_def % num_present_cpus();
3573 	cpumask_set_cpu(cpu, &txq->affinity_mask);
3574 	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3575 
3576 	return 0;
3577 }
3578 
3579 static void mvneta_txq_hw_init(struct mvneta_port *pp,
3580 			       struct mvneta_tx_queue *txq)
3581 {
3582 	/* Set maximum bandwidth for enabled TXQs */
3583 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3584 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3585 
3586 	/* Set Tx descriptors queue starting address */
3587 	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3588 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3589 
3590 	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3591 }
3592 
3593 /* Create and initialize a tx queue */
3594 static int mvneta_txq_init(struct mvneta_port *pp,
3595 			   struct mvneta_tx_queue *txq)
3596 {
3597 	int ret;
3598 
3599 	ret = mvneta_txq_sw_init(pp, txq);
3600 	if (ret < 0)
3601 		return ret;
3602 
3603 	mvneta_txq_hw_init(pp, txq);
3604 
3605 	return 0;
3606 }
3607 
3608 /* Free the software resources of a TX queue; also used when mvneta_txq_init() fails */
3609 static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3610 				 struct mvneta_tx_queue *txq)
3611 {
3612 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3613 
3614 	kfree(txq->buf);
3615 
3616 	mvneta_free_tso_hdrs(pp, txq);
3617 	if (txq->descs)
3618 		dma_free_coherent(pp->dev->dev.parent,
3619 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
3620 				  txq->descs, txq->descs_phys);
3621 
3622 	netdev_tx_reset_queue(nq);
3623 
3624 	txq->buf               = NULL;
3625 	txq->descs             = NULL;
3626 	txq->last_desc         = 0;
3627 	txq->next_desc_to_proc = 0;
3628 	txq->descs_phys        = 0;
3629 }
3630 
3631 static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3632 				 struct mvneta_tx_queue *txq)
3633 {
3634 	/* Set minimum bandwidth for disabled TXQs */
3635 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3636 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3637 
3638 	/* Set Tx descriptors queue starting address and size */
3639 	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3640 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3641 }
3642 
3643 static void mvneta_txq_deinit(struct mvneta_port *pp,
3644 			      struct mvneta_tx_queue *txq)
3645 {
3646 	mvneta_txq_sw_deinit(pp, txq);
3647 	mvneta_txq_hw_deinit(pp, txq);
3648 }
3649 
3650 /* Cleanup all Tx queues */
3651 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3652 {
3653 	int queue;
3654 
3655 	for (queue = 0; queue < txq_number; queue++)
3656 		mvneta_txq_deinit(pp, &pp->txqs[queue]);
3657 }
3658 
3659 /* Cleanup all Rx queues */
3660 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3661 {
3662 	int queue;
3663 
3664 	for (queue = 0; queue < rxq_number; queue++)
3665 		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3666 }
3667 
3669 /* Init all Rx queues */
3670 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3671 {
3672 	int queue;
3673 
3674 	for (queue = 0; queue < rxq_number; queue++) {
3675 		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3676 
3677 		if (err) {
3678 			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3679 				   __func__, queue);
3680 			mvneta_cleanup_rxqs(pp);
3681 			return err;
3682 		}
3683 	}
3684 
3685 	return 0;
3686 }
3687 
3688 /* Init all tx queues */
3689 static int mvneta_setup_txqs(struct mvneta_port *pp)
3690 {
3691 	int queue;
3692 
3693 	for (queue = 0; queue < txq_number; queue++) {
3694 		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3695 		if (err) {
3696 			netdev_err(pp->dev, "%s: can't create txq=%d\n",
3697 				   __func__, queue);
3698 			mvneta_cleanup_txqs(pp);
3699 			return err;
3700 		}
3701 	}
3702 
3703 	return 0;
3704 }
3705 
3706 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
3707 {
3708 	int ret;
3709 
3710 	ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
3711 	if (ret)
3712 		return ret;
3713 
3714 	return phy_power_on(pp->comphy);
3715 }
3716 
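/* Configure the SERDES lane for the given MAC interface mode. When the
 * device tree describes the lane through a generic comphy it is used;
 * otherwise, as a fallback on legacy device trees, the SERDES
 * configuration register is programmed directly.
 */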
3717 static int mvneta_config_interface(struct mvneta_port *pp,
3718 				   phy_interface_t interface)
3719 {
3720 	int ret = 0;
3721 
3722 	if (pp->comphy) {
3723 		if (interface == PHY_INTERFACE_MODE_SGMII ||
3724 		    interface == PHY_INTERFACE_MODE_1000BASEX ||
3725 		    interface == PHY_INTERFACE_MODE_2500BASEX) {
3726 			ret = mvneta_comphy_init(pp, interface);
3727 		}
3728 	} else {
3729 		switch (interface) {
3730 		case PHY_INTERFACE_MODE_QSGMII:
3731 			mvreg_write(pp, MVNETA_SERDES_CFG,
3732 				    MVNETA_QSGMII_SERDES_PROTO);
3733 			break;
3734 
3735 		case PHY_INTERFACE_MODE_SGMII:
3736 		case PHY_INTERFACE_MODE_1000BASEX:
3737 			mvreg_write(pp, MVNETA_SERDES_CFG,
3738 				    MVNETA_SGMII_SERDES_PROTO);
3739 			break;
3740 
3741 		case PHY_INTERFACE_MODE_2500BASEX:
3742 			mvreg_write(pp, MVNETA_SERDES_CFG,
3743 				    MVNETA_HSGMII_SERDES_PROTO);
3744 			break;
3745 		default:
3746 			break;
3747 		}
3748 	}
3749 
3750 	pp->phy_interface = interface;
3751 
3752 	return ret;
3753 }
3754 
3755 static void mvneta_start_dev(struct mvneta_port *pp)
3756 {
3757 	int cpu;
3758 
3759 	WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
3760 
3761 	mvneta_max_rx_size_set(pp, pp->pkt_size);
3762 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3763 
3764 	/* start the Rx/Tx activity */
3765 	mvneta_port_enable(pp);
3766 
3767 	if (!pp->neta_armada3700) {
3768 		/* Enable polling on the port */
3769 		for_each_online_cpu(cpu) {
3770 			struct mvneta_pcpu_port *port =
3771 				per_cpu_ptr(pp->ports, cpu);
3772 
3773 			napi_enable(&port->napi);
3774 		}
3775 	} else {
3776 		napi_enable(&pp->napi);
3777 	}
3778 
3779 	/* Unmask interrupts. It has to be done from each CPU */
3780 	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3781 
3782 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3783 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3784 		    MVNETA_CAUSE_LINK_CHANGE);
3785 
3786 	phylink_start(pp->phylink);
3787 
3788 	/* We may have called phylink_speed_down before */
3789 	phylink_speed_up(pp->phylink);
3790 
3791 	netif_tx_start_all_queues(pp->dev);
3792 
3793 	clear_bit(__MVNETA_DOWN, &pp->state);
3794 }
3795 
3796 static void mvneta_stop_dev(struct mvneta_port *pp)
3797 {
3798 	unsigned int cpu;
3799 
3800 	set_bit(__MVNETA_DOWN, &pp->state);
3801 
3802 	if (device_may_wakeup(&pp->dev->dev))
3803 		phylink_speed_down(pp->phylink, false);
3804 
3805 	phylink_stop(pp->phylink);
3806 
3807 	if (!pp->neta_armada3700) {
3808 		for_each_online_cpu(cpu) {
3809 			struct mvneta_pcpu_port *port =
3810 				per_cpu_ptr(pp->ports, cpu);
3811 
3812 			napi_disable(&port->napi);
3813 		}
3814 	} else {
3815 		napi_disable(&pp->napi);
3816 	}
3817 
3818 	netif_carrier_off(pp->dev);
3819 
3820 	mvneta_port_down(pp);
3821 	netif_tx_stop_all_queues(pp->dev);
3822 
3823 	/* Stop the port activity */
3824 	mvneta_port_disable(pp);
3825 
3826 	/* Clear all ethernet port interrupts */
3827 	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3828 
3829 	/* Mask all ethernet port interrupts */
3830 	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3831 
3832 	mvneta_tx_reset(pp);
3833 	mvneta_rx_reset(pp);
3834 
3835 	WARN_ON(phy_power_off(pp->comphy));
3836 }
3837 
3838 static void mvneta_percpu_enable(void *arg)
3839 {
3840 	struct mvneta_port *pp = arg;
3841 
3842 	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3843 }
3844 
3845 static void mvneta_percpu_disable(void *arg)
3846 {
3847 	struct mvneta_port *pp = arg;
3848 
3849 	disable_percpu_irq(pp->dev->irq);
3850 }
3851 
3852 /* Change the device MTU */
3853 static int mvneta_change_mtu(struct net_device *dev, int mtu)
3854 {
3855 	struct mvneta_port *pp = netdev_priv(dev);
3856 	struct bpf_prog *prog = pp->xdp_prog;
3857 	int ret;
3858 
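	/* The hardware expresses RX buffer sizes in 8-byte units, so the
	 * RX packet size derived from the MTU must be 8-byte aligned;
	 * round up when the requested MTU does not satisfy this.
	 */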
3859 	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3860 		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3861 			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3862 		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3863 	}
3864 
3865 	if (prog && !prog->aux->xdp_has_frags &&
3866 	    mtu > MVNETA_MAX_RX_BUF_SIZE) {
3867 		netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n",
3868 			    mtu);
3869 
3870 		return -EINVAL;
3871 	}
3872 
3873 	WRITE_ONCE(dev->mtu, mtu);
3874 
3875 	if (!netif_running(dev)) {
3876 		if (pp->bm_priv)
3877 			mvneta_bm_update_mtu(pp, mtu);
3878 
3879 		netdev_update_features(dev);
3880 		return 0;
3881 	}
3882 
3883 	/* The interface is running, so we have to force a
3884 	 * reallocation of the queues
3885 	 */
3886 	mvneta_stop_dev(pp);
3887 	on_each_cpu(mvneta_percpu_disable, pp, true);
3888 
3889 	mvneta_cleanup_txqs(pp);
3890 	mvneta_cleanup_rxqs(pp);
3891 
3892 	if (pp->bm_priv)
3893 		mvneta_bm_update_mtu(pp, mtu);
3894 
3895 	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3896 
3897 	ret = mvneta_setup_rxqs(pp);
3898 	if (ret) {
3899 		netdev_err(dev, "unable to setup rxqs after MTU change\n");
3900 		return ret;
3901 	}
3902 
3903 	ret = mvneta_setup_txqs(pp);
3904 	if (ret) {
3905 		netdev_err(dev, "unable to setup txqs after MTU change\n");
3906 		return ret;
3907 	}
3908 
3909 	on_each_cpu(mvneta_percpu_enable, pp, true);
3910 	mvneta_start_dev(pp);
3911 
3912 	netdev_update_features(dev);
3913 
3914 	return 0;
3915 }
3916 
3917 static netdev_features_t mvneta_fix_features(struct net_device *dev,
3918 					     netdev_features_t features)
3919 {
3920 	struct mvneta_port *pp = netdev_priv(dev);
3921 
3922 	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3923 		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3924 		netdev_info(dev,
3925 			    "Disable IP checksum for MTU greater than %dB\n",
3926 			    pp->tx_csum_limit);
3927 	}
3928 
3929 	return features;
3930 }
3931 
3932 /* Get mac address */
3933 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3934 {
3935 	u32 mac_addr_l, mac_addr_h;
3936 
3937 	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3938 	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3939 	addr[0] = (mac_addr_h >> 24) & 0xFF;
3940 	addr[1] = (mac_addr_h >> 16) & 0xFF;
3941 	addr[2] = (mac_addr_h >> 8) & 0xFF;
3942 	addr[3] = mac_addr_h & 0xFF;
3943 	addr[4] = (mac_addr_l >> 8) & 0xFF;
3944 	addr[5] = mac_addr_l & 0xFF;
3945 }
3946 
3947 /* Handle setting mac address */
3948 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3949 {
3950 	struct mvneta_port *pp = netdev_priv(dev);
3951 	struct sockaddr *sockaddr = addr;
3952 	int ret;
3953 
3954 	ret = eth_prepare_mac_addr_change(dev, addr);
3955 	if (ret < 0)
3956 		return ret;
3957 	/* Remove previous address table entry */
3958 	mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3959 
3960 	/* Set new addr in hw */
3961 	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3962 
3963 	eth_commit_mac_addr_change(dev, addr);
3964 	return 0;
3965 }
3966 
3967 static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
3968 {
3969 	return container_of(pcs, struct mvneta_port, phylink_pcs);
3970 }
3971 
3972 static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs,
3973 					   phy_interface_t interface)
3974 {
3975 	/* When operating in an 802.3z mode, we must have AN enabled:
3976 	 * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
3977 	 * When <PortType> = 1 (1000BASE-X) this field must be set to 1."
3978 	 * Therefore, inband is "required".
3979 	 */
3980 	if (phy_interface_mode_is_8023z(interface))
3981 		return LINK_INBAND_ENABLE;
3982 
3983 	/* QSGMII, SGMII and RGMII can be configured to use inband
3984 	 * signalling of the AN result. Indicate these as "possible".
3985 	 */
3986 	if (interface == PHY_INTERFACE_MODE_SGMII ||
3987 	    interface == PHY_INTERFACE_MODE_QSGMII ||
3988 	    phy_interface_mode_is_rgmii(interface))
3989 		return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
3990 
3991 	/* For any other modes, indicate that inband is not supported. */
3992 	return LINK_INBAND_DISABLE;
3993 }
3994 
3995 static void mvneta_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
3996 				 struct phylink_link_state *state)
3997 {
3998 	struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
3999 	u32 gmac_stat;
4000 
4001 	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
4002 
4003 	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
4004 		state->speed =
4005 			state->interface == PHY_INTERFACE_MODE_2500BASEX ?
4006 			SPEED_2500 : SPEED_1000;
4007 	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
4008 		state->speed = SPEED_100;
4009 	else
4010 		state->speed = SPEED_10;
4011 
4012 	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
4013 	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
4014 	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
4015 
4016 	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
4017 		state->pause |= MLO_PAUSE_RX;
4018 	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
4019 		state->pause |= MLO_PAUSE_TX;
4020 }
4021 
4022 static int mvneta_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
4023 			     phy_interface_t interface,
4024 			     const unsigned long *advertising,
4025 			     bool permit_pause_to_mac)
4026 {
4027 	struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
4028 	u32 mask, val, an, old_an, changed;
4029 
4030 	mask = MVNETA_GMAC_INBAND_AN_ENABLE |
4031 	       MVNETA_GMAC_INBAND_RESTART_AN |
4032 	       MVNETA_GMAC_AN_SPEED_EN |
4033 	       MVNETA_GMAC_AN_FLOW_CTRL_EN |
4034 	       MVNETA_GMAC_AN_DUPLEX_EN;
4035 
4036 	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
4037 		mask |= MVNETA_GMAC_CONFIG_MII_SPEED |
4038 			MVNETA_GMAC_CONFIG_GMII_SPEED |
4039 			MVNETA_GMAC_CONFIG_FULL_DUPLEX;
4040 		val = MVNETA_GMAC_INBAND_AN_ENABLE;
4041 
4042 		if (interface == PHY_INTERFACE_MODE_SGMII) {
4043 			/* SGMII mode receives the speed and duplex from PHY */
4044 			val |= MVNETA_GMAC_AN_SPEED_EN |
4045 			       MVNETA_GMAC_AN_DUPLEX_EN;
4046 		} else {
4047 			/* 802.3z mode has fixed speed and duplex */
4048 			val |= MVNETA_GMAC_CONFIG_GMII_SPEED |
4049 			       MVNETA_GMAC_CONFIG_FULL_DUPLEX;
4050 
4051 			/* The FLOW_CTRL_EN bit selects whether the GMAC
4052 			 * pause mode is controlled automatically by the
4053 			 * hardware or manually via CONFIG_FLOW_CTRL.
4054 			 */
4055 			if (permit_pause_to_mac)
4056 				val |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
4057 
4058 			/* Update the advertisement bits */
4059 			mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
4060 			if (phylink_test(advertising, Pause))
4061 				val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
4062 		}
4063 	} else {
4064 		/* Phy or fixed speed - disable in-band AN modes */
4065 		val = 0;
4066 	}
4067 
4068 	old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4069 	an = (an & ~mask) | val;
4070 	changed = old_an ^ an;
4071 	if (changed)
4072 		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an);
4073 
4074 	/* We are only interested in the advertisement bits changing */
4075 	return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL);
4076 }
4077 
4078 static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
4079 {
4080 	struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
4081 	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4082 
4083 	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
4084 		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
4085 	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
4086 		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
4087 }
4088 
4089 static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
4090 	.pcs_inband_caps = mvneta_pcs_inband_caps,
4091 	.pcs_get_state = mvneta_pcs_get_state,
4092 	.pcs_config = mvneta_pcs_config,
4093 	.pcs_an_restart = mvneta_pcs_an_restart,
4094 };
4095 
4096 static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config,
4097 						 phy_interface_t interface)
4098 {
4099 	struct net_device *ndev = to_net_dev(config->dev);
4100 	struct mvneta_port *pp = netdev_priv(ndev);
4101 
4102 	return &pp->phylink_pcs;
4103 }
4104 
4105 static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
4106 			      phy_interface_t interface)
4107 {
4108 	struct net_device *ndev = to_net_dev(config->dev);
4109 	struct mvneta_port *pp = netdev_priv(ndev);
4110 	u32 val;
4111 
4112 	if (pp->phy_interface != interface ||
4113 	    phylink_autoneg_inband(mode)) {
4114 		/* Force the link down when changing the interface or if in
4115 		 * in-band mode. According to Armada 370 documentation, we
4116 		 * can only change the port mode and in-band enable when the
4117 		 * link is down.
4118 		 */
4119 		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4120 		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
4121 		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
4122 		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4123 	}
4124 
4125 	if (pp->phy_interface != interface)
4126 		WARN_ON(phy_power_off(pp->comphy));
4127 
4128 	/* Enable the 1ms clock */
4129 	if (phylink_autoneg_inband(mode)) {
4130 		unsigned long rate = clk_get_rate(pp->clk);
4131 
4132 		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER,
4133 			    MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000));
4134 	}
4135 
4136 	return 0;
4137 }
4138 
4139 static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
4140 			      const struct phylink_link_state *state)
4141 {
4142 	struct net_device *ndev = to_net_dev(config->dev);
4143 	struct mvneta_port *pp = netdev_priv(ndev);
4144 	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
4145 	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
4146 	u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
4147 
4148 	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
4149 	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
4150 				   MVNETA_GMAC2_PORT_RESET);
4151 	new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
4152 
4153 	/* Even though it might look weird, when we're configured in
4154 	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
4155 	 */
4156 	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
4157 
4158 	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
4159 	    state->interface == PHY_INTERFACE_MODE_SGMII ||
4160 	    phy_interface_mode_is_8023z(state->interface))
4161 		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
4162 
4163 	if (!phylink_autoneg_inband(mode)) {
4164 		/* Phy or fixed speed - nothing to do, leave the
4165 		 * configured speed, duplex and flow control as-is.
4166 		 */
4167 	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
4168 		/* SGMII mode receives the state from the PHY */
4169 		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
4170 	} else {
4171 		/* 802.3z negotiation - only 1000base-X */
4172 		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
4173 	}
4174 
4175 	/* When at 2.5G, the link partner can send frames with shortened
4176 	 * preambles.
4177 	 */
4178 	if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
4179 		new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
4180 
4181 	if (new_ctrl0 != gmac_ctrl0)
4182 		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
4183 	if (new_ctrl2 != gmac_ctrl2)
4184 		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
4185 	if (new_ctrl4 != gmac_ctrl4)
4186 		mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
4187 
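	/* If the port was held in reset, the CTRL_2 write above cleared
	 * the reset bit; poll until the hardware reports it de-asserted.
	 */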
4188 	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
4189 		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4190 			MVNETA_GMAC2_PORT_RESET) != 0)
4191 			continue;
4192 	}
4193 }
4194 
4195 static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
4196 			     phy_interface_t interface)
4197 {
4198 	struct net_device *ndev = to_net_dev(config->dev);
4199 	struct mvneta_port *pp = netdev_priv(ndev);
4200 	u32 val, clk;
4201 
4202 	/* Disable 1ms clock if not in in-band mode */
4203 	if (!phylink_autoneg_inband(mode)) {
4204 		clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
4205 		clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
4206 		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk);
4207 	}
4208 
4209 	if (pp->phy_interface != interface)
4210 		/* Enable the Serdes PHY */
4211 		WARN_ON(mvneta_config_interface(pp, interface));
4212 
4213 	/* Allow the link to come up if in in-band mode, otherwise the
4214 	 * link is forced via mac_link_down()/mac_link_up()
4215 	 */
4216 	if (phylink_autoneg_inband(mode)) {
4217 		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4218 		val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
4219 		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4220 	}
4221 
4222 	return 0;
4223 }
4224 
4225 static void mvneta_mac_link_down(struct phylink_config *config,
4226 				 unsigned int mode, phy_interface_t interface)
4227 {
4228 	struct net_device *ndev = to_net_dev(config->dev);
4229 	struct mvneta_port *pp = netdev_priv(ndev);
4230 	u32 val;
4231 
4232 	mvneta_port_down(pp);
4233 
4234 	if (!phylink_autoneg_inband(mode)) {
4235 		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4236 		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
4237 		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
4238 		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4239 	}
4240 }
4241 
4242 static void mvneta_mac_link_up(struct phylink_config *config,
4243 			       struct phy_device *phy,
4244 			       unsigned int mode, phy_interface_t interface,
4245 			       int speed, int duplex,
4246 			       bool tx_pause, bool rx_pause)
4247 {
4248 	struct net_device *ndev = to_net_dev(config->dev);
4249 	struct mvneta_port *pp = netdev_priv(ndev);
4250 	u32 val;
4251 
4252 	if (!phylink_autoneg_inband(mode)) {
4253 		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4254 		val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
4255 			 MVNETA_GMAC_CONFIG_MII_SPEED |
4256 			 MVNETA_GMAC_CONFIG_GMII_SPEED |
4257 			 MVNETA_GMAC_CONFIG_FLOW_CTRL |
4258 			 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
4259 		val |= MVNETA_GMAC_FORCE_LINK_PASS;
4260 
4261 		if (speed == SPEED_1000 || speed == SPEED_2500)
4262 			val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
4263 		else if (speed == SPEED_100)
4264 			val |= MVNETA_GMAC_CONFIG_MII_SPEED;
4265 
4266 		if (duplex == DUPLEX_FULL)
4267 			val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
4268 
4269 		if (tx_pause || rx_pause)
4270 			val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
4271 
4272 		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4273 	} else {
4274 		/* When inband doesn't cover flow control or flow control is
4275 		 * disabled, we need to configure it manually. This bit only
4276 		 * has an effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
4277 		 */
4278 		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4279 		val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;
4280 
4281 		if (tx_pause || rx_pause)
4282 			val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
4283 
4284 		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4285 	}
4286 
4287 	mvneta_port_up(pp);
4288 }
4289 
4290 static void mvneta_mac_disable_tx_lpi(struct phylink_config *config)
4291 {
4292 	struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
4293 	u32 lpi1;
4294 
4295 	lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
4296 	lpi1 &= ~(MVNETA_LPI_CTRL_1_REQUEST_ENABLE |
4297 		  MVNETA_LPI_CTRL_1_REQUEST_FORCE |
4298 		  MVNETA_LPI_CTRL_1_MANUAL_MODE);
4299 	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
4300 }
4301 
4302 static int mvneta_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
4303 				    bool tx_clk_stop)
4304 {
4305 	struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
4306 	u32 ts, tw, lpi0, lpi1, status;
4307 
4308 	status = mvreg_read(pp, MVNETA_GMAC_STATUS);
4309 	if (status & MVNETA_GMAC_SPEED_1000) {
4310 		/* At 1G speeds, the timer resolution is 1us, and
4311 		 * 802.3 says tw is 16.5us. Round up to 17us.
4312 		 */
4313 		tw = 17;
4314 		ts = timer;
4315 	} else {
4316 		/* At 100M speeds, the timer resolution is 10us, and
4317 		 * 802.3 says tw is 30us.
4318 		 */
4319 		tw = 3;
4320 		ts = DIV_ROUND_UP(timer, 10);
4321 	}
4322 
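	/* The TS field is only 8 bits wide; saturate rather than wrap */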
4323 	if (ts > 255)
4324 		ts = 255;
4325 
4326 	/* Configure ts */
4327 	lpi0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4328 	lpi0 = u32_replace_bits(lpi0, ts, MVNETA_LPI_CTRL_0_TS);
4329 	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi0);
4330 
4331 	/* Configure tw and enable LPI generation */
4332 	lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
4333 	lpi1 = u32_replace_bits(lpi1, tw, MVNETA_LPI_CTRL_1_TW);
4334 	lpi1 |= MVNETA_LPI_CTRL_1_REQUEST_ENABLE;
4335 	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
4336 
4337 	return 0;
4338 }
4339 
4340 static const struct phylink_mac_ops mvneta_phylink_ops = {
4341 	.mac_select_pcs = mvneta_mac_select_pcs,
4342 	.mac_prepare = mvneta_mac_prepare,
4343 	.mac_config = mvneta_mac_config,
4344 	.mac_finish = mvneta_mac_finish,
4345 	.mac_link_down = mvneta_mac_link_down,
4346 	.mac_link_up = mvneta_mac_link_up,
4347 	.mac_disable_tx_lpi = mvneta_mac_disable_tx_lpi,
4348 	.mac_enable_tx_lpi = mvneta_mac_enable_tx_lpi,
4349 };
4350 
4351 static int mvneta_mdio_probe(struct mvneta_port *pp)
4352 {
4353 	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
4354 	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
4355 
4356 	if (err)
4357 		netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4358 
4359 	phylink_ethtool_get_wol(pp->phylink, &wol);
4360 	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
4361 
4362 	/* PHY WoL may be enabled but device wakeup disabled */
4363 	if (wol.supported)
4364 		device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
4365 
4366 	return err;
4367 }
4368 
4369 static void mvneta_mdio_remove(struct mvneta_port *pp)
4370 {
4371 	phylink_disconnect_phy(pp->phylink);
4372 }
4373 
4374 /* Electing a CPU must be done in an atomic way: it should be done
4375  * after or before the removal/insertion of a CPU and this function is
4376  * not reentrant.
4377  */
4378 static void mvneta_percpu_elect(struct mvneta_port *pp)
4379 {
4380 	int elected_cpu = 0, max_cpu, cpu;
4381 
4382 	/* Use the CPU associated with the default rxq when it is online;
4383 	 * in all other cases, use CPU 0, which can't be offline.
4384 	 */
4385 	if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
4386 		elected_cpu = pp->rxq_def;
4387 
4388 	max_cpu = num_present_cpus();
4389 
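	/* RX queues are statically striped across the present CPUs: rxq N
	 * belongs to CPU (N % max_cpu), so e.g. 8 queues on 4 CPUs gives
	 * CPU 1 the queues 1 and 5.
	 */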
4390 	for_each_online_cpu(cpu) {
4391 		int rxq_map = 0, txq_map = 0;
4392 		int rxq;
4393 
4394 		for (rxq = 0; rxq < rxq_number; rxq++)
4395 			if ((rxq % max_cpu) == cpu)
4396 				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
4397 
4398 		if (cpu == elected_cpu)
4399 			/* Map the default receive queue to the elected CPU */
4400 			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
4401 
4402 		/* We update the TX queue map only if we have one
4403 		 * queue. In this case we associate the TX queue to
4404 		 * the CPU bound to the default RX queue
4405 		 */
4406 		if (txq_number == 1)
4407 			txq_map = (cpu == elected_cpu) ?
4408 				MVNETA_CPU_TXQ_ACCESS(0) : 0;
4409 		else
4410 			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
4411 				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
4412 
4413 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
4414 
4415 		/* Update the interrupt mask on each CPU according to the
4416 		 * new mapping
4417 		 */
4418 		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
4419 					 pp, true);
4420 	}
4421 }
4422 
4423 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
4424 {
4425 	int other_cpu;
4426 	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4427 						  node_online);
4428 	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4429 
4430 	/* Armada 3700's per-cpu interrupt for mvneta is broken: all interrupts
4431 	 * are routed to CPU 0, so none of the cpu-hotplug support is needed
4432 	 */
4433 	if (pp->neta_armada3700)
4434 		return 0;
4435 
4436 	netdev_lock(port->napi.dev);
4437 	spin_lock(&pp->lock);
4438 	/*
4439 	 * Configuring the driver for a new CPU while the driver is
4440 	 * stopping is racy, so just avoid it.
4441 	 */
4442 	if (pp->is_stopped) {
4443 		spin_unlock(&pp->lock);
4444 		netdev_unlock(port->napi.dev);
4445 		return 0;
4446 	}
4447 	netif_tx_stop_all_queues(pp->dev);
4448 
4449 	/*
4450 	 * We have to synchronise on the napi of each CPU except the one
4451 	 * just being woken up
4452 	 */
4453 	for_each_online_cpu(other_cpu) {
4454 		if (other_cpu != cpu) {
4455 			struct mvneta_pcpu_port *other_port =
4456 				per_cpu_ptr(pp->ports, other_cpu);
4457 
4458 			napi_synchronize(&other_port->napi);
4459 		}
4460 	}
4461 
4462 	/* Mask all ethernet port interrupts */
4463 	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4464 	napi_enable_locked(&port->napi);
4465 
4466 	/*
4467 	 * Enable per-CPU interrupts on the CPU that is
4468 	 * brought up.
4469 	 */
4470 	mvneta_percpu_enable(pp);
4471 
4472 	/*
4473 	 * Re-elect the CPU owning the default RX queue and update the
4474 	 * per-CPU queue mappings accordingly.
4475 	 */
4476 	mvneta_percpu_elect(pp);
4477 
4478 	/* Unmask all ethernet port interrupts */
4479 	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4480 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4481 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
4482 		    MVNETA_CAUSE_LINK_CHANGE);
4483 	netif_tx_start_all_queues(pp->dev);
4484 	spin_unlock(&pp->lock);
4485 	netdev_unlock(port->napi.dev);
4486 
4487 	return 0;
4488 }
4489 
4490 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
4491 {
4492 	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4493 						  node_online);
4494 	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4495 
4496 	/*
4497 	 * Taking this lock guarantees that any CPU election in progress
4498 	 * has completed before we continue.
4499 	 */
4500 	spin_lock(&pp->lock);
4501 	/* Mask all ethernet port interrupts */
4502 	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4503 	spin_unlock(&pp->lock);
4504 
4505 	napi_synchronize(&port->napi);
4506 	napi_disable(&port->napi);
4507 	/* Disable per-CPU interrupts on the CPU that is brought down. */
4508 	mvneta_percpu_disable(pp);
4509 	return 0;
4510 }
4511 
4512 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
4513 {
4514 	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4515 						  node_dead);
4516 
4517 	/* Check if a new CPU must be elected now that this one is down */
4518 	spin_lock(&pp->lock);
4519 	mvneta_percpu_elect(pp);
4520 	spin_unlock(&pp->lock);
4521 	/* Unmask all ethernet port interrupts */
4522 	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4523 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4524 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
4525 		    MVNETA_CAUSE_LINK_CHANGE);
4526 	netif_tx_start_all_queues(pp->dev);
4527 	return 0;
4528 }
4529 
4530 static int mvneta_open(struct net_device *dev)
4531 {
4532 	struct mvneta_port *pp = netdev_priv(dev);
4533 	int ret;
4534 
4535 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
4536 
4537 	ret = mvneta_setup_rxqs(pp);
4538 	if (ret)
4539 		return ret;
4540 
4541 	ret = mvneta_setup_txqs(pp);
4542 	if (ret)
4543 		goto err_cleanup_rxqs;
4544 
4545 	/* Connect to port interrupt line */
4546 	if (pp->neta_armada3700)
4547 		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
4548 				  dev->name, pp);
4549 	else
4550 		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
4551 					 dev->name, pp->ports);
4552 	if (ret) {
4553 		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4554 		goto err_cleanup_txqs;
4555 	}
4556 
4557 	if (!pp->neta_armada3700) {
4558 		/* Enable the per-CPU interrupt on all CPUs to handle our RX
4559 		 * queue interrupts
4560 		 */
4561 		on_each_cpu(mvneta_percpu_enable, pp, true);
4562 
4563 		pp->is_stopped = false;
4564 		/* Register a CPU notifier to handle the case where our CPU
4565 		 * might be taken offline.
4566 		 */
4567 		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
4568 						       &pp->node_online);
4569 		if (ret)
4570 			goto err_free_irq;
4571 
4572 		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4573 						       &pp->node_dead);
4574 		if (ret)
4575 			goto err_free_online_hp;
4576 	}
4577 
4578 	ret = mvneta_mdio_probe(pp);
4579 	if (ret < 0) {
4580 		netdev_err(dev, "cannot probe MDIO bus\n");
4581 		goto err_free_dead_hp;
4582 	}
4583 
4584 	mvneta_start_dev(pp);
4585 
4586 	return 0;
4587 
4588 err_free_dead_hp:
4589 	if (!pp->neta_armada3700)
4590 		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4591 						    &pp->node_dead);
4592 err_free_online_hp:
4593 	if (!pp->neta_armada3700)
4594 		cpuhp_state_remove_instance_nocalls(online_hpstate,
4595 						    &pp->node_online);
4596 err_free_irq:
4597 	if (pp->neta_armada3700) {
4598 		free_irq(pp->dev->irq, pp);
4599 	} else {
4600 		on_each_cpu(mvneta_percpu_disable, pp, true);
4601 		free_percpu_irq(pp->dev->irq, pp->ports);
4602 	}
4603 err_cleanup_txqs:
4604 	mvneta_cleanup_txqs(pp);
4605 err_cleanup_rxqs:
4606 	mvneta_cleanup_rxqs(pp);
4607 	return ret;
4608 }
4609 
4610 /* Stop the port, free port interrupt line */
4611 static int mvneta_stop(struct net_device *dev)
4612 {
4613 	struct mvneta_port *pp = netdev_priv(dev);
4614 
4615 	if (!pp->neta_armada3700) {
4616 		/* Mark that we are stopping so the hotplug notifiers do not
4617 		 * set up the driver for new CPUs. The CPU-online notifier is
4618 		 * protected by the same spinlock, so once we hold the lock
4619 		 * any in-flight notifier work has completed.
4620 		 */
4621 		spin_lock(&pp->lock);
4622 		pp->is_stopped = true;
4623 		spin_unlock(&pp->lock);
4624 
4625 		mvneta_stop_dev(pp);
4626 		mvneta_mdio_remove(pp);
4627 
4628 		cpuhp_state_remove_instance_nocalls(online_hpstate,
4629 						    &pp->node_online);
4630 		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4631 						    &pp->node_dead);
4632 		on_each_cpu(mvneta_percpu_disable, pp, true);
4633 		free_percpu_irq(dev->irq, pp->ports);
4634 	} else {
4635 		mvneta_stop_dev(pp);
4636 		mvneta_mdio_remove(pp);
4637 		free_irq(dev->irq, pp);
4638 	}
4639 
4640 	mvneta_cleanup_rxqs(pp);
4641 	mvneta_cleanup_txqs(pp);
4642 
4643 	return 0;
4644 }
4645 
4646 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4647 {
4648 	struct mvneta_port *pp = netdev_priv(dev);
4649 
4650 	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
4651 }
4652 
4653 static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
4654 			    struct netlink_ext_ack *extack)
4655 {
4656 	bool need_update, running = netif_running(dev);
4657 	struct mvneta_port *pp = netdev_priv(dev);
4658 	struct bpf_prog *old_prog;
4659 
4660 	if (prog && !prog->aux->xdp_has_frags &&
4661 	    dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
4662 		NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags");
4663 		return -EOPNOTSUPP;
4664 	}
4665 
4666 	if (pp->bm_priv) {
4667 		NL_SET_ERR_MSG_MOD(extack,
4668 				   "Hardware Buffer Management not supported on XDP");
4669 		return -EOPNOTSUPP;
4670 	}
4671 
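	/* A full stop/open cycle is only needed when XDP is turned on or
	 * off, because that changes the page_pool DMA direction; swapping
	 * one program for another is done live via xchg() below.
	 */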
4672 	need_update = !!pp->xdp_prog != !!prog;
4673 	if (running && need_update)
4674 		mvneta_stop(dev);
4675 
4676 	old_prog = xchg(&pp->xdp_prog, prog);
4677 	if (old_prog)
4678 		bpf_prog_put(old_prog);
4679 
4680 	if (running && need_update)
4681 		return mvneta_open(dev);
4682 
4683 	return 0;
4684 }
4685 
4686 static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4687 {
4688 	switch (xdp->command) {
4689 	case XDP_SETUP_PROG:
4690 		return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
4691 	default:
4692 		return -EINVAL;
4693 	}
4694 }
4695 
4696 /* Ethtool methods */
4697 
4698 /* Set link ksettings (phy address, speed) for ethtools */
4699 static int
4700 mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
4701 				  const struct ethtool_link_ksettings *cmd)
4702 {
4703 	struct mvneta_port *pp = netdev_priv(ndev);
4704 
4705 	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
4706 }
4707 
4708 /* Get link ksettings for ethtools */
4709 static int
4710 mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
4711 				  struct ethtool_link_ksettings *cmd)
4712 {
4713 	struct mvneta_port *pp = netdev_priv(ndev);
4714 
4715 	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
4716 }
4717 
4718 static int mvneta_ethtool_nway_reset(struct net_device *dev)
4719 {
4720 	struct mvneta_port *pp = netdev_priv(dev);
4721 
4722 	return phylink_ethtool_nway_reset(pp->phylink);
4723 }
4724 
4725 /* Set interrupt coalescing for ethtools */
4726 static int
4727 mvneta_ethtool_set_coalesce(struct net_device *dev,
4728 			    struct ethtool_coalesce *c,
4729 			    struct kernel_ethtool_coalesce *kernel_coal,
4730 			    struct netlink_ext_ack *extack)
4731 {
4732 	struct mvneta_port *pp = netdev_priv(dev);
4733 	int queue;
4734 
4735 	for (queue = 0; queue < rxq_number; queue++) {
4736 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4737 		rxq->time_coal = c->rx_coalesce_usecs;
4738 		rxq->pkts_coal = c->rx_max_coalesced_frames;
4739 		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
4740 		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
4741 	}
4742 
4743 	for (queue = 0; queue < txq_number; queue++) {
4744 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
4745 		txq->done_pkts_coal = c->tx_max_coalesced_frames;
4746 		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
4747 	}
4748 
4749 	return 0;
4750 }
4751 
4752 /* Get interrupt coalescing for ethtools */
4753 static int
4754 mvneta_ethtool_get_coalesce(struct net_device *dev,
4755 			    struct ethtool_coalesce *c,
4756 			    struct kernel_ethtool_coalesce *kernel_coal,
4757 			    struct netlink_ext_ack *extack)
4758 {
4759 	struct mvneta_port *pp = netdev_priv(dev);
4760 
4761 	c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
4762 	c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
4763 
4764 	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
4765 	return 0;
4766 }
4767 
4769 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
4770 					struct ethtool_drvinfo *drvinfo)
4771 {
4772 	strscpy(drvinfo->driver, MVNETA_DRIVER_NAME,
4773 		sizeof(drvinfo->driver));
4774 	strscpy(drvinfo->version, MVNETA_DRIVER_VERSION,
4775 		sizeof(drvinfo->version));
4776 	strscpy(drvinfo->bus_info, dev_name(&dev->dev),
4777 		sizeof(drvinfo->bus_info));
4778 }
4779 
4781 static void
4782 mvneta_ethtool_get_ringparam(struct net_device *netdev,
4783 			     struct ethtool_ringparam *ring,
4784 			     struct kernel_ethtool_ringparam *kernel_ring,
4785 			     struct netlink_ext_ack *extack)
4786 {
4787 	struct mvneta_port *pp = netdev_priv(netdev);
4788 
4789 	ring->rx_max_pending = MVNETA_MAX_RXD;
4790 	ring->tx_max_pending = MVNETA_MAX_TXD;
4791 	ring->rx_pending = pp->rx_ring_size;
4792 	ring->tx_pending = pp->tx_ring_size;
4793 }
4794 
4795 static int
4796 mvneta_ethtool_set_ringparam(struct net_device *dev,
4797 			     struct ethtool_ringparam *ring,
4798 			     struct kernel_ethtool_ringparam *kernel_ring,
4799 			     struct netlink_ext_ack *extack)
4800 {
4801 	struct mvneta_port *pp = netdev_priv(dev);
4802 
4803 	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
4804 		return -EINVAL;
4805 	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4806 		ring->rx_pending : MVNETA_MAX_RXD;
4807 
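	/* Keep room for at least two worst-case skbs so that the stop/wake
	 * thresholds computed in mvneta_txq_sw_init() stay positive.
	 */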
4808 	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4809 				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
4810 	if (pp->tx_ring_size != ring->tx_pending)
4811 		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
4812 			    pp->tx_ring_size, ring->tx_pending);
4813 
4814 	if (netif_running(dev)) {
4815 		mvneta_stop(dev);
4816 		if (mvneta_open(dev)) {
4817 			netdev_err(dev,
4818 				   "error on opening device after ring param change\n");
4819 			return -ENOMEM;
4820 		}
4821 	}
4822 
4823 	return 0;
4824 }
4825 
4826 static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
4827 					  struct ethtool_pauseparam *pause)
4828 {
4829 	struct mvneta_port *pp = netdev_priv(dev);
4830 
4831 	phylink_ethtool_get_pauseparam(pp->phylink, pause);
4832 }
4833 
4834 static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
4835 					 struct ethtool_pauseparam *pause)
4836 {
4837 	struct mvneta_port *pp = netdev_priv(dev);
4838 
4839 	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4840 }
4841 
4842 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
4843 				       u8 *data)
4844 {
4845 	if (sset == ETH_SS_STATS) {
4846 		struct mvneta_port *pp = netdev_priv(netdev);
4847 		int i;
4848 
4849 		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4850 			ethtool_puts(&data, mvneta_statistics[i].name);
4851 
4852 		if (!pp->bm_priv)
4853 			page_pool_ethtool_stats_get_strings(data);
4855 	}
4856 }
4857 
4858 static void
4859 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
4860 				 struct mvneta_ethtool_stats *es)
4861 {
4862 	unsigned int start;
4863 	int cpu;
4864 
4865 	for_each_possible_cpu(cpu) {
4866 		struct mvneta_pcpu_stats *stats;
4867 		u64 skb_alloc_error;
4868 		u64 refill_error;
4869 		u64 xdp_redirect;
4870 		u64 xdp_xmit_err;
4871 		u64 xdp_tx_err;
4872 		u64 xdp_pass;
4873 		u64 xdp_drop;
4874 		u64 xdp_xmit;
4875 		u64 xdp_tx;
4876 
4877 		stats = per_cpu_ptr(pp->stats, cpu);
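		/* Snapshot the per-CPU counters, retrying if a writer
		 * updated them mid-read (u64_stats seqcount protocol).
		 */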
4878 		do {
4879 			start = u64_stats_fetch_begin(&stats->syncp);
4880 			skb_alloc_error = stats->es.skb_alloc_error;
4881 			refill_error = stats->es.refill_error;
4882 			xdp_redirect = stats->es.ps.xdp_redirect;
4883 			xdp_pass = stats->es.ps.xdp_pass;
4884 			xdp_drop = stats->es.ps.xdp_drop;
4885 			xdp_xmit = stats->es.ps.xdp_xmit;
4886 			xdp_xmit_err = stats->es.ps.xdp_xmit_err;
4887 			xdp_tx = stats->es.ps.xdp_tx;
4888 			xdp_tx_err = stats->es.ps.xdp_tx_err;
4889 		} while (u64_stats_fetch_retry(&stats->syncp, start));
4890 
4891 		es->skb_alloc_error += skb_alloc_error;
4892 		es->refill_error += refill_error;
4893 		es->ps.xdp_redirect += xdp_redirect;
4894 		es->ps.xdp_pass += xdp_pass;
4895 		es->ps.xdp_drop += xdp_drop;
4896 		es->ps.xdp_xmit += xdp_xmit;
4897 		es->ps.xdp_xmit_err += xdp_xmit_err;
4898 		es->ps.xdp_tx += xdp_tx;
4899 		es->ps.xdp_tx_err += xdp_tx_err;
4900 	}
4901 }
4902 
4903 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4904 {
4905 	struct mvneta_ethtool_stats stats = {};
4906 	const struct mvneta_statistic *s;
4907 	void __iomem *base = pp->base;
4908 	u32 high, low;
4909 	u64 val;
4910 	int i;
4911 
4912 	mvneta_ethtool_update_pcpu_stats(pp, &stats);
4913 	for (i = 0, s = mvneta_statistics;
4914 	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
4915 	     s++, i++) {
4916 		switch (s->type) {
4917 		case T_REG_32:
4918 			val = readl_relaxed(base + s->offset);
4919 			pp->ethtool_stats[i] += val;
4920 			break;
4921 		case T_REG_64:
4922 			/* Docs say to read the low 32 bits, then the high */
4923 			low = readl_relaxed(base + s->offset);
4924 			high = readl_relaxed(base + s->offset + 4);
4925 			val = (u64)high << 32 | low;
4926 			pp->ethtool_stats[i] += val;
4927 			break;
4928 		case T_SW:
4929 			switch (s->offset) {
4930 			case ETHTOOL_STAT_EEE_WAKEUP:
4931 				val = phylink_get_eee_err(pp->phylink);
4932 				pp->ethtool_stats[i] += val;
4933 				break;
4934 			case ETHTOOL_STAT_SKB_ALLOC_ERR:
4935 				pp->ethtool_stats[i] = stats.skb_alloc_error;
4936 				break;
4937 			case ETHTOOL_STAT_REFILL_ERR:
4938 				pp->ethtool_stats[i] = stats.refill_error;
4939 				break;
4940 			case ETHTOOL_XDP_REDIRECT:
4941 				pp->ethtool_stats[i] = stats.ps.xdp_redirect;
4942 				break;
4943 			case ETHTOOL_XDP_PASS:
4944 				pp->ethtool_stats[i] = stats.ps.xdp_pass;
4945 				break;
4946 			case ETHTOOL_XDP_DROP:
4947 				pp->ethtool_stats[i] = stats.ps.xdp_drop;
4948 				break;
4949 			case ETHTOOL_XDP_TX:
4950 				pp->ethtool_stats[i] = stats.ps.xdp_tx;
4951 				break;
4952 			case ETHTOOL_XDP_TX_ERR:
4953 				pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
4954 				break;
4955 			case ETHTOOL_XDP_XMIT:
4956 				pp->ethtool_stats[i] = stats.ps.xdp_xmit;
4957 				break;
4958 			case ETHTOOL_XDP_XMIT_ERR:
4959 				pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
4960 				break;
4961 			}
4962 			break;
4963 		}
4964 	}
4965 }
4966 
4967 static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
4968 {
4969 	struct page_pool_stats stats = {};
4970 	int i;
4971 
4972 	for (i = 0; i < rxq_number; i++) {
4973 		if (pp->rxqs[i].page_pool)
4974 			page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
4975 	}
4976 
4977 	page_pool_ethtool_stats_get(data, &stats);
4978 }
4979 
4980 static void mvneta_ethtool_get_stats(struct net_device *dev,
4981 				     struct ethtool_stats *stats, u64 *data)
4982 {
4983 	struct mvneta_port *pp = netdev_priv(dev);
4984 	int i;
4985 
4986 	mvneta_ethtool_update_stats(pp);
4987 
4988 	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4989 		*data++ = pp->ethtool_stats[i];
4990 
4991 	if (!pp->bm_priv)
4992 		mvneta_ethtool_pp_stats(pp, data);
4993 }
4994 
4995 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
4996 {
4997 	if (sset == ETH_SS_STATS) {
4998 		int count = ARRAY_SIZE(mvneta_statistics);
4999 		struct mvneta_port *pp = netdev_priv(dev);
5000 
5001 		if (!pp->bm_priv)
5002 			count += page_pool_ethtool_stats_get_count();
5003 
5004 		return count;
5005 	}
5006 
5007 	return -EOPNOTSUPP;
5008 }
5009 
5010 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
5011 {
5012 	return MVNETA_RSS_LU_TABLE_SIZE;
5013 }
5014 
5015 static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
5016 				    struct ethtool_rxnfc *info,
5017 				    u32 *rules __always_unused)
5018 {
5019 	switch (info->cmd) {
5020 	case ETHTOOL_GRXRINGS:
5021 		info->data = rxq_number;
5022 		return 0;
5023 	default:
5024 		return -EOPNOTSUPP;
5025 	}
5026 }
5027 
5028 static int mvneta_config_rss(struct mvneta_port *pp)
5029 {
5030 	int cpu;
5031 	u32 val;
5032 
5033 	netif_tx_stop_all_queues(pp->dev);
5034 
5035 	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
5036 
5037 	if (!pp->neta_armada3700) {
5038 		/* We have to synchronise on the napi of each CPU */
5039 		for_each_online_cpu(cpu) {
5040 			struct mvneta_pcpu_port *pcpu_port =
5041 				per_cpu_ptr(pp->ports, cpu);
5042 
5043 			napi_synchronize(&pcpu_port->napi);
5044 			napi_disable(&pcpu_port->napi);
5045 		}
5046 	} else {
5047 		napi_synchronize(&pp->napi);
5048 		napi_disable(&pp->napi);
5049 	}
5050 
5051 	pp->rxq_def = pp->indir[0];
5052 
5053 	/* Update unicast mapping */
5054 	mvneta_set_rx_mode(pp->dev);
5055 
5056 	/* Update the port configuration register to match the new default RXQ */
5057 	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
5058 	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
5059 
5060 	/* Update the elected CPU matching the new rxq_def */
5061 	spin_lock(&pp->lock);
5062 	mvneta_percpu_elect(pp);
5063 	spin_unlock(&pp->lock);
5064 
5065 	if (!pp->neta_armada3700) {
5066 		/* We have to synchronise on the napi of each CPU */
5067 		for_each_online_cpu(cpu) {
5068 			struct mvneta_pcpu_port *pcpu_port =
5069 				per_cpu_ptr(pp->ports, cpu);
5070 
5071 			napi_enable(&pcpu_port->napi);
5072 		}
5073 	} else {
5074 		napi_enable(&pp->napi);
5075 	}
5076 
5077 	netif_tx_start_all_queues(pp->dev);
5078 
5079 	return 0;
5080 }
5081 
5082 static int mvneta_ethtool_set_rxfh(struct net_device *dev,
5083 				   struct ethtool_rxfh_param *rxfh,
5084 				   struct netlink_ext_ack *extack)
5085 {
5086 	struct mvneta_port *pp = netdev_priv(dev);
5087 
5088 	/* Current code for Armada 3700 doesn't support RSS features yet */
5089 	if (pp->neta_armada3700)
5090 		return -EOPNOTSUPP;
5091 
5092 	/* We require at least one supported parameter to be changed
5093 	 * and no change in any of the unsupported parameters
5094 	 */
5095 	if (rxfh->key ||
5096 	    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
5097 	     rxfh->hfunc != ETH_RSS_HASH_TOP))
5098 		return -EOPNOTSUPP;
5099 
5100 	if (!rxfh->indir)
5101 		return 0;
5102 
5103 	memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE);
5104 
5105 	return mvneta_config_rss(pp);
5106 }
5107 
5108 static int mvneta_ethtool_get_rxfh(struct net_device *dev,
5109 				   struct ethtool_rxfh_param *rxfh)
5110 {
5111 	struct mvneta_port *pp = netdev_priv(dev);
5112 
5113 	/* Current code for Armada 3700 doesn't support RSS features yet */
5114 	if (pp->neta_armada3700)
5115 		return -EOPNOTSUPP;
5116 
5117 	rxfh->hfunc = ETH_RSS_HASH_TOP;
5118 
5119 	if (!rxfh->indir)
5120 		return 0;
5121 
5122 	memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
5123 
5124 	return 0;
5125 }
5126 
5127 static void mvneta_ethtool_get_wol(struct net_device *dev,
5128 				   struct ethtool_wolinfo *wol)
5129 {
5130 	struct mvneta_port *pp = netdev_priv(dev);
5131 
5132 	phylink_ethtool_get_wol(pp->phylink, wol);
5133 }
5134 
5135 static int mvneta_ethtool_set_wol(struct net_device *dev,
5136 				  struct ethtool_wolinfo *wol)
5137 {
5138 	struct mvneta_port *pp = netdev_priv(dev);
5139 	int ret;
5140 
5141 	ret = phylink_ethtool_set_wol(pp->phylink, wol);
5142 	if (!ret)
5143 		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
5144 
5145 	return ret;
5146 }
5147 
5148 static int mvneta_ethtool_get_eee(struct net_device *dev,
5149 				  struct ethtool_keee *eee)
5150 {
5151 	struct mvneta_port *pp = netdev_priv(dev);
5152 
5153 	return phylink_ethtool_get_eee(pp->phylink, eee);
5154 }
5155 
5156 static int mvneta_ethtool_set_eee(struct net_device *dev,
5157 				  struct ethtool_keee *eee)
5158 {
5159 	struct mvneta_port *pp = netdev_priv(dev);
5160 
5161 	/* The Armada 37x documents do not give limits for this other than
5162 	 * it being an 8-bit register.
5163 	 */
5164 	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
5165 		return -EINVAL;
5166 
5167 	return phylink_ethtool_set_eee(pp->phylink, eee);
5168 }
5169 
5170 static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
5171 {
5172 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
5173 }
5174 
5175 static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
5176 {
5177 	u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
5178 
5179 	val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
5180 	val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
5181 
5182 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
5183 }
5184 
5185 static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
5186 {
5187 	unsigned long core_clk_rate;
5188 	u32 refill_cycles;
5189 	u32 val;
5190 
5191 	core_clk_rate = clk_get_rate(pp->clk);
5192 	if (!core_clk_rate)
5193 		return -EINVAL;
5194 
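	/* Number of core clock cycles in one token-bucket refill period;
	 * if the core clock is too fast for this count to fit the
	 * register, per-queue rate limiting cannot be enabled.
	 */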
5195 	refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS /
5196 			(NSEC_PER_SEC / core_clk_rate);
5197 
5198 	if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK)
5199 		return -EINVAL;
5200 
5201 	/* Enable bw limit algorithm version 3 */
5202 	val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
5203 	val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
5204 	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
5205 
5206 	/* Set the base refill rate */
5207 	mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
5208 
5209 	return 0;
5210 }
5211 
5212 static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
5213 {
5214 	u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
5215 
5216 	val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
5217 	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
5218 }
5219 
5220 static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
5221 				    u64 min_rate, u64 max_rate)
5222 {
5223 	u32 refill_val, rem;
5224 	u32 val = 0;
5225 
5226 	/* Convert from Bps to bps */
5227 	max_rate *= 8;
5228 
5229 	if (min_rate)
5230 		return -EINVAL;
5231 
5232 	refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION,
5233 				 &rem);
5234 
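	/* Only rates that are an exact multiple of the hardware resolution
	 * can be programmed; anything else is rejected rather than being
	 * silently rounded.
	 */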
5235 	if (rem || !refill_val ||
5236 	    refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX)
5237 		return -EINVAL;
5238 
5239 	val = refill_val;
5240 	val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD <<
5241 		MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT);
5242 
5243 	mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
5244 
5245 	return 0;
5246 }
5247 
5248 static int mvneta_setup_mqprio(struct net_device *dev,
5249 			       struct tc_mqprio_qopt_offload *mqprio)
5250 {
5251 	struct mvneta_port *pp = netdev_priv(dev);
5252 	int rxq, txq, tc, ret;
5253 	u8 num_tc;
5254 
5255 	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
5256 		return 0;
5257 
5258 	num_tc = mqprio->qopt.num_tc;
5259 
5260 	if (num_tc > rxq_number)
5261 		return -EINVAL;
5262 
5263 	mvneta_clear_rx_prio_map(pp);
5264 
5265 	if (!num_tc) {
5266 		mvneta_disable_per_queue_rate_limit(pp);
5267 		netdev_reset_tc(dev);
5268 		return 0;
5269 	}
5270 
5271 	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
5272 
5273 	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
5274 		netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
5275 				    mqprio->qopt.offset[tc]);
5276 
5277 		for (rxq = mqprio->qopt.offset[tc];
5278 		     rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
5279 		     rxq++) {
5280 			if (rxq >= rxq_number)
5281 				return -EINVAL;
5282 
5283 			mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
5284 		}
5285 	}
5286 
5287 	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
5288 		mvneta_disable_per_queue_rate_limit(pp);
5289 		return 0;
5290 	}
5291 
5292 	if (mqprio->qopt.num_tc > txq_number)
5293 		return -EINVAL;
5294 
5295 	ret = mvneta_enable_per_queue_rate_limit(pp);
5296 	if (ret)
5297 		return ret;
5298 
5299 	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
5300 		for (txq = mqprio->qopt.offset[tc];
5301 		     txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
5302 		     txq++) {
5303 			if (txq >= txq_number)
5304 				return -EINVAL;
5305 
5306 			ret = mvneta_setup_queue_rates(pp, txq,
5307 						       mqprio->min_rate[tc],
5308 						       mqprio->max_rate[tc]);
5309 			if (ret)
5310 				return ret;
5311 		}
5312 	}
5313 
5314 	return 0;
5315 }
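/* From user space this offload path is typically exercised through
 * iproute2's mqprio qdisc. As a hedged sketch (device name and exact
 * syntax depend on the setup and iproute2 version), two traffic classes
 * with per-class TX rate caps might be configured as:
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *           map 0 0 0 0 1 1 1 1 queues 1@0 1@1 hw 1 \
 *           shaper bw_rlimit max_rate 100Mbit 200Mbit
 *
 * "hw 1" requests TC_MQPRIO_HW_OFFLOAD_TCS, and "shaper bw_rlimit" with
 * only max_rate set matches TC_MQPRIO_SHAPER_BW_RATE above (min_rate is
 * rejected by mvneta_setup_queue_rates()).
 */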
5316 
5317 static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
5318 			   void *type_data)
5319 {
5320 	switch (type) {
5321 	case TC_SETUP_QDISC_MQPRIO:
5322 		return mvneta_setup_mqprio(dev, type_data);
5323 	default:
5324 		return -EOPNOTSUPP;
5325 	}
5326 }
5327 
5328 static const struct net_device_ops mvneta_netdev_ops = {
5329 	.ndo_open            = mvneta_open,
5330 	.ndo_stop            = mvneta_stop,
5331 	.ndo_start_xmit      = mvneta_tx,
5332 	.ndo_set_rx_mode     = mvneta_set_rx_mode,
5333 	.ndo_set_mac_address = mvneta_set_mac_addr,
5334 	.ndo_change_mtu      = mvneta_change_mtu,
5335 	.ndo_fix_features    = mvneta_fix_features,
5336 	.ndo_get_stats64     = mvneta_get_stats64,
5337 	.ndo_eth_ioctl       = mvneta_ioctl,
5338 	.ndo_bpf	     = mvneta_xdp,
5339 	.ndo_xdp_xmit        = mvneta_xdp_xmit,
5340 	.ndo_setup_tc	     = mvneta_setup_tc,
5341 };
5342 
5343 static const struct ethtool_ops mvneta_eth_tool_ops = {
5344 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
5345 				     ETHTOOL_COALESCE_MAX_FRAMES,
5346 	.nway_reset	= mvneta_ethtool_nway_reset,
5347 	.get_link       = ethtool_op_get_link,
5348 	.set_coalesce   = mvneta_ethtool_set_coalesce,
5349 	.get_coalesce   = mvneta_ethtool_get_coalesce,
5350 	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
5351 	.get_ringparam  = mvneta_ethtool_get_ringparam,
5352 	.set_ringparam	= mvneta_ethtool_set_ringparam,
5353 	.get_pauseparam	= mvneta_ethtool_get_pauseparam,
5354 	.set_pauseparam	= mvneta_ethtool_set_pauseparam,
5355 	.get_strings	= mvneta_ethtool_get_strings,
5356 	.get_ethtool_stats = mvneta_ethtool_get_stats,
5357 	.get_sset_count	= mvneta_ethtool_get_sset_count,
5358 	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
5359 	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
5360 	.get_rxfh	= mvneta_ethtool_get_rxfh,
5361 	.set_rxfh	= mvneta_ethtool_set_rxfh,
5362 	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
5363 	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
5364 	.get_wol        = mvneta_ethtool_get_wol,
5365 	.set_wol        = mvneta_ethtool_set_wol,
5366 	.get_ts_info	= ethtool_op_get_ts_info,
5367 	.get_eee	= mvneta_ethtool_get_eee,
5368 	.set_eee	= mvneta_ethtool_set_eee,
5369 };
5370 
5371 /* Initialize hw */
5372 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
5373 {
5374 	int queue;
5375 
5376 	/* Disable port */
5377 	mvneta_port_disable(pp);
5378 
5379 	/* Set port default values */
5380 	mvneta_defaults_set(pp);
5381 
5382 	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
5383 	if (!pp->txqs)
5384 		return -ENOMEM;
5385 
5386 	/* Initialize TX descriptor rings */
5387 	for (queue = 0; queue < txq_number; queue++) {
5388 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
5389 		txq->id = queue;
5390 		txq->size = pp->tx_ring_size;
5391 		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
5392 	}
5393 
5394 	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
5395 	if (!pp->rxqs)
5396 		return -ENOMEM;
5397 
5398 	/* Create Rx descriptor rings */
5399 	for (queue = 0; queue < rxq_number; queue++) {
5400 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5401 		rxq->id = queue;
5402 		rxq->size = pp->rx_ring_size;
5403 		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
5404 		rxq->time_coal = MVNETA_RX_COAL_USEC;
5405 		rxq->buf_virt_addr
5406 			= devm_kmalloc_array(pp->dev->dev.parent,
5407 					     rxq->size,
5408 					     sizeof(*rxq->buf_virt_addr),
5409 					     GFP_KERNEL);
5410 		if (!rxq->buf_virt_addr)
5411 			return -ENOMEM;
5412 	}
5413 
5414 	return 0;
5415 }
5416 
5417 /* platform glue: initialize decoding windows */
5418 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
5419 				     const struct mbus_dram_target_info *dram)
5420 {
5421 	u32 win_enable;
5422 	u32 win_protect;
5423 	int i;
5424 
5425 	for (i = 0; i < 6; i++) {
5426 		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
5427 		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
5428 
5429 		if (i < 4)
5430 			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
5431 	}
5432 
5433 	win_enable = 0x3f;
5434 	win_protect = 0;
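	/* win_enable starts with all six windows disabled (one disable bit
	 * per window) and each configured window clears its bit below.
	 * win_protect packs two access-protection bits per window; the
	 * value 3 is taken here to mean full read/write access, following
	 * the usual Marvell MBus convention.
	 */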
5435 
5436 	if (dram) {
5437 		for (i = 0; i < dram->num_cs; i++) {
5438 			const struct mbus_dram_window *cs = dram->cs + i;
5439 
5440 			mvreg_write(pp, MVNETA_WIN_BASE(i),
5441 				    (cs->base & 0xffff0000) |
5442 				    (cs->mbus_attr << 8) |
5443 				    dram->mbus_dram_target_id);
5444 
5445 			mvreg_write(pp, MVNETA_WIN_SIZE(i),
5446 				    (cs->size - 1) & 0xffff0000);
5447 
5448 			win_enable &= ~(1 << i);
5449 			win_protect |= 3 << (2 * i);
5450 		}
5451 	} else {
5452 		if (pp->neta_ac5)
5453 			mvreg_write(pp, MVNETA_WIN_BASE(0),
5454 				    (MVNETA_AC5_CNM_DDR_ATTR << 8) |
5455 				    MVNETA_AC5_CNM_DDR_TARGET);
5456 		/* For Armada3700, open a default 4GB MBus window, leaving
5457 		 * arbitration of the target/attribute to a different layer
5458 		 * of configuration.
5459 		 */
5460 		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
5461 		win_enable &= ~BIT(0);
5462 		win_protect = 3;
5463 	}
5464 
5465 	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
5466 	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
5467 }
5468 
5469 /* Power up the port */
5470 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
5471 {
5472 	/* MAC Cause register should be cleared */
5473 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
5474 
5475 	if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
5476 	    phy_mode != PHY_INTERFACE_MODE_SGMII &&
5477 	    !phy_interface_mode_is_8023z(phy_mode) &&
5478 	    !phy_interface_mode_is_rgmii(phy_mode))
5479 		return -EINVAL;
5480 
5481 	/* Ensure LPI is disabled */
5482 	mvneta_mac_disable_tx_lpi(&pp->phylink_config);
5483 
5484 	return 0;
5485 }
5486 
5487 /* Device initialization routine */
5488 static int mvneta_probe(struct platform_device *pdev)
5489 {
5490 	struct device_node *dn = pdev->dev.of_node;
5491 	struct device_node *bm_node;
5492 	struct mvneta_port *pp;
5493 	struct net_device *dev;
5494 	struct phylink *phylink;
5495 	struct phy *comphy;
5496 	char hw_mac_addr[ETH_ALEN];
5497 	phy_interface_t phy_mode;
5498 	const char *mac_from;
5499 	int tx_csum_limit;
5500 	int err;
5501 	int cpu;
5502 
5503 	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
5504 				      txq_number, rxq_number);
5505 	if (!dev)
5506 		return -ENOMEM;
5507 
5508 	dev->tx_queue_len = MVNETA_MAX_TXD;
5509 	dev->watchdog_timeo = 5 * HZ;
5510 	dev->netdev_ops = &mvneta_netdev_ops;
5511 	dev->ethtool_ops = &mvneta_eth_tool_ops;
5512 
5513 	pp = netdev_priv(dev);
5514 	spin_lock_init(&pp->lock);
5515 	pp->dn = dn;
5516 
5517 	pp->rxq_def = rxq_def;
5518 	pp->indir[0] = rxq_def;
5519 
5520 	err = of_get_phy_mode(dn, &phy_mode);
5521 	if (err) {
5522 		dev_err(&pdev->dev, "incorrect phy-mode\n");
5523 		return err;
5524 	}
5525 
5526 	pp->phy_interface = phy_mode;
5527 
5528 	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
5529 	if (comphy == ERR_PTR(-EPROBE_DEFER))
5530 		return -EPROBE_DEFER;
5531 
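	/* Any error other than probe deferral means the COMPHY is simply
	 * absent; it is optional, so continue without one.
	 */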
5532 	if (IS_ERR(comphy))
5533 		comphy = NULL;
5534 
5535 	pp->comphy = comphy;
5536 
5537 	pp->base = devm_platform_ioremap_resource(pdev, 0);
5538 	if (IS_ERR(pp->base))
5539 		return PTR_ERR(pp->base);
5540 
5541 	/* Get special SoC configurations */
5542 	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
5543 		pp->neta_armada3700 = true;
5544 	if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
5545 		pp->neta_armada3700 = true;
5546 		pp->neta_ac5 = true;
5547 	}
5548 
5549 	dev->irq = irq_of_parse_and_map(dn, 0);
5550 	if (dev->irq == 0)
5551 		return -EINVAL;
5552 
5553 	pp->clk = devm_clk_get(&pdev->dev, "core");
5554 	if (IS_ERR(pp->clk))
5555 		pp->clk = devm_clk_get(&pdev->dev, NULL);
5556 	if (IS_ERR(pp->clk)) {
5557 		err = PTR_ERR(pp->clk);
5558 		goto err_free_irq;
5559 	}
5560 
5561 	clk_prepare_enable(pp->clk);
5562 
5563 	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
5564 	if (!IS_ERR(pp->clk_bus))
5565 		clk_prepare_enable(pp->clk_bus);
5566 
5567 	pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
5568 
5569 	pp->phylink_config.dev = &dev->dev;
5570 	pp->phylink_config.type = PHYLINK_NETDEV;
5571 	pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
5572 		MAC_100 | MAC_1000FD | MAC_2500FD;
5573 
5574 	/* Setup EEE. Choose 250us idle. Only supported in SGMII modes. */
5575 	__set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces);
5576 	__set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces);
5577 	pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
5578 	pp->phylink_config.lpi_timer_default = 250;
5579 	pp->phylink_config.eee_enabled_default = true;
5580 
5581 	phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
5582 	__set_bit(PHY_INTERFACE_MODE_QSGMII,
5583 		  pp->phylink_config.supported_interfaces);
5584 	if (comphy) {
5585 		/* If a COMPHY is present, we can support any of the serdes
5586 		 * modes and switch between them.
5587 		 */
5588 		__set_bit(PHY_INTERFACE_MODE_SGMII,
5589 			  pp->phylink_config.supported_interfaces);
5590 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
5591 			  pp->phylink_config.supported_interfaces);
5592 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
5593 			  pp->phylink_config.supported_interfaces);
5594 	} else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
5595 		/* No COMPHY, with only 2500BASE-X mode supported */
5596 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
5597 			  pp->phylink_config.supported_interfaces);
5598 	} else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
5599 		   phy_mode == PHY_INTERFACE_MODE_SGMII) {
5600 		/* No COMPHY, we can switch between 1000BASE-X and SGMII */
5601 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
5602 			  pp->phylink_config.supported_interfaces);
5603 		__set_bit(PHY_INTERFACE_MODE_SGMII,
5604 			  pp->phylink_config.supported_interfaces);
5605 	}
5606 
5607 	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
5608 				 phy_mode, &mvneta_phylink_ops);
5609 	if (IS_ERR(phylink)) {
5610 		err = PTR_ERR(phylink);
5611 		goto err_clk;
5612 	}
5613 
5614 	pp->phylink = phylink;
5615 
5616 	/* Alloc per-cpu port structure */
5617 	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
5618 	if (!pp->ports) {
5619 		err = -ENOMEM;
5620 		goto err_free_phylink;
5621 	}
5622 
5623 	/* Alloc per-cpu stats */
5624 	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
5625 	if (!pp->stats) {
5626 		err = -ENOMEM;
5627 		goto err_free_ports;
5628 	}
5629 
5630 	err = of_get_ethdev_address(dn, dev);
5631 	if (!err) {
5632 		mac_from = "device tree";
5633 	} else {
5634 		mvneta_get_mac_addr(pp, hw_mac_addr);
5635 		if (is_valid_ether_addr(hw_mac_addr)) {
5636 			mac_from = "hardware";
5637 			eth_hw_addr_set(dev, hw_mac_addr);
5638 		} else {
5639 			mac_from = "random";
5640 			eth_hw_addr_random(dev);
5641 		}
5642 	}
5643 
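	/* The controller can only checksum TX frames up to a SoC-dependent
	 * size; above that limit, checksum offload (and TSO) is disabled via
	 * mvneta_fix_features(). The optional DT "tx-csum-limit" property may
	 * override the default, provided it stays within the hardware
	 * maximum.
	 */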
5644 	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
5645 		if (tx_csum_limit < 0 ||
5646 		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
5647 			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
5648 			dev_info(&pdev->dev,
5649 				 "Wrong TX csum limit in DT, set to %dB\n",
5650 				 MVNETA_TX_CSUM_DEF_SIZE);
5651 		}
5652 	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
5653 		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
5654 	} else {
5655 		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
5656 	}
5657 
5658 	pp->tx_csum_limit = tx_csum_limit;
5659 
5660 	pp->dram_target_info = mv_mbus_dram_info();
5661 	/* Armada3700 requires a default MBus window configuration to be
5662 	 * set, but without using a filled mbus_dram_target_info
5663 	 * structure.
5664 	 */
5665 	if (pp->dram_target_info || pp->neta_armada3700)
5666 		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5667 
5668 	pp->tx_ring_size = MVNETA_MAX_TXD;
5669 	pp->rx_ring_size = MVNETA_MAX_RXD;
5670 
5671 	pp->dev = dev;
5672 	SET_NETDEV_DEV(dev, &pdev->dev);
5673 
5674 	pp->id = global_port_id++;
5675 
5676 	/* Obtain access to BM resources if enabled and already initialized */
5677 	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
5678 	if (bm_node) {
5679 		pp->bm_priv = mvneta_bm_get(bm_node);
5680 		if (pp->bm_priv) {
5681 			err = mvneta_bm_port_init(pdev, pp);
5682 			if (err < 0) {
5683 				dev_info(&pdev->dev,
5684 					 "use SW buffer management\n");
5685 				mvneta_bm_put(pp->bm_priv);
5686 				pp->bm_priv = NULL;
5687 			}
5688 		}
5689 		/* Set the RX packet offset correction for platforms whose
5690 		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
5691 		 * platforms and 0B for 32-bit ones.
5692 		 */
5693 		pp->rx_offset_correction = max(0,
5694 					       NET_SKB_PAD -
5695 					       MVNETA_RX_PKT_OFFSET_CORRECTION);
5696 	}
5697 	of_node_put(bm_node);
5698 
5699 	/* sw buffer management */
5700 	if (!pp->bm_priv)
5701 		pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5702 
5703 	err = mvneta_init(&pdev->dev, pp);
5704 	if (err < 0)
5705 		goto err_netdev;
5706 
5707 	err = mvneta_port_power_up(pp, pp->phy_interface);
5708 	if (err < 0) {
5709 		dev_err(&pdev->dev, "can't power up port\n");
5710 		goto err_netdev;
5711 	}
5712 
5713 	/* The Armada3700 network controller does not support per-cpu
5714 	 * operation, so only a single NAPI instance should be initialized.
5715 	 */
5716 	if (pp->neta_armada3700) {
5717 		netif_napi_add(dev, &pp->napi, mvneta_poll);
5718 	} else {
5719 		for_each_present_cpu(cpu) {
5720 			struct mvneta_pcpu_port *port =
5721 				per_cpu_ptr(pp->ports, cpu);
5722 
5723 			netif_napi_add(dev, &port->napi, mvneta_poll);
5724 			port->pp = pp;
5725 		}
5726 	}
5727 
5728 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5729 			NETIF_F_TSO | NETIF_F_RXCSUM;
5730 	dev->hw_features |= dev->features;
5731 	dev->vlan_features |= dev->features;
5732 	if (!pp->bm_priv)
5733 		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
5734 				    NETDEV_XDP_ACT_REDIRECT |
5735 				    NETDEV_XDP_ACT_NDO_XMIT |
5736 				    NETDEV_XDP_ACT_RX_SG |
5737 				    NETDEV_XDP_ACT_NDO_XMIT_SG;
5738 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5739 	netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
5740 
5741 	/* MTU range: 68 - 9676 */
5742 	dev->min_mtu = ETH_MIN_MTU;
5743 	/* 9676 == 9700 - 20 (overhead), rounded down for 8-byte alignment */
5744 	dev->max_mtu = 9676;
5745 
5746 	err = register_netdev(dev);
5747 	if (err < 0) {
5748 		dev_err(&pdev->dev, "failed to register\n");
5749 		goto err_netdev;
5750 	}
5751 
5752 	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
5753 		    dev->dev_addr);
5754 
5755 	platform_set_drvdata(pdev, pp->dev);
5756 
5757 	return 0;
5758 
5759 err_netdev:
5760 	if (pp->bm_priv) {
5761 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5762 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5763 				       1 << pp->id);
5764 		mvneta_bm_put(pp->bm_priv);
5765 	}
5766 	free_percpu(pp->stats);
5767 err_free_ports:
5768 	free_percpu(pp->ports);
5769 err_free_phylink:
5770 	if (pp->phylink)
5771 		phylink_destroy(pp->phylink);
5772 err_clk:
5773 	clk_disable_unprepare(pp->clk_bus);
5774 	clk_disable_unprepare(pp->clk);
5775 err_free_irq:
5776 	irq_dispose_mapping(dev->irq);
5777 	return err;
5778 }
5779 
5780 /* Device removal routine */
5781 static void mvneta_remove(struct platform_device *pdev)
5782 {
5783 	struct net_device  *dev = platform_get_drvdata(pdev);
5784 	struct mvneta_port *pp = netdev_priv(dev);
5785 
5786 	unregister_netdev(dev);
5787 	clk_disable_unprepare(pp->clk_bus);
5788 	clk_disable_unprepare(pp->clk);
5789 	free_percpu(pp->ports);
5790 	free_percpu(pp->stats);
5791 	irq_dispose_mapping(dev->irq);
5792 	phylink_destroy(pp->phylink);
5793 
5794 	if (pp->bm_priv) {
5795 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5796 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5797 				       1 << pp->id);
5798 		mvneta_bm_put(pp->bm_priv);
5799 	}
5800 }
5801 
5802 #ifdef CONFIG_PM_SLEEP
5803 static int mvneta_suspend(struct device *device)
5804 {
5805 	int queue;
5806 	struct net_device *dev = dev_get_drvdata(device);
5807 	struct mvneta_port *pp = netdev_priv(dev);
5808 
5809 	if (!netif_running(dev))
5810 		goto clean_exit;
5811 
5812 	if (!pp->neta_armada3700) {
5813 		spin_lock(&pp->lock);
5814 		pp->is_stopped = true;
5815 		spin_unlock(&pp->lock);
5816 
5817 		cpuhp_state_remove_instance_nocalls(online_hpstate,
5818 						    &pp->node_online);
5819 		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
5820 						    &pp->node_dead);
5821 	}
5822 
5823 	rtnl_lock();
5824 	mvneta_stop_dev(pp);
5825 	rtnl_unlock();
5826 
5827 	for (queue = 0; queue < rxq_number; queue++) {
5828 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5829 
5830 		mvneta_rxq_drop_pkts(pp, rxq);
5831 	}
5832 
5833 	for (queue = 0; queue < txq_number; queue++) {
5834 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
5835 
5836 		mvneta_txq_hw_deinit(pp, txq);
5837 	}
5838 
5839 clean_exit:
5840 	netif_device_detach(dev);
5841 	clk_disable_unprepare(pp->clk_bus);
5842 	clk_disable_unprepare(pp->clk);
5843 
5844 	return 0;
5845 }
5846 
5847 static int mvneta_resume(struct device *device)
5848 {
5849 	struct platform_device *pdev = to_platform_device(device);
5850 	struct net_device *dev = dev_get_drvdata(device);
5851 	struct mvneta_port *pp = netdev_priv(dev);
5852 	int err, queue;
5853 
5854 	clk_prepare_enable(pp->clk);
5855 	if (!IS_ERR(pp->clk_bus))
5856 		clk_prepare_enable(pp->clk_bus);
5857 	if (pp->dram_target_info || pp->neta_armada3700)
5858 		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5859 	if (pp->bm_priv) {
5860 		err = mvneta_bm_port_init(pdev, pp);
5861 		if (err < 0) {
5862 			dev_info(&pdev->dev, "use SW buffer management\n");
5863 			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5864 			pp->bm_priv = NULL;
5865 		}
5866 	}
5867 	mvneta_defaults_set(pp);
5868 	err = mvneta_port_power_up(pp, pp->phy_interface);
5869 	if (err < 0) {
5870 		dev_err(device, "can't power up port\n");
5871 		return err;
5872 	}
5873 
5874 	netif_device_attach(dev);
5875 
5876 	if (!netif_running(dev))
5877 		return 0;
5878 
5879 	for (queue = 0; queue < rxq_number; queue++) {
5880 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5881 
5882 		rxq->next_desc_to_proc = 0;
5883 		mvneta_rxq_hw_init(pp, rxq);
5884 	}
5885 
5886 	for (queue = 0; queue < txq_number; queue++) {
5887 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
5888 
5889 		txq->next_desc_to_proc = 0;
5890 		mvneta_txq_hw_init(pp, txq);
5891 	}
5892 
5893 	if (!pp->neta_armada3700) {
5894 		spin_lock(&pp->lock);
5895 		pp->is_stopped = false;
5896 		spin_unlock(&pp->lock);
5897 		cpuhp_state_add_instance_nocalls(online_hpstate,
5898 						 &pp->node_online);
5899 		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
5900 						 &pp->node_dead);
5901 	}
5902 
5903 	rtnl_lock();
5904 	mvneta_start_dev(pp);
5905 	rtnl_unlock();
5906 	mvneta_set_rx_mode(dev);
5907 
5908 	return 0;
5909 }
5910 #endif
5911 
5912 static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
5913 
5914 static const struct of_device_id mvneta_match[] = {
5915 	{ .compatible = "marvell,armada-370-neta" },
5916 	{ .compatible = "marvell,armada-xp-neta" },
5917 	{ .compatible = "marvell,armada-3700-neta" },
5918 	{ .compatible = "marvell,armada-ac5-neta" },
5919 	{ }
5920 };
5921 MODULE_DEVICE_TABLE(of, mvneta_match);
5922 
5923 static struct platform_driver mvneta_driver = {
5924 	.probe = mvneta_probe,
5925 	.remove = mvneta_remove,
5926 	.driver = {
5927 		.name = MVNETA_DRIVER_NAME,
5928 		.of_match_table = mvneta_match,
5929 		.pm = &mvneta_pm_ops,
5930 	},
5931 };
5932 
5933 static int __init mvneta_driver_init(void)
5934 {
5935 	int ret;
5936 
5937 	BUILD_BUG_ON_NOT_POWER_OF_2(MVNETA_TSO_PER_PAGE);
5938 
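	/* Register two multi-instance CPU hotplug states: a dynamically
	 * allocated AP-online state whose callbacks rebalance queue/NAPI
	 * handling as CPUs come online or prepare to go down, and
	 * CPUHP_NET_MVNETA_DEAD, whose callback cleans up after a CPU has
	 * gone away. Each port attaches itself as an instance of both
	 * states (see e.g. mvneta_resume() above).
	 */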
5939 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
5940 				      mvneta_cpu_online,
5941 				      mvneta_cpu_down_prepare);
5942 	if (ret < 0)
5943 		goto out;
5944 	online_hpstate = ret;
5945 	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
5946 				      NULL, mvneta_cpu_dead);
5947 	if (ret)
5948 		goto err_dead;
5949 
5950 	ret = platform_driver_register(&mvneta_driver);
5951 	if (ret)
5952 		goto err;
5953 	return 0;
5954 
5955 err:
5956 	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
5957 err_dead:
5958 	cpuhp_remove_multi_state(online_hpstate);
5959 out:
5960 	return ret;
5961 }
5962 module_init(mvneta_driver_init);
5963 
5964 static void __exit mvneta_driver_exit(void)
5965 {
5966 	platform_driver_unregister(&mvneta_driver);
5967 	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
5968 	cpuhp_remove_multi_state(online_hpstate);
5969 }
5970 module_exit(mvneta_driver_exit);
5971 
5972 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
5973 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
5974 MODULE_LICENSE("GPL");
5975 
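/* Runtime-tunable module parameters: the number of RX/TX queues to use,
 * the default RX queue, and the rx_copybreak threshold below which
 * received frames are copied into a freshly allocated skb rather than
 * handed up in the original DMA buffer.
 */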
5976 module_param(rxq_number, int, 0444);
5977 module_param(txq_number, int, 0444);
5978 
5979 module_param(rxq_def, int, 0444);
5980 module_param(rx_copybreak, int, 0644);
5981