xref: /linux/drivers/net/ethernet/marvell/mvneta.c (revision f9c41a62bba3f3f7ef3541b2a025e3371bcbba97)
1 /*
2  * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
3  *
4  * Copyright (C) 2012 Marvell
5  *
6  * Rami Rosen <rosenr@marvell.com>
7  * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
8  *
9  * This file is licensed under the terms of the GNU General Public
10  * License version 2. This program is licensed "as is" without any
11  * warranty of any kind, whether express or implied.
12  */
13 
14 #include <linux/kernel.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/skbuff.h>
19 #include <linux/inetdevice.h>
20 #include <linux/mbus.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <net/ip.h>
24 #include <net/ipv6.h>
25 #include <linux/of.h>
26 #include <linux/of_irq.h>
27 #include <linux/of_mdio.h>
28 #include <linux/of_net.h>
29 #include <linux/of_address.h>
30 #include <linux/phy.h>
31 #include <linux/clk.h>
32 
33 /* Registers */
34 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
35 #define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(1)
36 #define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
37 #define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
38 #define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
39 #define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
40 #define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
41 #define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
42 #define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
43 #define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
44 #define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
45 #define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
46 #define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
47 #define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
48 #define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
49 #define MVNETA_PORT_RX_RESET                    0x1cc0
50 #define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
51 #define MVNETA_PHY_ADDR                         0x2000
52 #define      MVNETA_PHY_ADDR_MASK               0x1f
53 #define MVNETA_MBUS_RETRY                       0x2010
54 #define MVNETA_UNIT_INTR_CAUSE                  0x2080
55 #define MVNETA_UNIT_CONTROL                     0x20B0
56 #define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
57 #define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
58 #define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
59 #define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
60 #define MVNETA_BASE_ADDR_ENABLE                 0x2290
61 #define MVNETA_PORT_CONFIG                      0x2400
62 #define      MVNETA_UNI_PROMISC_MODE            BIT(0)
63 #define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
64 #define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
65 #define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
66 #define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
67 #define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
68 #define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
69 #define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
70 #define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
71 						 MVNETA_DEF_RXQ_ARP(q)	 | \
72 						 MVNETA_DEF_RXQ_TCP(q)	 | \
73 						 MVNETA_DEF_RXQ_UDP(q)	 | \
74 						 MVNETA_DEF_RXQ_BPDU(q)	 | \
75 						 MVNETA_TX_UNSET_ERR_SUM | \
76 						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
77 #define MVNETA_PORT_CONFIG_EXTEND                0x2404
78 #define MVNETA_MAC_ADDR_LOW                      0x2414
79 #define MVNETA_MAC_ADDR_HIGH                     0x2418
80 #define MVNETA_SDMA_CONFIG                       0x241c
81 #define      MVNETA_SDMA_BRST_SIZE_16            4
82 #define      MVNETA_NO_DESC_SWAP                 0x0
83 #define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
84 #define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
85 #define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
86 #define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
87 #define MVNETA_PORT_STATUS                       0x2444
88 #define      MVNETA_TX_IN_PRGRS                  BIT(1)
89 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
90 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
91 #define MVNETA_TYPE_PRIO                         0x24bc
92 #define      MVNETA_FORCE_UNI                    BIT(21)
93 #define MVNETA_TXQ_CMD_1                         0x24e4
94 #define MVNETA_TXQ_CMD                           0x2448
95 #define      MVNETA_TXQ_DISABLE_SHIFT            8
96 #define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
97 #define MVNETA_ACC_MODE                          0x2500
98 #define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
99 #define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
100 #define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
101 #define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
102 #define MVNETA_INTR_NEW_CAUSE                    0x25a0
103 #define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << (nr_rxqs)) - 1) << 8)
104 #define MVNETA_INTR_NEW_MASK                     0x25a4
105 #define MVNETA_INTR_OLD_CAUSE                    0x25a8
106 #define MVNETA_INTR_OLD_MASK                     0x25ac
107 #define MVNETA_INTR_MISC_CAUSE                   0x25b0
108 #define MVNETA_INTR_MISC_MASK                    0x25b4
109 #define MVNETA_INTR_ENABLE                       0x25b8
110 #define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
111 #define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0xff000000
112 #define MVNETA_RXQ_CMD                           0x2680
113 #define      MVNETA_RXQ_DISABLE_SHIFT            8
114 #define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
115 #define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
116 #define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
117 #define MVNETA_GMAC_CTRL_0                       0x2c00
118 #define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
119 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
120 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
121 #define MVNETA_GMAC_CTRL_2                       0x2c08
122 #define      MVNETA_GMAC2_PSC_ENABLE             BIT(3)
123 #define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
124 #define      MVNETA_GMAC2_PORT_RESET             BIT(6)
125 #define MVNETA_GMAC_STATUS                       0x2c10
126 #define      MVNETA_GMAC_LINK_UP                 BIT(0)
127 #define      MVNETA_GMAC_SPEED_1000              BIT(1)
128 #define      MVNETA_GMAC_SPEED_100               BIT(2)
129 #define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
130 #define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
131 #define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
132 #define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
133 #define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
134 #define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
135 #define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
136 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
137 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
138 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
139 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
140 #define MVNETA_MIB_COUNTERS_BASE                 0x3080
141 #define      MVNETA_MIB_LATE_COLLISION           0x7c
142 #define MVNETA_DA_FILT_SPEC_MCAST                0x3400
143 #define MVNETA_DA_FILT_OTH_MCAST                 0x3500
144 #define MVNETA_DA_FILT_UCAST_BASE                0x3600
145 #define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
146 #define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
147 #define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
148 #define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
149 #define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
150 #define      MVNETA_TXQ_DEC_SENT_SHIFT           16
151 #define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
152 #define      MVNETA_TXQ_SENT_DESC_SHIFT          16
153 #define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
154 #define MVNETA_PORT_TX_RESET                     0x3cf0
155 #define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
156 #define MVNETA_TX_MTU                            0x3e0c
157 #define MVNETA_TX_TOKEN_SIZE                     0x3e14
158 #define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
159 #define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
160 #define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
161 
162 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff
163 
164 /* Descriptor ring Macros */
165 #define MVNETA_QUEUE_NEXT_DESC(q, index)	\
166 	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
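/* Illustrative example: with a 128-entry ring, last_desc is 127, so an
 * index of 126 advances to 127 while an index of 127 wraps back to 0.
 */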
167 
168 /* Various constants */
169 
170 /* Coalescing */
171 #define MVNETA_TXDONE_COAL_PKTS		16
172 #define MVNETA_RX_COAL_PKTS		32
173 #define MVNETA_RX_COAL_USEC		100
174 
175 /* Timer */
176 #define MVNETA_TX_DONE_TIMER_PERIOD	10
177 
178 /* Napi polling weight */
179 #define MVNETA_RX_POLL_WEIGHT		64
180 
181 /* The two-byte Marvell header. It either contains a special value
182  * used by Marvell switches when a specific hardware mode is enabled
183  * (not supported by this driver), or is automatically filled with
184  * zeroes on the RX side. Since those two bytes sit in front of the
185  * 14-byte Ethernet header (2 + 14 = 16 bytes), they automatically
186  * align the IP header on a 4-byte boundary: the hardware skips them
187  * on its own.
188  */
189 #define MVNETA_MH_SIZE			2
190 
191 #define MVNETA_VLAN_TAG_LEN             4
192 
193 #define MVNETA_CPU_D_CACHE_LINE_SIZE    32
194 #define MVNETA_TX_CSUM_MAX_SIZE		9800
195 #define MVNETA_ACC_MODE_EXT		1
196 
197 /* Timeout constants */
198 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
199 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
200 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000
201 
202 #define MVNETA_TX_MTU_MAX		0x3ffff
203 
204 /* Max number of Rx descriptors */
205 #define MVNETA_MAX_RXD 128
206 
207 /* Max number of Tx descriptors */
208 #define MVNETA_MAX_TXD 532
209 
210 /* descriptor aligned size */
211 #define MVNETA_DESC_ALIGNED_SIZE	32
212 
213 #define MVNETA_RX_PKT_SIZE(mtu) \
214 	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
215 	      ETH_HLEN + ETH_FCS_LEN,			     \
216 	      MVNETA_CPU_D_CACHE_LINE_SIZE)
217 
218 #define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
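/* Worked example (standard 1500-byte MTU, constants as defined above):
 * 1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1524,
 * which ALIGN() rounds up to the 32-byte cache line: 1536 bytes.
 */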
219 
220 struct mvneta_stats {
221 	struct	u64_stats_sync syncp;
222 	u64	packets;
223 	u64	bytes;
224 };
225 
226 struct mvneta_port {
227 	int pkt_size;
228 	void __iomem *base;
229 	struct mvneta_rx_queue *rxqs;
230 	struct mvneta_tx_queue *txqs;
231 	struct timer_list tx_done_timer;
232 	struct net_device *dev;
233 
234 	u32 cause_rx_tx;
235 	struct napi_struct napi;
236 
237 	/* Flags */
238 	unsigned long flags;
239 #define MVNETA_F_TX_DONE_TIMER_BIT  0
240 
241 	/* Napi weight */
242 	int weight;
243 
244 	/* Core clock */
245 	struct clk *clk;
246 	u8 mcast_count[256];
247 	u16 tx_ring_size;
248 	u16 rx_ring_size;
249 	struct mvneta_stats tx_stats;
250 	struct mvneta_stats rx_stats;
251 
252 	struct mii_bus *mii_bus;
253 	struct phy_device *phy_dev;
254 	phy_interface_t phy_interface;
255 	struct device_node *phy_node;
256 	unsigned int link;
257 	unsigned int duplex;
258 	unsigned int speed;
259 };
260 
261 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
262  * layout of the transmit and reception DMA descriptors, and their
263  * layout is therefore defined by the hardware design
264  */
265 struct mvneta_tx_desc {
266 	u32  command;		/* Options used by HW for packet transmitting.*/
267 #define MVNETA_TX_L3_OFF_SHIFT	0
268 #define MVNETA_TX_IP_HLEN_SHIFT	8
269 #define MVNETA_TX_L4_UDP	BIT(16)
270 #define MVNETA_TX_L3_IP6	BIT(17)
271 #define MVNETA_TXD_IP_CSUM	BIT(18)
272 #define MVNETA_TXD_Z_PAD	BIT(19)
273 #define MVNETA_TXD_L_DESC	BIT(20)
274 #define MVNETA_TXD_F_DESC	BIT(21)
275 #define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
276 				 MVNETA_TXD_L_DESC | \
277 				 MVNETA_TXD_F_DESC)
278 #define MVNETA_TX_L4_CSUM_FULL	BIT(30)
279 #define MVNETA_TX_L4_CSUM_NOT	BIT(31)
280 
281 	u16  reserverd1;	/* csum_l4 (for future use)		*/
282 	u16  data_size;		/* Data size of transmitted packet in bytes */
283 	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
284 	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
285 	u32  reserved3[4];	/* Reserved - (for future use)		*/
286 };
287 
288 struct mvneta_rx_desc {
289 	u32  status;		/* Info about received packet		*/
290 #define MVNETA_RXD_ERR_CRC		0x0
291 #define MVNETA_RXD_ERR_SUMMARY		BIT(16)
292 #define MVNETA_RXD_ERR_OVERRUN		BIT(17)
293 #define MVNETA_RXD_ERR_LEN		BIT(18)
294 #define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
295 #define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
296 #define MVNETA_RXD_L3_IP4		BIT(25)
297 #define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
298 #define MVNETA_RXD_L4_CSUM_OK		BIT(30)
299 
300 	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
301 	u16  data_size;		/* Size of received packet in bytes	*/
302 	u32  buf_phys_addr;	/* Physical address of the buffer	*/
303 	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
304 	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
305 	u16  reserved3;		/* prefetch_cmd, for future use		*/
306 	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
307 	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
308 	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
309 };
310 
311 struct mvneta_tx_queue {
312 	/* Number of this TX queue, in the range 0-7 */
313 	u8 id;
314 
315 	/* Number of TX DMA descriptors in the descriptor ring */
316 	int size;
317 
318 	/* Number of currently used TX DMA descriptor in the
319 	 * descriptor ring
320 	 */
321 	int count;
322 
323 	/* Array of transmitted skb */
324 	struct sk_buff **tx_skb;
325 
326 	/* Index of last TX DMA descriptor that was inserted */
327 	int txq_put_index;
328 
329 	/* Index of the TX DMA descriptor to be cleaned up */
330 	int txq_get_index;
331 
332 	u32 done_pkts_coal;
333 
334 	/* Virtual address of the TX DMA descriptors array */
335 	struct mvneta_tx_desc *descs;
336 
337 	/* DMA address of the TX DMA descriptors array */
338 	dma_addr_t descs_phys;
339 
340 	/* Index of the last TX DMA descriptor */
341 	int last_desc;
342 
343 	/* Index of the next TX DMA descriptor to process */
344 	int next_desc_to_proc;
345 };
346 
347 struct mvneta_rx_queue {
348 	/* rx queue number, in the range 0-7 */
349 	u8 id;
350 
351 	/* num of rx descriptors in the rx descriptor ring */
352 	int size;
353 
354 	/* counter of times when mvneta_refill() failed */
355 	int missed;
356 
357 	u32 pkts_coal;
358 	u32 time_coal;
359 
360 	/* Virtual address of the RX DMA descriptors array */
361 	struct mvneta_rx_desc *descs;
362 
363 	/* DMA address of the RX DMA descriptors array */
364 	dma_addr_t descs_phys;
365 
366 	/* Index of the last RX DMA descriptor */
367 	int last_desc;
368 
369 	/* Index of the next RX DMA descriptor to process */
370 	int next_desc_to_proc;
371 };
372 
373 static int rxq_number = 8;
374 static int txq_number = 8;
375 
376 static int rxq_def;
377 static int txq_def;
378 
379 #define MVNETA_DRIVER_NAME "mvneta"
380 #define MVNETA_DRIVER_VERSION "1.0"
381 
382 /* Utility/helper methods */
383 
384 /* Write helper method */
385 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
386 {
387 	writel(data, pp->base + offset);
388 }
389 
390 /* Read helper method */
391 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
392 {
393 	return readl(pp->base + offset);
394 }
395 
396 /* Increment txq get counter */
397 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
398 {
399 	txq->txq_get_index++;
400 	if (txq->txq_get_index == txq->size)
401 		txq->txq_get_index = 0;
402 }
403 
404 /* Increment txq put counter */
405 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
406 {
407 	txq->txq_put_index++;
408 	if (txq->txq_put_index == txq->size)
409 		txq->txq_put_index = 0;
410 }
411 
412 
413 /* Clear all MIB counters */
414 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
415 {
416 	int i;
417 	u32 dummy;
418 
419 	/* Perform dummy reads from MIB counters */
420 	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
421 		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
422 }
423 
424 /* Get System Network Statistics */
425 static struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
426 						    struct rtnl_link_stats64 *stats)
427 {
428 	struct mvneta_port *pp = netdev_priv(dev);
429 	unsigned int start;
430 
431 	memset(stats, 0, sizeof(struct rtnl_link_stats64));
432 
433 	do {
434 		start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
435 		stats->rx_packets = pp->rx_stats.packets;
436 		stats->rx_bytes	= pp->rx_stats.bytes;
437 	} while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
438 
439 
440 	do {
441 		start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
442 		stats->tx_packets = pp->tx_stats.packets;
443 		stats->tx_bytes	= pp->tx_stats.bytes;
444 	} while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
445 
446 	stats->rx_errors	= dev->stats.rx_errors;
447 	stats->rx_dropped	= dev->stats.rx_dropped;
448 
449 	stats->tx_dropped	= dev->stats.tx_dropped;
450 
451 	return stats;
452 }
453 
454 /* Rx descriptors helper methods */
455 
456 /* Checks whether the given RX descriptor is both the first and the
457  * last descriptor for the RX packet. Each RX packet is currently
458  * received through a single RX descriptor, so an RX descriptor
459  * without both its first and last bits set indicates an error.
460  */
461 static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
462 {
463 	return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
464 		MVNETA_RXD_FIRST_LAST_DESC;
465 }
466 
467 /* Add number of descriptors ready to receive new packets */
468 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
469 					  struct mvneta_rx_queue *rxq,
470 					  int ndescs)
471 {
472 	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
473 	 * be added at once
474 	 */
475 	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
476 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
477 			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
478 			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
479 		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
480 	}
481 
482 	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
483 		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
484 }
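/* Example of the chunking above: refilling 600 descriptors issues three
 * status-update writes: 255, 255 and finally 90 (outside the loop).
 */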
485 
486 /* Get number of RX descriptors occupied by received packets */
487 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
488 					struct mvneta_rx_queue *rxq)
489 {
490 	u32 val;
491 
492 	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
493 	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
494 }
495 
496 /* Update the number of processed RX descriptors; called upon return
497  * from the RX path or from mvneta_rxq_drop_pkts().
498  */
499 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
500 				       struct mvneta_rx_queue *rxq,
501 				       int rx_done, int rx_filled)
502 {
503 	u32 val;
504 
505 	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
506 		val = rx_done |
507 		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
508 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
509 		return;
510 	}
511 
512 	/* Only 255 descriptors can be added at once */
513 	while ((rx_done > 0) || (rx_filled > 0)) {
514 		if (rx_done <= 0xff) {
515 			val = rx_done;
516 			rx_done = 0;
517 		} else {
518 			val = 0xff;
519 			rx_done -= 0xff;
520 		}
521 		if (rx_filled <= 0xff) {
522 			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
523 			rx_filled = 0;
524 		} else {
525 			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
526 			rx_filled -= 0xff;
527 		}
528 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
529 	}
530 }
531 
532 /* Get pointer to next RX descriptor to be processed by SW */
533 static struct mvneta_rx_desc *
534 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
535 {
536 	int rx_desc = rxq->next_desc_to_proc;
537 
538 	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
539 	return rxq->descs + rx_desc;
540 }
541 
542 /* Change maximum receive size of the port. */
543 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
544 {
545 	u32 val;
546 
547 	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
548 	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
549 	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
550 		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
551 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
552 }
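/* The max-RX-size field holds half the frame size less the Marvell
 * header: e.g. max_rx_size = 1536 programs (1536 - 2) / 2 = 767.
 */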
553 
554 
555 /* Set rx queue offset */
556 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
557 				  struct mvneta_rx_queue *rxq,
558 				  int offset)
559 {
560 	u32 val;
561 
562 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
563 	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
564 
565 	/* Offset is programmed in units of 8 bytes, hence the >> 3 below */
566 	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
567 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
568 }
569 
570 
571 /* Tx descriptors helper methods */
572 
573 /* Update HW with number of TX descriptors to be sent */
574 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
575 				     struct mvneta_tx_queue *txq,
576 				     int pend_desc)
577 {
578 	u32 val;
579 
580 	/* Only 255 descriptors can be added at once; the caller is
581 	 * assumed to process TX descriptors in quanta of less than 256
582 	 */
583 	val = pend_desc;
584 	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
585 }
586 
587 /* Get pointer to next TX descriptor to be processed (send) by HW */
588 static struct mvneta_tx_desc *
589 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
590 {
591 	int tx_desc = txq->next_desc_to_proc;
592 
593 	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
594 	return txq->descs + tx_desc;
595 }
596 
597 /* Release the last allocated TX descriptor. Useful to handle DMA
598  * mapping failures in the TX path.
599  */
600 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
601 {
602 	if (txq->next_desc_to_proc == 0)
603 		txq->next_desc_to_proc = txq->last_desc - 1;
604 	else
605 		txq->next_desc_to_proc--;
606 }
607 
608 /* Set rxq buf size */
609 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
610 				    struct mvneta_rx_queue *rxq,
611 				    int buf_size)
612 {
613 	u32 val;
614 
615 	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
616 
617 	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
618 	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
619 
620 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
621 }
622 
623 /* Disable buffer management (BM) */
624 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
625 				  struct mvneta_rx_queue *rxq)
626 {
627 	u32 val;
628 
629 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
630 	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
631 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
632 }
633 
634 
635 
636 /* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
637 static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
638 {
639 	u32  val;
640 
641 	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
642 
643 	if (enable)
644 		val |= MVNETA_GMAC2_PORT_RGMII;
645 	else
646 		val &= ~MVNETA_GMAC2_PORT_RGMII;
647 
648 	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
649 }
650 
651 /* Config SGMII port */
652 static void mvneta_port_sgmii_config(struct mvneta_port *pp)
653 {
654 	u32 val;
655 
656 	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
657 	val |= MVNETA_GMAC2_PSC_ENABLE;
658 	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
659 }
660 
661 /* Start the Ethernet port RX and TX activity */
662 static void mvneta_port_up(struct mvneta_port *pp)
663 {
664 	int queue;
665 	u32 q_map;
666 
667 	/* Enable all initialized TXs. */
668 	mvneta_mib_counters_clear(pp);
669 	q_map = 0;
670 	for (queue = 0; queue < txq_number; queue++) {
671 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
672 		if (txq->descs != NULL)
673 			q_map |= (1 << queue);
674 	}
675 	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
676 
677 	/* Enable all initialized RXQs. */
678 	q_map = 0;
679 	for (queue = 0; queue < rxq_number; queue++) {
680 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
681 		if (rxq->descs != NULL)
682 			q_map |= (1 << queue);
683 	}
684 
685 	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
686 }
687 
688 /* Stop the Ethernet port activity */
689 static void mvneta_port_down(struct mvneta_port *pp)
690 {
691 	u32 val;
692 	int count;
693 
694 	/* Stop Rx port activity. Check port Rx activity. */
695 	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
696 
697 	/* Issue stop command for active channels only */
698 	if (val != 0)
699 		mvreg_write(pp, MVNETA_RXQ_CMD,
700 			    val << MVNETA_RXQ_DISABLE_SHIFT);
701 
702 	/* Wait for all Rx activity to terminate. */
703 	count = 0;
704 	do {
705 		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
706 			netdev_warn(pp->dev,
707 				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
708 				    val);
709 			break;
710 		}
711 		mdelay(1);
712 
713 		val = mvreg_read(pp, MVNETA_RXQ_CMD);
714 	} while (val & 0xff);
715 
716 	/* Stop Tx port activity. Check port Tx activity. Issue stop
717 	 * command for active channels only
718 	 */
719 	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
720 
721 	if (val != 0)
722 		mvreg_write(pp, MVNETA_TXQ_CMD,
723 			    (val << MVNETA_TXQ_DISABLE_SHIFT));
724 
725 	/* Wait for all Tx activity to terminate. */
726 	count = 0;
727 	do {
728 		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
729 			netdev_warn(pp->dev,
730 				    "TIMEOUT for TX stopped status=0x%08x\n",
731 				    val);
732 			break;
733 		}
734 		mdelay(1);
735 
736 		/* Check TX Command reg that all Txqs are stopped */
737 		val = mvreg_read(pp, MVNETA_TXQ_CMD);
738 
739 	} while (val & 0xff);
740 
741 	/* Double check to verify that TX FIFO is empty */
742 	count = 0;
743 	do {
744 		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
745 			netdev_warn(pp->dev,
746 				    "TX FIFO empty timeout status=0x%08x\n",
747 				    val);
748 			break;
749 		}
750 		mdelay(1);
751 
752 		val = mvreg_read(pp, MVNETA_PORT_STATUS);
753 	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
754 		 (val & MVNETA_TX_IN_PRGRS));
755 
756 	udelay(200);
757 }
758 
759 /* Enable the port by setting the port enable bit of the MAC control register */
760 static void mvneta_port_enable(struct mvneta_port *pp)
761 {
762 	u32 val;
763 
764 	/* Enable port */
765 	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
766 	val |= MVNETA_GMAC0_PORT_ENABLE;
767 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
768 }
769 
770 /* Disable the port and wait for about 200 usec before returning */
771 static void mvneta_port_disable(struct mvneta_port *pp)
772 {
773 	u32 val;
774 
775 	/* Reset the Enable bit in the Serial Control Register */
776 	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
777 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
778 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
779 
780 	udelay(200);
781 }
782 
783 /* Multicast tables methods */
784 
785 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
786 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
787 {
788 	int offset;
789 	u32 val;
790 
791 	if (queue == -1) {
792 		val = 0;
793 	} else {
794 		val = 0x1 | (queue << 1);
795 		val |= (val << 24) | (val << 16) | (val << 8);
796 	}
797 
798 	for (offset = 0; offset <= 0xc; offset += 4)
799 		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
800 }
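/* Example: queue = 1 yields the per-entry byte 0x1 | (1 << 1) = 0x03,
 * replicated into 0x03030303 so every table entry accepts frames and
 * steers them to RXQ 1.
 */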
801 
802 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
803 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
804 {
805 	int offset;
806 	u32 val;
807 
808 	if (queue == -1) {
809 		val = 0;
810 	} else {
811 		val = 0x1 | (queue << 1);
812 		val |= (val << 24) | (val << 16) | (val << 8);
813 	}
814 
815 	for (offset = 0; offset <= 0xfc; offset += 4)
816 		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
817 
818 }
819 
820 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
821 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
822 {
823 	int offset;
824 	u32 val;
825 
826 	if (queue == -1) {
827 		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
828 		val = 0;
829 	} else {
830 		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
831 		val = 0x1 | (queue << 1);
832 		val |= (val << 24) | (val << 16) | (val << 8);
833 	}
834 
835 	for (offset = 0; offset <= 0xfc; offset += 4)
836 		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
837 }
838 
839 /* This method applies default settings to the NETA port:
840  *	Clears interrupt Cause and Mask registers.
841  *	Clears all MAC tables.
842  *	Sets defaults to all registers.
843  *	Resets RX and TX descriptor rings.
844  *	Resets PHY.
845  * This method can be called after mvneta_port_down() to return the port
846  *	settings to defaults.
847  */
848 static void mvneta_defaults_set(struct mvneta_port *pp)
849 {
850 	int cpu;
851 	int queue;
852 	u32 val;
853 
854 	/* Clear all Cause registers */
855 	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
856 	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
857 	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
858 
859 	/* Mask all interrupts */
860 	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
861 	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
862 	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
863 	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
864 
865 	/* Enable MBUS Retry bit16 */
866 	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
867 
868 	/* Set CPU queue access map - all CPUs have access to all RX
869 	 * queues and to all TX queues
870 	 */
871 	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
872 		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
873 			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
874 			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
875 
876 	/* Reset RX and TX DMAs */
877 	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
878 	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
879 
880 	/* Disable Legacy WRR, Disable EJP, Release from reset */
881 	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
882 	for (queue = 0; queue < txq_number; queue++) {
883 		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
884 		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
885 	}
886 
887 	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
888 	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
889 
890 	/* Set Port Acceleration Mode */
891 	val = MVNETA_ACC_MODE_EXT;
892 	mvreg_write(pp, MVNETA_ACC_MODE, val);
893 
894 	/* Update val of portCfg register accordingly with all RxQueue types */
895 	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
896 	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
897 
898 	val = 0;
899 	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
900 	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
901 
902 	/* Build PORT_SDMA_CONFIG_REG */
903 	val = 0;
904 
905 	/* Default burst size */
906 	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
907 	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
908 
909 	val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
910 		MVNETA_NO_DESC_SWAP);
911 
912 	/* Assign port SDMA configuration */
913 	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
914 
915 	mvneta_set_ucast_table(pp, -1);
916 	mvneta_set_special_mcast_table(pp, -1);
917 	mvneta_set_other_mcast_table(pp, -1);
918 
919 	/* Set port interrupt enable register - default enable all */
920 	mvreg_write(pp, MVNETA_INTR_ENABLE,
921 		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
922 		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
923 }
924 
925 /* Set max sizes for tx queues */
926 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
927 
928 {
929 	u32 val, size, mtu;
930 	int queue;
931 
932 	mtu = max_tx_size * 8;
933 	if (mtu > MVNETA_TX_MTU_MAX)
934 		mtu = MVNETA_TX_MTU_MAX;
935 
936 	/* Set MTU */
937 	val = mvreg_read(pp, MVNETA_TX_MTU);
938 	val &= ~MVNETA_TX_MTU_MAX;
939 	val |= mtu;
940 	mvreg_write(pp, MVNETA_TX_MTU, val);
941 
942 	/* The TX token size and all TXQ token sizes must be larger than the MTU */
943 	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
944 
945 	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
946 	if (size < mtu) {
947 		size = mtu;
948 		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
949 		val |= size;
950 		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
951 	}
952 	for (queue = 0; queue < txq_number; queue++) {
953 		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
954 
955 		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
956 		if (size < mtu) {
957 			size = mtu;
958 			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
959 			val |= size;
960 			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
961 		}
962 	}
963 }
964 
965 /* Set unicast address */
966 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
967 				  int queue)
968 {
969 	unsigned int unicast_reg;
970 	unsigned int tbl_offset;
971 	unsigned int reg_offset;
972 
973 	/* Locate the Unicast table entry */
974 	last_nibble = (0xf & last_nibble);
975 
976 	/* offset from unicast tbl base */
977 	tbl_offset = (last_nibble / 4) * 4;
978 
979 	/* offset within the above reg  */
980 	reg_offset = last_nibble % 4;
981 
982 	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
983 
984 	if (queue == -1) {
985 		/* Clear accepts frame bit at specified unicast DA tbl entry */
986 		unicast_reg &= ~(0xff << (8 * reg_offset));
987 	} else {
988 		unicast_reg &= ~(0xff << (8 * reg_offset));
989 		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
990 	}
991 
992 	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
993 }
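/* Hypothetical example: a MAC ending in 0xab has last nibble 0xb, so
 * tbl_offset = (11 / 4) * 4 = 8 and reg_offset = 3; for queue 2 the
 * byte 0x01 | (2 << 1) = 0x05 is written into bits 31:24 of that
 * register.
 */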
994 
995 /* Set mac address */
996 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
997 				int queue)
998 {
999 	unsigned int mac_h;
1000 	unsigned int mac_l;
1001 
1002 	if (queue != -1) {
1003 		mac_l = (addr[4] << 8) | (addr[5]);
1004 		mac_h = (addr[0] << 24) | (addr[1] << 16) |
1005 			(addr[2] << 8) | (addr[3] << 0);
1006 
1007 		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1008 		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1009 	}
1010 
1011 	/* Accept frames of this address */
1012 	mvneta_set_ucast_addr(pp, addr[5], queue);
1013 }
1014 
1015 /* Set the number of packets that will be received before an RX
1016  * interrupt is generated by the HW.
1017  */
1018 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1019 				    struct mvneta_rx_queue *rxq, u32 value)
1020 {
1021 	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1022 		    value | MVNETA_RXQ_NON_OCCUPIED(0));
1023 	rxq->pkts_coal = value;
1024 }
1025 
1026 /* Set the time delay, in usec, before an RX interrupt is generated
1027  * by the HW.
1028  */
1029 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1030 				    struct mvneta_rx_queue *rxq, u32 value)
1031 {
1032 	u32 val;
1033 	unsigned long clk_rate;
1034 
1035 	clk_rate = clk_get_rate(pp->clk);
1036 	val = (clk_rate / 1000000) * value;
1037 
1038 	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1039 	rxq->time_coal = value;
1040 }
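/* Worked example with an assumed (illustrative) 250 MHz core clock:
 * value = 100 usec programs (250000000 / 1000000) * 100 = 25000 ticks.
 */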
1041 
1042 /* Set threshold for TX_DONE pkts coalescing */
1043 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1044 					 struct mvneta_tx_queue *txq, u32 value)
1045 {
1046 	u32 val;
1047 
1048 	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1049 
1050 	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1051 	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1052 
1053 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1054 
1055 	txq->done_pkts_coal = value;
1056 }
1057 
1058 /* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
1059 static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
1060 {
1061 	if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
1062 		pp->tx_done_timer.expires = jiffies +
1063 			msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
1064 		add_timer(&pp->tx_done_timer);
1065 	}
1066 }
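/* Note that test_and_set_bit() makes this idempotent: only the caller
 * that flips the bit from 0 to 1 arms the timer, so concurrent callers
 * cannot double-arm it.
 */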
1067 
1068 
1069 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1070 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1071 				u32 phys_addr, u32 cookie)
1072 {
1073 	rx_desc->buf_cookie = cookie;
1074 	rx_desc->buf_phys_addr = phys_addr;
1075 }
1076 
1077 /* Decrement sent descriptors counter */
1078 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1079 				     struct mvneta_tx_queue *txq,
1080 				     int sent_desc)
1081 {
1082 	u32 val;
1083 
1084 	/* Only 255 TX descriptors can be updated at once */
1085 	while (sent_desc > 0xff) {
1086 		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1087 		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1088 		sent_desc = sent_desc - 0xff;
1089 	}
1090 
1091 	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1092 	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1093 }
1094 
1095 /* Get number of TX descriptors already sent by HW */
1096 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1097 					struct mvneta_tx_queue *txq)
1098 {
1099 	u32 val;
1100 	int sent_desc;
1101 
1102 	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1103 	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1104 		MVNETA_TXQ_SENT_DESC_SHIFT;
1105 
1106 	return sent_desc;
1107 }
1108 
1109 /* Get number of sent descriptors and decrement counter.
1110  *  The number of sent descriptors is returned.
1111  */
1112 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1113 				     struct mvneta_tx_queue *txq)
1114 {
1115 	int sent_desc;
1116 
1117 	/* Get number of sent descriptors */
1118 	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1119 
1120 	/* Decrement sent descriptors counter */
1121 	if (sent_desc)
1122 		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1123 
1124 	return sent_desc;
1125 }
1126 
1127 /* Set the TXQ descriptor fields relevant for checksum calculation */
1128 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1129 				int ip_hdr_len, int l4_proto)
1130 {
1131 	u32 command;
1132 
1133 	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1134 	 * G_L4_chk, L4_type; required only for checksum
1135 	 * calculation
1136 	 */
1137 	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
1138 	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1139 
1140 	if (l3_proto == swab16(ETH_P_IP))
1141 		command |= MVNETA_TXD_IP_CSUM;
1142 	else
1143 		command |= MVNETA_TX_L3_IP6;
1144 
1145 	if (l4_proto == IPPROTO_TCP)
1146 		command |=  MVNETA_TX_L4_CSUM_FULL;
1147 	else if (l4_proto == IPPROTO_UDP)
1148 		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1149 	else
1150 		command |= MVNETA_TX_L4_CSUM_NOT;
1151 
1152 	return command;
1153 }
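/* Hypothetical worked example: a TCP over IPv4 frame with the L3 header
 * at offset 14 and a 5-word (20-byte) IP header produces
 * command = 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 * The swab16(ETH_P_IP) comparison relies on these SoCs being
 * little-endian, where it equals htons(ETH_P_IP) and thus matches the
 * network byte order of skb->protocol.
 */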
1154 
1155 
1156 /* Display more error info */
1157 static void mvneta_rx_error(struct mvneta_port *pp,
1158 			    struct mvneta_rx_desc *rx_desc)
1159 {
1160 	u32 status = rx_desc->status;
1161 
1162 	if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
1163 		netdev_err(pp->dev,
1164 			   "bad rx status %08x (buffer oversize), size=%d\n",
1165 			   rx_desc->status, rx_desc->data_size);
1166 		return;
1167 	}
1168 
1169 	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1170 	case MVNETA_RXD_ERR_CRC:
1171 		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1172 			   status, rx_desc->data_size);
1173 		break;
1174 	case MVNETA_RXD_ERR_OVERRUN:
1175 		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1176 			   status, rx_desc->data_size);
1177 		break;
1178 	case MVNETA_RXD_ERR_LEN:
1179 		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1180 			   status, rx_desc->data_size);
1181 		break;
1182 	case MVNETA_RXD_ERR_RESOURCE:
1183 		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1184 			   status, rx_desc->data_size);
1185 		break;
1186 	}
1187 }
1188 
1189 /* Handle RX checksum offload */
1190 static void mvneta_rx_csum(struct mvneta_port *pp,
1191 			   struct mvneta_rx_desc *rx_desc,
1192 			   struct sk_buff *skb)
1193 {
1194 	if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
1195 	    (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
1196 		skb->csum = 0;
1197 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1198 		return;
1199 	}
1200 
1201 	skb->ip_summed = CHECKSUM_NONE;
1202 }
1203 
1204 /* Return tx queue pointer (find last set bit) according to causeTxDone reg */
1205 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1206 						     u32 cause)
1207 {
1208 	int queue = fls(cause) - 1;
1209 
1210 	return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
1211 }
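/* Example: cause = 0x14 (TXQs 2 and 4 pending) gives fls() = 5, so the
 * highest pending queue, TXQ 4, is handled first.
 */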
1212 
1213 /* Free tx queue skbuffs */
1214 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1215 				 struct mvneta_tx_queue *txq, int num)
1216 {
1217 	int i;
1218 
1219 	for (i = 0; i < num; i++) {
1220 		struct mvneta_tx_desc *tx_desc = txq->descs +
1221 			txq->txq_get_index;
1222 		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1223 
1224 		mvneta_txq_inc_get(txq);
1225 
1226 		if (!skb)
1227 			continue;
1228 
1229 		dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
1230 				 tx_desc->data_size, DMA_TO_DEVICE);
1231 		dev_kfree_skb_any(skb);
1232 	}
1233 }
1234 
1235 /* Handle end of transmission */
1236 static int mvneta_txq_done(struct mvneta_port *pp,
1237 			   struct mvneta_tx_queue *txq)
1238 {
1239 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1240 	int tx_done;
1241 
1242 	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1243 	if (tx_done == 0)
1244 		return tx_done;
1245 	mvneta_txq_bufs_free(pp, txq, tx_done);
1246 
1247 	txq->count -= tx_done;
1248 
1249 	if (netif_tx_queue_stopped(nq)) {
1250 		if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
1251 			netif_tx_wake_queue(nq);
1252 	}
1253 
1254 	return tx_done;
1255 }
1256 
1257 /* Refill processing */
1258 static int mvneta_rx_refill(struct mvneta_port *pp,
1259 			    struct mvneta_rx_desc *rx_desc)
1260 
1261 {
1262 	dma_addr_t phys_addr;
1263 	struct sk_buff *skb;
1264 
1265 	skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
1266 	if (!skb)
1267 		return -ENOMEM;
1268 
1269 	phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
1270 				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
1271 				   DMA_FROM_DEVICE);
1272 	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1273 		dev_kfree_skb(skb);
1274 		return -ENOMEM;
1275 	}
1276 
1277 	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
1278 
1279 	return 0;
1280 }
1281 
1282 /* Handle tx checksum */
1283 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1284 {
1285 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1286 		int ip_hdr_len = 0;
1287 		u8 l4_proto;
1288 
1289 		if (skb->protocol == htons(ETH_P_IP)) {
1290 			struct iphdr *ip4h = ip_hdr(skb);
1291 
1292 			/* Calculate IPv4 checksum and L4 checksum */
1293 			ip_hdr_len = ip4h->ihl;
1294 			l4_proto = ip4h->protocol;
1295 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
1296 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
1297 
1298 			/* Read l4_protocol from one of IPv6 extra headers */
1299 			if (skb_network_header_len(skb) > 0)
1300 				ip_hdr_len = (skb_network_header_len(skb) >> 2);
1301 			l4_proto = ip6h->nexthdr;
1302 		} else
1303 			return MVNETA_TX_L4_CSUM_NOT;
1304 
1305 		return mvneta_txq_desc_csum(skb_network_offset(skb),
1306 				skb->protocol, ip_hdr_len, l4_proto);
1307 	}
1308 
1309 	return MVNETA_TX_L4_CSUM_NOT;
1310 }
1311 
1312 /* Returns rx queue pointer (find last set bit) according to causeRxTx
1313  * value
1314  */
1315 static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
1316 						u32 cause)
1317 {
1318 	int queue = fls(cause >> 8) - 1;
1319 
1320 	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
1321 }
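/* Example: cause = 0x100 (bit 8, i.e. RXQ 0) gives
 * fls(cause >> 8) - 1 = 0, selecting RXQ 0.
 */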
1322 
1323 /* Drop packets received by the RXQ and free buffers */
1324 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1325 				 struct mvneta_rx_queue *rxq)
1326 {
1327 	int rx_done, i;
1328 
1329 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1330 	for (i = 0; i < rxq->size; i++) {
1331 		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1332 		struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
1333 
1334 		dev_kfree_skb_any(skb);
1335 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1336 				 rx_desc->data_size, DMA_FROM_DEVICE);
1337 	}
1338 
1339 	if (rx_done)
1340 		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1341 }
1342 
1343 /* Main rx processing */
1344 static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1345 		     struct mvneta_rx_queue *rxq)
1346 {
1347 	struct net_device *dev = pp->dev;
1348 	int rx_done, rx_filled;
1349 
1350 	/* Get number of received packets */
1351 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1352 
1353 	if (rx_todo > rx_done)
1354 		rx_todo = rx_done;
1355 
1356 	rx_done = 0;
1357 	rx_filled = 0;
1358 
1359 	/* Fairness NAPI loop */
1360 	while (rx_done < rx_todo) {
1361 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1362 		struct sk_buff *skb;
1363 		u32 rx_status;
1364 		int rx_bytes, err;
1365 
1366 		prefetch(rx_desc);
1367 		rx_done++;
1368 		rx_filled++;
1369 		rx_status = rx_desc->status;
1370 		skb = (struct sk_buff *)rx_desc->buf_cookie;
1371 
1372 		if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
1373 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1374 			dev->stats.rx_errors++;
1375 			mvneta_rx_error(pp, rx_desc);
1376 			mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
1377 					    (u32)skb);
1378 			continue;
1379 		}
1380 
1381 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1382 				 rx_desc->data_size, DMA_FROM_DEVICE);
1383 
1384 		rx_bytes = rx_desc->data_size -
1385 			(ETH_FCS_LEN + MVNETA_MH_SIZE);
1386 		u64_stats_update_begin(&pp->rx_stats.syncp);
1387 		pp->rx_stats.packets++;
1388 		pp->rx_stats.bytes += rx_bytes;
1389 		u64_stats_update_end(&pp->rx_stats.syncp);
1390 
1391 		/* Linux processing */
1392 		skb_reserve(skb, MVNETA_MH_SIZE);
1393 		skb_put(skb, rx_bytes);
1394 
1395 		skb->protocol = eth_type_trans(skb, dev);
1396 
1397 		mvneta_rx_csum(pp, rx_desc, skb);
1398 
1399 		napi_gro_receive(&pp->napi, skb);
1400 
1401 		/* Refill processing */
1402 		err = mvneta_rx_refill(pp, rx_desc);
1403 		if (err) {
1404 			netdev_err(pp->dev, "Linux processing - Can't refill\n");
1405 			rxq->missed++;
1406 			rx_filled--;
1407 		}
1408 	}
1409 
1410 	/* Update rxq management counters */
1411 	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
1412 
1413 	return rx_done;
1414 }
1415 
1416 /* Handle tx fragmentation processing */
1417 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1418 				  struct mvneta_tx_queue *txq)
1419 {
1420 	struct mvneta_tx_desc *tx_desc;
1421 	int i;
1422 
1423 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1424 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1425 		void *addr = page_address(frag->page.p) + frag->page_offset;
1426 
1427 		tx_desc = mvneta_txq_next_desc_get(txq);
1428 		tx_desc->data_size = frag->size;
1429 
1430 		tx_desc->buf_phys_addr =
1431 			dma_map_single(pp->dev->dev.parent, addr,
1432 				       tx_desc->data_size, DMA_TO_DEVICE);
1433 
1434 		if (dma_mapping_error(pp->dev->dev.parent,
1435 				      tx_desc->buf_phys_addr)) {
1436 			mvneta_txq_desc_put(txq);
1437 			goto error;
1438 		}
1439 
1440 		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
1441 			/* Last descriptor */
1442 			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1443 
1444 			txq->tx_skb[txq->txq_put_index] = skb;
1445 
1446 			mvneta_txq_inc_put(txq);
1447 		} else {
1448 			/* Descriptor in the middle: Not First, Not Last */
1449 			tx_desc->command = 0;
1450 
1451 			txq->tx_skb[txq->txq_put_index] = NULL;
1452 			mvneta_txq_inc_put(txq);
1453 		}
1454 	}
1455 
1456 	return 0;
1457 
1458 error:
1459 	/* Release all descriptors that were used to map fragments of
1460 	 * this packet, as well as the corresponding DMA mappings
1461 	 */
1462 	for (i = i - 1; i >= 0; i--) {
1463 		tx_desc = txq->descs + i;
1464 		dma_unmap_single(pp->dev->dev.parent,
1465 				 tx_desc->buf_phys_addr,
1466 				 tx_desc->data_size,
1467 				 DMA_TO_DEVICE);
1468 		mvneta_txq_desc_put(txq);
1469 	}
1470 
1471 	return -ENOMEM;
1472 }
1473 
1474 /* Main tx processing */
1475 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1476 {
1477 	struct mvneta_port *pp = netdev_priv(dev);
1478 	struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
1479 	struct mvneta_tx_desc *tx_desc;
1480 	struct netdev_queue *nq;
1481 	int frags = 0;
1482 	u32 tx_cmd;
1483 
1484 	if (!netif_running(dev))
1485 		goto out;
1486 
1487 	frags = skb_shinfo(skb)->nr_frags + 1;
1488 	nq    = netdev_get_tx_queue(dev, txq_def);
1489 
1490 	/* Get a descriptor for the first part of the packet */
1491 	tx_desc = mvneta_txq_next_desc_get(txq);
1492 
1493 	tx_cmd = mvneta_skb_tx_csum(pp, skb);
1494 
1495 	tx_desc->data_size = skb_headlen(skb);
1496 
1497 	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1498 						tx_desc->data_size,
1499 						DMA_TO_DEVICE);
1500 	if (unlikely(dma_mapping_error(dev->dev.parent,
1501 				       tx_desc->buf_phys_addr))) {
1502 		mvneta_txq_desc_put(txq);
1503 		frags = 0;
1504 		goto out;
1505 	}
1506 
1507 	if (frags == 1) {
1508 		/* First and Last descriptor */
1509 		tx_cmd |= MVNETA_TXD_FLZ_DESC;
1510 		tx_desc->command = tx_cmd;
1511 		txq->tx_skb[txq->txq_put_index] = skb;
1512 		mvneta_txq_inc_put(txq);
1513 	} else {
1514 		/* First but not Last */
1515 		tx_cmd |= MVNETA_TXD_F_DESC;
1516 		txq->tx_skb[txq->txq_put_index] = NULL;
1517 		mvneta_txq_inc_put(txq);
1518 		tx_desc->command = tx_cmd;
1519 		/* Continue with other skb fragments */
1520 		if (mvneta_tx_frag_process(pp, skb, txq)) {
1521 			dma_unmap_single(dev->dev.parent,
1522 					 tx_desc->buf_phys_addr,
1523 					 tx_desc->data_size,
1524 					 DMA_TO_DEVICE);
1525 			mvneta_txq_desc_put(txq);
1526 			frags = 0;
1527 			goto out;
1528 		}
1529 	}
1530 
1531 	txq->count += frags;
1532 	mvneta_txq_pend_desc_add(pp, txq, frags);
1533 
1534 	if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
1535 		netif_tx_stop_queue(nq);
1536 
1537 out:
1538 	if (frags > 0) {
1539 		u64_stats_update_begin(&pp->tx_stats.syncp);
1540 		pp->tx_stats.packets++;
1541 		pp->tx_stats.bytes += skb->len;
1542 		u64_stats_update_end(&pp->tx_stats.syncp);
1543 
1544 	} else {
1545 		dev->stats.tx_dropped++;
1546 		dev_kfree_skb_any(skb);
1547 	}
1548 
1549 	if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
1550 		mvneta_txq_done(pp, txq);
1551 
1552 	/* If after calling mvneta_txq_done, count equals
1553 	 * frags, we need to set the timer
1554 	 */
1555 	if (txq->count == frags && frags > 0)
1556 		mvneta_add_tx_done_timer(pp);
1557 
1558 	return NETDEV_TX_OK;
1559 }
1560 
1561 
1562 /* Free tx resources, when resetting a port */
1563 static void mvneta_txq_done_force(struct mvneta_port *pp,
1564 				  struct mvneta_tx_queue *txq)
1565 
1566 {
1567 	int tx_done = txq->count;
1568 
1569 	mvneta_txq_bufs_free(pp, txq, tx_done);
1570 
1571 	/* reset txq */
1572 	txq->count = 0;
1573 	txq->txq_put_index = 0;
1574 	txq->txq_get_index = 0;
1575 }
1576 
1577 /* handle tx done - called from tx done timer callback */
1578 static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
1579 			      int *tx_todo)
1580 {
1581 	struct mvneta_tx_queue *txq;
1582 	u32 tx_done = 0;
1583 	struct netdev_queue *nq;
1584 
1585 	*tx_todo = 0;
1586 	while (cause_tx_done != 0) {
1587 		txq = mvneta_tx_done_policy(pp, cause_tx_done);
1588 		if (!txq)
1589 			break;
1590 
1591 		nq = netdev_get_tx_queue(pp->dev, txq->id);
1592 		__netif_tx_lock(nq, smp_processor_id());
1593 
1594 		if (txq->count) {
1595 			tx_done += mvneta_txq_done(pp, txq);
1596 			*tx_todo += txq->count;
1597 		}
1598 
1599 		__netif_tx_unlock(nq);
1600 		cause_tx_done &= ~((1 << txq->id));
1601 	}
1602 
1603 	return tx_done;
1604 }
1605 
1606 /* Compute the CRC-8 of the specified address, using an algorithm
1607  * unique to the HW spec and different from the generic crc8 algorithm.
1608  */
1609 static int mvneta_addr_crc(unsigned char *addr)
1610 {
1611 	int crc = 0;
1612 	int i;
1613 
1614 	for (i = 0; i < ETH_ALEN; i++) {
1615 		int j;
1616 
1617 		crc = (crc ^ addr[i]) << 8;
1618 		for (j = 7; j >= 0; j--) {
1619 			if (crc & (0x100 << j))
1620 				crc ^= 0x107 << j;
1621 		}
1622 	}
1623 
1624 	return crc;
1625 }
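/* The 0x107 reduction term corresponds to the CRC polynomial
 * x^8 + x^2 + x + 1 (0x107 = 1 0000 0111 in binary).
 */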
1626 
1627 /* This method controls the net device special MAC multicast support.
1628  * The Special Multicast Table for MAC addresses supports MACs of the
1629  * form 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1630  * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1631  * Table entries in the DA-Filter table. This method sets the
1632  * appropriate Special Multicast Table entry.
1633  */
1634 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1635 					  unsigned char last_byte,
1636 					  int queue)
1637 {
1638 	unsigned int smc_table_reg;
1639 	unsigned int tbl_offset;
1640 	unsigned int reg_offset;
1641 
1642 	/* Register offset from SMC table base    */
1643 	tbl_offset = (last_byte / 4);
1644 	/* Entry offset within the above reg */
1645 	reg_offset = last_byte % 4;
1646 
1647 	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1648 					+ tbl_offset * 4));
1649 
1650 	if (queue == -1)
1651 		smc_table_reg &= ~(0xff << (8 * reg_offset));
1652 	else {
1653 		smc_table_reg &= ~(0xff << (8 * reg_offset));
1654 		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1655 	}
1656 
1657 	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
1658 		    smc_table_reg);
1659 }
1660 
1661 /* This method controls the network device Other MAC multicast support.
1662  * The Other Multicast Table is used for multicast addresses of any
1663  * other type. A CRC-8 value is used as an index to the Other
1664  * Multicast Table entries in the DA-Filter table.
1665  * The method gets the CRC-8 value from the calling routine and sets
1666  * the appropriate Other Multicast Table entry according to the
1667  * specified CRC-8.
1668  */
1669 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1670 					unsigned char crc8,
1671 					int queue)
1672 {
1673 	unsigned int omc_table_reg;
1674 	unsigned int tbl_offset;
1675 	unsigned int reg_offset;
1676 
1677 	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1678 	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */
1679 
1680 	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1681 
1682 	if (queue == -1) {
1683 		/* Clear accepts frame bit at specified Other DA table entry */
1684 		omc_table_reg &= ~(0xff << (8 * reg_offset));
1685 	} else {
1686 		omc_table_reg &= ~(0xff << (8 * reg_offset));
1687 		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1688 	}
1689 
1690 	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
1691 }
1692 
1693 /* The network device supports multicast using two tables:
1694  *    1) Special Multicast Table for MAC addresses of the form
1695  *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1696  *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1697  *       Table entries in the DA-Filter table.
1698  *    2) Other Multicast Table for multicast of another type. A CRC-8 value
1699  *       is used as an index to the Other Multicast Table entries in the
1700  *       DA-Filter table.
1701  */
1702 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1703 				 int queue)
1704 {
1705 	unsigned char crc_result = 0;
1706 
1707 	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1708 		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1709 		return 0;
1710 	}
1711 
1712 	crc_result = mvneta_addr_crc(p_addr);
1713 	if (queue == -1) {
1714 		if (pp->mcast_count[crc_result] == 0) {
1715 			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1716 				    crc_result);
1717 			return -EINVAL;
1718 		}
1719 
1720 		pp->mcast_count[crc_result]--;
1721 		if (pp->mcast_count[crc_result] != 0) {
1722 			netdev_info(pp->dev,
1723 				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
1724 				    pp->mcast_count[crc_result], crc_result);
1725 			return -EINVAL;
1726 		}
1727 	} else
1728 		pp->mcast_count[crc_result]++;
1729 
1730 	mvneta_set_other_mcast_addr(pp, crc_result, queue);
1731 
1732 	return 0;
1733 }
1734 
1735 /* Configure the filtering mode of the Ethernet port */
1736 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
1737 					  int is_promisc)
1738 {
1739 	u32 port_cfg_reg, val;
1740 
1741 	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
1742 
1743 	val = mvreg_read(pp, MVNETA_TYPE_PRIO);
1744 
1745 	/* Set / Clear UPM bit in port configuration register */
1746 	if (is_promisc) {
1747 		/* Accept all Unicast addresses */
1748 		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
1749 		val |= MVNETA_FORCE_UNI;
1750 		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
1751 		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
1752 	} else {
1753 		/* Reject all Unicast addresses */
1754 		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
1755 		val &= ~MVNETA_FORCE_UNI;
1756 	}
1757 
1758 	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
1759 	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
1760 }
1761 
1762 /* register unicast and multicast addresses */
1763 static void mvneta_set_rx_mode(struct net_device *dev)
1764 {
1765 	struct mvneta_port *pp = netdev_priv(dev);
1766 	struct netdev_hw_addr *ha;
1767 
1768 	if (dev->flags & IFF_PROMISC) {
1769 		/* Accept all: Multicast + Unicast */
1770 		mvneta_rx_unicast_promisc_set(pp, 1);
1771 		mvneta_set_ucast_table(pp, rxq_def);
1772 		mvneta_set_special_mcast_table(pp, rxq_def);
1773 		mvneta_set_other_mcast_table(pp, rxq_def);
1774 	} else {
1775 		/* Accept single Unicast */
1776 		mvneta_rx_unicast_promisc_set(pp, 0);
1777 		mvneta_set_ucast_table(pp, -1);
1778 		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
1779 
1780 		if (dev->flags & IFF_ALLMULTI) {
1781 			/* Accept all multicast */
1782 			mvneta_set_special_mcast_table(pp, rxq_def);
1783 			mvneta_set_other_mcast_table(pp, rxq_def);
1784 		} else {
1785 			/* Accept only initialized multicast */
1786 			mvneta_set_special_mcast_table(pp, -1);
1787 			mvneta_set_other_mcast_table(pp, -1);
1788 
1789 			if (!netdev_mc_empty(dev)) {
1790 				netdev_for_each_mc_addr(ha, dev) {
1791 					mvneta_mcast_addr_set(pp, ha->addr,
1792 							      rxq_def);
1793 				}
1794 			}
1795 		}
1796 	}
1797 }
1798 
1799 /* Interrupt handling - the callback for request_irq() */
1800 static irqreturn_t mvneta_isr(int irq, void *dev_id)
1801 {
1802 	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
1803 
1804 	/* Mask all interrupts */
1805 	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
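	/* RX interrupts stay masked until mvneta_poll() has processed
	 * the pending work within its budget and re-enables them.
	 */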
1806 
1807 	napi_schedule(&pp->napi);
1808 
1809 	return IRQ_HANDLED;
1810 }
1811 
1812 /* NAPI handler
1813  * Bits 0 - 7 of the causeRxTx register indicate that packets were
1814  * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
1815  * Bits 8 - 15 of the causeRxTx register indicate that packets were
1816  * received on the corresponding RXQ (Bit 8 is for RX queue 0).
1817  * Each CPU has its own causeRxTx register.
1818  */
1819 static int mvneta_poll(struct napi_struct *napi, int budget)
1820 {
1821 	int rx_done = 0;
1822 	u32 cause_rx_tx;
1823 	unsigned long flags;
1824 	struct mvneta_port *pp = netdev_priv(napi->dev);
1825 
1826 	if (!netif_running(pp->dev)) {
1827 		napi_complete(napi);
1828 		return rx_done;
1829 	}
1830 
1831 	/* Read cause register */
1832 	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
1833 		MVNETA_RX_INTR_MASK(rxq_number);
1834 
1835 	/* For the case where the last mvneta_poll did not process all
1836 	 * RX packets
1837 	 */
1838 	cause_rx_tx |= pp->cause_rx_tx;
1839 	if (rxq_number > 1) {
1840 		while ((cause_rx_tx != 0) && (budget > 0)) {
1841 			int count;
1842 			struct mvneta_rx_queue *rxq;
1843 			/* get rx queue number from cause_rx_tx */
1844 			rxq = mvneta_rx_policy(pp, cause_rx_tx);
1845 			if (!rxq)
1846 				break;
1847 
1848 			/* process the packet in that rx queue */
1849 			count = mvneta_rx(pp, budget, rxq);
1850 			rx_done += count;
1851 			budget -= count;
1852 			if (budget > 0) {
1853 				/* Clear the bit corresponding to this
1854 				 * RXQ in the cause register, so that
1855 				 * the next iteration finds the next
1856 				 * RXQ on which packets have been
1857 				 * received, if any.
1858 				 */
1859 				cause_rx_tx &= ~((1 << rxq->id) << 8);
1860 			}
1861 		}
1862 	} else {
1863 		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
1864 		budget -= rx_done;
1865 	}
1866 
1867 	if (budget > 0) {
1868 		cause_rx_tx = 0;
1869 		napi_complete(napi);
1870 		local_irq_save(flags);
1871 		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1872 			    MVNETA_RX_INTR_MASK(rxq_number));
1873 		local_irq_restore(flags);
1874 	}
1875 
1876 	pp->cause_rx_tx = cause_rx_tx;
1877 	return rx_done;
1878 }
1879 
1880 /* tx done timer callback */
1881 static void mvneta_tx_done_timer_callback(unsigned long data)
1882 {
1883 	struct net_device *dev = (struct net_device *)data;
1884 	struct mvneta_port *pp = netdev_priv(dev);
1885 	int tx_done = 0, tx_todo = 0;
1886 
1887 	if (!netif_running(dev))
1888 		return;
1889 
1890 	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
1891 
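	/* Process TX-done on every TXQ: a bitmap with one bit per
	 * queue, bounded by the hardware's sent-descriptor mask.
	 */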
1892 	tx_done = mvneta_tx_done_gbe(pp,
1893 				     (((1 << txq_number) - 1) &
1894 				      MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
1895 				     &tx_todo);
1896 	if (tx_todo > 0)
1897 		mvneta_add_tx_done_timer(pp);
1898 }
1899 
1900 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
1901 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
1902 			   int num)
1903 {
1904 	struct net_device *dev = pp->dev;
1905 	int i;
1906 
1907 	for (i = 0; i < num; i++) {
1908 		struct sk_buff *skb;
1909 		struct mvneta_rx_desc *rx_desc;
1910 		unsigned long phys_addr;
1911 
1912 		skb = dev_alloc_skb(pp->pkt_size);
1913 		if (!skb) {
1914 			netdev_err(dev, "%s: rxq %d, %d of %d buffs filled\n",
1915 				   __func__, rxq->id, i, num);
1916 			break;
1917 		}
1918 
1919 		rx_desc = rxq->descs + i;
1920 		memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
1921 		phys_addr = dma_map_single(dev->dev.parent, skb->head,
1922 					   MVNETA_RX_BUF_SIZE(pp->pkt_size),
1923 					   DMA_FROM_DEVICE);
1924 		if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
1925 			dev_kfree_skb(skb);
1926 			break;
1927 		}
1928 
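		/* Stash the skb pointer in the descriptor as a 32-bit
		 * cookie, to be retrieved at RX time (the Armada
		 * 370/XP SoCs are 32-bit).
		 */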
1929 		mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
1930 	}
1931 
1932 	/* Add this number of RX descriptors as non occupied (ready to
1933 	 * get packets)
1934 	 */
1935 	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
1936 
1937 	return i;
1938 }
1939 
1940 /* Free all packets pending transmit from all TXQs and reset TX port */
1941 static void mvneta_tx_reset(struct mvneta_port *pp)
1942 {
1943 	int queue;
1944 
1945 	/* free the skbs in the HAL TX ring */
1946 	for (queue = 0; queue < txq_number; queue++)
1947 		mvneta_txq_done_force(pp, &pp->txqs[queue]);
1948 
1949 	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1950 	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1951 }
1952 
1953 static void mvneta_rx_reset(struct mvneta_port *pp)
1954 {
1955 	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1956 	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1957 }
1958 
1959 /* Rx/Tx queue initialization/cleanup methods */
1960 
1961 /* Create a specified RX queue */
1962 static int mvneta_rxq_init(struct mvneta_port *pp,
1963 			   struct mvneta_rx_queue *rxq)
1965 {
1966 	rxq->size = pp->rx_ring_size;
1967 
1968 	/* Allocate memory for RX descriptors */
1969 	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
1970 					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
1971 					&rxq->descs_phys, GFP_KERNEL);
1972 	if (rxq->descs == NULL) {
1973 		netdev_err(pp->dev,
1974 			   "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
1975 			   rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
1976 			   rxq->size);
1977 		return -ENOMEM;
1978 	}
1979 
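	/* Make sure descriptor address is cache line size aligned */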
1980 	BUG_ON(rxq->descs !=
1981 	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
1982 
1983 	rxq->last_desc = rxq->size - 1;
1984 
1985 	/* Set Rx descriptors queue starting address */
1986 	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
1987 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
1988 
1989 	/* Set Offset */
1990 	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
1991 
1992 	/* Set coalescing pkts and time */
1993 	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
1994 	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
1995 
1996 	/* Fill RXQ with buffers from RX pool */
1997 	mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
1998 	mvneta_rxq_bm_disable(pp, rxq);
1999 	mvneta_rxq_fill(pp, rxq, rxq->size);
2000 
2001 	return 0;
2002 }
2003 
2004 /* Cleanup Rx queue */
2005 static void mvneta_rxq_deinit(struct mvneta_port *pp,
2006 			      struct mvneta_rx_queue *rxq)
2007 {
2008 	mvneta_rxq_drop_pkts(pp, rxq);
2009 
2010 	if (rxq->descs)
2011 		dma_free_coherent(pp->dev->dev.parent,
2012 				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2013 				  rxq->descs,
2014 				  rxq->descs_phys);
2015 
2016 	rxq->descs             = NULL;
2017 	rxq->last_desc         = 0;
2018 	rxq->next_desc_to_proc = 0;
2019 	rxq->descs_phys        = 0;
2020 }
2021 
2022 /* Create and initialize a tx queue */
2023 static int mvneta_txq_init(struct mvneta_port *pp,
2024 			   struct mvneta_tx_queue *txq)
2025 {
2026 	txq->size = pp->tx_ring_size;
2027 
2028 	/* Allocate memory for TX descriptors */
2029 	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2030 					txq->size * MVNETA_DESC_ALIGNED_SIZE,
2031 					&txq->descs_phys, GFP_KERNEL);
2032 	if (txq->descs == NULL) {
2033 		netdev_err(pp->dev,
2034 			   "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
2035 			   txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
2036 			   txq->size);
2037 		return -ENOMEM;
2038 	}
2039 
2040 	/* Make sure descriptor address is cache line size aligned  */
2041 	BUG_ON(txq->descs !=
2042 	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2043 
2044 	txq->last_desc = txq->size - 1;
2045 
2046 	/* Set maximum bandwidth for enabled TXQs */
2047 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2048 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2049 
2050 	/* Set Tx descriptors queue starting address */
2051 	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2052 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2053 
2054 	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2055 	if (txq->tx_skb == NULL) {
2056 		dma_free_coherent(pp->dev->dev.parent,
2057 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2058 				  txq->descs, txq->descs_phys);
2059 		return -ENOMEM;
2060 	}
2061 	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2062 
2063 	return 0;
2064 }
2065 
2066 /* Free a TX queue's allocated resources and reset its registers */
2067 static void mvneta_txq_deinit(struct mvneta_port *pp,
2068 			      struct mvneta_tx_queue *txq)
2069 {
2070 	kfree(txq->tx_skb);
2071 
2072 	if (txq->descs)
2073 		dma_free_coherent(pp->dev->dev.parent,
2074 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2075 				  txq->descs, txq->descs_phys);
2076 
2077 	txq->descs             = NULL;
2078 	txq->last_desc         = 0;
2079 	txq->next_desc_to_proc = 0;
2080 	txq->descs_phys        = 0;
2081 
2082 	/* Set minimum bandwidth for disabled TXQs */
2083 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2084 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2085 
2086 	/* Set Tx descriptors queue starting address and size */
2087 	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2088 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2089 }
2090 
2091 /* Cleanup all Tx queues */
2092 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2093 {
2094 	int queue;
2095 
2096 	for (queue = 0; queue < txq_number; queue++)
2097 		mvneta_txq_deinit(pp, &pp->txqs[queue]);
2098 }
2099 
2100 /* Cleanup all Rx queues */
2101 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2102 {
2103 	int queue;
2104 
2105 	for (queue = 0; queue < rxq_number; queue++)
2106 		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
2107 }
2108 
2110 /* Init all Rx queues */
2111 static int mvneta_setup_rxqs(struct mvneta_port *pp)
2112 {
2113 	int queue;
2114 
2115 	for (queue = 0; queue < rxq_number; queue++) {
2116 		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2117 		if (err) {
2118 			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2119 				   __func__, queue);
2120 			mvneta_cleanup_rxqs(pp);
2121 			return err;
2122 		}
2123 	}
2124 
2125 	return 0;
2126 }
2127 
2128 /* Init all tx queues */
2129 static int mvneta_setup_txqs(struct mvneta_port *pp)
2130 {
2131 	int queue;
2132 
2133 	for (queue = 0; queue < txq_number; queue++) {
2134 		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2135 		if (err) {
2136 			netdev_err(pp->dev, "%s: can't create txq=%d\n",
2137 				   __func__, queue);
2138 			mvneta_cleanup_txqs(pp);
2139 			return err;
2140 		}
2141 	}
2142 
2143 	return 0;
2144 }
2145 
2146 static void mvneta_start_dev(struct mvneta_port *pp)
2147 {
2148 	mvneta_max_rx_size_set(pp, pp->pkt_size);
2149 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2150 
2151 	/* start the Rx/Tx activity */
2152 	mvneta_port_enable(pp);
2153 
2154 	/* Enable polling on the port */
2155 	napi_enable(&pp->napi);
2156 
2157 	/* Unmask interrupts */
2158 	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2159 		    MVNETA_RX_INTR_MASK(rxq_number));
2160 
2161 	phy_start(pp->phy_dev);
2162 	netif_tx_start_all_queues(pp->dev);
2163 }
2164 
2165 static void mvneta_stop_dev(struct mvneta_port *pp)
2166 {
2167 	phy_stop(pp->phy_dev);
2168 
2169 	napi_disable(&pp->napi);
2170 
2171 	netif_carrier_off(pp->dev);
2172 
2173 	mvneta_port_down(pp);
2174 	netif_tx_stop_all_queues(pp->dev);
2175 
2176 	/* Stop the port activity */
2177 	mvneta_port_disable(pp);
2178 
2179 	/* Clear all ethernet port interrupts */
2180 	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2181 	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2182 
2183 	/* Mask all ethernet port interrupts */
2184 	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2185 	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2186 	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2187 
2188 	mvneta_tx_reset(pp);
2189 	mvneta_rx_reset(pp);
2190 }
2191 
2192 /* tx timeout callback - display a message and stop/start the network device */
2193 static void mvneta_tx_timeout(struct net_device *dev)
2194 {
2195 	struct mvneta_port *pp = netdev_priv(dev);
2196 
2197 	netdev_info(dev, "tx timeout\n");
2198 	mvneta_stop_dev(pp);
2199 	mvneta_start_dev(pp);
2200 }
2201 
2202 /* Return the validated (possibly adjusted) MTU, or a negative error code */
2203 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2204 {
2205 	if (mtu < 68) {
2206 		netdev_err(dev, "cannot change mtu to less than 68\n");
2207 		return -EINVAL;
2208 	}
2209 
2210 	/* 9676 == 9700 - 20 and rounding to 8 */
2211 	if (mtu > 9676) {
2212 		netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
2213 		mtu = 9676;
2214 	}
2215 
2216 	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2217 		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2218 			mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2219 		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2220 	}
2221 
2222 	return mtu;
2223 }
2224 
2225 /* Change the device mtu */
2226 static int mvneta_change_mtu(struct net_device *dev, int mtu)
2227 {
2228 	struct mvneta_port *pp = netdev_priv(dev);
2229 	int ret;
2230 
2231 	mtu = mvneta_check_mtu_valid(dev, mtu);
2232 	if (mtu < 0)
2233 		return -EINVAL;
2234 
2235 	dev->mtu = mtu;
2236 
2237 	if (!netif_running(dev))
2238 		return 0;
2239 
2240 	/* The interface is running, so we have to force a
2241 	 * reallocation of the RXQs
2242 	 */
2243 	mvneta_stop_dev(pp);
2244 
2245 	mvneta_cleanup_txqs(pp);
2246 	mvneta_cleanup_rxqs(pp);
2247 
2248 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2249 
2250 	ret = mvneta_setup_rxqs(pp);
2251 	if (ret) {
2252 		netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
2253 		return ret;
2254 	}
2255 
2256 	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(pp->dev, "unable to setup txqs after MTU change\n");
		return ret;
	}
2257 
2258 	mvneta_start_dev(pp);
2259 	mvneta_port_up(pp);
2260 
2261 	return 0;
2262 }
2263 
2264 /* Handle setting mac address */
2265 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2266 {
2267 	struct mvneta_port *pp = netdev_priv(dev);
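	/* addr points to a struct sockaddr: skip the two-byte
	 * sa_family field to reach the MAC address itself.
	 */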
2268 	u8 *mac = addr + 2;
2269 	int i;
2270 
2271 	if (netif_running(dev))
2272 		return -EBUSY;
2273 
2274 	/* Remove previous address table entry */
2275 	mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2276 
2277 	/* Set new addr in hw */
2278 	mvneta_mac_addr_set(pp, mac, rxq_def);
2279 
2280 	/* Set addr in the device */
2281 	for (i = 0; i < ETH_ALEN; i++)
2282 		dev->dev_addr[i] = mac[i];
2283 
2284 	return 0;
2285 }
2286 
2287 static void mvneta_adjust_link(struct net_device *ndev)
2288 {
2289 	struct mvneta_port *pp = netdev_priv(ndev);
2290 	struct phy_device *phydev = pp->phy_dev;
2291 	int status_change = 0;
2292 
2293 	if (phydev->link) {
2294 		if ((pp->speed != phydev->speed) ||
2295 		    (pp->duplex != phydev->duplex)) {
2296 			u32 val;
2297 
2298 			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2299 			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2300 				 MVNETA_GMAC_CONFIG_GMII_SPEED |
2301 				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2302 
2303 			if (phydev->duplex)
2304 				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2305 
2306 			if (phydev->speed == SPEED_1000)
2307 				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2308 			else
2309 				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2310 
2311 			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2312 
2313 			pp->duplex = phydev->duplex;
2314 			pp->speed  = phydev->speed;
2315 		}
2316 	}
2317 
2318 	if (phydev->link != pp->link) {
2319 		if (!phydev->link) {
2320 			pp->duplex = -1;
2321 			pp->speed = 0;
2322 		}
2323 
2324 		pp->link = phydev->link;
2325 		status_change = 1;
2326 	}
2327 
2328 	if (status_change) {
2329 		if (phydev->link) {
2330 			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2331 			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
2332 				MVNETA_GMAC_FORCE_LINK_DOWN);
2333 			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2334 			mvneta_port_up(pp);
2335 			netdev_info(pp->dev, "link up\n");
2336 		} else {
2337 			mvneta_port_down(pp);
2338 			netdev_info(pp->dev, "link down\n");
2339 		}
2340 	}
2341 }
2342 
2343 static int mvneta_mdio_probe(struct mvneta_port *pp)
2344 {
2345 	struct phy_device *phy_dev;
2346 
2347 	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2348 				 pp->phy_interface);
2349 	if (!phy_dev) {
2350 		netdev_err(pp->dev, "could not find the PHY\n");
2351 		return -ENODEV;
2352 	}
2353 
2354 	phy_dev->supported &= PHY_GBIT_FEATURES;
2355 	phy_dev->advertising = phy_dev->supported;
2356 
2357 	pp->phy_dev = phy_dev;
2358 	pp->link    = 0;
2359 	pp->duplex  = 0;
2360 	pp->speed   = 0;
2361 
2362 	return 0;
2363 }
2364 
2365 static void mvneta_mdio_remove(struct mvneta_port *pp)
2366 {
2367 	phy_disconnect(pp->phy_dev);
2368 	pp->phy_dev = NULL;
2369 }
2370 
2371 static int mvneta_open(struct net_device *dev)
2372 {
2373 	struct mvneta_port *pp = netdev_priv(dev);
2374 	int ret;
2375 
2376 	mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2377 
2378 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2379 
2380 	ret = mvneta_setup_rxqs(pp);
2381 	if (ret)
2382 		return ret;
2383 
2384 	ret = mvneta_setup_txqs(pp);
2385 	if (ret)
2386 		goto err_cleanup_rxqs;
2387 
2388 	/* Connect to port interrupt line */
2389 	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
2390 			  MVNETA_DRIVER_NAME, pp);
2391 	if (ret) {
2392 		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2393 		goto err_cleanup_txqs;
2394 	}
2395 
2396 	/* The link is down by default */
2397 	netif_carrier_off(pp->dev);
2398 
2399 	ret = mvneta_mdio_probe(pp);
2400 	if (ret < 0) {
2401 		netdev_err(dev, "cannot probe MDIO bus\n");
2402 		goto err_free_irq;
2403 	}
2404 
2405 	mvneta_start_dev(pp);
2406 
2407 	return 0;
2408 
2409 err_free_irq:
2410 	free_irq(pp->dev->irq, pp);
2411 err_cleanup_txqs:
2412 	mvneta_cleanup_txqs(pp);
2413 err_cleanup_rxqs:
2414 	mvneta_cleanup_rxqs(pp);
2415 	return ret;
2416 }
2417 
2418 /* Stop the port, free port interrupt line */
2419 static int mvneta_stop(struct net_device *dev)
2420 {
2421 	struct mvneta_port *pp = netdev_priv(dev);
2422 
2423 	mvneta_stop_dev(pp);
2424 	mvneta_mdio_remove(pp);
2425 	free_irq(dev->irq, pp);
2426 	mvneta_cleanup_rxqs(pp);
2427 	mvneta_cleanup_txqs(pp);
2428 	del_timer(&pp->tx_done_timer);
2429 	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2430 
2431 	return 0;
2432 }
2433 
2434 /* Ethtool methods */
2435 
2436 /* Get settings (phy address, speed) for ethtool */
2437 static int mvneta_ethtool_get_settings(struct net_device *dev,
				       struct ethtool_cmd *cmd)
2438 {
2439 	struct mvneta_port *pp = netdev_priv(dev);
2440 
2441 	if (!pp->phy_dev)
2442 		return -ENODEV;
2443 
2444 	return phy_ethtool_gset(pp->phy_dev, cmd);
2445 }
2446 
2447 /* Set settings (phy address, speed) for ethtool */
2448 static int mvneta_ethtool_set_settings(struct net_device *dev,
				       struct ethtool_cmd *cmd)
2449 {
2450 	struct mvneta_port *pp = netdev_priv(dev);
2451 
2452 	if (!pp->phy_dev)
2453 		return -ENODEV;
2454 
2455 	return phy_ethtool_sset(pp->phy_dev, cmd);
2456 }
2457 
2458 /* Set interrupt coalescing for ethtool */
2459 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2460 				       struct ethtool_coalesce *c)
2461 {
2462 	struct mvneta_port *pp = netdev_priv(dev);
2463 	int queue;
2464 
2465 	for (queue = 0; queue < rxq_number; queue++) {
2466 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2467 		rxq->time_coal = c->rx_coalesce_usecs;
2468 		rxq->pkts_coal = c->rx_max_coalesced_frames;
2469 		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2470 		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2471 	}
2472 
2473 	for (queue = 0; queue < txq_number; queue++) {
2474 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
2475 		txq->done_pkts_coal = c->tx_max_coalesced_frames;
2476 		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2477 	}
2478 
2479 	return 0;
2480 }
2481 
2482 /* Get coalescing for ethtool */
2483 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2484 				       struct ethtool_coalesce *c)
2485 {
2486 	struct mvneta_port *pp = netdev_priv(dev);
2487 
2488 	c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
2489 	c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
2490 
2491 	c->tx_max_coalesced_frames =  pp->txqs[0].done_pkts_coal;
2492 	return 0;
2493 }
2494 
2496 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2497 				    struct ethtool_drvinfo *drvinfo)
2498 {
2499 	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2500 		sizeof(drvinfo->driver));
2501 	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2502 		sizeof(drvinfo->version));
2503 	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2504 		sizeof(drvinfo->bus_info));
2505 }
2506 
2508 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
2509 					 struct ethtool_ringparam *ring)
2510 {
2511 	struct mvneta_port *pp = netdev_priv(netdev);
2512 
2513 	ring->rx_max_pending = MVNETA_MAX_RXD;
2514 	ring->tx_max_pending = MVNETA_MAX_TXD;
2515 	ring->rx_pending = pp->rx_ring_size;
2516 	ring->tx_pending = pp->tx_ring_size;
2517 }
2518 
2519 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2520 					struct ethtool_ringparam *ring)
2521 {
2522 	struct mvneta_port *pp = netdev_priv(dev);
2523 
2524 	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
2525 		return -EINVAL;
2526 	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2527 		ring->rx_pending : MVNETA_MAX_RXD;
2528 	pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
2529 		ring->tx_pending : MVNETA_MAX_TXD;
2530 
2531 	if (netif_running(dev)) {
2532 		mvneta_stop(dev);
2533 		if (mvneta_open(dev)) {
2534 			netdev_err(dev,
2535 				   "error on opening device after ring param change\n");
2536 			return -ENOMEM;
2537 		}
2538 	}
2539 
2540 	return 0;
2541 }
2542 
2543 static const struct net_device_ops mvneta_netdev_ops = {
2544 	.ndo_open            = mvneta_open,
2545 	.ndo_stop            = mvneta_stop,
2546 	.ndo_start_xmit      = mvneta_tx,
2547 	.ndo_set_rx_mode     = mvneta_set_rx_mode,
2548 	.ndo_set_mac_address = mvneta_set_mac_addr,
2549 	.ndo_change_mtu      = mvneta_change_mtu,
2550 	.ndo_tx_timeout      = mvneta_tx_timeout,
2551 	.ndo_get_stats64     = mvneta_get_stats64,
2552 };
2553 
2554 static const struct ethtool_ops mvneta_eth_tool_ops = {
2555 	.get_link       = ethtool_op_get_link,
2556 	.get_settings   = mvneta_ethtool_get_settings,
2557 	.set_settings   = mvneta_ethtool_set_settings,
2558 	.set_coalesce   = mvneta_ethtool_set_coalesce,
2559 	.get_coalesce   = mvneta_ethtool_get_coalesce,
2560 	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
2561 	.get_ringparam  = mvneta_ethtool_get_ringparam,
2562 	.set_ringparam	= mvneta_ethtool_set_ringparam,
2563 };
2564 
2565 /* Initialize hw */
2566 static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2567 {
2568 	int queue;
2569 
2570 	/* Disable port */
2571 	mvneta_port_disable(pp);
2572 
2573 	/* Set port default values */
2574 	mvneta_defaults_set(pp);
2575 
2576 	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
2577 			   GFP_KERNEL);
2578 	if (!pp->txqs)
2579 		return -ENOMEM;
2580 
2581 	/* Initialize TX descriptor rings */
2582 	for (queue = 0; queue < txq_number; queue++) {
2583 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
2584 		txq->id = queue;
2585 		txq->size = pp->tx_ring_size;
2586 		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2587 	}
2588 
2589 	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
2590 			   GFP_KERNEL);
2591 	if (!pp->rxqs) {
2592 		kfree(pp->txqs);
2593 		return -ENOMEM;
2594 	}
2595 
2596 	/* Create Rx descriptor rings */
2597 	for (queue = 0; queue < rxq_number; queue++) {
2598 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2599 		rxq->id = queue;
2600 		rxq->size = pp->rx_ring_size;
2601 		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
2602 		rxq->time_coal = MVNETA_RX_COAL_USEC;
2603 	}
2604 
2605 	return 0;
2606 }
2607 
2608 static void mvneta_deinit(struct mvneta_port *pp)
2609 {
2610 	kfree(pp->txqs);
2611 	kfree(pp->rxqs);
2612 }
2613 
2614 /* Platform glue: initialize decoding windows */
2615 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2616 				     const struct mbus_dram_target_info *dram)
2617 {
2618 	u32 win_enable;
2619 	u32 win_protect;
2620 	int i;
2621 
2622 	for (i = 0; i < 6; i++) {
2623 		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
2624 		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
2625 
2626 		if (i < 4)
2627 			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
2628 	}
2629 
2630 	win_enable = 0x3f;
2631 	win_protect = 0;
2632 
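	/* Open one decoding window per DRAM chip select */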
2633 	for (i = 0; i < dram->num_cs; i++) {
2634 		const struct mbus_dram_window *cs = dram->cs + i;
2635 		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
2636 			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
2637 
2638 		mvreg_write(pp, MVNETA_WIN_SIZE(i),
2639 			    (cs->size - 1) & 0xffff0000);
2640 
2641 		win_enable &= ~(1 << i);
2642 		win_protect |= 3 << (2 * i);
2643 	}
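	/* Only the window-enable mask is written back below;
	 * win_protect is computed but not used by this version of
	 * the driver.
	 */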
2644 
2645 	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
2646 }
2647 
2648 /* Power up the port */
2649 static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2650 {
2651 	u32 val;
2652 
2653 	/* MAC Cause register should be cleared */
2654 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2655 
2656 	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
2657 		mvneta_port_sgmii_config(pp);
2658 
2659 	mvneta_gmac_rgmii_set(pp, 1);
2660 
2661 	/* Cancel Port Reset */
2662 	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2663 	val &= ~MVNETA_GMAC2_PORT_RESET;
2664 	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
2665 
2666 	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
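	/* Busy-wait until the reset bit reads back as cleared */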
2667 		MVNETA_GMAC2_PORT_RESET) != 0)
2668 		continue;
2669 }
2670 
2671 /* Device initialization routine */
2672 static int mvneta_probe(struct platform_device *pdev)
2673 {
2674 	const struct mbus_dram_target_info *dram_target_info;
2675 	struct device_node *dn = pdev->dev.of_node;
2676 	struct device_node *phy_node;
2678 	struct mvneta_port *pp;
2679 	struct net_device *dev;
2680 	const char *mac_addr;
2681 	int phy_mode;
2682 	int err;
2683 
2684 	/* Our multiqueue support is not complete, so for now, only
2685 	 * allow the usage of the first RX queue
2686 	 */
2687 	if (rxq_def != 0) {
2688 		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
2689 		return -EINVAL;
2690 	}
2691 
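	/* The NETA unit has eight TX queues, hence eight subqueues */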
2692 	dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
2693 	if (!dev)
2694 		return -ENOMEM;
2695 
2696 	dev->irq = irq_of_parse_and_map(dn, 0);
2697 	if (dev->irq == 0) {
2698 		err = -EINVAL;
2699 		goto err_free_netdev;
2700 	}
2701 
2702 	phy_node = of_parse_phandle(dn, "phy", 0);
2703 	if (!phy_node) {
2704 		dev_err(&pdev->dev, "no associated PHY\n");
2705 		err = -ENODEV;
2706 		goto err_free_irq;
2707 	}
2708 
2709 	phy_mode = of_get_phy_mode(dn);
2710 	if (phy_mode < 0) {
2711 		dev_err(&pdev->dev, "incorrect phy-mode\n");
2712 		err = -EINVAL;
2713 		goto err_free_irq;
2714 	}
2715 
2716 	mac_addr = of_get_mac_address(dn);
2717 
2718 	if (!mac_addr || !is_valid_ether_addr(mac_addr))
2719 		eth_hw_addr_random(dev);
2720 	else
2721 		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
2722 
2723 	dev->tx_queue_len = MVNETA_MAX_TXD;
2724 	dev->watchdog_timeo = 5 * HZ;
2725 	dev->netdev_ops = &mvneta_netdev_ops;
2726 
2727 	SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
2728 
2729 	pp = netdev_priv(dev);
2730 
2731 	pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
2732 	init_timer(&pp->tx_done_timer);
2733 	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2734 
2735 	pp->weight = MVNETA_RX_POLL_WEIGHT;
2736 	pp->phy_node = phy_node;
2737 	pp->phy_interface = phy_mode;
2738 
2739 	pp->base = of_iomap(dn, 0);
2740 	if (pp->base == NULL) {
2741 		err = -ENOMEM;
2742 		goto err_free_irq;
2743 	}
2744 
2745 	pp->clk = devm_clk_get(&pdev->dev, NULL);
2746 	if (IS_ERR(pp->clk)) {
2747 		err = PTR_ERR(pp->clk);
2748 		goto err_unmap;
2749 	}
2750 
2751 	clk_prepare_enable(pp->clk);
2752 
2753 	pp->tx_done_timer.data = (unsigned long)dev;
2754 
2755 	pp->tx_ring_size = MVNETA_MAX_TXD;
2756 	pp->rx_ring_size = MVNETA_MAX_RXD;
2757 
2758 	pp->dev = dev;
2759 	SET_NETDEV_DEV(dev, &pdev->dev);
2760 
2761 	err = mvneta_init(pp);
2762 	if (err < 0) {
2763 		dev_err(&pdev->dev, "can't init eth hal\n");
2764 		goto err_clk;
2765 	}
2766 	mvneta_port_power_up(pp, phy_mode);
2767 
2768 	dram_target_info = mv_mbus_dram_info();
2769 	if (dram_target_info)
2770 		mvneta_conf_mbus_windows(pp, dram_target_info);
2771 
2772 	netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
2773 
2774 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2775 	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2776 	dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2777 	dev->priv_flags |= IFF_UNICAST_FLT;
2778 
2779 	err = register_netdev(dev);
2780 	if (err < 0) {
2781 		dev_err(&pdev->dev, "failed to register\n");
2782 		goto err_deinit;
2783 	}
2784 
2785 	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
2786 
2787 	platform_set_drvdata(pdev, pp->dev);
2788 
2789 	return 0;
2790 
2791 err_deinit:
2792 	mvneta_deinit(pp);
2793 err_clk:
2794 	clk_disable_unprepare(pp->clk);
2795 err_unmap:
2796 	iounmap(pp->base);
2797 err_free_irq:
2798 	irq_dispose_mapping(dev->irq);
2799 err_free_netdev:
2800 	free_netdev(dev);
2801 	return err;
2802 }
2803 
2804 /* Device removal routine */
2805 static int mvneta_remove(struct platform_device *pdev)
2806 {
2807 	struct net_device  *dev = platform_get_drvdata(pdev);
2808 	struct mvneta_port *pp = netdev_priv(dev);
2809 
2810 	unregister_netdev(dev);
2811 	mvneta_deinit(pp);
2812 	clk_disable_unprepare(pp->clk);
2813 	iounmap(pp->base);
2814 	irq_dispose_mapping(dev->irq);
2815 	free_netdev(dev);
2816 
2817 	platform_set_drvdata(pdev, NULL);
2818 
2819 	return 0;
2820 }
2821 
2822 static const struct of_device_id mvneta_match[] = {
2823 	{ .compatible = "marvell,armada-370-neta" },
2824 	{ }
2825 };
2826 MODULE_DEVICE_TABLE(of, mvneta_match);
2827 
2828 static struct platform_driver mvneta_driver = {
2829 	.probe = mvneta_probe,
2830 	.remove = mvneta_remove,
2831 	.driver = {
2832 		.name = MVNETA_DRIVER_NAME,
2833 		.of_match_table = mvneta_match,
2834 	},
2835 };
2836 
2837 module_platform_driver(mvneta_driver);
2838 
2839 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
2840 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
2841 MODULE_LICENSE("GPL");
2842 
2843 module_param(rxq_number, int, S_IRUGO);
2844 module_param(txq_number, int, S_IRUGO);
2845 
2846 module_param(rxq_def, int, S_IRUGO);
2847 module_param(txq_def, int, S_IRUGO);
2848